Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (108 commits)
  ehea: Fixing statistics
  bonding: Fix lockdep warning after bond_vlan_rx_register()
  tunnels: Fix tunnels change rcu protection
  caif-u5500: Build config for CAIF shared mem driver
  caif-u5500: CAIF shared memory mailbox interface
  caif-u5500: CAIF shared memory transport protocol
  caif-u5500: Adding shared memory include
  drivers/isdn: delete double assignment
  drivers/net/typhoon.c: delete double assignment
  drivers/net/sb1000.c: delete double assignment
  qlcnic: define valid vlan id range
  qlcnic: reduce rx ring size
  qlcnic: fix mac learning
  ehea: fix use after free
  inetpeer: __rcu annotations
  fib_rules: __rcu annotates ctarget
  tunnels: add __rcu annotations
  net: add __rcu annotations to protocol
  ipv4: add __rcu annotations to routes.c
  qlge: bugfix: Restoring the vlan setting.
  ...
Linus Torvalds 2010-10-27 18:28:00 -07:00
commit 22cdbd1d57
145 changed files with 4008 additions and 1822 deletions


@ -177,18 +177,6 @@ Doing it all yourself
A convenience function to print out the PHY status neatly.
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
Clear the PHY's interrupt, and configure which ones are allowed,
respectively. Currently only supports all on, or all off.
int phy_enable_interrupts(struct phy_device *phydev);
int phy_disable_interrupts(struct phy_device *phydev);
Functions which enable/disable PHY interrupts, clearing them
before and after, respectively.
int phy_start_interrupts(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
@ -213,12 +201,6 @@ Doing it all yourself
Fills the phydev structure with up-to-date information about the current
settings in the PHY.
void phy_sanitize_settings(struct phy_device *phydev)
Resolves differences between currently desired settings, and
supported settings for the given PHY device. Does not make
the changes in the hardware, though.
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
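A minimal sketch (an editor's aside, not part of this commit) of how a MAC driver of this era would use the interrupt helpers that remain documented above; the driver name, private structure, and phydev field are hypothetical:

#include <linux/netdevice.h>
#include <linux/phy.h>

struct mymac_priv {
	struct phy_device *phydev;	/* attached earlier via phy_connect() */
};

static int mymac_open(struct net_device *ndev)
{
	struct mymac_priv *priv = netdev_priv(ndev);
	int err;

	/* Request the PHY IRQ; interrupts are cleared before being enabled. */
	err = phy_start_interrupts(priv->phydev);
	if (err)
		return err;

	phy_start(priv->phydev);	/* start the PHY state machine */
	return 0;
}

static int mymac_close(struct net_device *ndev)
{
	struct mymac_priv *priv = netdev_priv(ndev);

	phy_stop(priv->phydev);
	phy_stop_interrupts(priv->phydev);	/* clear and free the PHY IRQ */
	return 0;
}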


@ -1736,9 +1736,10 @@ static int __devinit eni_do_init(struct atm_dev *dev)
eprom = (base+EPROM_SIZE-sizeof(struct midway_eprom));
if (readl(&eprom->magic) != ENI155_MAGIC) {
printk("\n");
printk(KERN_ERR KERN_ERR DEV_LABEL "(itf %d): bad "
"magic - expected 0x%x, got 0x%x\n",dev->number,
ENI155_MAGIC,(unsigned) readl(&eprom->magic));
printk(KERN_ERR DEV_LABEL
"(itf %d): bad magic - expected 0x%x, got 0x%x\n",
dev->number, ENI155_MAGIC,
(unsigned)readl(&eprom->magic));
error = -EINVAL;
goto unmap;
}


@ -31,48 +31,6 @@
#include <linux/connector.h>
#include <linux/delay.h>
/*
* This job is sent to the kevent workqueue.
* Until the first event has been sent to some callback, the connector
* workqueue is not created, to avoid a useless idle kernel task.
* Once the first event is received, we create this dedicated workqueue which
* is necessary because the flow of data can be high and we don't want
* to encumber keventd with that.
*/
static void cn_queue_create(struct work_struct *work)
{
struct cn_queue_dev *dev;
dev = container_of(work, struct cn_queue_dev, wq_creation);
dev->cn_queue = create_singlethread_workqueue(dev->name);
/* If we fail, we will use keventd for all following connector jobs */
WARN_ON(!dev->cn_queue);
}
/*
* Queue data to be sent to a callback.
* If the connector workqueue is already created, we queue the job on it.
* Otherwise, we queue the job to kevent and queue the connector workqueue
* creation too.
*/
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work)
{
struct cn_queue_dev *pdev = cbq->pdev;
if (likely(pdev->cn_queue))
return queue_work(pdev->cn_queue, work);
/* Don't create the connector workqueue twice */
if (atomic_inc_return(&pdev->wq_requested) == 1)
schedule_work(&pdev->wq_creation);
else
atomic_dec(&pdev->wq_requested);
return schedule_work(work);
}
void cn_queue_wrapper(struct work_struct *work)
{
struct cn_callback_entry *cbq =
@ -111,11 +69,7 @@ cn_queue_alloc_callback_entry(char *name, struct cb_id *id,
static void cn_queue_free_callback(struct cn_callback_entry *cbq)
{
/* The first jobs have been sent to kevent, flush them too */
flush_scheduled_work();
if (cbq->pdev->cn_queue)
flush_workqueue(cbq->pdev->cn_queue);
flush_workqueue(cbq->pdev->cn_queue);
kfree(cbq);
}
@ -193,11 +147,14 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
atomic_set(&dev->refcnt, 0);
INIT_LIST_HEAD(&dev->queue_list);
spin_lock_init(&dev->queue_lock);
init_waitqueue_head(&dev->wq_created);
dev->nls = nls;
INIT_WORK(&dev->wq_creation, cn_queue_create);
dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
if (!dev->cn_queue) {
kfree(dev);
return NULL;
}
return dev;
}
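For reference, a stand-alone sketch (names illustrative, not from the patch) of the allocation pattern the new code adopts: create the ordered workqueue eagerly and fail device allocation cleanly, instead of deferring creation to keventd as the removed code did:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_dev {
	struct workqueue_struct *wq;
};

static struct example_dev *example_alloc(const char *name)
{
	struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	/* One strictly ordered queue, created up front: no keventd
	 * fallback and no creation/flush races left to handle. */
	dev->wq = alloc_ordered_workqueue(name, 0);
	if (!dev->wq) {
		kfree(dev);
		return NULL;
	}
	return dev;
}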
@ -205,25 +162,9 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
void cn_queue_free_dev(struct cn_queue_dev *dev)
{
struct cn_callback_entry *cbq, *n;
long timeout;
DEFINE_WAIT(wait);
/* Flush the first pending jobs queued on kevent */
flush_scheduled_work();
/* If the connector workqueue creation is still pending, wait for it */
prepare_to_wait(&dev->wq_created, &wait, TASK_UNINTERRUPTIBLE);
if (atomic_read(&dev->wq_requested) && !dev->cn_queue) {
timeout = schedule_timeout(HZ * 2);
if (!timeout && !dev->cn_queue)
WARN_ON(1);
}
finish_wait(&dev->wq_created, &wait);
if (dev->cn_queue) {
flush_workqueue(dev->cn_queue);
destroy_workqueue(dev->cn_queue);
}
flush_workqueue(dev->cn_queue);
destroy_workqueue(dev->cn_queue);
spin_lock_bh(&dev->queue_lock);
list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)


@ -133,7 +133,8 @@ static int cn_call_callback(struct sk_buff *skb)
__cbq->data.skb == NULL)) {
__cbq->data.skb = skb;
if (queue_cn_work(__cbq, &__cbq->work))
if (queue_work(dev->cbdev->cn_queue,
&__cbq->work))
err = 0;
else
err = -EINVAL;
@ -148,13 +149,11 @@ static int cn_call_callback(struct sk_buff *skb)
d->callback = __cbq->data.callback;
d->free = __new_cbq;
__new_cbq->pdev = __cbq->pdev;
INIT_WORK(&__new_cbq->work,
&cn_queue_wrapper);
if (queue_cn_work(__new_cbq,
&__new_cbq->work))
if (queue_work(dev->cbdev->cn_queue,
&__new_cbq->work))
err = 0;
else {
kfree(__new_cbq);


@ -563,7 +563,7 @@ reset_inf(struct inf_hw *hw)
mdelay(10);
hw->ipac.isac.adf2 = 0x87;
hw->ipac.hscx[0].slot = 0x1f;
hw->ipac.hscx[0].slot = 0x23;
hw->ipac.hscx[1].slot = 0x23;
break;
case INF_GAZEL_R753:
val = inl((u32)hw->cfg.start + GAZEL_CNTRL);


@ -164,11 +164,9 @@ l3_1tr6_setup(struct l3_process *pc, u_char pr, void *arg)
char tmp[80];
struct sk_buff *skb = arg;
p = skb->data;
/* Channel Identification */
p = skb->data;
if ((p = findie(p, skb->len, WE0_chanID, 0))) {
p = findie(skb->data, skb->len, WE0_chanID, 0);
if (p) {
if (p[1] != 1) {
l3_1tr6_error(pc, "setup wrong chanID len", skb);
return;


@ -631,8 +631,6 @@ struct atl1c_adapter {
extern char atl1c_driver_name[];
extern char atl1c_driver_version[];
extern int atl1c_up(struct atl1c_adapter *adapter);
extern void atl1c_down(struct atl1c_adapter *adapter);
extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
extern void atl1c_set_ethtool_ops(struct net_device *netdev);


@ -66,6 +66,8 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
int *work_done, int work_to_do);
static int atl1c_up(struct atl1c_adapter *adapter);
static void atl1c_down(struct atl1c_adapter *adapter);
static const u16 atl1c_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
@ -2309,7 +2311,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
return err;
}
int atl1c_up(struct atl1c_adapter *adapter)
static int atl1c_up(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int num;
@ -2351,7 +2353,7 @@ err_alloc_rx:
return err;
}
void atl1c_down(struct atl1c_adapter *adapter)
static void atl1c_down(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;


@ -91,6 +91,8 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
static const struct ethtool_ops atl1_ethtool_ops;
/*
* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
@ -353,7 +355,7 @@ static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to read
*/
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
u32 val;
int i;
@ -553,7 +555,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
* 1. calcu 32bit CRC for multicast address
* 2. reverse crc with MSB to LSB
*/
u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
u32 crc32, value = 0;
int i;
@ -570,7 +572,7 @@ u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*/
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg;
u32 mta;
@ -914,7 +916,7 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
return 0;
}
void atl1_set_mac_addr(struct atl1_hw *hw)
static void atl1_set_mac_addr(struct atl1_hw *hw)
{
u32 value;
/*
@ -3658,7 +3660,7 @@ static int atl1_nway_reset(struct net_device *netdev)
return 0;
}
const struct ethtool_ops atl1_ethtool_ops = {
static const struct ethtool_ops atl1_ethtool_ops = {
.get_settings = atl1_get_settings,
.set_settings = atl1_set_settings,
.get_drvinfo = atl1_get_drvinfo,


@ -56,16 +56,13 @@ struct atl1_adapter;
struct atl1_hw;
/* function prototypes needed by multiple files */
u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
void atl1_set_mac_addr(struct atl1_hw *hw);
static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
static void atl1_set_mac_addr(struct atl1_hw *hw);
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static u32 atl1_check_link(struct atl1_adapter *adapter);
extern const struct ethtool_ops atl1_ethtool_ops;
/* hardware definitions specific to L1 */
/* Block IDLE Status Register */


@ -41,6 +41,10 @@
#include "atlx.h"
static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
static void atlx_set_mac_addr(struct atl1_hw *hw);
static struct atlx_spi_flash_dev flash_table[] = {
/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */
{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},


@ -1471,42 +1471,6 @@ err:
return status;
}
/* Uses sync mcc */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
u8 *connector)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_port_type *req;
int status;
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
goto err;
}
req = embedded_payload(wrb);
be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
OPCODE_COMMON_READ_TRANSRECV_DATA);
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
req->port = cpu_to_le32(port);
req->page_num = cpu_to_le32(TR_PAGE_A0);
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
*connector = resp->data.connector;
}
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{


@ -1022,8 +1022,6 @@ extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state);
extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
u8 port_num, u32 *state);
extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
u8 *connector);
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);


@ -849,20 +849,16 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
stats->rx_mcast_pkts++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
u8 l4_cksm, ipv6, ipcksm;
l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
if (ip_version) {
tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
}
ipv6_chk = (ip_version && (tcpf || udpf));
ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
/* Ignore ipcksm for ipv6 pkts */
return l4_cksm && (ipcksm || ipv6);
}
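The new predicate can be summarized as follows (an editor's truth table, not from the patch): a packet passes only if the L4 checksum is good and the IP checksum is either good or irrelevant because the frame is IPv6:

/*
 *   l4_cksm  ipcksm  ipv6   ->  csum_passed()
 *      1        1      0        true   (IPv4, both checksums good)
 *      1        0      0        false  (IPv4, bad IP checksum)
 *      1        x      1        true   (IPv6 carries no IP checksum)
 *      0        x      x        false  (bad or absent L4 checksum)
 */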
static struct be_rx_page_info *
@ -1017,10 +1013,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
if (do_pkt_csum(rxcp, adapter->rx_csum))
skb_checksum_none_assert(skb);
else
if (likely(adapter->rx_csum && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
@ -1674,7 +1670,7 @@ static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
return (tcp_frame && !err) ? true : false;
}
int be_poll_rx(struct napi_struct *napi, int budget)
static int be_poll_rx(struct napi_struct *napi, int budget)
{
struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
@ -1806,6 +1802,20 @@ static void be_worker(struct work_struct *work)
struct be_rx_obj *rxo;
int i;
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
int mcc_compl, status = 0;
mcc_compl = be_process_mcc(adapter, &status);
if (mcc_compl) {
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
}
goto reschedule;
}
if (!adapter->stats_ioctl_sent)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
@ -1824,6 +1834,7 @@ static void be_worker(struct work_struct *work)
if (!adapter->ue_detected)
be_detect_dump_ue(adapter);
reschedule:
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
@ -2019,8 +2030,6 @@ static int be_close(struct net_device *netdev)
struct be_eq_obj *tx_eq = &adapter->tx_eq;
int vec, i;
cancel_delayed_work_sync(&adapter->work);
be_async_mcc_disable(adapter);
netif_stop_queue(netdev);
@ -2085,8 +2094,6 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
&link_speed);
if (status)
@ -2299,9 +2306,6 @@ static int be_clear(struct be_adapter *adapter)
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
char flash_cookie[2][16] = {"*** SE FLAS",
"H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
int hdr_size)
@ -2559,7 +2563,6 @@ static void be_netdev_init(struct net_device *netdev)
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
@ -2715,6 +2718,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
if (!adapter)
return;
cancel_delayed_work_sync(&adapter->work);
unregister_netdev(adapter->netdev);
be_clear(adapter);
@ -2868,8 +2873,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
status = register_netdev(netdev);
if (status != 0)
goto unsetup;
netif_carrier_off(netdev);
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
unsetup:


@ -1288,15 +1288,11 @@ struct bnx2x_func_init_params {
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags);
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len);
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
@ -1307,7 +1303,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,


@ -25,6 +25,7 @@
#include "bnx2x_init.h"
static int bnx2x_setup_irqs(struct bnx2x *bp);
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@ -2187,7 +2188,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
}
int bnx2x_setup_irqs(struct bnx2x *bp)
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
int rc = 0;
if (bp->flags & USING_MSIX_FLAG) {


@ -116,13 +116,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
*/
void bnx2x_int_enable(struct bnx2x *bp);
/**
* Disable HW interrupts.
*
* @param bp
*/
void bnx2x_int_disable(struct bnx2x *bp);
/**
* Disable interrupts. This function ensures that there are no
* ISRs or SP DPCs (sp_task) are running after it returns.
@ -191,17 +184,6 @@ void bnx2x_free_mem(struct bnx2x *bp);
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int is_leading);
/**
* Bring down an eth client.
*
* @param bp
* @param p
*
* @return int
*/
int bnx2x_stop_fw_client(struct bnx2x *bp,
struct bnx2x_client_ramrod_params *p);
/**
* Set number of queues according to mode
*
@ -250,34 +232,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
#ifdef BCM_CNIC
/**
* Set iSCSI MAC(s) at the next entries in the CAM after the ETH
* MAC(s). The function will wait until the ramrod completion
* returns.
*
* @param bp driver handle
* @param set set or clear the CAM entry
*
* @return 0 if success, -ENODEV if ramrod doesn't return.
*/
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
#endif
/**
* Initialize status block in FW and HW
*
* @param bp driver handle
* @param dma_addr_t mapping
* @param int sb_id
* @param int vfid
* @param u8 vf_valid
* @param int fw_sb_id
* @param int igu_sb_id
*/
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id);
/**
* Set MAC filtering configurations.
*
@ -326,7 +280,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
* @return int
*/
int bnx2x_func_start(struct bnx2x *bp);
int bnx2x_func_stop(struct bnx2x *bp);
/**
* Prepare ILT configurations according to current driver
@ -395,14 +348,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
*/
int bnx2x_enable_msi(struct bnx2x *bp);
/**
* Request IRQ vectors from OS.
*
* @param bp
*
* @return int
*/
int bnx2x_setup_irqs(struct bnx2x *bp);
/**
* NAPI callback
*


@ -16,7 +16,9 @@
#define BNX2X_INIT_OPS_H
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len);
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
@ -589,7 +591,7 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
return rc;
}
int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
if (!rc)
@ -635,7 +637,7 @@ static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
}
}
void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli,
u32 ilt_start, u8 initop)
{
@ -688,8 +690,10 @@ void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
}
}
void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
struct ilt_client_info *ilt_cli, u8 initop)
static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
struct bnx2x_ilt *ilt,
struct ilt_client_info *ilt_cli,
u8 initop)
{
int i;
@ -703,8 +707,8 @@ void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
}
void bnx2x_ilt_client_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli, u8 initop)
static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
@ -720,7 +724,7 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
}
void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
@ -752,7 +756,7 @@ static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
* called during init common stage, ilt clients should be initialized
* prior to calling this function
*/
void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
PXP2_REG_RQ_CDU_P_SIZE, initop);
@ -772,8 +776,8 @@ void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
/* called during init port stage */
void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
u8 initop)
static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
u8 initop)
{
int port = BP_PORT(bp);
@ -814,8 +818,8 @@ static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
}
/* called during init common stage */
void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
u8 initop)
static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
u8 initop)
{
if (!QM_INIT(qm_cid_count))
return;
@ -836,8 +840,8 @@ void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
****************************************************************************/
/* called during init func stage */
void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
dma_addr_t t2_mapping, int src_cid_count)
{
int i;
int port = BP_PORT(bp);


@ -181,6 +181,12 @@
(_bank + (_addr & 0xf)), \
_val)
static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 *ret_val);
static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 val);
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@ -594,7 +600,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
return 0;
}
u8 bnx2x_bmac_enable(struct link_params *params,
static u8 bnx2x_bmac_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
@ -2537,122 +2543,6 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
}
}
/*
*------------------------------------------------------------------------
* bnx2x_override_led_value -
*
* Override the led value of the requested led
*
*------------------------------------------------------------------------
*/
u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
u32 led_idx, u32 value)
{
u32 reg_val;
/* If port 0 then use EMAC0, else use EMAC1*/
u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
DP(NETIF_MSG_LINK,
"bnx2x_override_led_value() port %x led_idx %d value %d\n",
port, led_idx, value);
switch (led_idx) {
case 0: /* 10MB led */
/* Read the current value of the LED register in
the EMAC block */
reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
/* Set the OVERRIDE bit to 1 */
reg_val |= EMAC_LED_OVERRIDE;
/* If value is 1, set the 10M_OVERRIDE bit,
otherwise reset it.*/
reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
(reg_val & ~EMAC_LED_10MB_OVERRIDE);
REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
break;
case 1: /*100MB led */
/*Read the current value of the LED register in
the EMAC block */
reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
/* Set the OVERRIDE bit to 1 */
reg_val |= EMAC_LED_OVERRIDE;
/* If value is 1, set the 100M_OVERRIDE bit,
otherwise reset it.*/
reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
(reg_val & ~EMAC_LED_100MB_OVERRIDE);
REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
break;
case 2: /* 1000MB led */
/* Read the current value of the LED register in the
EMAC block */
reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
/* Set the OVERRIDE bit to 1 */
reg_val |= EMAC_LED_OVERRIDE;
/* If value is 1, set the 1000M_OVERRIDE bit, otherwise
reset it. */
reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
(reg_val & ~EMAC_LED_1000MB_OVERRIDE);
REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
break;
case 3: /* 2500MB led */
/* Read the current value of the LED register in the
EMAC block*/
reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
/* Set the OVERRIDE bit to 1 */
reg_val |= EMAC_LED_OVERRIDE;
/* If value is 1, set the 2500M_OVERRIDE bit, otherwise
reset it.*/
reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
(reg_val & ~EMAC_LED_2500MB_OVERRIDE);
REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
break;
case 4: /*10G led */
if (port == 0) {
REG_WR(bp, NIG_REG_LED_10G_P0,
value);
} else {
REG_WR(bp, NIG_REG_LED_10G_P1,
value);
}
break;
case 5: /* TRAFFIC led */
/* Find if the traffic control is via BMAC or EMAC */
if (port == 0)
reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
else
reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
/* Override the traffic led in the EMAC:*/
if (reg_val == 1) {
/* Read the current value of the LED register in
the EMAC block */
reg_val = REG_RD(bp, emac_base +
EMAC_REG_EMAC_LED);
/* Set the TRAFFIC_OVERRIDE bit to 1 */
reg_val |= EMAC_LED_OVERRIDE;
/* If value is 1, set the TRAFFIC bit, otherwise
reset it.*/
reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
(reg_val & ~EMAC_LED_TRAFFIC);
REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
} else { /* Override the traffic led in the BMAC: */
REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+ port*4, 1);
REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
value);
}
break;
default:
DP(NETIF_MSG_LINK,
"bnx2x_override_led_value() unknown led index %d "
"(should be 0-5)\n", led_idx);
return -EINVAL;
}
return 0;
}
u8 bnx2x_set_led(struct link_params *params,
struct link_vars *vars, u8 mode, u32 speed)
{
@ -4099,9 +3989,9 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
{
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@ -6819,13 +6709,6 @@ u8 bnx2x_phy_probe(struct link_params *params)
return 0;
}
u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
{
if (phy_idx < params->num_phys)
return params->phy[phy_idx].supported;
return 0;
}
static void set_phy_vars(struct link_params *params)
{
struct bnx2x *bp = params->bp;


@ -279,12 +279,6 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val);
u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 *ret_val);
u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
@ -304,8 +298,6 @@ u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
#define LED_MODE_OPER 2
#define LED_MODE_FRONT_PANEL_OFF 3
u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
/* bnx2x_handle_module_detect_int should be called upon module detection
interrupt */
void bnx2x_handle_module_detect_int(struct link_params *params);
@ -325,19 +317,12 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf);
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base);
/* Returns the aggregative supported attributes of the phys on board */
u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
/* Check swap bit and adjust PHY order */
u32 bnx2x_phy_selection(struct link_params *params);


@ -403,7 +403,7 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
/* used only at init
* locking is done by mcp
*/
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@ -429,7 +429,8 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
int msglvl)
{
u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
@ -551,8 +552,9 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
return opcode;
}
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
u8 src_type, u8 dst_type)
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
struct dmae_command *dmae,
u8 src_type, u8 dst_type)
{
memset(dmae, 0, sizeof(struct dmae_command));
@ -567,7 +569,8 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
}
/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
struct dmae_command *dmae)
{
u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
@ -674,8 +677,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
bnx2x_issue_dmae_with_comp(bp, &dmae);
}
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len)
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
u32 addr, u32 len)
{
int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
int offset = 0;
@ -1267,7 +1270,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
void bnx2x_int_disable(struct bnx2x *bp)
static void bnx2x_int_disable(struct bnx2x *bp)
{
if (bp->common.int_block == INT_BLOCK_HC)
bnx2x_hc_int_disable(bp);
@ -2236,7 +2239,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
}
/* must be called under rtnl_lock */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
u32 mask = (1 << cl_id);
@ -2303,7 +2306,7 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
bp->mac_filters.unmatched_unicast & ~mask;
}
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
struct tstorm_eth_function_common_config tcfg = {0};
u16 rss_flgs;
@ -2460,7 +2463,7 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}
void bnx2x_pf_init(struct bnx2x *bp)
static void bnx2x_pf_init(struct bnx2x *bp)
{
struct bnx2x_func_init_params func_init = {0};
struct bnx2x_rss_params rss = {0};
@ -3928,7 +3931,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
hc_sm->time_to_expire = 0xFFFFFFFF;
}
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
@ -6021,6 +6024,9 @@ alloc_mem_err:
/*
* Init service functions
*/
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags);
int bnx2x_func_start(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
@ -6030,7 +6036,7 @@ int bnx2x_func_start(struct bnx2x *bp)
WAIT_RAMROD_COMMON);
}
int bnx2x_func_stop(struct bnx2x *bp)
static int bnx2x_func_stop(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
@ -6103,8 +6109,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags)
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
int *state_p, int flags)
{
/* can take a while if any port is running */
int cnt = 5000;
@ -6154,7 +6160,7 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
return -EBUSY;
}
u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
if (CHIP_IS_E1H(bp))
return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
@ -6273,7 +6279,7 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
*
* @return 0 if success, -ENODEV if ramrod doesn't return.
*/
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
@ -6383,11 +6389,11 @@ static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
ETH_CONNECTION_TYPE);
}
int bnx2x_setup_fw_client(struct bnx2x *bp,
struct bnx2x_client_init_params *params,
u8 activate,
struct client_init_ramrod_data *data,
dma_addr_t data_mapping)
static int bnx2x_setup_fw_client(struct bnx2x *bp,
struct bnx2x_client_init_params *params,
u8 activate,
struct client_init_ramrod_data *data,
dma_addr_t data_mapping)
{
u16 hc_usec;
int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
@ -6633,7 +6639,8 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
static int bnx2x_stop_fw_client(struct bnx2x *bp,
struct bnx2x_client_ramrod_params *p)
{
int rc;
@ -7440,7 +7447,7 @@ reset_task_exit:
* Init service functions
*/
u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;


@ -493,9 +493,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
struct slave *slave;
int i;
write_lock(&bond->lock);
write_lock_bh(&bond->lock);
bond->vlgrp = grp;
write_unlock(&bond->lock);
write_unlock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) {
struct net_device *slave_dev = slave->dev;


@ -31,3 +31,10 @@ config CAIF_SPI_SYNC
Putting the next command and length in the start of the frame can
help to synchronize to the next transfer in case of over or under-runs.
This option also needs to be enabled on the modem.
config CAIF_SHM
tristate "CAIF shared memory protocol driver"
depends on CAIF && U5500_MBOX
default n
---help---
The CAIF shared memory protocol driver for the STE UX5500 platform.


@ -8,3 +8,7 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
# SPI slave physical interfaces module
cfspi_slave-objs := caif_spi.o caif_spi_slave.o
obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
# Shared memory
caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
obj-$(CONFIG_CAIF_SHM) += caif_shm.o


@ -0,0 +1,129 @@
/*
* Copyright (C) ST-Ericsson AB 2010
* Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
* Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
#include <linux/version.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <mach/mbox.h>
#include <net/caif/caif_shm.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
#define MAX_SHM_INSTANCES 1
enum {
MBX_ACC0,
MBX_ACC1,
MBX_DSP
};
static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
static unsigned int shm_start;
static unsigned int shm_size;
module_param(shm_size, uint, 0440);
MODULE_PARM_DESC(shm_size, "Total size of SHM shared memory");
module_param(shm_start, uint, 0440);
MODULE_PARM_DESC(shm_start, "Start of SHM shared memory");
static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
{
/* Always block until msg is written successfully */
mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
return 0;
}
static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
void *pshm_drv)
{
/*
* For UX5500, we have only 1 SHM instance which uses MBX0
* for communication with the peer modem
*/
pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
if (!pshm_dev->hmbx)
return -ENODEV;
else
return 0;
}
static int __init caif_shmdev_init(void)
{
int i, result;
/* Loop is currently overkill, there is only one instance */
for (i = 0; i < MAX_SHM_INSTANCES; i++) {
shmdev_lyr[i].shm_base_addr = shm_start;
shmdev_lyr[i].shm_total_sz = shm_size;
if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
|| (shmdev_lyr[i].shm_total_sz <= 0)) {
pr_warn("ERROR,"
"Shared memory Address and/or Size incorrect"
", Bailing out ...\n");
result = -EINVAL;
goto clean;
}
pr_info("SHM AREA (instance %d) STARTS"
" AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
shmdev_lyr[i].shm_id = i;
shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
/*
* Finally, CAIF core module is called with details in place:
* 1. SHM base address
* 2. SHM size
* 3. MBX handle
*/
result = caif_shmcore_probe(&shmdev_lyr[i]);
if (result) {
pr_warn("ERROR[%d],"
"Could not probe SHM core (instance %d)"
" Bailing out ...\n", result, i);
goto clean;
}
}
return 0;
clean:
/*
* For now, we assume that even if one instance of SHM fails, we bail
* out of the driver support completely. For this, we need to release
* any memory allocated and unregister any instance of SHM net device.
*/
for (i = 0; i < MAX_SHM_INSTANCES; i++) {
if (shmdev_lyr[i].pshm_netdev)
unregister_netdev(shmdev_lyr[i].pshm_netdev);
}
return result;
}
static void __exit caif_shmdev_exit(void)
{
int i;
for (i = 0; i < MAX_SHM_INSTANCES; i++) {
caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
kfree((void *)shmdev_lyr[i].shm_base_addr);
}
}
module_init(caif_shmdev_init);
module_exit(caif_shmdev_exit);


@ -0,0 +1,744 @@
/*
* Copyright (C) ST-Ericsson AB 2010
* Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
* Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
* Daniel Martensson / daniel.martensson@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>
#define NR_TX_BUF 6
#define NR_RX_BUF 6
#define TX_BUF_SZ 0x2000
#define RX_BUF_SZ 0x2000
#define CAIF_NEEDED_HEADROOM 32
#define CAIF_FLOW_ON 1
#define CAIF_FLOW_OFF 0
#define LOW_WATERMARK 3
#define HIGH_WATERMARK 4
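The two watermarks give the Tx flow control hysteresis (an editor's gloss, not from the driver): flow is switched off once fewer than LOW_WATERMARK empty buffers remain and switched back on only when more than HIGH_WATERMARK are free again, so the state cannot flap around a single threshold:

/*
 * Illustration with LOW_WATERMARK = 3, HIGH_WATERMARK = 4:
 *   empty Tx buffers: 6 -> 5 -> 4 -> 3 -> 2   at 2 (< 3)  flow goes OFF
 *   empty Tx buffers: 2 -> 3 -> 4 -> 5        at 5 (> 4)  flow goes ON
 */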
/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF 10
/*
* Size in bytes of the descriptor area
* (With end of descriptor signalling)
*/
#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
sizeof(struct shm_pck_desc))
/*
* Offset to the first CAIF frame within a shared memory buffer.
* Aligned on 32 bytes.
*/
#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN 1
/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN 4
#define CAIF_MAX_MTU 4096
#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
#define SHM_FULL_MASK (0x0F << 0)
#define SHM_EMPTY_MASK (0x0F << 4)
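An illustrative round-trip through this encoding (an editor's example, not driver code): each index is stored plus one so that zero can mean "no index", with the FULL index in bits 0-3 and the EMPTY index in bits 4-7:

int idx_full, idx_empty;
u32 msg = 0;

msg |= SHM_SET_FULL(2);		/* announce buffer 2 as full:  bits 0-3 = 3 */
msg |= SHM_SET_EMPTY(5);	/* announce buffer 5 as empty: bits 4-7 = 6 */

if (msg & SHM_FULL_MASK)
	idx_full = SHM_GET_FULL(msg);	/* -> 2 */
if (msg & SHM_EMPTY_MASK)
	idx_empty = SHM_GET_EMPTY(msg);	/* -> 5 */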
struct shm_pck_desc {
/*
* Offset from start of shared memory area to start of
* shared memory CAIF frame.
*/
u32 frm_ofs;
u32 frm_len;
};
struct buf_list {
unsigned char *desc_vptr;
u32 phy_addr;
u32 index;
u32 len;
u32 frames;
u32 frm_ofs;
struct list_head list;
};
struct shm_caif_frm {
/* Number of bytes of padding before the CAIF frame. */
u8 hdr_ofs;
};
struct shmdrv_layer {
/* caif_dev_common must always be first in the structure*/
struct caif_dev_common cfdev;
u32 shm_tx_addr;
u32 shm_rx_addr;
u32 shm_base_addr;
u32 tx_empty_available;
spinlock_t lock;
struct list_head tx_empty_list;
struct list_head tx_pend_list;
struct list_head tx_full_list;
struct list_head rx_empty_list;
struct list_head rx_pend_list;
struct list_head rx_full_list;
struct workqueue_struct *pshm_tx_workqueue;
struct workqueue_struct *pshm_rx_workqueue;
struct work_struct shm_tx_work;
struct work_struct shm_rx_work;
struct sk_buff_head sk_qhead;
struct shmdev_layer *pshm_dev;
};
static int shm_netdev_open(struct net_device *shm_netdev)
{
netif_wake_queue(shm_netdev);
return 0;
}
static int shm_netdev_close(struct net_device *shm_netdev)
{
netif_stop_queue(shm_netdev);
return 0;
}
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
struct buf_list *pbuf;
struct shmdrv_layer *pshm_drv;
struct list_head *pos;
u32 avail_emptybuff = 0;
unsigned long flags = 0;
pshm_drv = (struct shmdrv_layer *)priv;
/* Check for received buffers. */
if (mbx_msg & SHM_FULL_MASK) {
int idx;
spin_lock_irqsave(&pshm_drv->lock, flags);
/* Check whether we have any outstanding buffers. */
if (list_empty(&pshm_drv->rx_empty_list)) {
/* Release spin lock. */
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* We print even in IRQ context... */
pr_warn("No empty Rx buffers to fill: "
"mbx_msg:%x\n", mbx_msg);
/* Bail out. */
goto err_sync;
}
pbuf =
list_entry(pshm_drv->rx_empty_list.next,
struct buf_list, list);
idx = pbuf->index;
/* Check buffer synchronization. */
if (idx != SHM_GET_FULL(mbx_msg)) {
/* We print even in IRQ context... */
pr_warn(
"phyif_shm_mbx_msg_cb: RX full out of sync:"
" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
idx, mbx_msg, SHM_GET_FULL(mbx_msg));
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Bail out. */
goto err_sync;
}
list_del_init(&pbuf->list);
list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Schedule RX work queue. */
if (!work_pending(&pshm_drv->shm_rx_work))
queue_work(pshm_drv->pshm_rx_workqueue,
&pshm_drv->shm_rx_work);
}
/* Check for emptied buffers. */
if (mbx_msg & SHM_EMPTY_MASK) {
int idx;
spin_lock_irqsave(&pshm_drv->lock, flags);
/* Check whether we have any outstanding buffers. */
if (list_empty(&pshm_drv->tx_full_list)) {
/* We print even in IRQ context... */
pr_warn("No TX to empty: msg:%x\n", mbx_msg);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Bail out. */
goto err_sync;
}
pbuf =
list_entry(pshm_drv->tx_full_list.next,
struct buf_list, list);
idx = pbuf->index;
/* Check buffer synchronization. */
if (idx != SHM_GET_EMPTY(mbx_msg)) {
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* We print even in IRQ context... */
pr_warn("TX empty "
"out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
/* Bail out. */
goto err_sync;
}
list_del_init(&pbuf->list);
/* Reset buffer parameters. */
pbuf->frames = 0;
pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
/* Check the available no. of buffers in the empty list */
list_for_each(pos, &pshm_drv->tx_empty_list)
avail_emptybuff++;
/* Check whether we have to wake up the transmitter. */
if ((avail_emptybuff > HIGH_WATERMARK) &&
(!pshm_drv->tx_empty_available)) {
pshm_drv->tx_empty_available = 1;
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_ON);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Schedule the work queue, if required */
if (!work_pending(&pshm_drv->shm_tx_work))
queue_work(pshm_drv->pshm_tx_workqueue,
&pshm_drv->shm_tx_work);
} else
spin_unlock_irqrestore(&pshm_drv->lock, flags);
}
return 0;
err_sync:
return -EIO;
}
static void shm_rx_work_func(struct work_struct *rx_work)
{
struct shmdrv_layer *pshm_drv;
struct buf_list *pbuf;
unsigned long flags = 0;
struct sk_buff *skb;
char *p;
int ret;
pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
while (1) {
struct shm_pck_desc *pck_desc;
spin_lock_irqsave(&pshm_drv->lock, flags);
/* Check for received buffers. */
if (list_empty(&pshm_drv->rx_full_list)) {
spin_unlock_irqrestore(&pshm_drv->lock, flags);
break;
}
pbuf =
list_entry(pshm_drv->rx_full_list.next, struct buf_list,
list);
list_del_init(&pbuf->list);
/* Retrieve pointer to start of the packet descriptor area. */
pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
/*
* Check whether descriptor contains a CAIF shared memory
* frame.
*/
while (pck_desc->frm_ofs) {
unsigned int frm_buf_ofs;
unsigned int frm_pck_ofs;
unsigned int frm_pck_len;
/*
* Check whether offset is within buffer limits
* (lower).
*/
if (pck_desc->frm_ofs <
(pbuf->phy_addr - pshm_drv->shm_base_addr))
break;
/*
* Check whether offset is within buffer limits
* (higher).
*/
if (pck_desc->frm_ofs >
((pbuf->phy_addr - pshm_drv->shm_base_addr) +
pbuf->len))
break;
/* Calculate offset from start of buffer. */
frm_buf_ofs =
pck_desc->frm_ofs - (pbuf->phy_addr -
pshm_drv->shm_base_addr);
/*
* Calculate offset and length of CAIF packet while
* taking care of the shared memory header.
*/
frm_pck_ofs =
frm_buf_ofs + SHM_HDR_LEN +
(*(pbuf->desc_vptr + frm_buf_ofs));
frm_pck_len =
(pck_desc->frm_len - SHM_HDR_LEN -
(*(pbuf->desc_vptr + frm_buf_ofs)));
/* Check whether CAIF packet is within buffer limits */
if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
break;
/* Get a suitable CAIF packet and copy in data. */
skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
frm_pck_len + 1);
BUG_ON(skb == NULL);
p = skb_put(skb, frm_pck_len);
memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
skb->dev = pshm_drv->pshm_dev->pshm_netdev;
/* Push received packet up the stack. */
ret = netif_rx_ni(skb);
if (!ret) {
pshm_drv->pshm_dev->pshm_netdev->stats.
rx_packets++;
pshm_drv->pshm_dev->pshm_netdev->stats.
rx_bytes += pck_desc->frm_len;
} else
++pshm_drv->pshm_dev->pshm_netdev->stats.
rx_dropped;
/* Move to next packet descriptor. */
pck_desc++;
}
list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
}
/* Schedule the work queue, if required */
if (!work_pending(&pshm_drv->shm_tx_work))
queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
}
static void shm_tx_work_func(struct work_struct *tx_work)
{
u32 mbox_msg;
unsigned int frmlen, avail_emptybuff, append = 0;
unsigned long flags = 0;
struct buf_list *pbuf = NULL;
struct shmdrv_layer *pshm_drv;
struct shm_caif_frm *frm;
struct sk_buff *skb;
struct shm_pck_desc *pck_desc;
struct list_head *pos;
pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
do {
/* Initialize mailbox message. */
mbox_msg = 0x00;
avail_emptybuff = 0;
spin_lock_irqsave(&pshm_drv->lock, flags);
/* Check for pending receive buffers. */
if (!list_empty(&pshm_drv->rx_pend_list)) {
pbuf = list_entry(pshm_drv->rx_pend_list.next,
struct buf_list, list);
list_del_init(&pbuf->list);
list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
/*
* Value index is never changed,
* so read access should be safe.
*/
mbox_msg |= SHM_SET_EMPTY(pbuf->index);
}
skb = skb_peek(&pshm_drv->sk_qhead);
if (skb == NULL)
goto send_msg;
/* Check the available no. of buffers in the empty list */
list_for_each(pos, &pshm_drv->tx_empty_list)
avail_emptybuff++;
if ((avail_emptybuff < LOW_WATERMARK) &&
pshm_drv->tx_empty_available) {
/* Update blocking condition. */
pshm_drv->tx_empty_available = 0;
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_OFF);
}
/*
* We simply return back to the caller if we do not have space
* either in Tx pending list or Tx empty list. In this case,
* we hold the received skb in the skb list, waiting to
* be transmitted once Tx buffers become available
*/
if (list_empty(&pshm_drv->tx_empty_list))
goto send_msg;
/* Get the first free Tx buffer. */
pbuf = list_entry(pshm_drv->tx_empty_list.next,
struct buf_list, list);
do {
if (append) {
skb = skb_peek(&pshm_drv->sk_qhead);
if (skb == NULL)
break;
}
frm = (struct shm_caif_frm *)
(pbuf->desc_vptr + pbuf->frm_ofs);
frm->hdr_ofs = 0;
frmlen = 0;
frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
/* Add tail padding if needed. */
if (frmlen % SHM_FRM_PAD_LEN)
frmlen += SHM_FRM_PAD_LEN -
(frmlen % SHM_FRM_PAD_LEN);
/*
* Verify that packet, header and additional padding
* can fit within the buffer frame area.
*/
if (frmlen >= (pbuf->len - pbuf->frm_ofs))
break;
if (!append) {
list_del_init(&pbuf->list);
append = 1;
}
skb = skb_dequeue(&pshm_drv->sk_qhead);
/* Copy in CAIF frame. */
skb_copy_bits(skb, 0, pbuf->desc_vptr +
pbuf->frm_ofs + SHM_HDR_LEN +
frm->hdr_ofs, skb->len);
pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
frmlen;
dev_kfree_skb(skb);
/* Fill in the shared memory packet descriptor area. */
pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
/* Forward to current frame. */
pck_desc += pbuf->frames;
pck_desc->frm_ofs = (pbuf->phy_addr -
pshm_drv->shm_base_addr) +
pbuf->frm_ofs;
pck_desc->frm_len = frmlen;
/* Terminate packet descriptor area. */
pck_desc++;
pck_desc->frm_ofs = 0;
/* Update buffer parameters. */
pbuf->frames++;
pbuf->frm_ofs += frmlen + (frmlen % 32);
} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
/* Assign buffer as full. */
list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
append = 0;
mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
spin_unlock_irqrestore(&pshm_drv->lock, flags);
if (mbox_msg)
pshm_drv->pshm_dev->pshmdev_mbxsend
(pshm_drv->pshm_dev->shm_id, mbox_msg);
} while (mbox_msg);
}
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
struct shmdrv_layer *pshm_drv;
unsigned long flags = 0;
pshm_drv = netdev_priv(shm_netdev);
spin_lock_irqsave(&pshm_drv->lock, flags);
skb_queue_tail(&pshm_drv->sk_qhead, skb);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Schedule Tx work queue for deferred processing of skbs */
if (!work_pending(&pshm_drv->shm_tx_work))
queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
return 0;
}
static const struct net_device_ops netdev_ops = {
.ndo_open = shm_netdev_open,
.ndo_stop = shm_netdev_close,
.ndo_start_xmit = shm_netdev_tx,
};
static void shm_netdev_setup(struct net_device *pshm_netdev)
{
struct shmdrv_layer *pshm_drv;
pshm_netdev->netdev_ops = &netdev_ops;
pshm_netdev->mtu = CAIF_MAX_MTU;
pshm_netdev->type = ARPHRD_CAIF;
pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
pshm_netdev->tx_queue_len = 0;
pshm_netdev->destructor = free_netdev;
pshm_drv = netdev_priv(pshm_netdev);
/* Initialize structures in a clean state. */
memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}
int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
int result, j;
struct shmdrv_layer *pshm_drv = NULL;
pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
"cfshm%d", shm_netdev_setup);
if (!pshm_dev->pshm_netdev)
return -ENOMEM;
pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
pshm_drv->pshm_dev = pshm_dev;
/*
* Initialization starts with the verification of the
* availability of MBX driver by calling its setup function.
* MBX driver must be available by this time for proper
* functioning of SHM driver.
*/
if ((pshm_dev->pshmdev_mbxsetup
(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
pr_warn("Could not config. SHM Mailbox,"
" Bailing out.....\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENODEV;
}
skb_queue_head_init(&pshm_drv->sk_qhead);
pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
" INSTANCE AT pshm_drv =0x%p\n",
pshm_drv->pshm_dev->shm_id, pshm_drv);
if (pshm_dev->shm_total_sz <
(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
pr_warn("ERROR, Amount of available"
" Phys. SHM cannot accomodate current SHM "
"driver configuration, Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
}
pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
if (pshm_dev->shm_loopback)
pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
else
pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
(NR_TX_BUF * TX_BUF_SZ);
INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
INIT_LIST_HEAD(&pshm_drv->tx_full_list);
INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
INIT_LIST_HEAD(&pshm_drv->rx_full_list);
INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
pshm_drv->pshm_tx_workqueue =
create_singlethread_workqueue("shm_tx_work");
pshm_drv->pshm_rx_workqueue =
create_singlethread_workqueue("shm_rx_work");
for (j = 0; j < NR_TX_BUF; j++) {
struct buf_list *tx_buf =
kmalloc(sizeof(struct buf_list), GFP_KERNEL);
if (tx_buf == NULL) {
pr_warn("ERROR, Could not"
" allocate dynamic mem. for tx_buf,"
" Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
}
tx_buf->index = j;
tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
tx_buf->len = TX_BUF_SZ;
tx_buf->frames = 0;
tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
if (pshm_dev->shm_loopback)
tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
else
tx_buf->desc_vptr =
ioremap(tx_buf->phy_addr, TX_BUF_SZ);
list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
}
for (j = 0; j < NR_RX_BUF; j++) {
struct buf_list *rx_buf =
kmalloc(sizeof(struct buf_list), GFP_KERNEL);
if (rx_buf == NULL) {
pr_warn("ERROR, Could not"
" allocate dynamic mem.for rx_buf,"
" Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
}
rx_buf->index = j;
rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
rx_buf->len = RX_BUF_SZ;
if (pshm_dev->shm_loopback)
rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
else
rx_buf->desc_vptr =
ioremap(rx_buf->phy_addr, RX_BUF_SZ);
list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
}
pshm_drv->tx_empty_available = 1;
result = register_netdev(pshm_dev->pshm_netdev);
if (result)
pr_warn("ERROR[%d], SHM could not, "
"register with NW FRMWK Bailing out ...\n", result);
return result;
}
void caif_shmcore_remove(struct net_device *pshm_netdev)
{
struct buf_list *pbuf;
struct shmdrv_layer *pshm_drv = NULL;
pshm_drv = netdev_priv(pshm_netdev);
while (!(list_empty(&pshm_drv->tx_pend_list))) {
pbuf =
list_entry(pshm_drv->tx_pend_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
while (!(list_empty(&pshm_drv->tx_full_list))) {
pbuf =
list_entry(pshm_drv->tx_full_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
while (!(list_empty(&pshm_drv->tx_empty_list))) {
pbuf =
list_entry(pshm_drv->tx_empty_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
while (!(list_empty(&pshm_drv->rx_full_list))) {
pbuf =
list_entry(pshm_drv->rx_full_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
while (!(list_empty(&pshm_drv->rx_pend_list))) {
pbuf =
list_entry(pshm_drv->rx_pend_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
while (!(list_empty(&pshm_drv->rx_empty_list))) {
pbuf =
list_entry(pshm_drv->rx_empty_list.next,
struct buf_list, list);
list_del(&pbuf->list);
kfree(pbuf);
}
/* Destroy work queues. */
destroy_workqueue(pshm_drv->pshm_tx_workqueue);
destroy_workqueue(pshm_drv->pshm_rx_workqueue);
unregister_netdev(pshm_netdev);
}
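The six drain loops above are identical except for the list head they walk, which makes copy-and-paste slips (tx vs. rx) easy. A minimal sketch of a shared helper that caif_shmcore_remove() and the probe error paths could use; the helper name is illustrative, not part of the driver:

static void shm_drain_buf_list(struct list_head *head)
{
	struct buf_list *pbuf, *tmp;

	/* Walk with the _safe variant since each entry is deleted as we go. */
	list_for_each_entry_safe(pbuf, tmp, head, list) {
		list_del(&pbuf->list);
		kfree(pbuf);
	}
}

With this, the remove path collapses to six one-line calls such as shm_drain_buf_list(&pshm_drv->tx_pend_list), and the probe error paths could reuse it so that buffers allocated in earlier loop iterations are not leaked on failure.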


@ -82,6 +82,14 @@ config CAN_FLEXCAN
---help---
Say Y here if you want support for Freescale FlexCAN.
config PCH_CAN
tristate "PCH CAN"
depends on CAN_DEV && PCI
---help---
This driver is for the PCH CAN controller of Topcliff, an IOH for
x86 embedded processors, and provides access to the CAN bus.
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"


@ -17,5 +17,6 @@ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
obj-$(CONFIG_PCH_CAN) += pch_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG


@ -2,7 +2,7 @@
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
* (C) 2007 by Hans J. Koch <hjk@linutronix.de>
* (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
* (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2 as distributed in the 'COPYING'
@ -40,7 +40,6 @@
#include <mach/board.h>
#define DRV_NAME "at91_can"
#define AT91_NAPI_WEIGHT 12
/*
@ -172,6 +171,7 @@ struct at91_priv {
};
static struct can_bittiming_const at91_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 4,
.tseg1_max = 16,
.tseg2_min = 2,
@ -199,13 +199,13 @@ static inline int get_tx_echo_mb(const struct at91_priv *priv)
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
return readl(priv->reg_base + reg);
return __raw_readl(priv->reg_base + reg);
}
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
u32 value)
{
writel(value, priv->reg_base + reg);
__raw_writel(value, priv->reg_base + reg);
}
static inline void set_mb_mode_prio(const struct at91_priv *priv,
@ -243,6 +243,12 @@ static void at91_setup_mailboxes(struct net_device *dev)
set_mb_mode(priv, i, AT91_MB_MODE_RX);
set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
/* reset acceptance mask and id register */
for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
at91_write(priv, AT91_MAM(i), 0x0);
at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
}
/* The last 4 mailboxes are used for transmitting. */
for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
@ -257,18 +263,30 @@ static int at91_set_bittiming(struct net_device *dev)
const struct can_bittiming *bt = &priv->can.bittiming;
u32 reg_br;
reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
((bt->phase_seg2 - 1) << 0);
dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
at91_write(priv, AT91_BR, reg_br);
return 0;
}
static int at91_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
{
const struct at91_priv *priv = netdev_priv(dev);
u32 reg_ecr = at91_read(priv, AT91_ECR);
bec->rxerr = reg_ecr & 0xff;
bec->txerr = reg_ecr >> 16;
return 0;
}
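The new helper reads AT91_ECR once and splits it into the receive error counter (low byte) and the transmit error counter (upper half), which is exactly what the state-change handling further down needs. A hedged sketch of how the counters could be consumed through the generic hook; the caller shown is illustrative, not the actual CAN core code:

struct can_berr_counter bec;

if (priv->can.do_get_berr_counter &&
    !priv->can.do_get_berr_counter(dev, &bec))
	netdev_info(dev, "tec=%u rec=%u\n", bec.txerr, bec.rxerr);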
static void at91_chip_start(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
@ -281,6 +299,7 @@ static void at91_chip_start(struct net_device *dev)
reg_mr = at91_read(priv, AT91_MR);
at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
at91_set_bittiming(dev);
at91_setup_mailboxes(dev);
at91_transceiver_switch(priv, 1);
@ -350,8 +369,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
netif_stop_queue(dev);
dev_err(dev->dev.parent,
"BUG! TX buffer full when queue awake!\n");
netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@ -435,7 +453,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
struct sk_buff *skb;
struct can_frame *cf;
dev_dbg(dev->dev.parent, "RX buffer overflow\n");
netdev_dbg(dev, "RX buffer overflow\n");
stats->rx_over_errors++;
stats->rx_errors++;
@ -480,6 +498,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
/* allow RX of extended frames */
at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
at91_rx_overflow_err(dev);
}
@ -565,8 +586,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
reg_sr & AT91_MB_RX_LOW_MASK)
dev_info(dev->dev.parent,
"order of incoming frames cannot be guaranteed\n");
netdev_info(dev,
"order of incoming frames cannot be guaranteed\n");
again:
for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
@ -604,7 +625,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* CRC error */
if (reg_sr & AT91_IRQ_CERR) {
dev_dbg(dev->dev.parent, "CERR irq\n");
netdev_dbg(dev, "CERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@ -612,7 +633,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Stuffing Error */
if (reg_sr & AT91_IRQ_SERR) {
dev_dbg(dev->dev.parent, "SERR irq\n");
netdev_dbg(dev, "SERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@ -621,14 +642,14 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Acknowledgement Error */
if (reg_sr & AT91_IRQ_AERR) {
dev_dbg(dev->dev.parent, "AERR irq\n");
netdev_dbg(dev, "AERR irq\n");
dev->stats.tx_errors++;
cf->can_id |= CAN_ERR_ACK;
}
/* Form error */
if (reg_sr & AT91_IRQ_FERR) {
dev_dbg(dev->dev.parent, "FERR irq\n");
netdev_dbg(dev, "FERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@ -637,7 +658,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Bit Error */
if (reg_sr & AT91_IRQ_BERR) {
dev_dbg(dev->dev.parent, "BERR irq\n");
netdev_dbg(dev, "BERR irq\n");
dev->stats.tx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@ -755,12 +776,10 @@ static void at91_irq_err_state(struct net_device *dev,
struct can_frame *cf, enum can_state new_state)
{
struct at91_priv *priv = netdev_priv(dev);
u32 reg_idr, reg_ier, reg_ecr;
u8 tec, rec;
u32 reg_idr = 0, reg_ier = 0;
struct can_berr_counter bec;
reg_ecr = at91_read(priv, AT91_ECR);
rec = reg_ecr & 0xff;
tec = reg_ecr >> 16;
at91_get_berr_counter(dev, &bec);
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
@ -771,11 +790,11 @@ static void at91_irq_err_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_WARNING &&
new_state <= CAN_STATE_BUS_OFF) {
dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
netdev_dbg(dev, "Error Warning IRQ\n");
priv->can.can_stats.error_warning++;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (tec > rec) ?
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
}
@ -787,11 +806,11 @@ static void at91_irq_err_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_PASSIVE &&
new_state <= CAN_STATE_BUS_OFF) {
dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
netdev_dbg(dev, "Error Passive IRQ\n");
priv->can.can_stats.error_passive++;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (tec > rec) ?
cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
}
@ -804,7 +823,7 @@ static void at91_irq_err_state(struct net_device *dev,
if (new_state <= CAN_STATE_ERROR_PASSIVE) {
cf->can_id |= CAN_ERR_RESTARTED;
dev_dbg(dev->dev.parent, "restarted\n");
netdev_dbg(dev, "restarted\n");
priv->can.can_stats.restarts++;
netif_carrier_on(dev);
@ -825,7 +844,7 @@ static void at91_irq_err_state(struct net_device *dev,
* circumstances. so just enable AT91_IRQ_ERRP, thus
* the "fallthrough"
*/
dev_dbg(dev->dev.parent, "Error Active\n");
netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
case CAN_STATE_ERROR_WARNING: /* fallthrough */
@ -843,7 +862,7 @@ static void at91_irq_err_state(struct net_device *dev,
cf->can_id |= CAN_ERR_BUSOFF;
dev_dbg(dev->dev.parent, "bus-off\n");
netdev_dbg(dev, "bus-off\n");
netif_carrier_off(dev);
priv->can.can_stats.bus_off++;
@ -881,7 +900,7 @@ static void at91_irq_err(struct net_device *dev)
else if (likely(reg_sr & AT91_IRQ_ERRA))
new_state = CAN_STATE_ERROR_ACTIVE;
else {
dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
netdev_err(dev, "BUG! hardware in undefined state\n");
return;
}
@ -1018,7 +1037,7 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_start_xmit = at91_start_xmit,
};
static int __init at91_can_probe(struct platform_device *pdev)
static int __devinit at91_can_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct at91_priv *priv;
@ -1067,8 +1086,8 @@ static int __init at91_can_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->can.clock.freq = clk_get_rate(clk);
priv->can.bittiming_const = &at91_bittiming_const;
priv->can.do_set_bittiming = at91_set_bittiming;
priv->can.do_set_mode = at91_set_mode;
priv->can.do_get_berr_counter = at91_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
priv->reg_base = addr;
priv->dev = dev;
@ -1092,7 +1111,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
return 0;
exit_free:
free_netdev(dev);
free_candev(dev);
exit_iounmap:
iounmap(addr);
exit_release:
@ -1113,8 +1132,6 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
free_netdev(dev);
iounmap(priv->reg_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ -1122,6 +1139,8 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
clk_put(priv->clk);
free_candev(dev);
return 0;
}
@ -1129,21 +1148,19 @@ static struct platform_driver at91_can_driver = {
.probe = at91_can_probe,
.remove = __devexit_p(at91_can_remove),
.driver = {
.name = DRV_NAME,
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
};
static int __init at91_can_module_init(void)
{
printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
return platform_driver_register(&at91_can_driver);
}
static void __exit at91_can_module_exit(void)
{
platform_driver_unregister(&at91_can_driver);
printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
}
module_init(at91_can_module_init);
@ -1151,4 +1168,4 @@ module_exit(at91_can_module_exit);
MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");


@ -992,7 +992,6 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
unregister_flexcandev(dev);
platform_set_drvdata(pdev, NULL);
free_candev(dev);
iounmap(priv->base);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ -1000,6 +999,8 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
clk_put(priv->clk);
free_candev(dev);
return 0;
}


@ -169,6 +169,7 @@
# define RXBSIDH_SHIFT 3
#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
# define RXBSIDL_IDE 0x08
# define RXBSIDL_SRR 0x10
# define RXBSIDL_EID 3
# define RXBSIDL_SHIFT 5
#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
@ -475,6 +476,8 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
frame->can_id =
(buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
(buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
frame->can_id |= CAN_RTR_FLAG;
}
/* Data length */
frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);

File diff suppressed because it is too large.


@ -58,4 +58,16 @@ config CAN_PLX_PCI
- esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
depends on ISA
help
This driver is for Technologic Systems' TS-CAN1 PC104 boards.
http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
The driver supports multiple boards and automatically configures them:
PLD IO base addresses are read from jumpers JP1 and JP2,
IRQ numbers are read from jumpers JP4 and JP5,
SJA1000 IO base addresses are chosen heuristically (first that works).
endif


@ -9,5 +9,6 @@ obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG


@ -0,0 +1,216 @@
/*
* tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
*
* Copyright 2010 Andre B. Oliveira
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* References:
* - Getting started with TS-CAN1, Technologic Systems, Jun 2009
* http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/isa.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include "sja1000.h"
MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
MODULE_LICENSE("GPL");
/* Maximum number of boards (one in each JP1:JP2 setting of IO address) */
#define TSCAN1_MAXDEV 4
/* PLD registers address offsets */
#define TSCAN1_ID1 0
#define TSCAN1_ID2 1
#define TSCAN1_VERSION 2
#define TSCAN1_LED 3
#define TSCAN1_PAGE 4
#define TSCAN1_MODE 5
#define TSCAN1_JUMPERS 6
/* PLD board identifier registers magic values */
#define TSCAN1_ID1_VALUE 0xf6
#define TSCAN1_ID2_VALUE 0xb9
/* PLD mode register SJA1000 IO enable bit */
#define TSCAN1_MODE_ENABLE 0x40
/* PLD jumpers register bits */
#define TSCAN1_JP4 0x10
#define TSCAN1_JP5 0x20
/* PLD IO base addresses start */
#define TSCAN1_PLD_ADDRESS 0x150
/* PLD register space size */
#define TSCAN1_PLD_SIZE 8
/* SJA1000 register space size */
#define TSCAN1_SJA1000_SIZE 32
/* SJA1000 crystal frequency (16MHz) */
#define TSCAN1_SJA1000_XTAL 16000000
/* SJA1000 IO base addresses */
static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
};
/* Read SJA1000 register */
static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
{
return inb((unsigned long)priv->reg_base + reg);
}
/* Write SJA1000 register */
static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
{
outb(val, (unsigned long)priv->reg_base + reg);
}
/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
static int __devinit tscan1_probe(struct device *dev, unsigned id)
{
struct net_device *netdev;
struct sja1000_priv *priv;
unsigned long pld_base, sja1000_base;
int irq, i;
pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
return -EBUSY;
if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
release_region(pld_base, TSCAN1_PLD_SIZE);
return -ENODEV;
}
switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
case TSCAN1_JP4:
irq = 6;
break;
case TSCAN1_JP5:
irq = 7;
break;
case TSCAN1_JP4 | TSCAN1_JP5:
irq = 5;
break;
default:
dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
release_region(pld_base, TSCAN1_PLD_SIZE);
return -EINVAL;
}
netdev = alloc_sja1000dev(0);
if (!netdev) {
release_region(pld_base, TSCAN1_PLD_SIZE);
return -ENOMEM;
}
dev_set_drvdata(dev, netdev);
SET_NETDEV_DEV(netdev, dev);
netdev->base_addr = pld_base;
netdev->irq = irq;
priv = netdev_priv(netdev);
priv->read_reg = tscan1_read;
priv->write_reg = tscan1_write;
priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
priv->cdr = CDR_CBP | CDR_CLK_OFF;
priv->ocr = OCR_TX0_PUSHPULL;
/* Select the first SJA1000 IO address that is free and that works */
for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
sja1000_base = tscan1_sja1000_addresses[i];
if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
dev_name(dev)))
continue;
/* Set SJA1000 IO base address and enable it */
outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
priv->reg_base = (void __iomem *)sja1000_base;
if (!register_sja1000dev(netdev)) {
/* SJA1000 probe succeeded; turn LED off and return */
outb(0, pld_base + TSCAN1_LED);
netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
pld_base, sja1000_base, irq);
return 0;
}
/* SJA1000 probe failed; release and try next address */
outb(0, pld_base + TSCAN1_MODE);
release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
}
dev_err(dev, "failed to assign SJA1000 IO address\n");
dev_set_drvdata(dev, NULL);
free_sja1000dev(netdev);
release_region(pld_base, TSCAN1_PLD_SIZE);
return -ENXIO;
}
static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
{
struct net_device *netdev;
struct sja1000_priv *priv;
unsigned long pld_base, sja1000_base;
netdev = dev_get_drvdata(dev);
unregister_sja1000dev(netdev);
dev_set_drvdata(dev, NULL);
priv = netdev_priv(netdev);
pld_base = netdev->base_addr;
sja1000_base = (unsigned long)priv->reg_base;
outb(0, pld_base + TSCAN1_MODE); /* disable SJA1000 IO space */
release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
release_region(pld_base, TSCAN1_PLD_SIZE);
free_sja1000dev(netdev);
return 0;
}
static struct isa_driver tscan1_isa_driver = {
.probe = tscan1_probe,
.remove = __devexit_p(tscan1_remove),
.driver = {
.name = "tscan1",
},
};
static int __init tscan1_init(void)
{
return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
}
module_init(tscan1_init);
static void __exit tscan1_exit(void)
{
isa_unregister_driver(&tscan1_isa_driver);
}
module_exit(tscan1_exit);
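isa_register_driver() with TSCAN1_MAXDEV asks the ISA core to offer ids 0 through 3 to the driver, one per JP1:JP2 IO-address setting; tscan1_probe() rejects any id whose PLD magic registers do not match. Conceptually the dispatch amounts to the loop below (a simplified sketch, not the ISA core's actual code):

unsigned int id;

for (id = 0; id < TSCAN1_MAXDEV; id++)
	if (!tscan1_probe(dev, id))
		; /* a board answered at this JP1:JP2 setting */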


@ -1266,11 +1266,13 @@ static int cxgb_up(struct adapter *adap)
}
if (!(adap->flags & QUEUES_BOUND)) {
err = bind_qsets(adap);
if (err) {
CH_ERR(adap, "failed to bind qsets, err %d\n", err);
int ret = bind_qsets(adap);
if (ret < 0) {
CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
free_irq_resources(adap);
err = ret;
goto out;
}
adap->flags |= QUEUES_BOUND;


@ -281,7 +281,6 @@ struct sge_rspq;
struct port_info {
struct adapter *adapter;
struct vlan_group *vlan_grp;
u16 viid;
s16 xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */


@ -403,7 +403,7 @@ static int link_start(struct net_device *dev)
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
pi->vlan_grp != NULL, true);
!!(dev->features & NETIF_F_HW_VLAN_RX), true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
@ -1881,7 +1881,24 @@ static int set_tso(struct net_device *dev, u32 value)
static int set_flags(struct net_device *dev, u32 flags)
{
return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
int err;
unsigned long old_feat = dev->features;
err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
if (err)
return err;
if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
const struct port_info *pi = netdev_priv(dev);
err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
-1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
true);
if (err)
dev->features = old_feat;
}
return err;
}
static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
@ -2842,15 +2859,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct port_info *pi = netdev_priv(dev);
pi->vlan_grp = grp;
t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
grp != NULL, true);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
@ -2878,7 +2886,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
@ -3658,7 +3665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
pi->rx_offload = RX_CSO;
pi->port_id = i;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->features |= NETIF_F_SG | TSO_FLAGS;
@ -3730,6 +3736,7 @@ static int __devinit init_one(struct pci_dev *pdev,
__set_bit(i, &adapter->registered_device_map);
adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
netif_tx_stop_all_queues(adapter->port[i]);
}
}
if (!adapter->registered_device_map) {


@ -1530,18 +1530,11 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
struct port_info *pi = netdev_priv(rxq->rspq.netdev);
struct vlan_group *grp = pi->vlan_grp;
__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
if (likely(grp)) {
ret = vlan_gro_frags(&rxq->rspq.napi, grp,
ntohs(pkt->vlan));
goto stats;
}
}
ret = napi_gro_frags(&rxq->rspq.napi);
stats: if (ret == GRO_HELD)
if (ret == GRO_HELD)
rxq->stats.lro_pkts++;
else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
rxq->stats.lro_merged++;
@ -1608,16 +1601,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
struct vlan_group *grp = pi->vlan_grp;
__vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
if (likely(grp))
vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
else
dev_kfree_skb_any(skb);
} else
netif_receive_skb(skb);
}
netif_receive_skb(skb);
return 0;
}


@ -521,7 +521,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
void e1000_reinit_safe(struct e1000_adapter *adapter)
static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);


@ -396,7 +396,9 @@ struct ehea_port_res {
int swqe_ll_count;
u32 swqe_id_counter;
u64 tx_packets;
u64 tx_bytes;
u64 rx_packets;
u64 rx_bytes;
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];


@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
struct ehea_port *port = netdev_priv(dev);
struct net_device_stats *stats = &port->stats;
struct hcp_ehea_port_cb2 *cb2;
u64 hret, rx_packets, tx_packets;
u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
int i;
memset(stats, 0, sizeof(*stats));
@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
rx_packets = 0;
for (i = 0; i < port->num_def_qps; i++)
for (i = 0; i < port->num_def_qps; i++) {
rx_packets += port->port_res[i].rx_packets;
rx_bytes += port->port_res[i].rx_bytes;
}
tx_packets = 0;
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
tx_packets += port->port_res[i].tx_packets;
tx_bytes += port->port_res[i].tx_bytes;
}
stats->tx_packets = tx_packets;
stats->multicast = cb2->rxmcp;
stats->rx_errors = cb2->rxuerr;
stats->rx_bytes = cb2->rxo;
stats->tx_bytes = cb2->txo;
stats->rx_bytes = rx_bytes;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
out_herr:
@ -703,6 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
int skb_arr_rq2_len = pr->rq2_skba.len;
int skb_arr_rq3_len = pr->rq3_skba.len;
int processed, processed_rq1, processed_rq2, processed_rq3;
u64 processed_bytes = 0;
int wqe_index, last_wqe_index, rq, port_reset;
processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@ -760,6 +765,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
processed_rq3++;
}
processed_bytes += skb->len;
ehea_proc_skb(pr, cqe, skb);
} else {
pr->p_stats.poll_receive_errors++;
@ -775,6 +781,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
pr->rx_bytes += processed_bytes;
ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
ehea_refill_rq2(pr, processed_rq2);
@ -1509,9 +1516,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
enum ehea_eq_type eq_type = EHEA_EQ;
struct ehea_qp_init_attr *init_attr = NULL;
int ret = -EIO;
u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
tx_bytes = pr->tx_bytes;
tx_packets = pr->tx_packets;
rx_bytes = pr->rx_bytes;
rx_packets = pr->rx_packets;
memset(pr, 0, sizeof(struct ehea_port_res));
pr->tx_bytes = rx_bytes;
pr->tx_packets = tx_packets;
pr->rx_bytes = rx_bytes;
pr->rx_packets = rx_packets;
pr->port = port;
spin_lock_init(&pr->xmit_lock);
spin_lock_init(&pr->netif_queue);
@ -2249,6 +2267,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
if (vlan_tx_tag_present(skb)) {
swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
swqe->vlan_tag = vlan_tx_tag_get(skb);
}
pr->tx_packets++;
pr->tx_bytes += skb->len;
if (skb->len <= SWQE3_MAX_IMM) {
u32 sig_iv = port->sig_comp_iv;
u32 swqe_num = pr->swqe_id_counter;
@ -2279,11 +2305,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
pr->swqe_id_counter += 1;
if (vlan_tx_tag_present(skb)) {
swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
swqe->vlan_tag = vlan_tx_tag_get(skb);
}
if (netif_msg_tx_queued(port)) {
ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
ehea_dump(swqe, 512, "swqe");
@ -2295,7 +2316,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
ehea_post_swqe(pr->qp, swqe);
pr->tx_packets++;
if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
spin_lock_irqsave(&pr->netif_queue, flags);


@ -2511,7 +2511,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_recycle_check(skb, priv->rx_buffer_size +
RXBUF_ALIGNMENT)) {
gfar_align_skb(skb);
__skb_queue_head(&priv->rx_recycle, skb);
skb_queue_head(&priv->rx_recycle, skb);
} else
dev_kfree_skb_any(skb);
@ -2594,7 +2594,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
skb = __skb_dequeue(&priv->rx_recycle);
skb = skb_dequeue(&priv->rx_recycle);
if (!skb)
skb = gfar_alloc_skb(dev);
@ -2750,7 +2750,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
if (unlikely(!newskb))
newskb = skb;
else if (skb)
__skb_queue_head(&priv->rx_recycle, skb);
skb_queue_head(&priv->rx_recycle, skb);
} else {
/* Increment the number of packets */
rx_queue->stats.rx_packets++;
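The switch away from the double-underscore helpers matters because rx_recycle can be touched concurrently (TX completion recycles skbs into it while the RX path pulls them out); __skb_queue_head() and __skb_dequeue() assume the caller already holds the queue lock. The locked variants are essentially the unlocked ones wrapped in the queue's own spinlock, roughly as in net/core/skbuff.c:

void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}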


@ -1623,12 +1623,12 @@ err_out:
return rc;
}
#ifdef CONFIG_PM
static void
jme_set_100m_half(struct jme_adapter *jme)
{
u32 bmcr, tmp;
jme_phy_on(jme);
bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
BMCR_SPEED1000 | BMCR_FULLDPLX);
@ -1656,7 +1656,6 @@ jme_wait_link(struct jme_adapter *jme)
phylink = jme_linkstat_from_phy(jme);
}
}
#endif
static inline void
jme_phy_off(struct jme_adapter *jme)
@ -1664,6 +1663,21 @@ jme_phy_off(struct jme_adapter *jme)
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}
static void
jme_powersave_phy(struct jme_adapter *jme)
{
if (jme->reg_pmcs) {
jme_set_100m_half(jme);
if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
jme_wait_link(jme);
jwrite32(jme, JME_PMCS, jme->reg_pmcs);
} else {
jme_phy_off(jme);
}
}
static int
jme_close(struct net_device *netdev)
{
@ -2991,6 +3005,16 @@ jme_remove_one(struct pci_dev *pdev)
}
static void
jme_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct jme_adapter *jme = netdev_priv(netdev);
jme_powersave_phy(jme);
pci_pme_active(pdev, true);
}
#ifdef CONFIG_PM
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
@ -3028,19 +3052,9 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
tasklet_hi_enable(&jme->rxempty_task);
pci_save_state(pdev);
if (jme->reg_pmcs) {
jme_set_100m_half(jme);
if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
jme_wait_link(jme);
jwrite32(jme, JME_PMCS, jme->reg_pmcs);
pci_enable_wake(pdev, PCI_D3cold, true);
} else {
jme_phy_off(jme);
}
pci_set_power_state(pdev, PCI_D3cold);
jme_powersave_phy(jme);
pci_enable_wake(jme->pdev, PCI_D3hot, true);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
@ -3087,6 +3101,7 @@ static struct pci_driver jme_driver = {
.suspend = jme_suspend,
.resume = jme_resume,
#endif /* CONFIG_PM */
.shutdown = jme_shutdown,
};
static int __init


@ -515,14 +515,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
(unsigned long)status, budget);
work_done = macb_rx(bp, budget);
if (work_done < budget)
if (work_done < budget) {
napi_complete(napi);
/*
* We've done what we can to clean the buffers. Make sure we
* get notified when new packets arrive.
*/
macb_writel(bp, IER, MACB_RX_INT_FLAGS);
/*
* We've done what we can to clean the buffers. Make sure we
* get notified when new packets arrive.
*/
macb_writel(bp, IER, MACB_RX_INT_FLAGS);
}
/* TODO: Handle errors */
@ -550,12 +551,16 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_RX_INT_FLAGS) {
/*
* There's no point taking any more interrupts
* until we have processed the buffers. The
* scheduling call may fail if the poll routine
* is already scheduled, so disable interrupts
* now.
*/
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
if (napi_schedule_prep(&bp->napi)) {
/*
* There's no point taking any more interrupts
* until we have processed the buffers
*/
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
dev_dbg(&bp->pdev->dev,
"scheduling RX softirq\n");
__napi_schedule(&bp->napi);
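The poll fix above restores the canonical NAPI contract: the device interrupt may be re-armed only after napi_complete(), and only when less than the full budget was consumed; otherwise the core keeps polling. A generic sketch of the pattern with illustrative names:

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = foo_process_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);
		foo_enable_rx_irq(priv);	/* hypothetical helper */
	}
	return work_done;	/* == budget: core will poll again */
}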


@ -210,38 +210,12 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
MLX4_CMD_TIME_CLASS_B);
}
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
__be64 *inbox;
int err;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
inbox = mailbox->buf;
inbox[0] = cpu_to_be64(virt);
inbox[1] = cpu_to_be64(dma_addr);
err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
MLX4_CMD_TIME_CLASS_B);
mlx4_free_cmd_mailbox(dev, mailbox);
if (!err)
mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
(unsigned long long) dma_addr, (unsigned long long) virt);
return err;
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);


@ -128,8 +128,6 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
}
int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);


@ -111,6 +111,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
goto out;
}
}
if (free < 0) {
err = -ENOMEM;
goto out;
}
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
@ -224,6 +230,11 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
}
}
if (free < 0) {
err = -ENOMEM;
goto out;
}
if (table->total == table->max) {
/* No free vlan entries */
err = -ENOSPC;


@ -65,7 +65,7 @@ EXPORT_SYMBOL(phy_print_status);
*
* Returns 0 on success or < 0 on error.
*/
int phy_clear_interrupt(struct phy_device *phydev)
static int phy_clear_interrupt(struct phy_device *phydev)
{
int err = 0;
@ -82,7 +82,7 @@ int phy_clear_interrupt(struct phy_device *phydev)
*
* Returns 0 on success or < 0 on error.
*/
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
int err = 0;
@ -208,7 +208,7 @@ static inline int phy_find_valid(int idx, u32 features)
* duplexes. Drop down by one in this order: 1000/FULL,
* 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
*/
void phy_sanitize_settings(struct phy_device *phydev)
static void phy_sanitize_settings(struct phy_device *phydev)
{
u32 features = phydev->supported;
int idx;
@ -223,7 +223,6 @@ void phy_sanitize_settings(struct phy_device *phydev)
phydev->speed = settings[idx].speed;
phydev->duplex = settings[idx].duplex;
}
EXPORT_SYMBOL(phy_sanitize_settings);
/**
* phy_ethtool_sset - generic ethtool sset function, handles all the details
@ -532,7 +531,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
* phy_enable_interrupts - Enable the interrupts from the PHY side
* @phydev: target phy_device struct
*/
int phy_enable_interrupts(struct phy_device *phydev)
static int phy_enable_interrupts(struct phy_device *phydev)
{
int err;
@ -545,13 +544,12 @@ int phy_enable_interrupts(struct phy_device *phydev)
return err;
}
EXPORT_SYMBOL(phy_enable_interrupts);
/**
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
* @phydev: target phy_device struct
*/
int phy_disable_interrupts(struct phy_device *phydev)
static int phy_disable_interrupts(struct phy_device *phydev)
{
int err;
@ -574,7 +572,6 @@ phy_err:
return err;
}
EXPORT_SYMBOL(phy_disable_interrupts);
/**
* phy_start_interrupts - request and enable interrupts for a PHY device


@ -57,6 +57,9 @@ extern void mdio_bus_exit(void);
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface);
/*
* Creates a new phy_fixup and adds it to the list
* @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
@ -146,7 +149,8 @@ int phy_scan_fixups(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_scan_fixups);
struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
static struct phy_device* phy_device_create(struct mii_bus *bus,
int addr, int phy_id)
{
struct phy_device *dev;
@ -193,7 +197,6 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
return dev;
}
EXPORT_SYMBOL(phy_device_create);
/**
* get_phy_id - reads the specified addr for its ID.
@ -316,7 +319,7 @@ EXPORT_SYMBOL(phy_find_first);
* If you want to monitor your own link state, don't call
* this function.
*/
void phy_prepare_link(struct phy_device *phydev,
static void phy_prepare_link(struct phy_device *phydev,
void (*handler)(struct net_device *))
{
phydev->adjust_link = handler;
@ -435,8 +438,8 @@ int phy_init_hw(struct phy_device *phydev)
* the attaching device, and given a callback for link status
* change. The phy_device is returned to the attaching driver.
*/
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
@ -473,7 +476,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* (dev_flags and interface) */
return phy_init_hw(phydev);
}
EXPORT_SYMBOL(phy_attach_direct);
/**
* phy_attach - attach a network device to a particular PHY device
@ -540,7 +542,7 @@ EXPORT_SYMBOL(phy_detach);
* what is supported. Returns < 0 on error, 0 if the PHY's advertisement
* hasn't changed, and > 0 if it has changed.
*/
int genphy_config_advert(struct phy_device *phydev)
static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
int oldadv, adv;
@ -605,7 +607,6 @@ int genphy_config_advert(struct phy_device *phydev)
return changed;
}
EXPORT_SYMBOL(genphy_config_advert);
/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
@ -615,7 +616,7 @@ EXPORT_SYMBOL(genphy_config_advert);
* to the values in phydev. Assumes that the values are valid.
* Please see phy_sanitize_settings().
*/
int genphy_setup_forced(struct phy_device *phydev)
static int genphy_setup_forced(struct phy_device *phydev)
{
int err;
int ctl = 0;


@ -146,11 +146,13 @@
#define MAX_CMD_DESCRIPTORS 1024
#define MAX_RCV_DESCRIPTORS_1G 4096
#define MAX_RCV_DESCRIPTORS_10G 8192
#define MAX_RCV_DESCRIPTORS_VF 2048
#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
#define DEFAULT_RCV_DESCRIPTORS_VF 1024
#define MAX_RDS_RINGS 2
#define get_next_index(index, length) \
@ -942,6 +944,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_LOOPBACK_TEST 2
#define QLCNIC_FILTER_AGE 80
#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
struct qlcnic_filter {
@ -970,6 +973,8 @@ struct qlcnic_adapter {
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
u16 max_rxd;
u16 max_jumbo_rxd;
u8 max_rds_rings;
u8 max_sds_rings;
@ -1129,7 +1134,7 @@ struct qlcnic_eswitch {
#define MAX_RX_QUEUES 4
#define DEFAULT_MAC_LEARN 1
#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)


@ -437,14 +437,8 @@ qlcnic_get_ringparam(struct net_device *dev,
ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
ring->tx_pending = adapter->num_txd;
if (adapter->ahw.port_type == QLCNIC_GBE) {
ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
} else {
ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
}
ring->rx_max_pending = adapter->max_rxd;
ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
@ -472,24 +466,17 @@ qlcnic_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
u16 num_rxd, num_jumbo_rxd, num_txd;
if (ring->rx_mini_pending)
return -EOPNOTSUPP;
if (adapter->ahw.port_type == QLCNIC_GBE) {
max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
}
num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
"rx jumbo");
num_txd = qlcnic_validate_ringparam(ring->tx_pending,
MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");


@ -656,13 +656,23 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
if (adapter->ahw.port_type == QLCNIC_XGBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
} else {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
}
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
} else if (adapter->ahw.port_type == QLCNIC_GBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
}
adapter->msix_supported = !!use_msi_x;
@ -1860,6 +1870,11 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
if (jiffies >
(QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, src_addr, vlan_id,
tx_ring);
tmp_fil->ftime = jiffies;
return;
}
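A raw jiffies comparison like the one above is correct only until jiffies wraps; the kernel's wrap-safe idiom is the time_after() family from linux/jiffies.h. An equivalent form of the re-add check (a sketch using the same variables as above):

if (time_after(jiffies, tmp_fil->ftime + (unsigned long)QLCNIC_READD_AGE * HZ))
	qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);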


@ -2226,7 +2226,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
int ql_core_dump(struct ql_adapter *qdev,
struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
@ -2243,16 +2242,13 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
void qlge_set_multicast_list(struct net_device *ndev);
#if 1
#define QL_ALL_DUMP
#define QL_REG_DUMP
#define QL_DEV_DUMP
#define QL_CB_DUMP
/* #define QL_ALL_DUMP */
/* #define QL_REG_DUMP */
/* #define QL_DEV_DUMP */
/* #define QL_CB_DUMP */
/* #define QL_IB_DUMP */
/* #define QL_OB_DUMP */
#endif
#ifdef QL_REG_DUMP
extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);


@ -94,6 +94,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
static int ql_wol(struct ql_adapter *qdev);
static void qlge_set_multicast_list(struct net_device *ndev);
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
@ -2382,6 +2385,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
}
static void qlge_restore_vlan(struct ql_adapter *qdev)
{
qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
if (qdev->vlgrp) {
u16 vid;
for (vid = 0; vid < VLAN_N_VID; vid++) {
if (!vlan_group_get_device(qdev->vlgrp, vid))
continue;
qlge_vlan_rx_add_vid(qdev->ndev, vid);
}
}
}
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
@ -3842,7 +3859,7 @@ static void ql_display_dev_info(struct net_device *ndev)
"MAC address %pM\n", ndev->dev_addr);
}
int ql_wol(struct ql_adapter *qdev)
static int ql_wol(struct ql_adapter *qdev)
{
int status = 0;
u32 wol = MB_WOL_DISABLE;
@ -3957,6 +3974,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
clear_bit(QL_PROMISCUOUS, &qdev->flags);
qlge_set_multicast_list(qdev->ndev);
/* Restore vlan setting. */
qlge_restore_vlan(qdev);
ql_enable_interrupts(qdev);
ql_enable_all_completion_interrupts(qdev);
netif_tx_start_all_queues(qdev->ndev);
@ -4242,7 +4262,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
return &ndev->stats;
}
void qlge_set_multicast_list(struct net_device *ndev)
static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
struct netdev_hw_addr *ha;


@ -87,7 +87,7 @@ exit:
return status;
}
int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
int status;
status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@ -681,7 +681,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
/* Send and ACK mailbox command to the firmware to
* let it continue with the change.
*/
int ql_mb_idc_ack(struct ql_adapter *qdev)
static int ql_mb_idc_ack(struct ql_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@ -744,7 +744,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
}
int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
u32 size)
{
int status = 0;


@ -961,9 +961,9 @@ sb1000_open(struct net_device *dev)
lp->rx_error_count = 0;
lp->rx_error_dpc_count = 0;
lp->rx_session_id[0] = 0x50;
lp->rx_session_id[0] = 0x48;
lp->rx_session_id[0] = 0x44;
lp->rx_session_id[0] = 0x42;
lp->rx_session_id[1] = 0x48;
lp->rx_session_id[2] = 0x44;
lp->rx_session_id[3] = 0x42;
lp->rx_frame_id[0] = 0;
lp->rx_frame_id[1] = 0;
lp->rx_frame_id[2] = 0;


@ -531,7 +531,7 @@ static int sgiseeq_open(struct net_device *dev)
if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
err = -EAGAIN;
return -EAGAIN;
}
err = init_seeq(dev, sp, sregs);


@ -688,18 +688,8 @@ slhc_toss(struct slcompress *comp)
return 0;
}
/* VJ header compression */
EXPORT_SYMBOL(slhc_init);
EXPORT_SYMBOL(slhc_free);
EXPORT_SYMBOL(slhc_remember);
EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
#else /* CONFIG_INET */
int
slhc_toss(struct slcompress *comp)
{
@ -738,6 +728,10 @@ slhc_init(int rslots, int tslots)
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
return NULL;
}
#endif /* CONFIG_INET */
/* VJ header compression */
EXPORT_SYMBOL(slhc_init);
EXPORT_SYMBOL(slhc_free);
EXPORT_SYMBOL(slhc_remember);
@ -745,5 +739,4 @@ EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
#endif /* CONFIG_INET */
MODULE_LICENSE("Dual BSD/GPL");


@ -9948,16 +9948,16 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
!((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
return -EINVAL;
device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
spin_lock_bh(&tp->lock);
if (wol->wolopts & WAKE_MAGIC) {
if (device_may_wakeup(dp))
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
device_set_wakeup_enable(dp, true);
} else {
else
tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
device_set_wakeup_enable(dp, false);
}
spin_unlock_bh(&tp->lock);
return 0;
}


@ -1220,7 +1220,7 @@ void tms380tr_wait(unsigned long time)
tmp = schedule_timeout_interruptible(tmp);
} while(time_after(tmp, jiffies));
#else
udelay(time);
mdelay(time / 1000);
#endif
}


@ -24,10 +24,6 @@
3XP Processor. It has been tested on x86 and sparc64.
KNOWN ISSUES:
*) The current firmware always strips the VLAN tag off, even if
we tell it not to. You should filter VLANs at the switch
as a workaround (good practice in any event) until we can
get this fixed.
*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
issue. Hopefully 3Com will fix it.
*) Waiting for a command response takes 8ms due to non-preemptable
@ -280,8 +276,6 @@ struct typhoon {
struct pci_dev * pdev;
struct net_device * dev;
struct napi_struct napi;
spinlock_t state_lock;
struct vlan_group * vlgrp;
struct basic_ring rxHiRing;
struct basic_ring rxBuffRing;
struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
@ -695,44 +689,6 @@ out:
return err;
}
static void
typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct typhoon *tp = netdev_priv(dev);
struct cmd_desc xp_cmd;
int err;
spin_lock_bh(&tp->state_lock);
if(!tp->vlgrp != !grp) {
/* We've either been turned on for the first time, or we've
* been turned off. Update the 3XP.
*/
if(grp)
tp->offload |= TYPHOON_OFFLOAD_VLAN;
else
tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
/* If the interface is up, the runtime is running -- and we
* must be up for the vlan core to call us.
*
* Do the command outside of the spin lock, as it is slow.
*/
INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
TYPHOON_CMD_SET_OFFLOAD_TASKS);
xp_cmd.parm2 = tp->offload;
xp_cmd.parm3 = tp->offload;
spin_unlock_bh(&tp->state_lock);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
netdev_err(tp->dev, "vlan offload error %d\n", -err);
spin_lock_bh(&tp->state_lock);
}
/* now make the change visible */
tp->vlgrp = grp;
spin_unlock_bh(&tp->state_lock);
}
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
u32 ring_dma)
@ -818,7 +774,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
first_txd->processFlags |=
TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
first_txd->processFlags |=
cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
@ -936,7 +892,7 @@ typhoon_set_rx_mode(struct net_device *dev)
filter |= TYPHOON_RX_FILTER_MCAST_HASH;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = filter;
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
@ -1198,6 +1154,20 @@ typhoon_get_rx_csum(struct net_device *dev)
return 1;
}
static int
typhoon_set_flags(struct net_device *dev, u32 data)
{
/* There's no way to turn off the RX VLAN offloading and stripping
* on the current 3XP firmware -- it does not respect the offload
* settings -- so we only allow the user to toggle the TX processing.
*/
if (!(data & ETH_FLAG_RXVLAN))
return -EINVAL;
return ethtool_op_set_flags(dev, data,
ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
}
static void
typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
@ -1224,6 +1194,8 @@ static const struct ethtool_ops typhoon_ethtool_ops = {
.set_sg = ethtool_op_set_sg,
.set_tso = ethtool_op_set_tso,
.get_ringparam = typhoon_get_ringparam,
.set_flags = typhoon_set_flags,
.get_flags = ethtool_op_get_flags,
};
static int
@ -1309,9 +1281,9 @@ typhoon_init_interface(struct typhoon *tp)
tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
tp->offload |= TYPHOON_OFFLOAD_VLAN;
spin_lock_init(&tp->command_lock);
spin_lock_init(&tp->state_lock);
/* Force the writes to the shared memory area out before continuing. */
wmb();
@ -1328,7 +1300,7 @@ typhoon_init_rings(struct typhoon *tp)
tp->rxHiRing.lastWrite = 0;
tp->rxBuffRing.lastWrite = 0;
tp->cmdRing.lastWrite = 0;
tp->cmdRing.lastWrite = 0;
tp->respRing.lastWrite = 0;
tp->txLoRing.lastRead = 0;
tp->txHiRing.lastRead = 0;
@ -1762,13 +1734,10 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
} else
skb_checksum_none_assert(new_skb);
spin_lock(&tp->state_lock);
if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
ntohl(rx->vlanTag) & 0xffff);
else
netif_receive_skb(new_skb);
spin_unlock(&tp->state_lock);
if (rx->rxStatus & TYPHOON_RX_VLAN)
__vlan_hwaccel_put_tag(new_skb,
ntohl(rx->vlanTag) & 0xffff);
netif_receive_skb(new_skb);
received++;
budget--;
@ -1989,11 +1958,9 @@ typhoon_start_runtime(struct typhoon *tp)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
spin_lock_bh(&tp->state_lock);
xp_cmd.parm2 = tp->offload;
xp_cmd.parm3 = tp->offload;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
spin_unlock_bh(&tp->state_lock);
if(err < 0)
goto error_out;
@ -2231,13 +2198,9 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
if(!netif_running(dev))
return 0;
spin_lock_bh(&tp->state_lock);
if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
spin_unlock_bh(&tp->state_lock);
netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
return -EBUSY;
}
spin_unlock_bh(&tp->state_lock);
/* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
netif_device_detach(dev);
@ -2338,7 +2301,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = typhoon_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
.ndo_vlan_rx_register = typhoon_vlan_rx_register,
};
static int __devinit


@ -88,9 +88,9 @@ struct UPT1_RSSConf {
/* features */
enum {
UPT1_F_RXCSUM = 0x0001, /* rx csum verification */
UPT1_F_RSS = 0x0002,
UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */
UPT1_F_LRO = 0x0008,
UPT1_F_RXCSUM = cpu_to_le64(0x0001), /* rx csum verification */
UPT1_F_RSS = cpu_to_le64(0x0002),
UPT1_F_RXVLAN = cpu_to_le64(0x0004), /* VLAN tag stripping */
UPT1_F_LRO = cpu_to_le64(0x0008),
};
#endif
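Making the feature flags little-endian constants up front lets drivers apply plain bitwise operators to __le64 fields shared with the device, with no per-call byte swapping; the set_flag_le64()/reset_flag_le64() helpers become unnecessary, as the later vmxnet3 hunks show. A minimal sketch of the resulting pattern (the field is illustrative):

__le64 features = 0;			/* e.g. devRead->misc.uptFeatures */

features |= UPT1_F_RXCSUM | UPT1_F_LRO;	/* set: constants are already LE */
features &= ~UPT1_F_RXVLAN;		/* clear */
if (features & UPT1_F_LRO)
	features |= UPT1_F_RSS;		/* test works the same way */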


@ -523,9 +523,9 @@ struct Vmxnet3_RxFilterConf {
#define VMXNET3_PM_MAX_PATTERN_SIZE 128
#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching
* filters */
#define VMXNET3_PM_WAKEUP_MAGIC cpu_to_le16(0x01) /* wake up on magic pkts */
#define VMXNET3_PM_WAKEUP_FILTER cpu_to_le16(0x02) /* wake up on pkts matching
* filters */
struct Vmxnet3_PM_PktFilter {


@ -1563,8 +1563,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
adapter->vlan_grp = grp;
/* update FEATURES to device */
set_flag_le64(&devRead->misc.uptFeatures,
UPT1_F_RXVLAN);
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
/*
@ -1587,7 +1586,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
adapter->vlan_grp = NULL;
if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
int i;
for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@ -1600,8 +1599,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
/* update FEATURES to device */
reset_flag_le64(&devRead->misc.uptFeatures,
UPT1_F_RXVLAN);
devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
@ -1762,15 +1760,15 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
/* set up feature flags */
if (adapter->rxcsum)
set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
if (adapter->lro) {
set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
adapter->vlan_grp) {
set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
}
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@ -2577,7 +2575,7 @@ vmxnet3_suspend(struct device *device)
memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
@ -2619,13 +2617,13 @@ vmxnet3_suspend(struct device *device)
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
in_dev_put(in_dev);
set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
skip_arp:
if (adapter->wol & WAKE_MAGIC)
set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
pmConf->numFilters = i;
@ -2667,7 +2665,7 @@ vmxnet3_resume(struct device *device)
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
pmConf));
netif_device_attach(netdev);
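Note the confPA change in the resume path above: virt_to_phys() returns a full physical address, and confPA is a 64-bit field, so converting through cpu_to_le32() would silently truncate DMA addresses above 4 GB. A minimal sketch of the hazard, with a hypothetical descriptor name:

	u64 pa = virt_to_phys(pmConf);		/* may exceed 0xffffffff */

	desc->confPA = cpu_to_le32(pa);		/* wrong: high 32 bits lost */
	desc->confPA = cpu_to_le64(pa);		/* correct for a __le64 field */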

View File

@ -50,13 +50,11 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
adapter->rxcsum = val;
if (netif_running(netdev)) {
if (val)
set_flag_le64(
&adapter->shared->devRead.misc.uptFeatures,
UPT1_F_RXCSUM);
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_RXCSUM;
else
reset_flag_le64(
&adapter->shared->devRead.misc.uptFeatures,
UPT1_F_RXCSUM);
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_RXCSUM;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
@ -292,10 +290,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
/* update hardware LRO capability accordingly */
if (lro_requested)
adapter->shared->devRead.misc.uptFeatures |=
cpu_to_le64(UPT1_F_LRO);
UPT1_F_LRO;
else
adapter->shared->devRead.misc.uptFeatures &=
cpu_to_le64(~UPT1_F_LRO);
~UPT1_F_LRO;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}

View File

@ -301,8 +301,8 @@ struct vmxnet3_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
u8 *hw_addr0; /* for BAR 0 */
u8 *hw_addr1; /* for BAR 1 */
u8 __iomem *hw_addr0; /* for BAR 0 */
u8 __iomem *hw_addr1; /* for BAR 1 */
/* feature control */
bool rxcsum;
@ -353,21 +353,6 @@ struct vmxnet3_adapter {
#define VMXNET3_MAX_ETH_HDR_SIZE 22
#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
static inline void set_flag_le16(__le16 *data, u16 flag)
{
*data = cpu_to_le16(le16_to_cpu(*data) | flag);
}
static inline void set_flag_le64(__le64 *data, u64 flag)
{
*data = cpu_to_le64(le64_to_cpu(*data) | flag);
}
static inline void reset_flag_le64(__le64 *data, u64 flag)
{
*data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
}
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
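The __iomem annotations added to hw_addr0/hw_addr1 above document that these pointers are PCI BAR mappings and let sparse flag any direct dereference; MMIO has to go through the accessor functions. A minimal sketch, assuming a mapped BAR and a hypothetical register offset:

	void __iomem *bar = ioremap(pci_resource_start(pdev, 0),
				    pci_resource_len(pdev, 0));
	u32 v;

	v = readl(bar + 0x10);		/* correct: accessor */
	/* v = *(u32 *)(bar + 0x10);	   sparse warns: __iomem dereference */
	writel(v | 0x1, bar + 0x10);
	iounmap(bar);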

View File

@ -20,6 +20,179 @@
#include "vxge-traffic.h"
#include "vxge-config.h"
static enum vxge_hw_status
__vxge_hw_fifo_create(
struct __vxge_hw_vpath_handle *vpath_handle,
struct vxge_hw_fifo_attr *attr);
static enum vxge_hw_status
__vxge_hw_fifo_abort(
struct __vxge_hw_fifo *fifoh);
static enum vxge_hw_status
__vxge_hw_fifo_reset(
struct __vxge_hw_fifo *ringh);
static enum vxge_hw_status
__vxge_hw_fifo_delete(
struct __vxge_hw_vpath_handle *vpath_handle);
static struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
u32 size);
static void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
struct __vxge_hw_blockpool_entry *entry);
static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
void *block_addr,
u32 length,
struct pci_dev *dma_h,
struct pci_dev *acc_handle);
static enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
struct __vxge_hw_blockpool *blockpool,
u32 pool_size,
u32 pool_max);
static void
__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
static void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
u32 size,
struct vxge_hw_mempool_dma *dma_object);
static void
__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
void *memblock,
u32 size,
struct vxge_hw_mempool_dma *dma_object);
static struct __vxge_hw_channel*
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
enum __vxge_hw_channel_type type, u32 length,
u32 per_dtr_space, void *userdata);
static void
__vxge_hw_channel_free(
struct __vxge_hw_channel *channel);
static enum vxge_hw_status
__vxge_hw_channel_initialize(
struct __vxge_hw_channel *channel);
static enum vxge_hw_status
__vxge_hw_channel_reset(
struct __vxge_hw_channel *channel);
static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
static enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
static enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
static void
__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
static void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
struct vxge_hw_device_hw_info *hw_info);
static enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
static void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
static enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
static enum vxge_hw_status
__vxge_hw_device_register_poll(
void __iomem *reg,
u64 mask, u32 max_millis);
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
u64 mask, u32 max_millis)
{
__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
wmb();
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
wmb();
return __vxge_hw_device_register_poll(addr, mask, max_millis);
}
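The inline helper above writes a 64-bit value as two ordered 32-bit halves (vxge_bVALn() extracts each half), with wmb() forcing the device to see them in sequence, then polls until the masked bits clear. A typical caller, sketched with hypothetical register and mask names and assuming the driver's usual poll timeout constant, might look like:

	enum vxge_hw_status status;

	/* kick a command and wait for the hardware to drop the strobe bit */
	status = __vxge_hw_pio_mem_write64(cmd,
			&vp_reg->cmd_reg,		/* hypothetical register */
			CMD_REG_STROBE,			/* hypothetical mask */
			VXGE_HW_DEF_DEVICE_POLL_MILLIS);
	if (status != VXGE_HW_OK)
		return status;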
static struct vxge_hw_mempool*
__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
u32 item_size, u32 private_size, u32 items_initial,
u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
void *userdata);
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_vpath_stats_hw_info *hw_stats);
static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
static u64
__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg);
static u32
__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
static enum vxge_hw_status
__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
struct vxge_hw_device_hw_info *hw_info);
static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
u32 operation, u32 offset, u64 *stat);
static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
/*
* __vxge_hw_channel_allocate - Allocate memory for channel
* This function allocates the required memory for the channel and its various arrays
@ -190,7 +363,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
* Will poll certain register for specified amount of time.
* Will poll until masked bit is not cleared.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
u64 val64;
@ -221,7 +394,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
* in progress
* This routine checks that the vpath reset-in-progress register has been cleared to zero
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
enum vxge_hw_status status;
@ -236,7 +409,7 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
* This routine sets the swapper and reads the toc pointer and returns the
* memory mapped address of the toc
*/
struct vxge_hw_toc_reg __iomem *
static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
u64 val64;
@ -779,7 +952,7 @@ exit:
* vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
* Get the Statistics on aggregate port
*/
enum vxge_hw_status
static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
@ -814,7 +987,7 @@ exit:
* vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
* Get the Statistics on port
*/
enum vxge_hw_status
static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_port_stats *port_stats)
{
@ -952,20 +1125,6 @@ u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
return 0;
#endif
}
/*
* vxge_hw_device_debug_mask_get - Get the debug mask
* This routine returns the current debug mask set
*/
u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
{
#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
if (hldev == NULL)
return 0;
return hldev->debug_module_mask;
#else
return 0;
#endif
}
/*
* vxge_hw_getpause_data - Pause frame generation and reception.
@ -1090,7 +1249,7 @@ __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
* first block
* Returns the dma address of the first RxD block
*/
u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
struct vxge_hw_mempool_dma *dma_object;
@ -1252,7 +1411,7 @@ exit:
* This function creates Ring and initializes it.
*
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_ring_attr *attr)
{
@ -1363,7 +1522,7 @@ exit:
* __vxge_hw_ring_abort - Returns the RxD
* This function terminates the RxDs of ring
*/
enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
void *rxdh;
struct __vxge_hw_channel *channel;
@ -1392,7 +1551,7 @@ enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
* __vxge_hw_ring_reset - Resets the ring
* This function resets the ring during vpath reset operation
*/
enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_channel *channel;
@ -1419,7 +1578,7 @@ exit:
* __vxge_hw_ring_delete - Removes the ring
* This function frees up the memory pool and removes the ring
*/
enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_ring *ring = vp->vpath->ringh;
@ -1438,7 +1597,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
* __vxge_hw_mempool_grow
* Will resize mempool up to %num_allocate value.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
u32 *num_allocated)
{
@ -1527,7 +1686,7 @@ exit:
* with size enough to hold %items_initial number of items. Memory is
* DMA-able but client must map/unmap before interoperating with the device.
*/
struct vxge_hw_mempool*
static struct vxge_hw_mempool*
__vxge_hw_mempool_create(
struct __vxge_hw_device *devh,
u32 memblock_size,
@ -1644,7 +1803,7 @@ exit:
/*
* vxge_hw_mempool_destroy
*/
void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
u32 i, j;
struct __vxge_hw_device *devh = mempool->devh;
@ -1700,7 +1859,7 @@ __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
* __vxge_hw_device_vpath_config_check - Check vpath configuration.
* Check the vpath configuration
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
enum vxge_hw_status status;
@ -1922,7 +2081,7 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
* __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
* Set the swapper bits appropriately for the legacy section.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
u64 val64;
@ -1977,7 +2136,7 @@ __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
* __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
* Set the swapper bits appropriately for the vpath.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
@ -1996,7 +2155,7 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
* __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
* Set the swapper bits appropriately for the vpath.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
struct vxge_hw_legacy_reg __iomem *legacy_reg,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
@ -2020,28 +2179,6 @@ __vxge_hw_kdfc_swapper_set(
return VXGE_HW_OK;
}
/*
* vxge_hw_mgmt_device_config - Retrieve device configuration.
* Get device configuration. Permits to retrieve at run-time configuration
* values that were used to initialize and configure the device.
*/
enum vxge_hw_status
vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
struct vxge_hw_device_config *dev_config, int size)
{
if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
return VXGE_HW_ERR_INVALID_DEVICE;
if (size != sizeof(struct vxge_hw_device_config))
return VXGE_HW_ERR_VERSION_CONFLICT;
memcpy(dev_config, &hldev->config,
sizeof(struct vxge_hw_device_config));
return VXGE_HW_OK;
}
/*
* vxge_hw_mgmt_reg_read - Read Titan register.
*/
@ -2438,7 +2575,7 @@ exit:
* __vxge_hw_fifo_abort - Returns the TxD
* This function terminates the TxDs of fifo
*/
enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
void *txdlh;
@ -2466,7 +2603,7 @@ enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
* __vxge_hw_fifo_reset - Resets the fifo
* This function resets the fifo during vpath reset operation
*/
enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
enum vxge_hw_status status = VXGE_HW_OK;
@ -2501,7 +2638,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
* in pci config space.
* Read from the vpath pci config space.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
u32 phy_func_0, u32 offset, u32 *val)
{
@ -2542,7 +2679,7 @@ exit:
* __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
* Returns the function number of the vpath.
*/
u32
static u32
__vxge_hw_vpath_func_id_get(u32 vp_id,
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
@ -2573,7 +2710,7 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
* __vxge_hw_vpath_card_info_get - Get the serial numbers,
* part number and product description.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
@ -2695,7 +2832,7 @@ __vxge_hw_vpath_card_info_get(
* __vxge_hw_vpath_fw_ver_get - Get the fw version
* Returns FW Version
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
@ -2789,7 +2926,7 @@ exit:
* __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
* Returns pci function mode
*/
u64
static u64
__vxge_hw_vpath_pci_func_mode_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
@ -2995,7 +3132,7 @@ exit:
* __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
* from MAC address table.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_addr_get(
u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
@ -3347,7 +3484,7 @@ __vxge_hw_vpath_mgmt_read(
* This routine checks the vpath_rst_in_prog register to see if
* the adapter has completed the reset process for the vpath
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
enum vxge_hw_status status;
@ -3365,7 +3502,7 @@ __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
* __vxge_hw_vpath_reset
* This routine resets the vpath on the device
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@ -3383,7 +3520,7 @@ __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
* __vxge_hw_vpath_sw_reset
* This routine resets the vpath structures
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
enum vxge_hw_status status = VXGE_HW_OK;
@ -3408,7 +3545,7 @@ exit:
* This routine configures the prc registers of virtual path using the config
* passed
*/
void
static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@ -3480,7 +3617,7 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine configures the kdfc registers of virtual path using the
* config passed
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@ -3553,7 +3690,7 @@ exit:
* __vxge_hw_vpath_mac_configure
* This routine configures the mac of virtual path using the config passed
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@ -3621,7 +3758,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine configures the tim registers of virtual path using the config
* passed
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@ -3897,7 +4034,7 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine is the final phase of init which initializes the
* registers of the vpath using the configuration passed.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@ -3966,7 +4103,7 @@ exit:
* This routine is the initial phase of init which resets the vpath and
* initializes the software support structures.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
struct vxge_hw_vp_config *config)
{
@ -4022,7 +4159,7 @@ exit:
* __vxge_hw_vp_terminate - Terminate Virtual Path structure
* This routine closes all channels it opened and freeup memory
*/
void
static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
struct __vxge_hw_virtualpath *vpath;
@ -4384,7 +4521,7 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
* Enable the DMA vpath statistics. The function is to be called to re-enable
* the adapter to update stats into the host memory
*/
enum vxge_hw_status
static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
enum vxge_hw_status status = VXGE_HW_OK;
@ -4409,7 +4546,7 @@ exit:
* __vxge_hw_vpath_stats_access - Get the statistics from the given location
* and offset and perform an operation
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
u32 operation, u32 offset, u64 *stat)
{
@ -4445,7 +4582,7 @@ vpath_stats_access_exit:
/*
* __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
@ -4478,9 +4615,9 @@ exit:
/*
* __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
u64 *val64;
enum vxge_hw_status status = VXGE_HW_OK;
@ -4509,9 +4646,9 @@ exit:
/*
* __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
*/
enum vxge_hw_status __vxge_hw_vpath_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_vpath_stats_hw_info *hw_stats)
static enum vxge_hw_status
__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
@ -4643,6 +4780,32 @@ exit:
return status;
}
static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
unsigned long size)
{
gfp_t flags;
void *vaddr;
if (in_interrupt())
flags = GFP_ATOMIC | GFP_DMA;
else
flags = GFP_KERNEL | GFP_DMA;
vaddr = kmalloc((size), flags);
vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
}
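vxge_os_dma_malloc_async() above chooses its allocation flags by context: kmalloc() must not sleep in interrupt context, so the atomic reserve is used there, and GFP_DMA keeps the block DMA-addressable either way. The same decision in isolation, with an illustrative name:

static void *ctx_aware_alloc(size_t size)
{
	/* sleeping is forbidden in interrupt context */
	gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;

	return kmalloc(size, flags | GFP_DMA);	/* NULL on failure */
}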
static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
struct pci_dev **p_dma_acch)
{
unsigned long misaligned = *(unsigned long *)p_dma_acch;
u8 *tmp = (u8 *)vaddr;
tmp -= misaligned;
kfree((void *)tmp);
}
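vxge_os_dma_free() undoes alignment bookkeeping done at allocation time: the number of bytes skipped to align the block was stashed through p_dma_acch, so subtracting it recovers the pointer kmalloc() actually returned. A minimal sketch of that pairing, with hypothetical variables:

	u8 *raw = kmalloc(size + align - 1, GFP_KERNEL);
	u8 *aligned = PTR_ALIGN(raw, align);
	unsigned long misaligned = aligned - raw;	/* remembered for free */

	/* ... device uses the aligned block ... */

	kfree(aligned - misaligned);			/* back to the raw pointer */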
/*
* __vxge_hw_blockpool_create - Create block pool
*/
@ -4845,12 +5008,11 @@ void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
* vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
* Adds a block to block pool
*/
void vxge_hw_blockpool_block_add(
struct __vxge_hw_device *devh,
void *block_addr,
u32 length,
struct pci_dev *dma_h,
struct pci_dev *acc_handle)
static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
void *block_addr,
u32 length,
struct pci_dev *dma_h,
struct pci_dev *acc_handle)
{
struct __vxge_hw_blockpool *blockpool;
struct __vxge_hw_blockpool_entry *entry = NULL;

View File

@ -183,11 +183,6 @@ struct vxge_hw_device_version {
char version[VXGE_HW_FW_STRLEN];
};
u64
__vxge_hw_vpath_pci_func_mode_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg);
/**
* struct vxge_hw_fifo_config - Configuration of fifo.
* @enable: Is this fifo to be commissioned
@ -1426,9 +1421,6 @@ struct vxge_hw_rth_hash_types {
u8 hash_type_ipv6ex_en;
};
u32
vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
void vxge_hw_device_debug_set(
struct __vxge_hw_device *devh,
enum vxge_debug_level level,
@ -1440,9 +1432,6 @@ vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
u32
vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
/**
* vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
* @buf_mode: Buffer mode (1, 3 or 5)
@ -1817,60 +1806,10 @@ struct vxge_hw_vpath_attr {
struct vxge_hw_fifo_attr fifo_attr;
};
enum vxge_hw_status
__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
struct __vxge_hw_blockpool *blockpool,
u32 pool_size,
u32 pool_max);
void
__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
struct __vxge_hw_blockpool_entry *
__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
u32 size);
void
__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
struct __vxge_hw_blockpool_entry *entry);
void *
__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
u32 size,
struct vxge_hw_mempool_dma *dma_object);
void
__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
void *memblock,
u32 size,
struct vxge_hw_mempool_dma *dma_object);
enum vxge_hw_status
__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
enum vxge_hw_status
__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
enum vxge_hw_status
vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
struct vxge_hw_device_config *dev_config, int size);
enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
void __iomem *bar0,
struct vxge_hw_device_hw_info *hw_info);
enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
struct vxge_hw_device_hw_info *hw_info);
enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
struct vxge_hw_device_hw_info *hw_info);
enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
struct vxge_hw_device_config *device_config);
@ -1954,38 +1893,6 @@ out:
return vaddr;
}
extern void vxge_hw_blockpool_block_add(
struct __vxge_hw_device *devh,
void *block_addr,
u32 length,
struct pci_dev *dma_h,
struct pci_dev *acc_handle);
static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
unsigned long size)
{
gfp_t flags;
void *vaddr;
if (in_interrupt())
flags = GFP_ATOMIC | GFP_DMA;
else
flags = GFP_KERNEL | GFP_DMA;
vaddr = kmalloc((size), flags);
vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
}
static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
struct pci_dev **p_dma_acch)
{
unsigned long misaligned = *(unsigned long *)p_dma_acch;
u8 *tmp = (u8 *)vaddr;
tmp -= misaligned;
kfree((void *)tmp);
}
/*
* __vxge_hw_mempool_item_priv - will return pointer on per item private space
*/
@ -2010,40 +1917,6 @@ __vxge_hw_mempool_item_priv(
(*memblock_item_idx) * mempool->items_priv_size;
}
enum vxge_hw_status
__vxge_hw_mempool_grow(
struct vxge_hw_mempool *mempool,
u32 num_allocate,
u32 *num_allocated);
struct vxge_hw_mempool*
__vxge_hw_mempool_create(
struct __vxge_hw_device *devh,
u32 memblock_size,
u32 item_size,
u32 private_size,
u32 items_initial,
u32 items_max,
struct vxge_hw_mempool_cbs *mp_callback,
void *userdata);
struct __vxge_hw_channel*
__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
enum __vxge_hw_channel_type type, u32 length,
u32 per_dtr_space, void *userdata);
void
__vxge_hw_channel_free(
struct __vxge_hw_channel *channel);
enum vxge_hw_status
__vxge_hw_channel_initialize(
struct __vxge_hw_channel *channel);
enum vxge_hw_status
__vxge_hw_channel_reset(
struct __vxge_hw_channel *channel);
/*
* __vxge_hw_fifo_txdl_priv - Return the max fragments allocated
* for the fifo.
@ -2065,9 +1938,6 @@ enum vxge_hw_status vxge_hw_vpath_open(
struct vxge_hw_vpath_attr *attr,
struct __vxge_hw_vpath_handle **vpath_handle);
enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
enum vxge_hw_status vxge_hw_vpath_close(
struct __vxge_hw_vpath_handle *vpath_handle);
@ -2089,54 +1959,9 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
struct __vxge_hw_vpath_handle *vpath_handle,
u32 new_mtu);
enum vxge_hw_status vxge_hw_vpath_stats_enable(
struct __vxge_hw_vpath_handle *vpath_handle);
enum vxge_hw_status
__vxge_hw_vpath_stats_access(
struct __vxge_hw_virtualpath *vpath,
u32 operation,
u32 offset,
u64 *stat);
enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
enum vxge_hw_status
__vxge_hw_vpath_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_vpath_stats_hw_info *hw_stats);
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
void
__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
struct vxge_hw_vpath_reg __iomem *vpath_reg);
enum vxge_hw_status
__vxge_hw_device_register_poll(
void __iomem *reg,
u64 mask, u32 max_millis);
#ifndef readq
static inline u64 readq(void __iomem *addr)
@ -2168,62 +1993,12 @@ static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
writel(val, addr);
}
static inline enum vxge_hw_status
__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
u64 mask, u32 max_millis)
{
enum vxge_hw_status status = VXGE_HW_OK;
__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
wmb();
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
wmb();
status = __vxge_hw_device_register_poll(addr, mask, max_millis);
return status;
}
struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0);
enum vxge_hw_status
__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
void
__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
void
__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
enum vxge_hw_status
__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_vpath_pci_read(
struct __vxge_hw_virtualpath *vpath,
u32 phy_func_0,
u32 offset,
u32 *val);
enum vxge_hw_status
__vxge_hw_vpath_addr_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
u8 (macaddr)[ETH_ALEN],
u8 (macaddr_mask)[ETH_ALEN]);
u32
__vxge_hw_vpath_func_id_get(
u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
/**
* vxge_debug
* @level: level of debug verbosity.

View File

@ -1142,7 +1142,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.get_ethtool_stats = vxge_get_ethtool_stats,
};
void initialize_ethtool_ops(struct net_device *ndev)
void vxge_initialize_ethtool_ops(struct net_device *ndev)
{
SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
}

View File

@ -82,6 +82,16 @@ module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@ -138,7 +148,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
* This function is called during interrupt context to notify link up state
* change.
*/
void
static void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@ -162,7 +172,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
* This function is called during interrupt context to notify link down state
* change.
*/
void
static void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@ -354,7 +364,7 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
* If the interrupt is because of a received frame or if the receive ring
* contains fresh, as yet unprocessed frames, this function is called.
*/
enum vxge_hw_status
static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata)
{
@ -531,7 +541,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
* freed and frees all skbs whose data have already been DMA'ed into the NIC's
* internal memory.
*/
enum vxge_hw_status
static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
enum vxge_hw_fifo_tcode t_code, void *userdata,
struct sk_buff ***skb_ptr, int nr_skb, int *more)
@ -1246,7 +1256,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
*
* Enables the interrupts for the vpath
*/
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id = 0;
@ -1279,7 +1289,7 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
*
* Disables the interrupts for the vpath
*/
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id;
@ -1553,7 +1563,7 @@ out:
*
* driver may reset the chip on events of serr, eccerr, etc
*/
int vxge_reset(struct vxgedev *vdev)
static int vxge_reset(struct vxgedev *vdev)
{
return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
@ -1724,7 +1734,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status;
}
int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct vxge_mac_addrs *new_mac_entry;
u8 *mac_address = NULL;
@ -1757,7 +1767,8 @@ int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
}
/* Add a mac address to DA table */
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@ -1782,7 +1793,7 @@ enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
return status;
}
int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct list_head *entry, *next;
u64 del_mac = 0;
@ -1807,7 +1818,8 @@ int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
return FALSE;
}
/* delete a mac address from DA table */
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@ -1854,7 +1866,7 @@ static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
}
/* Store all vlan ids from the list to the vid table */
enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxgedev *vdev = vpath->vdev;
@ -1874,7 +1886,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
}
/* Store all mac addresses from the list to the DA table */
enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info;
@ -1916,7 +1928,7 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
}
/* reset vpaths */
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@ -1948,7 +1960,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
}
/* close vpaths */
void vxge_close_vpaths(struct vxgedev *vdev, int index)
static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
struct vxge_vpath *vpath;
int i;
@ -1966,7 +1978,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index)
}
/* open vpaths */
int vxge_open_vpaths(struct vxgedev *vdev)
static int vxge_open_vpaths(struct vxgedev *vdev)
{
struct vxge_hw_vpath_attr attr;
enum vxge_hw_status status;
@ -2517,7 +2529,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
int
static int
vxge_open(struct net_device *dev)
{
enum vxge_hw_status status;
@ -2721,7 +2733,7 @@ out0:
}
/* Loop through the mac address list and delete all the entries */
void vxge_free_mac_add_list(struct vxge_vpath *vpath)
static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
struct list_head *entry, *next;
@ -2745,7 +2757,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
}
}
int do_vxge_close(struct net_device *dev, int do_io)
static int do_vxge_close(struct net_device *dev, int do_io)
{
enum vxge_hw_status status;
struct vxgedev *vdev;
@ -2856,7 +2868,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
int
static int
vxge_close(struct net_device *dev)
{
do_vxge_close(dev, 1);
@ -3113,10 +3125,10 @@ static const struct net_device_ops vxge_netdev_ops = {
#endif
};
int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
struct vxge_config *config,
int high_dma, int no_of_vpath,
struct vxgedev **vdev_out)
static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
struct vxge_config *config,
int high_dma, int no_of_vpath,
struct vxgedev **vdev_out)
{
struct net_device *ndev;
enum vxge_hw_status status = VXGE_HW_OK;
@ -3164,7 +3176,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
initialize_ethtool_ops(ndev);
vxge_initialize_ethtool_ops(ndev);
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@ -3249,7 +3261,7 @@ _out0:
*
* This function will unregister and free network device
*/
void
static void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
struct vxgedev *vdev;

View File

@ -396,64 +396,7 @@ struct vxge_tx_priv {
mod_timer(&timer, (jiffies + exp)); \
} while (0);
int __devinit vxge_device_register(struct __vxge_hw_device *devh,
struct vxge_config *config,
int high_dma, int no_of_vpath,
struct vxgedev **vdev);
void vxge_device_unregister(struct __vxge_hw_device *devh);
void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
void vxge_callback_link_up(struct __vxge_hw_device *devh);
void vxge_callback_link_down(struct __vxge_hw_device *devh);
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
int vxge_reset(struct vxgedev *vdev);
enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata);
enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
enum vxge_hw_fifo_tcode t_code, void *userdata,
struct sk_buff ***skb_ptr, int nr_skbs, int *more);
int vxge_close(struct net_device *dev);
int vxge_open(struct net_device *dev);
void vxge_close_vpaths(struct vxgedev *vdev, int index);
int vxge_open_vpaths(struct vxgedev *vdev);
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
struct macInfo *mac);
int vxge_mac_list_add(struct vxge_vpath *vpath,
struct macInfo *mac);
void vxge_free_mac_add_list(struct vxge_vpath *vpath);
enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
int do_vxge_close(struct net_device *dev, int do_io);
extern void initialize_ethtool_ops(struct net_device *ndev);
extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
/**
* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions

View File

@ -17,6 +17,13 @@
#include "vxge-config.h"
#include "vxge-main.h"
static enum vxge_hw_status
__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
u32 vp_id, enum vxge_hw_event type);
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms);
/*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vp: Virtual Path handle.
@ -513,7 +520,7 @@ exit:
* Link up indication handler. The function is invoked by HW when
* Titan indicates that the link is up for programmable amount of time.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
/*
@ -538,7 +545,7 @@ exit:
* Link down indication handler. The function is invoked by HW when
* Titan indicates that the link is down.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
/*
@ -564,7 +571,7 @@ exit:
*
* Handle error.
*/
enum vxge_hw_status
static enum vxge_hw_status
__vxge_hw_device_handle_error(
struct __vxge_hw_device *hldev,
u32 vp_id,
@ -646,7 +653,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
* it swaps the reserve and free arrays.
*
*/
enum vxge_hw_status
static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
void **tmp_arr;
@ -692,7 +699,8 @@ _alloc_after_swap:
* Posts a dtr to work array.
*
*/
void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
void *dtrh)
{
vxge_assert(channel->work_arr[channel->post_index] == NULL);
@ -1657,37 +1665,6 @@ exit:
return status;
}
/**
* vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
* from vlan id table.
* @vp: Vpath handle.
* @vid: Buffer to return vlan id
*
* Returns the next vlan id in the list for this vpath.
* see also: vxge_hw_vpath_vid_get
*
*/
enum vxge_hw_status
vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
{
u64 data;
enum vxge_hw_status status = VXGE_HW_OK;
if (vp == NULL) {
status = VXGE_HW_ERR_INVALID_HANDLE;
goto exit;
}
status = __vxge_hw_vpath_rts_table_get(vp,
VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
0, vid, &data);
*vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
exit:
return status;
}
/**
* vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
* to vlan id table.
@ -1898,9 +1875,9 @@ exit:
* Process vpath alarms.
*
*/
enum vxge_hw_status __vxge_hw_vpath_alarm_process(
struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms)
static enum vxge_hw_status
__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms)
{
u64 val64;
u64 alarm_status;
@ -2264,36 +2241,6 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}
/**
* vxge_hw_vpath_msix_clear - Clear MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSI ID
*
* The function clears the msix interrupt for the given msix_id
*
* Returns: 0 on success, or VXGE_HW_ERR_WRONG_IRQ status if the msix
* index is out of range.
* See also:
*/
void
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
struct __vxge_hw_device *hldev = vp->vpath->hldev;
if (hldev->config.intr_mode ==
VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clr_msix_one_shot_vec[msix_id%4]);
} else {
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
&hldev->common_reg->
clear_msix_mask_vect[msix_id%4]);
}
}
/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
@ -2315,22 +2262,6 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
&hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
}
/**
* vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
* @vp: Virtual Path handle.
*
* The function masks all msix interrupts for the given vpath
*
*/
void
vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
{
__vxge_hw_pio_mem_write32_upper(
(u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
&vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
}
/**
* vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
* @vp: Virtual Path handle.

View File

@ -1748,14 +1748,6 @@ vxge_hw_mrpcim_stats_access(
u32 offset,
u64 *stat);
enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
struct vxge_hw_xmac_aggr_stats *aggr_stats);
enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
struct vxge_hw_xmac_port_stats *port_stats);
enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
struct vxge_hw_xmac_stats *xmac_stats);
@ -2117,49 +2109,10 @@ struct __vxge_hw_ring_rxd_priv {
#endif
};
/* ========================= RING PRIVATE API ============================= */
u64
__vxge_hw_ring_first_block_address_get(
struct __vxge_hw_ring *ringh);
enum vxge_hw_status
__vxge_hw_ring_create(
struct __vxge_hw_vpath_handle *vpath_handle,
struct vxge_hw_ring_attr *attr);
enum vxge_hw_status
__vxge_hw_ring_abort(
struct __vxge_hw_ring *ringh);
enum vxge_hw_status
__vxge_hw_ring_reset(
struct __vxge_hw_ring *ringh);
enum vxge_hw_status
__vxge_hw_ring_delete(
struct __vxge_hw_vpath_handle *vpath_handle);
/* ========================= FIFO PRIVATE API ============================= */
struct vxge_hw_fifo_attr;
enum vxge_hw_status
__vxge_hw_fifo_create(
struct __vxge_hw_vpath_handle *vpath_handle,
struct vxge_hw_fifo_attr *attr);
enum vxge_hw_status
__vxge_hw_fifo_abort(
struct __vxge_hw_fifo *fifoh);
enum vxge_hw_status
__vxge_hw_fifo_reset(
struct __vxge_hw_fifo *ringh);
enum vxge_hw_status
__vxge_hw_fifo_delete(
struct __vxge_hw_vpath_handle *vpath_handle);
struct vxge_hw_mempool_cbs {
void (*item_func_alloc)(
struct vxge_hw_mempool *mempoolh,
@ -2169,10 +2122,6 @@ struct vxge_hw_mempool_cbs {
u32 is_last);
};
void
__vxge_hw_mempool_destroy(
struct vxge_hw_mempool *mempool);
#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
@ -2194,62 +2143,11 @@ __vxge_hw_vpath_rts_table_set(
u64 data1,
u64 data2);
enum vxge_hw_status
__vxge_hw_vpath_reset(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_sw_reset(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_enable(
struct __vxge_hw_device *devh,
u32 vp_id);
void
__vxge_hw_vpath_prc_configure(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_mac_configure(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_tim_configure(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_initialize(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vp_initialize(
struct __vxge_hw_device *devh,
u32 vp_id,
struct vxge_hw_vp_config *config);
void
__vxge_hw_vp_terminate(
struct __vxge_hw_device *devh,
u32 vp_id);
enum vxge_hw_status
__vxge_hw_vpath_alarm_process(
struct __vxge_hw_virtualpath *vpath,
u32 skip_alarms);
void vxge_hw_device_intr_enable(
struct __vxge_hw_device *devh);
@ -2320,11 +2218,6 @@ vxge_hw_vpath_vid_get(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 *vid);
enum vxge_hw_status
vxge_hw_vpath_vid_get_next(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 *vid);
enum vxge_hw_status
vxge_hw_vpath_vid_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
@ -2386,17 +2279,10 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
void
vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
void
vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
enum vxge_hw_status vxge_hw_vpath_intr_enable(
struct __vxge_hw_vpath_handle *vpath_handle);
@ -2415,12 +2301,6 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
void
vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
void **dtrh);
@ -2436,18 +2316,4 @@ vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
void
vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
/* ========================== PRIVATE API ================================= */
enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
enum vxge_hw_status
__vxge_hw_device_handle_error(
struct __vxge_hw_device *hldev,
u32 vp_id,
enum vxge_hw_event type);
#endif

View File

@ -3580,6 +3580,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
common->ah = sc->ah;
common->hw = hw;
common->cachelsz = csz << 2; /* convert to bytes */
spin_lock_init(&common->cc_lock);
/* Initialize device */
ret = ath5k_hw_attach(sc);

View File

@ -34,6 +34,10 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
{0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
{0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
{0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@ -99,6 +103,30 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
{0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
{0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
{0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
{0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
{0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
{0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
{0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
{0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
{0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
{0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
{0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
{0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
{0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
{0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
{0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
{0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
{0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@ -118,7 +146,7 @@ static const u32 ar9300Modes_fast_clock_2p2[][3] = {
{0x00008014, 0x044c044c, 0x08980898},
{0x0000801c, 0x148ec02b, 0x148ec057},
{0x00008318, 0x000044c0, 0x00008980},
{0x00009e00, 0x03721821, 0x03721821},
{0x00009e00, 0x0372131c, 0x0372131c},
{0x0000a230, 0x0000000b, 0x00000016},
{0x0000a254, 0x00000898, 0x00001130},
};
@ -595,15 +623,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
{0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
{0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@ -624,16 +653,16 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
{0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
{0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
{0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
@ -649,13 +678,13 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009814, 0x9280c00a},
{0x00009818, 0x00000000},
{0x0000981c, 0x00020028},
{0x00009834, 0x5f3ca3de},
{0x00009834, 0x6400a290},
{0x00009838, 0x0108ecff},
{0x0000983c, 0x14750600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
{0x000098b0, 0x52440bbe},
{0x000098b0, 0x32840bbe},
{0x000098d0, 0x004b6a8e},
{0x000098d4, 0x00000820},
{0x000098dc, 0x00000000},
@ -681,7 +710,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
{0x00009e3c, 0xcf946222},
{0x00009e40, 0x0d261820},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
@ -694,7 +722,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a220, 0x00000000},
{0x0000a224, 0x00000000},
{0x0000a228, 0x10002310},
{0x0000a22c, 0x01036a1e},
{0x0000a22c, 0x01036a27},
{0x0000a23c, 0x00000000},
{0x0000a244, 0x0c000000},
{0x0000a2a0, 0x00000001},
@ -702,10 +730,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a2c8, 0x00000000},
{0x0000a2cc, 0x18c43433},
{0x0000a2d4, 0x00000000},
{0x0000a2dc, 0x00000000},
{0x0000a2e0, 0x00000000},
{0x0000a2e4, 0x00000000},
{0x0000a2e8, 0x00000000},
{0x0000a2ec, 0x00000000},
{0x0000a2f0, 0x00000000},
{0x0000a2f4, 0x00000000},
@ -753,33 +777,17 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a430, 0x1ce739ce},
{0x0000a434, 0x00000000},
{0x0000a438, 0x00001801},
{0x0000a43c, 0x00000000},
{0x0000a43c, 0x00100000},
{0x0000a440, 0x00000000},
{0x0000a444, 0x00000000},
{0x0000a448, 0x06000080},
{0x0000a44c, 0x00000001},
{0x0000a450, 0x00010000},
{0x0000a458, 0x00000000},
{0x0000a600, 0x00000000},
{0x0000a604, 0x00000000},
{0x0000a608, 0x00000000},
{0x0000a60c, 0x00000000},
{0x0000a610, 0x00000000},
{0x0000a614, 0x00000000},
{0x0000a618, 0x00000000},
{0x0000a61c, 0x00000000},
{0x0000a620, 0x00000000},
{0x0000a624, 0x00000000},
{0x0000a628, 0x00000000},
{0x0000a62c, 0x00000000},
{0x0000a630, 0x00000000},
{0x0000a634, 0x00000000},
{0x0000a638, 0x00000000},
{0x0000a63c, 0x00000000},
{0x0000a640, 0x00000000},
{0x0000a644, 0x3fad9d74},
{0x0000a648, 0x0048060a},
{0x0000a64c, 0x00000637},
{0x0000a64c, 0x00003c37},
{0x0000a670, 0x03020100},
{0x0000a674, 0x09080504},
{0x0000a678, 0x0d0c0b0a},
@ -802,10 +810,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000a8f4, 0x00000000},
{0x0000b2d0, 0x00000080},
{0x0000b2d4, 0x00000000},
{0x0000b2dc, 0x00000000},
{0x0000b2e0, 0x00000000},
{0x0000b2e4, 0x00000000},
{0x0000b2e8, 0x00000000},
{0x0000b2ec, 0x00000000},
{0x0000b2f0, 0x00000000},
{0x0000b2f4, 0x00000000},
@ -820,10 +824,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
{0x0000b8f4, 0x00000000},
{0x0000c2d0, 0x00000080},
{0x0000c2d4, 0x00000000},
{0x0000c2dc, 0x00000000},
{0x0000c2e0, 0x00000000},
{0x0000c2e4, 0x00000000},
{0x0000c2e8, 0x00000000},
{0x0000c2ec, 0x00000000},
{0x0000c2f0, 0x00000000},
{0x0000c2f4, 0x00000000},
@ -835,6 +835,10 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
{0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
{0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
{0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@ -855,7 +859,7 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
{0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@ -900,6 +904,30 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
{0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
{0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
{0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
{0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
{0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
{0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
{0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
{0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
{0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
{0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
{0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
{0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
{0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
@ -913,6 +941,10 @@ static const u32 ar9300Modes_high_power_tx_gain_table_2p2[][5] = {
static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
{0x0000a2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
{0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
{0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@ -933,7 +965,7 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
{0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@ -978,6 +1010,30 @@ static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
{0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
{0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
{0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
{0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
{0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
{0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
{0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
{0x0000b2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
{0x0000b2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
{0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
{0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000c2dc, 0x01feee00, 0x01feee00, 0x00637800, 0x00637800},
{0x0000c2e0, 0x0000f000, 0x0000f000, 0x03838000, 0x03838000},
{0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03fc0000, 0x03fc0000},
{0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@ -1151,14 +1207,14 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
{0x0000b074, 0x00000000},
{0x0000b078, 0x00000000},
{0x0000b07c, 0x00000000},
{0x0000b080, 0x32323232},
{0x0000b084, 0x2f2f3232},
{0x0000b088, 0x23282a2d},
{0x0000b08c, 0x1c1e2123},
{0x0000b090, 0x14171919},
{0x0000b094, 0x0e0e1214},
{0x0000b098, 0x03050707},
{0x0000b09c, 0x00030303},
{0x0000b080, 0x2a2d2f32},
{0x0000b084, 0x21232328},
{0x0000b088, 0x19191c1e},
{0x0000b08c, 0x12141417},
{0x0000b090, 0x07070e0e},
{0x0000b094, 0x03030305},
{0x0000b098, 0x00000003},
{0x0000b09c, 0x00000000},
{0x0000b0a0, 0x00000000},
{0x0000b0a4, 0x00000000},
{0x0000b0a8, 0x00000000},
@ -1251,6 +1307,10 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
{0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
{0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
{0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@ -1316,6 +1376,30 @@ static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p2[][5] = {
{0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
{0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
{0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
{0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
{0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
{0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
{0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
{0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
{0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
{0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
{0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
{0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
{0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
{0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
{0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
{0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800},
{0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000},
{0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000},
{0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
{0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
@ -1414,15 +1498,10 @@ static const u32 ar9300_2p2_mac_core[][2] = {
{0x00008144, 0xffffffff},
{0x00008168, 0x00000000},
{0x0000816c, 0x00000000},
{0x00008170, 0x18486200},
{0x00008174, 0x33332210},
{0x00008178, 0x00000000},
{0x0000817c, 0x00020000},
{0x000081c0, 0x00000000},
{0x000081c4, 0x33332210},
{0x000081c8, 0x00000000},
{0x000081cc, 0x00000000},
{0x000081d4, 0x00000000},
{0x000081ec, 0x00000000},
{0x000081f0, 0x00000000},
{0x000081f4, 0x00000000},


@ -347,6 +347,10 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
(((Y[6] - Y[3]) * 1 << scale_factor) +
(x_est[6] - x_est[3])) / (x_est[6] - x_est[3]);
/* prevent division by zero */
if (G_fxp == 0)
return false;
Y_intercept =
(G_fxp * (x_est[0] - x_est[3]) +
(1 << scale_factor)) / (1 << scale_factor) + Y[3];
@ -356,14 +360,12 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
for (i = 0; i <= 3; i++) {
y_est[i] = i * 32;
/* prevent division by zero */
if (G_fxp == 0)
return false;
x_est[i] = ((y_est[i] * 1 << scale_factor) + G_fxp) / G_fxp;
}
if (y_est[max_index] == 0)
return false;
x_est_fxp1_nonlin =
x_est[max_index] - ((1 << scale_factor) * y_est[max_index] +
G_fxp) / G_fxp;
@ -457,6 +459,8 @@ static bool create_pa_curve(u32 *data_L, u32 *data_U, u32 *pa_table, u16 *gain)
Q_scale_B = find_proper_scale(find_expn(abs(scale_B)), 10);
scale_B = scale_B / (1 << Q_scale_B);
if (scale_B == 0)
return false;
Q_beta = find_proper_scale(find_expn(abs(beta_raw)), 10);
Q_alpha = find_proper_scale(find_expn(abs(alpha_raw)), 10);
beta_raw = beta_raw / (1 << Q_beta);


@ -370,7 +370,7 @@ void ath_beacon_tasklet(unsigned long data)
ath_print(common, ATH_DBG_BSTUCK,
"beacon is officially stuck\n");
sc->sc_flags |= SC_OP_TSF_RESET;
ath_reset(sc, false);
ath_reset(sc, true);
}
return;


@ -577,6 +577,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
common->hw = sc->hw;
common->priv = sc;
common->debug_mask = ath9k_debug;
spin_lock_init(&common->cc_lock);
spin_lock_init(&sc->wiphy_lock);
spin_lock_init(&sc->sc_resetlock);


@ -182,6 +182,9 @@ static void ath_update_survey_stats(struct ath_softc *sc)
struct ath_cycle_counters *cc = &common->cc_survey;
unsigned int div = common->clockrate * 1000;
if (!ah->curchan)
return;
if (ah->power_mode == ATH9K_PM_AWAKE)
ath_hw_cycle_counters_update(common);
@ -577,7 +580,7 @@ void ath_hw_check(struct work_struct *work)
msleep(1);
}
ath_reset(sc, false);
ath_reset(sc, true);
out:
ath9k_ps_restore(sc);
@ -595,7 +598,7 @@ void ath9k_tasklet(unsigned long data)
ath9k_ps_wakeup(sc);
if (status & ATH9K_INT_FATAL) {
ath_reset(sc, false);
ath_reset(sc, true);
ath9k_ps_restore(sc);
return;
}


@ -673,6 +673,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
u16 aggr_limit = 0, al = 0, bpad = 0,
al_delta, h_baw = tid->baw_size / 2;
enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
struct ieee80211_tx_info *tx_info;
bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
@ -699,6 +700,11 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
break;
}
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
break;
/* do not exceed subframe limit */
if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
status = ATH_AGGR_LIMITED;
@ -2157,7 +2163,7 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
"tx hung, resetting the chip\n");
ath9k_ps_wakeup(sc);
ath_reset(sc, false);
ath_reset(sc, true);
ath9k_ps_restore(sc);
}


@ -116,8 +116,9 @@ __regwrite_out : \
} while (0);
#define carl9170_async_get_buf() \
#define carl9170_async_regwrite_get_buf() \
do { \
__nreg = 0; \
__cmd = carl9170_cmd_buf(__carl, CARL9170_CMD_WREG_ASYNC, \
CARL9170_MAX_CMD_PAYLOAD_LEN); \
if (__cmd == NULL) { \
@ -128,38 +129,42 @@ do { \
#define carl9170_async_regwrite_begin(carl) \
do { \
int __nreg = 0, __err = 0; \
struct ar9170 *__carl = carl; \
struct carl9170_cmd *__cmd; \
carl9170_async_get_buf(); \
unsigned int __nreg; \
int __err = 0; \
carl9170_async_regwrite_get_buf(); \
#define carl9170_async_regwrite_flush() \
do { \
if (__cmd == NULL || __nreg == 0) \
break; \
\
if (IS_ACCEPTING_CMD(__carl) && __nreg) { \
__cmd->hdr.len = 8 * __nreg; \
__err = __carl9170_exec_cmd(__carl, __cmd, true); \
__cmd = NULL; \
break; \
} \
goto __async_regwrite_out; \
} while (0)
#define carl9170_async_regwrite(r, v) do { \
if (__cmd == NULL) \
carl9170_async_regwrite_get_buf(); \
__cmd->wreg.regs[__nreg].addr = cpu_to_le32(r); \
__cmd->wreg.regs[__nreg].val = cpu_to_le32(v); \
__nreg++; \
if ((__nreg >= PAYLOAD_MAX/2)) { \
if (IS_ACCEPTING_CMD(__carl)) { \
__cmd->hdr.len = 8 * __nreg; \
__err = __carl9170_exec_cmd(__carl, __cmd, true);\
__cmd = NULL; \
carl9170_async_get_buf(); \
} else { \
goto __async_regwrite_out; \
} \
__nreg = 0; \
if (__err) \
goto __async_regwrite_out; \
} \
if ((__nreg >= PAYLOAD_MAX / 2)) \
carl9170_async_regwrite_flush(); \
} while (0)
#define carl9170_async_regwrite_finish() \
#define carl9170_async_regwrite_finish() do { \
__async_regwrite_out : \
if (__err == 0 && __nreg) { \
__cmd->hdr.len = 8 * __nreg; \
if (IS_ACCEPTING_CMD(__carl)) \
__err = __carl9170_exec_cmd(__carl, __cmd, true);\
__nreg = 0; \
}
if (__cmd != NULL && __err == 0) \
carl9170_async_regwrite_flush(); \
kfree(__cmd); \
} while (0) \
#define carl9170_async_regwrite_result() \
__err; \
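Taken together, the reworked macros form a small batching API: _begin allocates the first command buffer, each _regwrite appends one address/value pair (flushing a full buffer as it goes), _finish flushes the remainder and frees the buffer on the error path, and _result yields the accumulated error code. A minimal usage sketch, with placeholder register addresses standing in for real AR9170 register defines:

/* Sketch: batching two asynchronous register writes; the addresses
 * below are placeholders, not real AR9170 registers. */
static int example_async_writes(struct ar9170 *ar)
{
	int err;

	carl9170_async_regwrite_begin(ar);
	carl9170_async_regwrite(0x1bc000, 0x12345678);
	carl9170_async_regwrite(0x1bc004, 0x9abcdef0);
	carl9170_async_regwrite_finish();
	err = carl9170_async_regwrite_result();

	return err;
}

With the flush/finish split above, a partially filled buffer is sent exactly once at the end, and the kfree(__cmd) in _finish keeps an unsent buffer from leaking when __carl9170_exec_cmd() fails mid-batch.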


@ -639,8 +639,8 @@ init:
if (err)
goto unlock;
} else {
err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
rcu_read_unlock();
err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
if (err)
goto unlock;


@ -591,16 +591,23 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
const bool free_buf)
{
struct urb *urb;
int err = 0;
if (!IS_INITIALIZED(ar))
return -EPERM;
if (!IS_INITIALIZED(ar)) {
err = -EPERM;
goto err_free;
}
if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4))
return -EINVAL;
if (WARN_ON(cmd->hdr.len > CARL9170_MAX_CMD_LEN - 4)) {
err = -EINVAL;
goto err_free;
}
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
if (!urb) {
err = -ENOMEM;
goto err_free;
}
usb_fill_int_urb(urb, ar->udev, usb_sndintpipe(ar->udev,
AR9170_USB_EP_CMD), cmd, cmd->hdr.len + 4,
@ -613,6 +620,12 @@ int __carl9170_exec_cmd(struct ar9170 *ar, struct carl9170_cmd *cmd,
usb_free_urb(urb);
return carl9170_usb_submit_cmd_urb(ar);
err_free:
if (free_buf)
kfree(cmd);
return err;
}
int carl9170_exec_cmd(struct ar9170 *ar, const enum carl9170_cmd_oids cmd,


@ -2964,7 +2964,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
(2 - i));
}
for (j = 0; i < 4; j++) {
for (j = 0; j < 4; j++) {
if (j < 3) {
cur_lna = lna[j];
cur_hpf1 = hpf1[j];


@ -1227,7 +1227,8 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
struct ieee80211_tx_info *info;
if (unlikely(!agg->wait_for_ba)) {
IWL_ERR(priv, "Received BA when not expected\n");
if (unlikely(ba_resp->bitmap))
IWL_ERR(priv, "Received BA when not expected\n");
return -EINVAL;
}


@ -1,6 +1,8 @@
wl1251-objs = main.o event.o tx.o rx.o ps.o cmd.o \
acx.o boot.o init.o debugfs.o io.o
wl1251_spi-objs += spi.o
wl1251_sdio-objs += sdio.o
obj-$(CONFIG_WL1251) += wl1251.o
obj-$(CONFIG_WL1251_SPI) += spi.o
obj-$(CONFIG_WL1251_SDIO) += sdio.o
obj-$(CONFIG_WL1251) += wl1251.o
obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o


@ -88,12 +88,6 @@ struct cn_queue_dev {
unsigned char name[CN_CBQ_NAMELEN];
struct workqueue_struct *cn_queue;
/* Sent to kevent to create cn_queue only when needed */
struct work_struct wq_creation;
/* Tell if the wq_creation job is pending/completed */
atomic_t wq_requested;
/* Wait for cn_queue to be created */
wait_queue_head_t wq_created;
struct list_head queue_list;
spinlock_t queue_lock;
@ -141,8 +135,6 @@ int cn_netlink_send(struct cn_msg *, u32, gfp_t);
int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
int queue_cn_work(struct cn_callback_entry *cbq, struct work_struct *work);
struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *);
void cn_queue_free_dev(struct cn_queue_dev *dev);


@ -585,15 +585,15 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
table->ents[hash & table->mask] = RPS_NO_CPU;
}
extern struct rps_sock_flow_table *rps_sock_flow_table;
extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
struct rps_map *rps_map;
struct rps_dev_flow_table *rps_flow_table;
struct kobject kobj;
struct netdev_rx_queue *first;
atomic_t count;
struct rps_map __rcu *rps_map;
struct rps_dev_flow_table __rcu *rps_flow_table;
struct kobject kobj;
struct netdev_rx_queue *first;
atomic_t count;
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
@ -944,7 +944,7 @@ struct net_device {
/* Protocol specific pointers */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
struct vlan_group *vlgrp; /* VLAN group */
struct vlan_group __rcu *vlgrp; /* VLAN group */
#endif
#ifdef CONFIG_NET_DSA
void *dsa_ptr; /* dsa specific data */
@ -952,7 +952,7 @@ struct net_device {
void *atalk_ptr; /* AppleTalk link */
struct in_device __rcu *ip_ptr; /* IPv4 specific data */
void *dn_ptr; /* DECnet specific data */
void *ip6_ptr; /* IPv6 specific data */
struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
void *ec_ptr; /* Econet specific data */
void *ax25_ptr; /* AX.25 specific data */
struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
@ -1072,7 +1072,7 @@ struct net_device {
struct pcpu_dstats __percpu *dstats; /* dummy stats */
};
/* GARP */
struct garp_port *garp_port;
struct garp_port __rcu *garp_port;
/* class/net/name entry */
struct device dev;
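The __rcu annotations in this and the following headers change no generated code; they let sparse (with CONFIG_SPARSE_RCU_POINTER) warn when an annotated pointer is touched without the RCU accessors. A minimal sketch of the access pattern being enforced, using the vlgrp field annotated above (teardown details elided; kfree() stands in for the real vlan cleanup):

/* Reader side: dereference only via rcu_dereference() inside a
 * read-side critical section. */
static void example_reader(struct net_device *dev)
{
	struct vlan_group *grp;

	rcu_read_lock();
	grp = rcu_dereference(dev->vlgrp);
	if (grp) {
		/* ... grp stays valid until rcu_read_unlock() ... */
	}
	rcu_read_unlock();
}

/* Writer side: publish with rcu_assign_pointer() under the update
 * lock (RTNL for net devices), wait a grace period, then free. */
static void example_writer(struct net_device *dev,
			   struct vlan_group *new_grp,
			   struct vlan_group *old_grp)
{
	rcu_assign_pointer(dev->vlgrp, new_grp);
	synchronize_rcu();
	kfree(old_grp);		/* placeholder for the real teardown */
}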


@ -472,11 +472,7 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
int phy_device_register(struct phy_device *phy);
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
int phy_init_hw(struct phy_device *phydev);
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface);
struct phy_device * phy_attach(struct net_device *dev,
const char *bus_id, u32 flags, phy_interface_t interface);
struct phy_device *phy_find_first(struct mii_bus *bus);
@ -492,17 +488,12 @@ void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
void phy_sanitize_settings(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
int phy_enable_interrupts(struct phy_device *phydev);
int phy_disable_interrupts(struct phy_device *phydev);
static inline int phy_read_status(struct phy_device *phydev) {
return phydev->drv->read_status(phydev);
}
int genphy_config_advert(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
@ -511,8 +502,6 @@ int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
void phy_driver_unregister(struct phy_driver *drv);
int phy_driver_register(struct phy_driver *new_driver);
void phy_prepare_link(struct phy_device *phydev,
void (*adjust_link)(struct net_device *));
void phy_state_machine(struct work_struct *work);
void phy_start_machine(struct phy_device *phydev,
void (*handler)(struct net_device *));
@ -523,7 +512,6 @@ int phy_mii_ioctl(struct phy_device *phydev,
struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id);
void phy_device_free(struct phy_device *phydev);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
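With the interrupt helpers (phy_clear_interrupt, phy_config_interrupt, phy_enable_interrupts, phy_disable_interrupts) and phy_sanitize_settings() now internal to phylib, a MAC driver is expected to go through the attach/start entry points that remain. A rough sketch of that flow; the bus id and callback below are illustrative, not part of this patch:

/* Sketch: bringing up a PHY through the remaining public API.
 * "mdio-bus:01" and adjust_link() are made-up names. */
static void adjust_link(struct net_device *ndev)
{
	/* react to link/speed/duplex changes reported by phylib */
}

static int example_phy_up(struct net_device *ndev)
{
	struct phy_device *phydev;

	phydev = phy_connect(ndev, "mdio-bus:01", adjust_link, 0,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	phy_start(phydev);	/* state machine handles interrupts from here */
	return 0;
}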


@ -0,0 +1,26 @@
/*
* Copyright (C) ST-Ericsson AB 2010
* Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
* Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
#ifndef CAIF_SHM_H_
#define CAIF_SHM_H_
struct shmdev_layer {
u32 shm_base_addr;
u32 shm_total_sz;
u32 shm_id;
u32 shm_loopback;
void *hmbx;
int (*pshmdev_mbxsend) (u32 shm_id, u32 mbx_msg);
int (*pshmdev_mbxsetup) (void *pshmdrv_cb,
struct shmdev_layer *pshm_dev, void *pshm_drv);
struct net_device *pshm_netdev;
};
extern int caif_shmcore_probe(struct shmdev_layer *pshm_dev);
extern void caif_shmcore_remove(struct net_device *pshm_netdev);
#endif
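struct shmdev_layer is the contract between a platform's shared-memory/mailbox glue and the CAIF shm core: the platform fills in the window geometry and two mailbox hooks, then hands the descriptor to caif_shmcore_probe(). A hedged sketch; every value and hook name below is invented for illustration:

/* Sketch: platform glue registering a CAIF shared-memory device. */
static int my_mbxsend(u32 shm_id, u32 mbx_msg)
{
	/* ring the modem's mailbox for window shm_id */
	return 0;
}

static int my_mbxsetup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
		       void *pshm_drv)
{
	/* hook the driver callback into the mailbox interrupt */
	return 0;
}

static struct shmdev_layer my_shmdev = {
	.shm_base_addr    = 0x81f00000,	/* placeholder physical base */
	.shm_total_sz     = 128 * 1024,	/* placeholder window size */
	.shm_id           = 0,
	.shm_loopback     = 0,
	.pshmdev_mbxsend  = my_mbxsend,
	.pshmdev_mbxsetup = my_mbxsetup,
};

/* the platform's probe path would then call caif_shmcore_probe(&my_shmdev) */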


@ -95,7 +95,7 @@ struct dst_entry {
unsigned long lastuse;
union {
struct dst_entry *next;
struct rtable *rt_next;
struct rtable __rcu *rt_next;
struct rt6_info *rt6_next;
struct dn_route *dn_next;
};


@ -20,7 +20,7 @@ struct fib_rule {
u32 table;
u8 action;
u32 target;
struct fib_rule * ctarget;
struct fib_rule __rcu *ctarget;
char iifname[IFNAMSIZ];
char oifname[IFNAMSIZ];
struct rcu_head rcu;


@ -107,7 +107,7 @@ struct garp_applicant {
};
struct garp_port {
struct garp_applicant *applicants[GARP_APPLICATION_MAX + 1];
struct garp_applicant __rcu *applicants[GARP_APPLICATION_MAX + 1];
};
extern int garp_register_application(struct garp_application *app);


@ -15,7 +15,7 @@
struct inet_peer {
/* group together avl_left,avl_right,v4daddr to speedup lookups */
struct inet_peer *avl_left, *avl_right;
struct inet_peer __rcu *avl_left, *avl_right;
__be32 v4daddr; /* peer's address */
__u32 avl_height;
struct list_head unused;


@ -59,7 +59,7 @@ struct ipcm_cookie {
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
struct ip_ra_chain {
struct ip_ra_chain *next;
struct ip_ra_chain __rcu *next;
struct sock *sk;
union {
void (*destructor)(struct sock *);
@ -68,7 +68,7 @@ struct ip_ra_chain {
struct rcu_head rcu;
};
extern struct ip_ra_chain *ip_ra_chain;
extern struct ip_ra_chain __rcu *ip_ra_chain;
/* IP flags. */
#define IP_CE 0x8000 /* Flag: "Congestion" */


@ -13,7 +13,7 @@
/* IPv6 tunnel */
struct ip6_tnl {
struct ip6_tnl *next; /* next tunnel in list */
struct ip6_tnl __rcu *next; /* next tunnel in list */
struct net_device *dev; /* virtual device associated with tunnel */
struct ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */


@ -16,7 +16,7 @@ struct ip_tunnel_6rd_parm {
};
struct ip_tunnel {
struct ip_tunnel *next;
struct ip_tunnel __rcu *next;
struct net_device *dev;
int err_count; /* Number of arrived ICMP errors */
@ -34,12 +34,12 @@ struct ip_tunnel {
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd_parm ip6rd;
#endif
struct ip_tunnel_prl_entry *prl; /* potential router list */
struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
unsigned int prl_count; /* # of entries in PRL */
};
struct ip_tunnel_prl_entry {
struct ip_tunnel_prl_entry *next;
struct ip_tunnel_prl_entry __rcu *next;
__be32 addr;
u16 flags;
struct rcu_head rcu_head;


@ -88,7 +88,7 @@ struct net {
#ifdef CONFIG_WEXT_CORE
struct sk_buff_head wext_nlevents;
#endif
struct net_generic *gen;
struct net_generic __rcu *gen;
/* Note : following structs are cache line aligned */
#ifdef CONFIG_XFRM


@ -89,10 +89,10 @@ struct inet_protosw {
#define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */
#define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? */
extern const struct net_protocol *inet_protos[MAX_INET_PROTOS];
extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
extern const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
#endif
extern int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
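inet_protos[] is the table that inet_add_protocol() publishes into, which is why its pointers gain __rcu. For reference, a registration sketch; the handlers and the protocol number (253, reserved for experimentation by RFC 3692) are placeholders:

/* Sketch: registering an experimental L4 protocol handler. */
static int my_rcv(struct sk_buff *skb)
{
	/* consume the packet */
	kfree_skb(skb);
	return 0;
}

static void my_err(struct sk_buff *skb, u32 info)
{
	/* handle ICMP errors reported for this protocol */
}

static const struct net_protocol my_proto = {
	.handler     = my_rcv,
	.err_handler = my_err,
	.no_policy   = 1,
};

static int __init my_proto_init(void)
{
	return inet_add_protocol(&my_proto, 253);
}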

Some files were not shown because too many files have changed in this diff Show More