Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (30 commits)
  ctcm: rename READ/WRITE defines to avoid redefinitions
  claw: rename READ/WRITE defines to avoid redefinitions
  phylib: available for any speed ethernet
  can: add limit for nframes and clean up signed/unsigned variables
  pkt_sched: Check .walk and .leaf class handlers
  pkt_sched: Fix sch_sfq vs tc_modify_qdisc oops
  caif-spi: Bugfix SPI_DATA_POS settings were inverted.
  caif: Bugfix - Increase default headroom size for control channel.
  net: make netpoll_rx return bool for !CONFIG_NETPOLL
  Bluetooth: Use 3-DH5 payload size for default ERTM max PDU size
  Bluetooth: Fix incorrect setting of remote_tx_win for L2CAP ERTM
  Bluetooth: Change default L2CAP ERTM retransmit timeout
  Bluetooth: Fix endianness issue with L2CAP MPS configuration
  net: Use NET_XMIT_SUCCESS where possible.
  isdn: mISDN: call pci_disable_device() if pci_probe() failed
  isdn: avm: call pci_disable_device() if pci_probe() failed
  isdn: avm: call pci_disable_device() if pci_probe() failed
  usbnet: rx_submit() should return an error code.
  pkt_sched: Add some basic qdisc class ops verification. Was: [PATCH] sfq: add dummy bind/unbind handles
  pkt_sched: sch_sfq: Add dummy unbind_tcf and put handles. Was: [PATCH] sfq: add dummy bind/unbind handles
  ...
Linus Torvalds 2010-08-13 10:38:12 -07:00
commit 2f2c779583
43 changed files with 428 additions and 391 deletions

View File

@@ -1273,6 +1273,7 @@ static int __devinit c4_probe(struct pci_dev *dev,
 	if (retval != 0) {
 		printk(KERN_ERR "c4: no AVM-C%d at i/o %#x, irq %d detected, mem %#x\n",
 			nr, param.port, param.irq, param.membase);
+		pci_disable_device(dev);
 		return -ENODEV;
 	}
 	return 0;

View File

@@ -210,6 +210,7 @@ static int __devinit t1pci_probe(struct pci_dev *dev,
 	if (retval != 0) {
 		printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
 			param.port, param.irq, param.membase);
+		pci_disable_device(dev);
 		return -ENODEV;
 	}
 	return 0;
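
Both AVM fixes above follow the same probe error-path rule: every exit taken after pci_enable_device() has succeeded must undo it. A minimal sketch of that pattern, assuming a hypothetical example_probe()/example_setup() pair that is not part of the patch:

#include <linux/pci.h>

/* stands in for the driver's real hardware detection; hypothetical */
static int example_setup(struct pci_dev *dev)
{
	return 0;
}

static int example_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int retval;

	retval = pci_enable_device(dev);
	if (retval)
		return retval;

	if (example_setup(dev) != 0) {
		/* undo pci_enable_device() on every failure path */
		pci_disable_device(dev);
		return -ENODEV;
	}
	return 0;
}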

View File

@@ -1094,6 +1094,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		pr_info("mISDN: do not have informations about adapter at %s\n",
 			pci_name(pdev));
 		kfree(card);
+		pci_disable_device(pdev);
 		return -EINVAL;
 	} else
 		pr_notice("mISDN: found adapter %s at %s\n",
@@ -1103,7 +1104,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	pci_set_drvdata(pdev, card);
 	err = setup_instance(card);
 	if (err) {
-		pci_disable_device(card->pdev);
+		pci_disable_device(pdev);
 		kfree(card);
 		pci_set_drvdata(pdev, NULL);
 	} else if (ent->driver_data == INF_SCT_1) {
@@ -1114,6 +1115,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		sc = kzalloc(sizeof(struct inf_hw), GFP_KERNEL);
 		if (!sc) {
 			release_card(card);
+			pci_disable_device(pdev);
 			return -ENOMEM;
 		}
 		sc->irq = card->irq;
@@ -1121,6 +1123,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		sc->ci = card->ci + i;
 		err = setup_instance(sc);
 		if (err) {
+			pci_disable_device(pdev);
			kfree(sc);
 			release_card(card);
 			break;

View File

@@ -22,13 +22,13 @@
 #include <net/caif/caif_spi.h>
 #ifndef CONFIG_CAIF_SPI_SYNC
-#define SPI_DATA_POS SPI_CMD_SZ
+#define SPI_DATA_POS 0
 static inline int forward_to_spi_cmd(struct cfspi *cfspi)
 {
 	return cfspi->rx_cpck_len;
 }
 #else
-#define SPI_DATA_POS 0
+#define SPI_DATA_POS SPI_CMD_SZ
 static inline int forward_to_spi_cmd(struct cfspi *cfspi)
 {
 	return 0;

View File

@@ -5,7 +5,7 @@
 menuconfig PHYLIB
 	tristate "PHY Device support and infrastructure"
 	depends on !S390
-	depends on NET_ETHERNET
+	depends on NETDEVICES
 	help
 	  Ethernet controllers are usually attached to PHY
 	  devices.  This option provides infrastructure for

View File

@@ -301,7 +301,7 @@ EXPORT_SYMBOL(phy_ethtool_gset);
 /**
  * phy_mii_ioctl - generic PHY MII ioctl interface
  * @phydev: the phy_device struct
- * @mii_data: MII ioctl data
+ * @ifr: &struct ifreq for socket ioctl's
  * @cmd: ioctl cmd to execute
  *
  * Note that this function is currently incompatible with the
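
The kernel-doc fix above tracks phy_mii_ioctl() now taking the struct ifreq itself. A minimal caller sketch under that assumption; example_ioctl() and the driver-private phydev storage are hypothetical and not from this patch:

#include <linux/netdevice.h>
#include <linux/phy.h>

struct example_priv {
	struct phy_device *phydev;	/* attached PHY, hypothetical storage */
};

static int example_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	struct example_priv *priv = netdev_priv(ndev);

	if (!priv->phydev)
		return -ENODEV;
	/* forwards SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG to the PHY layer */
	return phy_mii_ioctl(priv->phydev, ifr, cmd);
}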

View File

@@ -473,48 +473,58 @@ qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
 static int
 qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 {
-	struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+	struct qlcnic_pci_info *pci_info;
 	int i, ret = 0, err;
 	u8 pfn;
-	if (!adapter->npars)
-		adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
-				QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
-	if (!adapter->npars)
+	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+	if (!pci_info)
 		return -ENOMEM;
-	if (!adapter->eswitch)
-		adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
+	adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
+				QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
+	if (!adapter->npars) {
+		err = -ENOMEM;
+		goto err_pci_info;
+	}
+	adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
 				QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
 	if (!adapter->eswitch) {
 		err = -ENOMEM;
-		goto err_eswitch;
+		goto err_npars;
 	}
 	ret = qlcnic_get_pci_info(adapter, pci_info);
-	if (!ret) {
-		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
-			pfn = pci_info[i].id;
-			if (pfn > QLCNIC_MAX_PCI_FUNC)
-				return QL_STATUS_INVALID_PARAM;
-			adapter->npars[pfn].active = pci_info[i].active;
-			adapter->npars[pfn].type = pci_info[i].type;
-			adapter->npars[pfn].phy_port = pci_info[i].default_port;
-			adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
-			adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
-			adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
-		}
-		for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
-			adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
-		return ret;
+	if (ret)
+		goto err_eswitch;
+	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
+		pfn = pci_info[i].id;
+		if (pfn > QLCNIC_MAX_PCI_FUNC)
+			return QL_STATUS_INVALID_PARAM;
+		adapter->npars[pfn].active = pci_info[i].active;
+		adapter->npars[pfn].type = pci_info[i].type;
+		adapter->npars[pfn].phy_port = pci_info[i].default_port;
+		adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
+		adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
+		adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
 	}
+	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+		adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+	kfree(pci_info);
+	return 0;
+err_eswitch:
 	kfree(adapter->eswitch);
 	adapter->eswitch = NULL;
-err_eswitch:
+err_npars:
 	kfree(adapter->npars);
+	adapter->npars = NULL;
+err_pci_info:
+	kfree(pci_info);
 	return ret;
 }
@@ -3361,15 +3371,21 @@ qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
 	struct device *dev = container_of(kobj, struct device, kobj);
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 	struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
-	struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
+	struct qlcnic_pci_info *pci_info;
 	int i, ret;
 	if (size != sizeof(pci_cfg))
 		return QL_STATUS_INVALID_PARAM;
+	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
+	if (!pci_info)
+		return -ENOMEM;
 	ret = qlcnic_get_pci_info(adapter, pci_info);
-	if (ret)
+	if (ret) {
+		kfree(pci_info);
 		return ret;
+	}
 	for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
 		pci_cfg[i].pci_func = pci_info[i].id;
@@ -3380,8 +3396,8 @@ qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
 		memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
 	}
 	memcpy(buf, &pci_cfg, size);
+	kfree(pci_info);
 	return size;
 }
 static struct bin_attribute bin_attr_npar_config = {
 	.attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
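
The qlcnic change above swaps a large on-stack qlcnic_pci_info array for a kcalloc() buffer and unwinds partial allocations with gotos. A condensed sketch of that shape, assuming the driver's own headers are included; example_init() is a hypothetical wrapper around the driver's qlcnic_get_pci_info():

#include <linux/slab.h>
/* assumes the driver's qlcnic.h for struct qlcnic_adapter, struct
 * qlcnic_pci_info, QLCNIC_MAX_PCI_FUNC and qlcnic_get_pci_info() */

static int example_init(struct qlcnic_adapter *adapter)
{
	struct qlcnic_pci_info *pci_info;
	int err;

	/* heap allocation keeps QLCNIC_MAX_PCI_FUNC entries off the stack */
	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
	if (!pci_info)
		return -ENOMEM;

	err = qlcnic_get_pci_info(adapter, pci_info);
	if (err)
		goto out_free;

	/* ... copy the entries that are needed into adapter->npars ... */

out_free:
	kfree(pci_info);	/* freed on success and on error alike */
	return err;
}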

View File

@@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
 static void rx_complete (struct urb *urb);
-static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 {
 	struct sk_buff		*skb;
 	struct skb_data		*entry;
@@ -327,7 +327,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
 		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
 		usb_free_urb (urb);
-		return;
+		return -ENOMEM;
 	}
 	skb_reserve (skb, NET_IP_ALIGN);
@@ -357,6 +357,9 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 		netif_dbg(dev, ifdown, dev->net, "device gone\n");
 		netif_device_detach (dev->net);
 		break;
+	case -EHOSTUNREACH:
+		retval = -ENOLINK;
+		break;
 	default:
 		netif_dbg(dev, rx_err, dev->net,
 			  "rx submit, %d\n", retval);
@@ -374,6 +377,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 		dev_kfree_skb_any (skb);
 		usb_free_urb (urb);
 	}
+	return retval;
 }
@@ -912,6 +916,7 @@ fail_halt:
 	/* tasklet could resubmit itself forever if memory is tight */
 	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
 		struct urb	*urb = NULL;
+		int resched = 1;
 		if (netif_running (dev->net))
 			urb = usb_alloc_urb (0, GFP_KERNEL);
@@ -922,10 +927,12 @@ fail_halt:
 			status = usb_autopm_get_interface(dev->intf);
 			if (status < 0)
 				goto fail_lowmem;
-			rx_submit (dev, urb, GFP_KERNEL);
+			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
+				resched = 0;
 			usb_autopm_put_interface(dev->intf);
 fail_lowmem:
-			tasklet_schedule (&dev->bh);
+			if (resched)
+				tasklet_schedule (&dev->bh);
 		}
 	}
@@ -1175,8 +1182,11 @@ static void usbnet_bh (unsigned long param)
 		// don't refill the queue all at once
 		for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
 			urb = usb_alloc_urb (0, GFP_ATOMIC);
-			if (urb != NULL)
-				rx_submit (dev, urb, GFP_ATOMIC);
+			if (urb != NULL) {
+				if (rx_submit (dev, urb, GFP_ATOMIC) ==
+				    -ENOLINK)
+					return;
+			}
 		}
 		if (temp != dev->rxq.qlen)
 			netif_dbg(dev, link, dev->net,
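
With rx_submit() now returning a status, refill loops can stop as soon as the device is gone instead of rescheduling forever. A minimal caller sketch under that assumption; refill_rx_queue() is hypothetical and would live next to rx_submit() in the same file:

#include <linux/usb.h>
#include <linux/usb/usbnet.h>

/* hypothetical helper, not part of usbnet.c */
static void refill_rx_queue(struct usbnet *dev)
{
	int i;

	for (i = 0; i < 10; i++) {
		struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);

		if (!urb)
			break;
		/* rx_submit() frees the urb itself on failure; -ENOLINK
		 * means the device went away, so stop refilling at all. */
		if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
			return;
	}
}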

View File

@@ -885,20 +885,21 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
  *      Receive a frame through the DMA
  */
 static inline void
-fst_rx_dma(struct fst_card_info *card, unsigned char *skb,
-	   unsigned char *mem, int len)
+fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
+	   dma_addr_t mem, int len)
 {
 	/*
 	 * This routine will setup the DMA and start it
 	 */
-	dbg(DBG_RX, "In fst_rx_dma %p %p %d\n", skb, mem, len);
+	dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
+	    (unsigned long) skb, (unsigned long) mem, len);
 	if (card->dmarx_in_progress) {
 		dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
 	}
-	outl((unsigned long) skb, card->pci_conf + DMAPADR0);	/* Copy to here */
-	outl((unsigned long) mem, card->pci_conf + DMALADR0);	/* from here */
+	outl(skb, card->pci_conf + DMAPADR0);	/* Copy to here */
+	outl(mem, card->pci_conf + DMALADR0);	/* from here */
 	outl(len, card->pci_conf + DMASIZ0);	/* for this length */
 	outl(0x00000000c, card->pci_conf + DMADPR0);	/* In this direction */
@@ -1309,8 +1310,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
 		card->dma_port_rx = port;
 		card->dma_len_rx = len;
 		card->dma_rxpos = rxp;
-		fst_rx_dma(card, (char *) card->rx_dma_handle_card,
-			   (char *) BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
+		fst_rx_dma(card, card->rx_dma_handle_card,
+			   BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
 	}
 	if (rxp != port->rxpos) {
 		dbg(DBG_ASS, "About to increment rxpos by more than 1\n");

View File

@@ -260,7 +260,7 @@ struct iwl_cfg iwl1000_bgn_cfg = {
 	.shadow_ram_support = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.support_ct_kill_exit = true,
 	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,

View File

@@ -769,22 +769,6 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
 		rts_retry_limit = data_retry_limit;
 	tx_cmd->rts_retry_limit = rts_retry_limit;
-	if (ieee80211_is_mgmt(fc)) {
-		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-		case cpu_to_le16(IEEE80211_STYPE_AUTH):
-		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-			if (tx_flags & TX_CMD_FLG_RTS_MSK) {
-				tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-				tx_flags |= TX_CMD_FLG_CTS_MSK;
-			}
-			break;
-		default:
-			break;
-		}
-	}
 	tx_cmd->rate = rate;
 	tx_cmd->tx_flags = tx_flags;
@@ -2717,7 +2701,7 @@ static struct iwl_lib_ops iwl3945_lib = {
 static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
 	.get_hcmd_size = iwl3945_get_hcmd_size,
 	.build_addsta_hcmd = iwl3945_build_addsta_hcmd,
-	.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
+	.tx_cmd_protection = iwlcore_tx_cmd_protection,
 	.request_scan = iwl3945_request_scan,
 };

View File

@@ -2223,7 +2223,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
 	.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
 	.chain_noise_reset = iwl4965_chain_noise_reset,
 	.gain_computation = iwl4965_gain_computation,
-	.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
+	.tx_cmd_protection = iwlcore_tx_cmd_protection,
 	.calc_rssi = iwl4965_calc_rssi,
 	.request_scan = iwlagn_request_scan,
 };

View File

@@ -506,7 +506,7 @@ struct iwl_cfg iwl5300_agn_cfg = {
 	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
 	.chain_noise_scale = 1000,
@@ -537,7 +537,7 @@ struct iwl_cfg iwl5100_bgn_cfg = {
 	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
 	.chain_noise_scale = 1000,
@@ -597,7 +597,7 @@ struct iwl_cfg iwl5100_agn_cfg = {
 	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
 	.chain_noise_scale = 1000,
@@ -628,7 +628,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
 	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
 	.chain_noise_scale = 1000,
@@ -659,7 +659,7 @@ struct iwl_cfg iwl5150_agn_cfg = {
 	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
 	.chain_noise_scale = 1000,

View File

@@ -381,7 +381,7 @@ struct iwl_cfg iwl6000g2a_2agn_cfg = {
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
@@ -489,7 +489,7 @@ struct iwl_cfg iwl6000g2b_2agn_cfg = {
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
@@ -563,7 +563,7 @@ struct iwl_cfg iwl6000g2b_2bgn_cfg = {
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
@@ -637,7 +637,7 @@ struct iwl_cfg iwl6000g2b_bgn_cfg = {
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
@@ -714,7 +714,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
@@ -821,7 +821,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
@@ -859,7 +859,7 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
 	.adv_thermal_throttle = true,
@@ -933,7 +933,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
 	.shadow_ram_support = true,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
-	.use_rts_for_ht = true, /* use rts/cts protection */
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.supports_idle = true,
 	.adv_thermal_throttle = true,

View File

@@ -211,10 +211,21 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
 	}
 }
-static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
-			__le32 *tx_flags)
+static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
+				     struct ieee80211_tx_info *info,
+				     __le16 fc, __le32 *tx_flags)
 {
-	*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
+	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
+	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
+		return;
+	}
+	if (priv->cfg->use_rts_for_aggregation &&
+	    info->flags & IEEE80211_TX_CTL_AMPDU) {
+		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
+		return;
+	}
 }
 /* Calc max signal level (dBm) among 3 possible receivers */
@@ -268,7 +279,7 @@ struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
 	.build_addsta_hcmd = iwlagn_build_addsta_hcmd,
 	.gain_computation = iwlagn_gain_computation,
 	.chain_noise_reset = iwlagn_chain_noise_reset,
-	.rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag,
+	.tx_cmd_protection = iwlagn_tx_cmd_protection,
 	.calc_rssi = iwlagn_calc_rssi,
 	.request_scan = iwlagn_request_scan,
 };

View File

@@ -379,10 +379,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 	}
-	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
-	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
-		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+	priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
 	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
 	if (ieee80211_is_mgmt(fc)) {
@@ -456,21 +453,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
 	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
 		rate_flags |= RATE_MCS_CCK_MSK;
-	/* Set up RTS and CTS flags for certain packets */
-	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-	case cpu_to_le16(IEEE80211_STYPE_AUTH):
-	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
-			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
-		}
-		break;
-	default:
-		break;
-	}
 	/* Set up antennas */
 	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
 					      priv->hw_params.valid_tx_ant);

View File

@@ -202,13 +202,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 	priv->start_calib = 0;
 	if (new_assoc) {
-		/*
-		 * allow CTS-to-self if possible for new association.
-		 * this is relevant only for 5000 series and up,
-		 * but will not damage 4965
-		 */
-		priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
 		/* Apply the new configuration
 		 * RXON assoc doesn't clear the station table in uCode,
 		 */
@@ -1618,45 +1611,9 @@ static ssize_t store_tx_power(struct device *d,
 static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
-static ssize_t show_rts_ht_protection(struct device *d,
-			struct device_attribute *attr, char *buf)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	return sprintf(buf, "%s\n",
-		priv->cfg->use_rts_for_ht ? "RTS/CTS" : "CTS-to-self");
-}
-static ssize_t store_rts_ht_protection(struct device *d,
-			struct device_attribute *attr,
-			const char *buf, size_t count)
-{
-	struct iwl_priv *priv = dev_get_drvdata(d);
-	unsigned long val;
-	int ret;
-	ret = strict_strtoul(buf, 10, &val);
-	if (ret)
-		IWL_INFO(priv, "Input is not in decimal form.\n");
-	else {
-		if (!iwl_is_associated(priv))
-			priv->cfg->use_rts_for_ht = val ? true : false;
-		else
-			IWL_ERR(priv, "Sta associated with AP - "
-				"Change protection mechanism is not allowed\n");
-		ret = count;
-	}
-	return ret;
-}
-static DEVICE_ATTR(rts_ht_protection, S_IWUSR | S_IRUGO,
-		   show_rts_ht_protection, store_rts_ht_protection);
 static struct attribute *iwl_sysfs_entries[] = {
 	&dev_attr_temperature.attr,
 	&dev_attr_tx_power.attr,
-	&dev_attr_rts_ht_protection.attr,
 #ifdef CONFIG_IWLWIFI_DEBUG
 	&dev_attr_debug_level.attr,
 #endif
@@ -3464,25 +3421,6 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 	return ret;
 }
-/*
- * switch to RTS/CTS for TX
- */
-static void iwl_enable_rts_cts(struct iwl_priv *priv)
-{
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
-		return;
-	priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
-	if (!test_bit(STATUS_SCANNING, &priv->status)) {
-		IWL_DEBUG_INFO(priv, "use RTS/CTS protection\n");
-		iwlcore_commit_rxon(priv);
-	} else {
-		/* scanning, defer the request until scan completed */
-		IWL_DEBUG_INFO(priv, "defer setting RTS/CTS protection\n");
-	}
-}
 static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
 			    struct ieee80211_vif *vif,
 			    enum ieee80211_ampdu_mlme_action action,
@@ -3529,14 +3467,33 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
 		}
 		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 			ret = 0;
+		if (priv->cfg->use_rts_for_aggregation) {
+			struct iwl_station_priv *sta_priv =
+				(void *) sta->drv_priv;
+			/*
+			 * switch off RTS/CTS if it was previously enabled
+			 */
+			sta_priv->lq_sta.lq.general_params.flags &=
+				~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+			iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
+				CMD_ASYNC, false);
+		}
 		break;
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
-		if (priv->cfg->use_rts_for_ht) {
+		if (priv->cfg->use_rts_for_aggregation) {
+			struct iwl_station_priv *sta_priv =
+				(void *) sta->drv_priv;
 			/*
 			 * switch to RTS/CTS if it is the prefer protection
 			 * method for HT traffic
 			 */
-			iwl_enable_rts_cts(priv);
+			sta_priv->lq_sta.lq.general_params.flags |=
+				LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+			iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq,
+				CMD_ASYNC, false);
 		}
 		ret = 0;
 		break;

View File

@@ -401,21 +401,38 @@ void iwlcore_free_geos(struct iwl_priv *priv)
 EXPORT_SYMBOL(iwlcore_free_geos);
 /*
- *  iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
+ *  iwlcore_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
  *  function.
  */
-void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
-				__le32 *tx_flags)
+void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
+			       struct ieee80211_tx_info *info,
+			       __le16 fc, __le32 *tx_flags)
 {
 	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
 		*tx_flags |= TX_CMD_FLG_RTS_MSK;
 		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+		if (!ieee80211_is_mgmt(fc))
+			return;
+		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+		case cpu_to_le16(IEEE80211_STYPE_AUTH):
+		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+			*tx_flags |= TX_CMD_FLG_CTS_MSK;
+			break;
+		}
 	} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
 		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
 		*tx_flags |= TX_CMD_FLG_CTS_MSK;
+		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
 	}
 }
-EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
+EXPORT_SYMBOL(iwlcore_tx_cmd_protection);
 static bool is_single_rx_stream(struct iwl_priv *priv)
 {
@@ -1869,6 +1886,10 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
 			priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
 		else
 			priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+		if (bss_conf->use_cts_prot)
+			priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
+		else
+			priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
 	}
 	if (changes & BSS_CHANGED_BASIC_RATES) {

View File

@@ -104,8 +104,9 @@ struct iwl_hcmd_utils_ops {
 			u32 min_average_noise,
 			u8 default_chain);
 	void (*chain_noise_reset)(struct iwl_priv *priv);
-	void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
-			__le32 *tx_flags);
+	void (*tx_cmd_protection)(struct iwl_priv *priv,
+				  struct ieee80211_tx_info *info,
+				  __le16 fc, __le32 *tx_flags);
 	int  (*calc_rssi)(struct iwl_priv *priv,
 			  struct iwl_rx_phy_res *rx_resp);
 	void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
@@ -249,7 +250,7 @@ struct iwl_mod_params {
  * @led_compensation: compensate on the led on/off time per HW according
  *	to the deviation to achieve the desired led frequency.
  *	The detail algorithm is described in iwl-led.c
- * @use_rts_for_ht: use rts/cts protection for HT traffic
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
  * @chain_noise_num_beacons: number of beacons used to compute chain noise
 * @adv_thermal_throttle: support advance thermal throttle
 * @support_ct_kill_exit: support ct kill exit condition
@@ -318,7 +319,7 @@ struct iwl_cfg {
 	const bool ht_greenfield_support;
 	u16 led_compensation;
 	const bool broken_powersave;
-	bool use_rts_for_ht;
+	bool use_rts_for_aggregation;
 	int chain_noise_num_beacons;
 	const bool supports_idle;
 	bool adv_thermal_throttle;
@@ -390,8 +391,9 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
 void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
 int iwl_alloc_txq_mem(struct iwl_priv *priv);
 void iwl_free_txq_mem(struct iwl_priv *priv);
-void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
-			__le32 *tx_flags);
+void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
+			       struct ieee80211_tx_info *info,
+			       __le16 fc, __le32 *tx_flags);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_alloc_traffic_mem(struct iwl_priv *priv);
 void iwl_free_traffic_mem(struct iwl_priv *priv);

View File

@@ -435,10 +435,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
 		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
 	}
-	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
-	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
-		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+	priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags);
 	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
 	if (ieee80211_is_mgmt(fc)) {

View File

@@ -9,6 +9,7 @@
 #include <linux/sched.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 #include <linux/ieee80211.h>
 #include <net/cfg80211.h>
 #include <asm/unaligned.h>

View File

@@ -43,8 +43,6 @@ static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
 	{ PCI_DEVICE(0x1260, 0x3886) },
 	/* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
 	{ PCI_DEVICE(0x1260, 0xffff) },
-	/* Standard Microsystems Corp SMC2802W Wireless PCI */
-	{ PCI_DEVICE(0x10b8, 0x2802) },
 	{ },
 };

View File

@@ -386,7 +386,7 @@ claw_tx(struct sk_buff *skb, struct net_device *dev)
 	struct chbk *p_ch;
 	CLAW_DBF_TEXT(4, trace, "claw_tx");
-	p_ch=&privptr->channel[WRITE];
+	p_ch = &privptr->channel[WRITE_CHANNEL];
 	spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
 	rc=claw_hw_tx( skb, dev, 1 );
 	spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
@@ -407,7 +407,7 @@ static struct sk_buff *
 claw_pack_skb(struct claw_privbk *privptr)
 {
 	struct sk_buff *new_skb,*held_skb;
-	struct chbk *p_ch = &privptr->channel[WRITE];
+	struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
 	struct claw_env  *p_env = privptr->p_env;
 	int	pkt_cnt,pk_ind,so_far;
@@ -515,15 +515,15 @@ claw_open(struct net_device *dev)
 		privptr->p_env->write_size=CLAW_FRAME_SIZE;
 	}
 	claw_set_busy(dev);
-	tasklet_init(&privptr->channel[READ].tasklet, claw_irq_tasklet,
-		(unsigned long) &privptr->channel[READ]);
+	tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
+		(unsigned long) &privptr->channel[READ_CHANNEL]);
 	for ( i = 0; i < 2;  i++) {
 		CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
 		init_waitqueue_head(&privptr->channel[i].wait);
 		/* skb_queue_head_init(&p_ch->io_queue); */
-		if (i == WRITE)
+		if (i == WRITE_CHANNEL)
 			skb_queue_head_init(
-				&privptr->channel[WRITE].collect_queue);
+				&privptr->channel[WRITE_CHANNEL].collect_queue);
 		privptr->channel[i].flag_a = 0;
 		privptr->channel[i].IO_active = 0;
 		privptr->channel[i].flag &= ~CLAW_TIMER;
@@ -551,12 +551,12 @@ claw_open(struct net_device *dev)
 		if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
 			del_timer(&timer);
 	}
-	if ((((privptr->channel[READ].last_dstat |
-		privptr->channel[WRITE].last_dstat) &
+	if ((((privptr->channel[READ_CHANNEL].last_dstat |
+		privptr->channel[WRITE_CHANNEL].last_dstat) &
 		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
-	   (((privptr->channel[READ].flag |
-		privptr->channel[WRITE].flag) & CLAW_TIMER) != 0x00)) {
-		dev_info(&privptr->channel[READ].cdev->dev,
+	   (((privptr->channel[READ_CHANNEL].flag |
+		privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
+		dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
 			"%s: remote side is not ready\n", dev->name);
 		CLAW_DBF_TEXT(2, trace, "notrdy");
@@ -608,8 +608,8 @@ claw_open(struct net_device *dev)
 		}
 	}
 	privptr->buffs_alloc = 0;
-	privptr->channel[READ].flag= 0x00;
-	privptr->channel[WRITE].flag = 0x00;
+	privptr->channel[READ_CHANNEL].flag = 0x00;
+	privptr->channel[WRITE_CHANNEL].flag = 0x00;
 	privptr->p_buff_ccw=NULL;
 	privptr->p_buff_read=NULL;
 	privptr->p_buff_write=NULL;
@@ -652,10 +652,10 @@ claw_irq_handler(struct ccw_device *cdev,
 	}
 	/* Try to extract channel from driver data. */
-	if (privptr->channel[READ].cdev == cdev)
-		p_ch = &privptr->channel[READ];
-	else if (privptr->channel[WRITE].cdev == cdev)
-		p_ch = &privptr->channel[WRITE];
+	if (privptr->channel[READ_CHANNEL].cdev == cdev)
+		p_ch = &privptr->channel[READ_CHANNEL];
+	else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
+		p_ch = &privptr->channel[WRITE_CHANNEL];
 	else {
 		dev_warn(&cdev->dev, "The device is not a CLAW device\n");
 		CLAW_DBF_TEXT(2, trace, "badchan");
@@ -813,7 +813,7 @@ claw_irq_handler(struct ccw_device *cdev,
 		claw_clearbit_busy(TB_TX, dev);
 		claw_clear_busy(dev);
 	}
-	p_ch_r = (struct chbk *)&privptr->channel[READ];
+	p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
 	if (test_and_set_bit(CLAW_BH_ACTIVE,
 		(void *)&p_ch_r->flag_a) == 0)
 		tasklet_schedule(&p_ch_r->tasklet);
@@ -878,13 +878,13 @@ claw_release(struct net_device *dev)
 	for ( i = 1; i >=0 ;  i--) {
 		spin_lock_irqsave(
 			get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
-		/* del_timer(&privptr->channel[READ].timer); */
+		/* del_timer(&privptr->channel[READ_CHANNEL].timer); */
 		privptr->channel[i].claw_state = CLAW_STOP;
 		privptr->channel[i].IO_active = 0;
 		parm = (unsigned long) &privptr->channel[i];
-		if (i == WRITE)
+		if (i == WRITE_CHANNEL)
 			claw_purge_skb_queue(
-				&privptr->channel[WRITE].collect_queue);
+				&privptr->channel[WRITE_CHANNEL].collect_queue);
 		rc = ccw_device_halt (privptr->channel[i].cdev, parm);
 		if (privptr->system_validate_comp==0x00)  /* never opened? */
 			init_waitqueue_head(&privptr->channel[i].wait);
@@ -971,16 +971,16 @@ claw_release(struct net_device *dev)
 	privptr->mtc_skipping = 1;
 	privptr->mtc_offset=0;
-	if (((privptr->channel[READ].last_dstat |
-		privptr->channel[WRITE].last_dstat) &
+	if (((privptr->channel[READ_CHANNEL].last_dstat |
+		privptr->channel[WRITE_CHANNEL].last_dstat) &
 		~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
-		dev_warn(&privptr->channel[READ].cdev->dev,
+		dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
 			"Deactivating %s completed with incorrect"
 			" subchannel status "
 			"(read %02x, write %02x)\n",
 			dev->name,
-		privptr->channel[READ].last_dstat,
-		privptr->channel[WRITE].last_dstat);
+		privptr->channel[READ_CHANNEL].last_dstat,
+		privptr->channel[WRITE_CHANNEL].last_dstat);
 		CLAW_DBF_TEXT(2, trace, "badclose");
 	}
 	CLAW_DBF_TEXT(4, trace, "rlsexit");
@@ -1324,7 +1324,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
 	CLAW_DBF_TEXT(4, trace, "hw_tx");
 	privptr = (struct claw_privbk *)(dev->ml_priv);
-	p_ch=(struct chbk *)&privptr->channel[WRITE];
+	p_ch = (struct chbk *)&privptr->channel[WRITE_CHANNEL];
 	p_env =privptr->p_env;
 	claw_free_wrt_buf(dev);	/* Clean up free chain if posible */
 	/*  scan the write queue to free any completed write packets   */
@@ -1357,7 +1357,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
 			claw_strt_out_IO(dev );
 			claw_free_wrt_buf( dev );
 			if (privptr->write_free_count==0) {
-				ch = &privptr->channel[WRITE];
+				ch = &privptr->channel[WRITE_CHANNEL];
 				atomic_inc(&skb->users);
 				skb_queue_tail(&ch->collect_queue, skb);
 				goto Done;
@@ -1369,7 +1369,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
 	}
 	/*  tx lock  */
 	if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
-		ch = &privptr->channel[WRITE];
+		ch = &privptr->channel[WRITE_CHANNEL];
 		atomic_inc(&skb->users);
 		skb_queue_tail(&ch->collect_queue, skb);
 		claw_strt_out_IO(dev );
@@ -1385,7 +1385,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
 		privptr->p_write_free_chain == NULL ) {
 		claw_setbit_busy(TB_NOBUFFER,dev);
-		ch = &privptr->channel[WRITE];
+		ch = &privptr->channel[WRITE_CHANNEL];
 		atomic_inc(&skb->users);
 		skb_queue_tail(&ch->collect_queue, skb);
 		CLAW_DBF_TEXT(2, trace, "clawbusy");
@@ -1397,7 +1397,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
 	while (len_of_data > 0) {
 		p_this_ccw=privptr->p_write_free_chain;  /* get a block */
 		if (p_this_ccw == NULL) { /* lost the race */
-			ch = &privptr->channel[WRITE];
+			ch = &privptr->channel[WRITE_CHANNEL];
 			atomic_inc(&skb->users);
 			skb_queue_tail(&ch->collect_queue, skb);
 			goto Done2;
@@ -2067,7 +2067,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
 	*catch up to each other */
 	privptr = dev->ml_priv;
 	p_env=privptr->p_env;
-	tdev = &privptr->channel[READ].cdev->dev;
+	tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
 	memcpy( &temp_host_name, p_env->host_name, 8);
 	memcpy( &temp_ws_name, p_env->adapter_name , 8);
 	dev_info(tdev, "%s: CLAW device %.8s: "
@@ -2245,7 +2245,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
 			dev->name, temp_ws_name,
 			p_ctlbk->linkid);
 			privptr->active_link_ID = p_ctlbk->linkid;
-			p_ch = &privptr->channel[WRITE];
+			p_ch = &privptr->channel[WRITE_CHANNEL];
 			wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
 		break;
 	case CONNECTION_RESPONSE:
@@ -2296,7 +2296,7 @@ claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
 				"%s: Confirmed Now packing\n", dev->name);
 			p_env->packing = DO_PACKED;
 		}
-		p_ch = &privptr->channel[WRITE];
+		p_ch = &privptr->channel[WRITE_CHANNEL];
 		wake_up(&p_ch->wait);
 	} else {
 		dev_warn(tdev, "Activating %s failed because of"
@@ -2556,7 +2556,7 @@ unpack_read(struct net_device *dev )
 	p_packd=NULL;
 	privptr = dev->ml_priv;
-	p_dev = &privptr->channel[READ].cdev->dev;
+	p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
 	p_env = privptr->p_env;
 	p_this_ccw=privptr->p_read_active_first;
 	while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
@@ -2728,7 +2728,7 @@ claw_strt_read (struct net_device *dev, int lock )
 	struct ccwbk*p_ccwbk;
 	struct chbk *p_ch;
 	struct clawh *p_clawh;
-	p_ch=&privptr->channel[READ];
+	p_ch = &privptr->channel[READ_CHANNEL];
 	CLAW_DBF_TEXT(4, trace, "StRdNter");
 	p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
@@ -2782,7 +2782,7 @@ claw_strt_out_IO( struct net_device *dev )
 		return;
 	}
 	privptr = (struct claw_privbk *)dev->ml_priv;
-	p_ch=&privptr->channel[WRITE];
+	p_ch = &privptr->channel[WRITE_CHANNEL];
 	CLAW_DBF_TEXT(4, trace, "strt_io");
 	p_first_ccw=privptr->p_write_active_first;
@@ -2875,7 +2875,7 @@ claw_free_netdevice(struct net_device * dev, int free_dev)
 	if (dev->flags & IFF_RUNNING)
 		claw_release(dev);
 	if (privptr) {
-		privptr->channel[READ].ndev = NULL;  /* say it's free */
+		privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
 	}
 	dev->ml_priv = NULL;
 #ifdef MODULE
@@ -2960,18 +2960,18 @@ claw_new_device(struct ccwgroup_device *cgdev)
 	struct ccw_dev_id dev_id;
 	dev_info(&cgdev->dev, "add for %s\n",
-		dev_name(&cgdev->cdev[READ]->dev));
+		dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
 	CLAW_DBF_TEXT(2, setup, "new_dev");
 	privptr = dev_get_drvdata(&cgdev->dev);
-	dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
-	dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
 	if (!privptr)
 		return -ENODEV;
 	p_env = privptr->p_env;
-	ccw_device_get_id(cgdev->cdev[READ], &dev_id);
-	p_env->devno[READ] = dev_id.devno;
-	ccw_device_get_id(cgdev->cdev[WRITE], &dev_id);
-	p_env->devno[WRITE] = dev_id.devno;
+	ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
+	p_env->devno[READ_CHANNEL] = dev_id.devno;
+	ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
+	p_env->devno[WRITE_CHANNEL] = dev_id.devno;
 	ret = add_channel(cgdev->cdev[0],0,privptr);
 	if (ret == 0)
 		ret = add_channel(cgdev->cdev[1],1,privptr);
@@ -2980,14 +2980,14 @@ claw_new_device(struct ccwgroup_device *cgdev)
 			" failed with error code %d\n", ret);
 		goto out;
 	}
-	ret = ccw_device_set_online(cgdev->cdev[READ]);
+	ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
 	if (ret != 0) {
 		dev_warn(&cgdev->dev,
 			"Setting the read subchannel online"
 			" failed with error code %d\n", ret);
 		goto out;
 	}
-	ret = ccw_device_set_online(cgdev->cdev[WRITE]);
+	ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
 	if (ret != 0) {
 		dev_warn(&cgdev->dev,
 			"Setting the write subchannel online "
@@ -3002,8 +3002,8 @@ claw_new_device(struct ccwgroup_device *cgdev)
 	}
 	dev->ml_priv = privptr;
 	dev_set_drvdata(&cgdev->dev, privptr);
-	dev_set_drvdata(&cgdev->cdev[READ]->dev, privptr);
-	dev_set_drvdata(&cgdev->cdev[WRITE]->dev, privptr);
+	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
 	/* sysfs magic */
 	SET_NETDEV_DEV(dev, &cgdev->dev);
 	if (register_netdev(dev) != 0) {
@@ -3021,16 +3021,16 @@ claw_new_device(struct ccwgroup_device *cgdev)
 			goto out;
 		}
 	}
-	privptr->channel[READ].ndev = dev;
-	privptr->channel[WRITE].ndev = dev;
+	privptr->channel[READ_CHANNEL].ndev = dev;
+	privptr->channel[WRITE_CHANNEL].ndev = dev;
 	privptr->p_env->ndev = dev;
 	dev_info(&cgdev->dev, "%s:readsize=%d  writesize=%d "
 		"readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
 		dev->name, p_env->read_size,
 		p_env->write_size, p_env->read_buffers,
-		p_env->write_buffers, p_env->devno[READ],
-		p_env->devno[WRITE]);
+		p_env->write_buffers, p_env->devno[READ_CHANNEL],
+		p_env->devno[WRITE_CHANNEL]);
 	dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
 		":%.8s api_type: %.8s\n",
 		dev->name, p_env->host_name,
@@ -3072,10 +3072,10 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
 	priv = dev_get_drvdata(&cgdev->dev);
 	if (!priv)
 		return -ENODEV;
-	ndev = priv->channel[READ].ndev;
+	ndev = priv->channel[READ_CHANNEL].ndev;
 	if (ndev) {
 		/* Close the device */
-		dev_info(&cgdev->dev, "%s: shutting down \n",
+		dev_info(&cgdev->dev, "%s: shutting down\n",
 			ndev->name);
 		if (ndev->flags & IFF_RUNNING)
 			ret = claw_release(ndev);
@@ -3083,8 +3083,8 @@ claw_shutdown_device(struct ccwgroup_device *cgdev)
 		unregister_netdev(ndev);
 		ndev->ml_priv = NULL;  /* cgdev data, not ndev's to free  */
 		claw_free_netdevice(ndev, 1);
-		priv->channel[READ].ndev = NULL;
-		priv->channel[WRITE].ndev = NULL;
+		priv->channel[READ_CHANNEL].ndev = NULL;
+		priv->channel[WRITE_CHANNEL].ndev = NULL;
 		priv->p_env->ndev = NULL;
 	}
 	ccw_device_set_offline(cgdev->cdev[1]);
@@ -3115,8 +3115,8 @@ claw_remove_device(struct ccwgroup_device *cgdev)
 	priv->channel[1].irb=NULL;
 	kfree(priv);
 	dev_set_drvdata(&cgdev->dev, NULL);
-	dev_set_drvdata(&cgdev->cdev[READ]->dev, NULL);
-	dev_set_drvdata(&cgdev->cdev[WRITE]->dev, NULL);
+	dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
+	dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
 	put_device(&cgdev->dev);
 	return;

View File

@@ -74,8 +74,8 @@
 #define MAX_ENVELOPE_SIZE       65536
 #define CLAW_DEFAULT_MTU_SIZE   4096
 #define DEF_PACK_BUFSIZE	32768
-#define READ                    0
-#define WRITE                   1
+#define READ_CHANNEL		0
+#define WRITE_CHANNEL		1
 #define TB_TX                   0          /* sk buffer handling in process  */
 #define TB_STOP                 1          /* network device stop in process */
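
The rename above (and the matching ctcm change below) exists because READ and WRITE are already macros in the core kernel headers, so a driver-local "#define READ 0" collides once both definitions are visible in the same translation unit. A minimal sketch of the fix; the channel-index names are the driver's own, the surrounding comments are illustrative:

#include <linux/fs.h>	/* the kernel already defines READ and WRITE here */

/* Redefining READ/WRITE locally would trigger "macro redefined" warnings,
 * so the subchannel indexes get driver-specific names instead. */
#define READ_CHANNEL	0	/* index of the read CCW subchannel  */
#define WRITE_CHANNEL	1	/* index of the write CCW subchannel */

/* usage: privptr->channel[WRITE_CHANNEL] instead of channel[WRITE] */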

View File

@@ -454,7 +454,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
 	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
 	    (ch->protocol == CTCM_PROTO_OS390)) {
 		/* OS/390 resp. z/OS */
-		if (CHANNEL_DIRECTION(ch->flags) == READ) {
+		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
 			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
 			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
 				CTC_EVENT_TIMER, ch);
@@ -472,14 +472,14 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
 	 * if in compatibility mode, since VM TCP delays the initial
 	 * frame until it has some data to send.
 	 */
-	if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
+	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
 	    (ch->protocol != CTCM_PROTO_S390))
 		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
 	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
 	ch->ccw[1].count = 2;	/* Transfer only length */
-	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
+	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
 		? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
 	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
 					(unsigned long)ch, 0xff, 0);
@@ -495,7 +495,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
 	 * reply from VM TCP which brings up the RX channel to it's
 	 * final state.
 	 */
-	if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
+	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
 	    (ch->protocol == CTCM_PROTO_S390)) {
 		struct net_device *dev = ch->netdev;
 		struct ctcm_priv *priv = dev->ml_priv;
@@ -600,15 +600,15 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
 	int rc;
 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
 		CTCM_FUNTAIL, ch->id,
-		(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
 	if (ch->trans_skb != NULL) {
 		clear_normalized_cda(&ch->ccw[1]);
 		dev_kfree_skb(ch->trans_skb);
 		ch->trans_skb = NULL;
 	}
-	if (CHANNEL_DIRECTION(ch->flags) == READ) {
+	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
 		ch->ccw[1].cmd_code = CCW_CMD_READ;
 		ch->ccw[1].flags = CCW_FLAG_SLI;
 		ch->ccw[1].count = 0;
@@ -622,7 +622,8 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
 			"%s(%s): %s trans_skb alloc delayed "
 			"until first transfer",
 			CTCM_FUNTAIL, ch->id,
-			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
+			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+			"RX" : "TX");
 	}
 	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
 	ch->ccw[0].flags   = CCW_FLAG_SLI | CCW_FLAG_CC;
@@ -720,7 +721,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
 	ch->th_seg = 0x00;
 	ch->th_seq_num = 0x00;
-	if (CHANNEL_DIRECTION(ch->flags) == READ) {
+	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
 		skb_queue_purge(&ch->io_queue);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else { } else {
@ -799,7 +800,8 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CTC_STATE_STARTRETRY); fsm_newstate(fi, CTC_STATE_STARTRETRY);
fsm_deltimer(&ch->timer); fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) { if (!IS_MPC(ch) &&
(CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
int rc = ccw_device_halt(ch->cdev, (unsigned long)ch); int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
if (rc != 0) if (rc != 0)
ctcm_ccw_check_rc(ch, rc, ctcm_ccw_check_rc(ch, rc,
@ -811,10 +813,10 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
"%s(%s) : %s error during %s channel setup state=%s\n", "%s(%s) : %s error during %s channel setup state=%s\n",
CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX", (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
fsm_getstate_str(fi)); fsm_getstate_str(fi));
if (CHANNEL_DIRECTION(ch->flags) == READ) { if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
fsm_newstate(fi, CTC_STATE_RXERR); fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else { } else {
@ -945,7 +947,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
fsm_newstate(fi, CTC_STATE_DTERM); fsm_newstate(fi, CTC_STATE_DTERM);
ch2 = priv->channel[WRITE]; ch2 = priv->channel[CTCM_WRITE];
fsm_newstate(ch2->fsm, CTC_STATE_DTERM); fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
ccw_device_halt(ch->cdev, (unsigned long)ch); ccw_device_halt(ch->cdev, (unsigned long)ch);
@ -1074,13 +1076,13 @@ static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
fsm_deltimer(&ch->timer); fsm_deltimer(&ch->timer);
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s: %s: %s unrecoverable channel error", "%s: %s: %s unrecoverable channel error",
CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX"); CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
if (IS_MPC(ch)) { if (IS_MPC(ch)) {
priv->stats.tx_dropped++; priv->stats.tx_dropped++;
priv->stats.tx_errors++; priv->stats.tx_errors++;
} }
if (rd == READ) { if (rd == CTCM_READ) {
fsm_newstate(fi, CTC_STATE_RXERR); fsm_newstate(fi, CTC_STATE_RXERR);
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
} else { } else {
@ -1503,7 +1505,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
switch (fsm_getstate(fi)) { switch (fsm_getstate(fi)) {
case CTC_STATE_STARTRETRY: case CTC_STATE_STARTRETRY:
case CTC_STATE_SETUPWAIT: case CTC_STATE_SETUPWAIT:
if (CHANNEL_DIRECTION(ch->flags) == READ) { if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
ctcmpc_chx_rxidle(fi, event, arg); ctcmpc_chx_rxidle(fi, event, arg);
} else { } else {
fsm_newstate(fi, CTC_STATE_TXIDLE); fsm_newstate(fi, CTC_STATE_TXIDLE);
@ -1514,7 +1516,7 @@ static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
break; break;
}; };
fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ) fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? CTC_STATE_RXINIT : CTC_STATE_TXINIT); ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
done: done:
@ -1753,8 +1755,8 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
struct net_device *dev = ach->netdev; struct net_device *dev = ach->netdev;
struct ctcm_priv *priv = dev->ml_priv; struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg; struct mpc_group *grp = priv->mpcg;
struct channel *wch = priv->channel[WRITE]; struct channel *wch = priv->channel[CTCM_WRITE];
struct channel *rch = priv->channel[READ]; struct channel *rch = priv->channel[CTCM_READ];
struct sk_buff *skb; struct sk_buff *skb;
struct th_sweep *header; struct th_sweep *header;
int rc = 0; int rc = 0;
@ -2070,7 +2072,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
if (IS_MPC(priv)) if (IS_MPC(priv))
priv->mpcg->channels_terminating = 0; priv->mpcg->channels_terminating = 0;
for (direction = READ; direction <= WRITE; direction++) { for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction]; struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_START, ch); fsm_event(ch->fsm, CTC_EVENT_START, ch);
} }
@ -2092,7 +2094,7 @@ static void dev_action_stop(fsm_instance *fi, int event, void *arg)
CTCMY_DBF_DEV_NAME(SETUP, dev, ""); CTCMY_DBF_DEV_NAME(SETUP, dev, "");
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
for (direction = READ; direction <= WRITE; direction++) { for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction]; struct channel *ch = priv->channel[direction];
fsm_event(ch->fsm, CTC_EVENT_STOP, ch); fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
ch->th_seq_num = 0x00; ch->th_seq_num = 0x00;
@ -2183,11 +2185,11 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg)
if (IS_MPC(priv)) { if (IS_MPC(priv)) {
if (event == DEV_EVENT_RXUP) if (event == DEV_EVENT_RXUP)
mpc_channel_action(priv->channel[READ], mpc_channel_action(priv->channel[CTCM_READ],
READ, MPC_CHANNEL_ADD); CTCM_READ, MPC_CHANNEL_ADD);
else else
mpc_channel_action(priv->channel[WRITE], mpc_channel_action(priv->channel[CTCM_WRITE],
WRITE, MPC_CHANNEL_ADD); CTCM_WRITE, MPC_CHANNEL_ADD);
} }
} }
@ -2239,11 +2241,11 @@ static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
} }
if (IS_MPC(priv)) { if (IS_MPC(priv)) {
if (event == DEV_EVENT_RXDOWN) if (event == DEV_EVENT_RXDOWN)
mpc_channel_action(priv->channel[READ], mpc_channel_action(priv->channel[CTCM_READ],
READ, MPC_CHANNEL_REMOVE); CTCM_READ, MPC_CHANNEL_REMOVE);
else else
mpc_channel_action(priv->channel[WRITE], mpc_channel_action(priv->channel[CTCM_WRITE],
WRITE, MPC_CHANNEL_REMOVE); CTCM_WRITE, MPC_CHANNEL_REMOVE);
} }
} }
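The ctcm changes are the same rename with a CTCM_ prefix, and the converted loops in dev_action_start()/dev_action_stop() above rely on the two constants being consecutive so the per-direction channel array can be walked directly. A small self-contained sketch of that idiom, assuming only the CTCM_READ/CTCM_WRITE values from this series; dir_label() is an invented helper, not driver code.

#include <stdio.h>

#define CTCM_READ  0
#define CTCM_WRITE 1

/* invented helper: map a direction index to the RX/TX label used in the logs */
static const char *dir_label(int direction)
{
	return (direction == CTCM_READ) ? "RX" : "TX";
}

int main(void)
{
	int direction;

	/* same loop shape as the converted dev_action_start()/dev_action_stop() */
	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++)
		printf("channel[%d] is the %s side\n", direction, dir_label(direction));
	return 0;
}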
@ -267,7 +267,7 @@ static struct channel *channel_get(enum ctcm_channel_types type,
else { else {
ch->flags |= CHANNEL_FLAGS_INUSE; ch->flags |= CHANNEL_FLAGS_INUSE;
ch->flags &= ~CHANNEL_FLAGS_RWMASK; ch->flags &= ~CHANNEL_FLAGS_RWMASK;
ch->flags |= (direction == WRITE) ch->flags |= (direction == CTCM_WRITE)
? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ; ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
fsm_newstate(ch->fsm, CTC_STATE_STOPPED); fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
} }
@ -388,7 +388,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s trans_skb allocation error", "%s(%s): %s trans_skb allocation error",
CTCM_FUNTAIL, ch->id, CTCM_FUNTAIL, ch->id,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
"RX" : "TX");
return -ENOMEM; return -ENOMEM;
} }
@ -399,7 +400,8 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
"%s(%s): %s set norm_cda failed", "%s(%s): %s set norm_cda failed",
CTCM_FUNTAIL, ch->id, CTCM_FUNTAIL, ch->id,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
"RX" : "TX");
return -ENOMEM; return -ENOMEM;
} }
@ -603,14 +605,14 @@ static void ctcmpc_send_sweep_req(struct channel *rch)
priv = dev->ml_priv; priv = dev->ml_priv;
grp = priv->mpcg; grp = priv->mpcg;
ch = priv->channel[WRITE]; ch = priv->channel[CTCM_WRITE];
/* sweep processing is not complete until response and request */ /* sweep processing is not complete until response and request */
/* has completed for all read channels in group */ /* has completed for all read channels in group */
if (grp->in_sweep == 0) { if (grp->in_sweep == 0) {
grp->in_sweep = 1; grp->in_sweep = 1;
grp->sweep_rsp_pend_num = grp->active_channels[READ]; grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
grp->sweep_req_pend_num = grp->active_channels[READ]; grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
} }
sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
@ -911,7 +913,7 @@ static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
dev->trans_start = jiffies; dev->trans_start = jiffies;
if (ctcm_transmit_skb(priv->channel[WRITE], skb) != 0) if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
@ -994,7 +996,7 @@ static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
} }
dev->trans_start = jiffies; dev->trans_start = jiffies;
if (ctcmpc_transmit_skb(priv->channel[WRITE], skb) != 0) { if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): device error - dropped", "%s(%s): device error - dropped",
CTCM_FUNTAIL, dev->name); CTCM_FUNTAIL, dev->name);
@ -1035,7 +1037,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL; return -EINVAL;
priv = dev->ml_priv; priv = dev->ml_priv;
max_bufsize = priv->channel[READ]->max_bufsize; max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
if (IS_MPC(priv)) { if (IS_MPC(priv)) {
if (new_mtu > max_bufsize - TH_HEADER_LENGTH) if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
@ -1226,10 +1228,10 @@ static void ctcm_irq_handler(struct ccw_device *cdev,
priv = dev_get_drvdata(&cgdev->dev); priv = dev_get_drvdata(&cgdev->dev);
/* Try to extract channel from driver data. */ /* Try to extract channel from driver data. */
if (priv->channel[READ]->cdev == cdev) if (priv->channel[CTCM_READ]->cdev == cdev)
ch = priv->channel[READ]; ch = priv->channel[CTCM_READ];
else if (priv->channel[WRITE]->cdev == cdev) else if (priv->channel[CTCM_WRITE]->cdev == cdev)
ch = priv->channel[WRITE]; ch = priv->channel[CTCM_WRITE];
else { else {
dev_err(&cdev->dev, dev_err(&cdev->dev,
"%s: Internal error: Can't determine channel for " "%s: Internal error: Can't determine channel for "
@ -1587,13 +1589,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
goto out_ccw2; goto out_ccw2;
} }
for (direction = READ; direction <= WRITE; direction++) { for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
priv->channel[direction] = priv->channel[direction] =
channel_get(type, direction == READ ? read_id : write_id, channel_get(type, direction == CTCM_READ ?
direction); read_id : write_id, direction);
if (priv->channel[direction] == NULL) { if (priv->channel[direction] == NULL) {
if (direction == WRITE) if (direction == CTCM_WRITE)
channel_free(priv->channel[READ]); channel_free(priv->channel[CTCM_READ]);
goto out_dev; goto out_dev;
} }
priv->channel[direction]->netdev = dev; priv->channel[direction]->netdev = dev;
@ -1617,13 +1619,13 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
dev_info(&dev->dev, dev_info(&dev->dev,
"setup OK : r/w = %s/%s, protocol : %d\n", "setup OK : r/w = %s/%s, protocol : %d\n",
priv->channel[READ]->id, priv->channel[CTCM_READ]->id,
priv->channel[WRITE]->id, priv->protocol); priv->channel[CTCM_WRITE]->id, priv->protocol);
CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
"setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
priv->channel[READ]->id, priv->channel[CTCM_READ]->id,
priv->channel[WRITE]->id, priv->protocol); priv->channel[CTCM_WRITE]->id, priv->protocol);
return 0; return 0;
out_unregister: out_unregister:
@ -1635,10 +1637,10 @@ out_ccw2:
out_ccw1: out_ccw1:
ccw_device_set_offline(cgdev->cdev[0]); ccw_device_set_offline(cgdev->cdev[0]);
out_remove_channel2: out_remove_channel2:
readc = channel_get(type, read_id, READ); readc = channel_get(type, read_id, CTCM_READ);
channel_remove(readc); channel_remove(readc);
out_remove_channel1: out_remove_channel1:
writec = channel_get(type, write_id, WRITE); writec = channel_get(type, write_id, CTCM_WRITE);
channel_remove(writec); channel_remove(writec);
out_err_result: out_err_result:
return result; return result;
@ -1660,19 +1662,19 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
if (!priv) if (!priv)
return -ENODEV; return -ENODEV;
if (priv->channel[READ]) { if (priv->channel[CTCM_READ]) {
dev = priv->channel[READ]->netdev; dev = priv->channel[CTCM_READ]->netdev;
CTCM_DBF_DEV(SETUP, dev, ""); CTCM_DBF_DEV(SETUP, dev, "");
/* Close the device */ /* Close the device */
ctcm_close(dev); ctcm_close(dev);
dev->flags &= ~IFF_RUNNING; dev->flags &= ~IFF_RUNNING;
ctcm_remove_attributes(&cgdev->dev); ctcm_remove_attributes(&cgdev->dev);
channel_free(priv->channel[READ]); channel_free(priv->channel[CTCM_READ]);
} else } else
dev = NULL; dev = NULL;
if (priv->channel[WRITE]) if (priv->channel[CTCM_WRITE])
channel_free(priv->channel[WRITE]); channel_free(priv->channel[CTCM_WRITE]);
if (dev) { if (dev) {
unregister_netdev(dev); unregister_netdev(dev);
@ -1685,11 +1687,11 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
ccw_device_set_offline(cgdev->cdev[1]); ccw_device_set_offline(cgdev->cdev[1]);
ccw_device_set_offline(cgdev->cdev[0]); ccw_device_set_offline(cgdev->cdev[0]);
if (priv->channel[READ]) if (priv->channel[CTCM_READ])
channel_remove(priv->channel[READ]); channel_remove(priv->channel[CTCM_READ]);
if (priv->channel[WRITE]) if (priv->channel[CTCM_WRITE])
channel_remove(priv->channel[WRITE]); channel_remove(priv->channel[CTCM_WRITE]);
priv->channel[READ] = priv->channel[WRITE] = NULL; priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
return 0; return 0;
@ -1720,11 +1722,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_OFFLINE) if (gdev->state == CCWGROUP_OFFLINE)
return 0; return 0;
netif_device_detach(priv->channel[READ]->netdev); netif_device_detach(priv->channel[CTCM_READ]->netdev);
ctcm_close(priv->channel[READ]->netdev); ctcm_close(priv->channel[CTCM_READ]->netdev);
if (!wait_event_timeout(priv->fsm->wait_q, if (!wait_event_timeout(priv->fsm->wait_q,
fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) { fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
netif_device_attach(priv->channel[READ]->netdev); netif_device_attach(priv->channel[CTCM_READ]->netdev);
return -EBUSY; return -EBUSY;
} }
ccw_device_set_offline(gdev->cdev[1]); ccw_device_set_offline(gdev->cdev[1]);
@ -1745,9 +1747,9 @@ static int ctcm_pm_resume(struct ccwgroup_device *gdev)
rc = ccw_device_set_online(gdev->cdev[0]); rc = ccw_device_set_online(gdev->cdev[0]);
if (rc) if (rc)
goto err_out; goto err_out;
ctcm_open(priv->channel[READ]->netdev); ctcm_open(priv->channel[CTCM_READ]->netdev);
err_out: err_out:
netif_device_attach(priv->channel[READ]->netdev); netif_device_attach(priv->channel[CTCM_READ]->netdev);
return rc; return rc;
} }
@ -111,8 +111,8 @@ enum ctcm_channel_types {
#define CTCM_INITIAL_BLOCKLEN 2 #define CTCM_INITIAL_BLOCKLEN 2
#define READ 0 #define CTCM_READ 0
#define WRITE 1 #define CTCM_WRITE 1
#define CTCM_ID_SIZE 20+3 #define CTCM_ID_SIZE 20+3
@ -419,8 +419,8 @@ void ctc_mpc_establish_connectivity(int port_num,
return; return;
priv = dev->ml_priv; priv = dev->ml_priv;
grp = priv->mpcg; grp = priv->mpcg;
rch = priv->channel[READ]; rch = priv->channel[CTCM_READ];
wch = priv->channel[WRITE]; wch = priv->channel[CTCM_WRITE];
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
"%s(%s): state=%s", "%s(%s): state=%s",
@ -578,7 +578,7 @@ void ctc_mpc_flow_control(int port_num, int flowc)
"%s: %s: flowc = %d", "%s: %s: flowc = %d",
CTCM_FUNTAIL, dev->name, flowc); CTCM_FUNTAIL, dev->name, flowc);
rch = priv->channel[READ]; rch = priv->channel[CTCM_READ];
mpcg_state = fsm_getstate(grp->fsm); mpcg_state = fsm_getstate(grp->fsm);
switch (flowc) { switch (flowc) {
@ -622,7 +622,7 @@ static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
struct net_device *dev = rch->netdev; struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv; struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg; struct mpc_group *grp = priv->mpcg;
struct channel *ch = priv->channel[WRITE]; struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id); CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@ -656,7 +656,7 @@ static void ctcmpc_send_sweep_resp(struct channel *rch)
int rc = 0; int rc = 0;
struct th_sweep *header; struct th_sweep *header;
struct sk_buff *sweep_skb; struct sk_buff *sweep_skb;
struct channel *ch = priv->channel[WRITE]; struct channel *ch = priv->channel[CTCM_WRITE];
CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id); CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
@ -712,7 +712,7 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
struct net_device *dev = rch->netdev; struct net_device *dev = rch->netdev;
struct ctcm_priv *priv = dev->ml_priv; struct ctcm_priv *priv = dev->ml_priv;
struct mpc_group *grp = priv->mpcg; struct mpc_group *grp = priv->mpcg;
struct channel *ch = priv->channel[WRITE]; struct channel *ch = priv->channel[CTCM_WRITE];
if (do_debug) if (do_debug)
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
@ -721,8 +721,8 @@ static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
if (grp->in_sweep == 0) { if (grp->in_sweep == 0) {
grp->in_sweep = 1; grp->in_sweep = 1;
ctcm_test_and_set_busy(dev); ctcm_test_and_set_busy(dev);
grp->sweep_req_pend_num = grp->active_channels[READ]; grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
grp->sweep_rsp_pend_num = grp->active_channels[READ]; grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
} }
CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
@ -906,14 +906,14 @@ void mpc_group_ready(unsigned long adev)
fsm_newstate(grp->fsm, MPCG_STATE_READY); fsm_newstate(grp->fsm, MPCG_STATE_READY);
/* Put up a read on the channel */ /* Put up a read on the channel */
ch = priv->channel[READ]; ch = priv->channel[CTCM_READ];
ch->pdu_seq = 0; ch->pdu_seq = 0;
CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" , CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
__func__, ch->pdu_seq); __func__, ch->pdu_seq);
ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
/* Put the write channel in idle state */ /* Put the write channel in idle state */
ch = priv->channel[WRITE]; ch = priv->channel[CTCM_WRITE];
if (ch->collect_len > 0) { if (ch->collect_len > 0) {
spin_lock(&ch->collect_lock); spin_lock(&ch->collect_lock);
ctcm_purge_skb_queue(&ch->collect_queue); ctcm_purge_skb_queue(&ch->collect_queue);
@ -960,7 +960,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
"%s: %i / Grp:%s total_channels=%i, active_channels: " "%s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action, "read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths, fsm_getstate_str(grp->fsm), grp->num_channel_paths,
grp->active_channels[READ], grp->active_channels[WRITE]); grp->active_channels[CTCM_READ],
grp->active_channels[CTCM_WRITE]);
if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) { if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
grp->num_channel_paths++; grp->num_channel_paths++;
@ -994,10 +995,11 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
grp->xid_skb->data, grp->xid_skb->data,
grp->xid_skb->len); grp->xid_skb->len);
ch->xid->xid2_dlc_type = ((CHANNEL_DIRECTION(ch->flags) == READ) ch->xid->xid2_dlc_type =
((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
? XID2_READ_SIDE : XID2_WRITE_SIDE); ? XID2_READ_SIDE : XID2_WRITE_SIDE);
if (CHANNEL_DIRECTION(ch->flags) == WRITE) if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
ch->xid->xid2_buf_len = 0x00; ch->xid->xid2_buf_len = 0x00;
ch->xid_skb->data = ch->xid_skb_data; ch->xid_skb->data = ch->xid_skb_data;
@ -1006,8 +1008,8 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
fsm_newstate(ch->fsm, CH_XID0_PENDING); fsm_newstate(ch->fsm, CH_XID0_PENDING);
if ((grp->active_channels[READ] > 0) && if ((grp->active_channels[CTCM_READ] > 0) &&
(grp->active_channels[WRITE] > 0) && (grp->active_channels[CTCM_WRITE] > 0) &&
(fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
@ -1027,10 +1029,10 @@ void mpc_channel_action(struct channel *ch, int direction, int action)
if (grp->channels_terminating) if (grp->channels_terminating)
goto done; goto done;
if (((grp->active_channels[READ] == 0) && if (((grp->active_channels[CTCM_READ] == 0) &&
(grp->active_channels[WRITE] > 0)) (grp->active_channels[CTCM_WRITE] > 0))
|| ((grp->active_channels[WRITE] == 0) && || ((grp->active_channels[CTCM_WRITE] == 0) &&
(grp->active_channels[READ] > 0))) (grp->active_channels[CTCM_READ] > 0)))
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
} }
done: done:
@ -1038,7 +1040,8 @@ done:
"exit %s: %i / Grp:%s total_channels=%i, active_channels: " "exit %s: %i / Grp:%s total_channels=%i, active_channels: "
"read=%i, write=%i\n", __func__, action, "read=%i, write=%i\n", __func__, action,
fsm_getstate_str(grp->fsm), grp->num_channel_paths, fsm_getstate_str(grp->fsm), grp->num_channel_paths,
grp->active_channels[READ], grp->active_channels[WRITE]); grp->active_channels[CTCM_READ],
grp->active_channels[CTCM_WRITE]);
CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id); CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
} }
@ -1392,8 +1395,8 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
(grp->port_persist == 0)) (grp->port_persist == 0))
fsm_deltimer(&priv->restart_timer); fsm_deltimer(&priv->restart_timer);
wch = priv->channel[WRITE]; wch = priv->channel[CTCM_WRITE];
rch = priv->channel[READ]; rch = priv->channel[CTCM_READ];
switch (grp->saved_state) { switch (grp->saved_state) {
case MPCG_STATE_RESET: case MPCG_STATE_RESET:
@ -1480,8 +1483,8 @@ static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
priv = dev->ml_priv; priv = dev->ml_priv;
grp = priv->mpcg; grp = priv->mpcg;
wch = priv->channel[WRITE]; wch = priv->channel[CTCM_WRITE];
rch = priv->channel[READ]; rch = priv->channel[CTCM_READ];
switch (fsm_getstate(grp->fsm)) { switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_XID2INITW: case MPCG_STATE_XID2INITW:
@ -1586,7 +1589,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
CTCM_D3_DUMP((char *)xid, XID2_LENGTH); CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
/*the received direction should be the opposite of ours */ /*the received direction should be the opposite of ours */
if (((CHANNEL_DIRECTION(ch->flags) == READ) ? XID2_WRITE_SIDE : if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
XID2_READ_SIDE) != xid->xid2_dlc_type) { XID2_READ_SIDE) != xid->xid2_dlc_type) {
rc = 2; rc = 2;
/* XID REJECTED: r/w channel pairing mismatch */ /* XID REJECTED: r/w channel pairing mismatch */
@ -1912,7 +1915,7 @@ static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
if (grp == NULL) if (grp == NULL)
return; return;
for (direction = READ; direction <= WRITE; direction++) { for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
struct channel *ch = priv->channel[direction]; struct channel *ch = priv->channel[direction];
struct xid2 *thisxid = ch->xid; struct xid2 *thisxid = ch->xid;
ch->xid_skb->data = ch->xid_skb_data; ch->xid_skb->data = ch->xid_skb_data;
@ -2152,14 +2155,15 @@ static int mpc_send_qllc_discontact(struct net_device *dev)
return -ENOMEM; return -ENOMEM;
} }
*((__u32 *)skb_push(skb, 4)) = priv->channel[READ]->pdu_seq; *((__u32 *)skb_push(skb, 4)) =
priv->channel[READ]->pdu_seq++; priv->channel[CTCM_READ]->pdu_seq;
priv->channel[CTCM_READ]->pdu_seq++;
CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n", CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
__func__, priv->channel[READ]->pdu_seq); __func__, priv->channel[CTCM_READ]->pdu_seq);
/* receipt of CC03 resets anticipated sequence number on /* receipt of CC03 resets anticipated sequence number on
receiving side */ receiving side */
priv->channel[READ]->pdu_seq = 0x00; priv->channel[CTCM_READ]->pdu_seq = 0x00;
skb_reset_mac_header(skb); skb_reset_mac_header(skb);
skb->dev = dev; skb->dev = dev;
skb->protocol = htons(ETH_P_SNAP); skb->protocol = htons(ETH_P_SNAP);
@ -38,8 +38,8 @@ static ssize_t ctcm_buffer_write(struct device *dev,
int bs1; int bs1;
struct ctcm_priv *priv = dev_get_drvdata(dev); struct ctcm_priv *priv = dev_get_drvdata(dev);
if (!(priv && priv->channel[READ] && ndev = priv->channel[CTCM_READ]->netdev;
(ndev = priv->channel[READ]->netdev))) { if (!(priv && priv->channel[CTCM_READ] && ndev)) {
CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev"); CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
return -ENODEV; return -ENODEV;
} }
@ -55,12 +55,12 @@ static ssize_t ctcm_buffer_write(struct device *dev,
(bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2))) (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
goto einval; goto einval;
priv->channel[READ]->max_bufsize = bs1; priv->channel[CTCM_READ]->max_bufsize = bs1;
priv->channel[WRITE]->max_bufsize = bs1; priv->channel[CTCM_WRITE]->max_bufsize = bs1;
if (!(ndev->flags & IFF_RUNNING)) if (!(ndev->flags & IFF_RUNNING))
ndev->mtu = bs1 - LL_HEADER_LENGTH - 2; ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
priv->channel[READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
priv->channel[WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
CTCM_DBF_DEV(SETUP, ndev, buf); CTCM_DBF_DEV(SETUP, ndev, buf);
return count; return count;
@ -85,9 +85,9 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
p += sprintf(p, " Device FSM state: %s\n", p += sprintf(p, " Device FSM state: %s\n",
fsm_getstate_str(priv->fsm)); fsm_getstate_str(priv->fsm));
p += sprintf(p, " RX channel FSM state: %s\n", p += sprintf(p, " RX channel FSM state: %s\n",
fsm_getstate_str(priv->channel[READ]->fsm)); fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
p += sprintf(p, " TX channel FSM state: %s\n", p += sprintf(p, " TX channel FSM state: %s\n",
fsm_getstate_str(priv->channel[WRITE]->fsm)); fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
p += sprintf(p, " Max. TX buffer used: %ld\n", p += sprintf(p, " Max. TX buffer used: %ld\n",
priv->channel[WRITE]->prof.maxmulti); priv->channel[WRITE]->prof.maxmulti);
p += sprintf(p, " Max. chained SKBs: %ld\n", p += sprintf(p, " Max. chained SKBs: %ld\n",
@ -102,7 +102,7 @@ static void ctcm_print_statistics(struct ctcm_priv *priv)
priv->channel[WRITE]->prof.tx_time); priv->channel[WRITE]->prof.tx_time);
printk(KERN_INFO "Statistics for %s:\n%s", printk(KERN_INFO "Statistics for %s:\n%s",
priv->channel[WRITE]->netdev->name, sbuf); priv->channel[CTCM_WRITE]->netdev->name, sbuf);
kfree(sbuf); kfree(sbuf);
return; return;
} }
@ -125,7 +125,7 @@ static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
return -ENODEV; return -ENODEV;
/* Reset statistics */ /* Reset statistics */
memset(&priv->channel[WRITE]->prof, 0, memset(&priv->channel[WRITE]->prof, 0,
sizeof(priv->channel[WRITE]->prof)); sizeof(priv->channel[CTCM_WRITE]->prof));
return count; return count;
} }
@ -129,7 +129,7 @@ static inline void random_ether_addr(u8 *addr)
/** /**
* dev_hw_addr_random - Create random MAC and set device flag * dev_hw_addr_random - Create random MAC and set device flag
* @dev: pointer to net_device structure * @dev: pointer to net_device structure
* @addr: Pointer to a six-byte array containing the Ethernet address * @hwaddr: Pointer to a six-byte array containing the Ethernet address
* *
* Generate random MAC to be used by a device and set addr_assign_type * Generate random MAC to be used by a device and set addr_assign_type
* so the state can be read by sysfs and be used by udev. * so the state can be read by sysfs and be used by udev.
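The etherdevice.h hunk only renames the documented parameter from @addr to @hwaddr so it matches the actual argument of dev_hw_addr_random(); kernel-doc complains when the name after '@' and the name in the prototype differ. A hedged, compilable illustration of the convention; print_hwaddr() is a stand-in function, not the kernel helper.

#include <stdio.h>

/**
 * print_hwaddr - print a six-byte Ethernet address
 * @hwaddr: pointer to a six-byte array containing the Ethernet address
 *
 * The parameter name after '@' must match the parameter name in the
 * prototype below, which is exactly what the one-word fix above restores.
 */
static void print_hwaddr(const unsigned char *hwaddr)
{
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       hwaddr[0], hwaddr[1], hwaddr[2], hwaddr[3], hwaddr[4], hwaddr[5]);
}

int main(void)
{
	const unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };

	print_hwaddr(mac);
	return 0;
}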
@ -122,7 +122,7 @@ static inline int netpoll_tx_running(struct net_device *dev)
} }
#else #else
static inline int netpoll_rx(struct sk_buff *skb) static inline bool netpoll_rx(struct sk_buff *skb)
{ {
return 0; return 0;
} }
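The netpoll.h stub now carries the same bool return type as the real netpoll_rx(), so code built without CONFIG_NETPOLL sees an identical signature; the unchanged "return 0" simply becomes false. A minimal sketch of the compiled-out-stub pattern, with an invented feature name and filter:

#include <stdbool.h>
#include <stdio.h>

/* #define CONFIG_FEATURE_X 1 */	/* toggle to compare both builds */

#ifdef CONFIG_FEATURE_X
static bool feature_x_drop(int pkt)
{
	return (pkt & 1) != 0;		/* "real" implementation */
}
#else
static inline bool feature_x_drop(int pkt)
{
	(void)pkt;
	return false;			/* stub: same signature, never drops */
}
#endif

int main(void)
{
	printf("drop packet 3? %s\n", feature_x_drop(3) ? "yes" : "no");
	return 0;
}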
@ -33,9 +33,9 @@
#define L2CAP_DEFAULT_FLUSH_TO 0xffff #define L2CAP_DEFAULT_FLUSH_TO 0xffff
#define L2CAP_DEFAULT_TX_WINDOW 63 #define L2CAP_DEFAULT_TX_WINDOW 63
#define L2CAP_DEFAULT_MAX_TX 3 #define L2CAP_DEFAULT_MAX_TX 3
#define L2CAP_DEFAULT_RETRANS_TO 1000 /* 1 second */ #define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */
#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ #define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
#define L2CAP_DEFAULT_MAX_PDU_SIZE 672 #define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
#define L2CAP_DEFAULT_ACK_TO 200 #define L2CAP_DEFAULT_ACK_TO 200
#define L2CAP_LOCAL_BUSY_TRIES 12 #define L2CAP_LOCAL_BUSY_TRIES 12
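Two ERTM defaults change here: the retransmit timeout doubles to 2 seconds, and the default max PDU size grows from 672 to 1009 so a full PDU fits a single 3-DH5 packet, whose maximum payload is 1021 bytes. The 12-byte difference is presumably headroom for L2CAP/ERTM framing; that accounting is an inference from the two numbers, not text from the patch. The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
	int dh5_payload = 1021;	/* maximum payload of one 3-DH5 baseband packet */
	int framing     = 12;	/* inferred L2CAP/ERTM framing allowance        */

	/* 1021 - 12 = 1009, the new L2CAP_DEFAULT_MAX_PDU_SIZE */
	printf("default ERTM max PDU size = %d\n", dh5_payload - framing);
	return 0;
}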
@ -195,7 +195,8 @@ struct sock_common {
* @sk_priority: %SO_PRIORITY setting * @sk_priority: %SO_PRIORITY setting
* @sk_type: socket type (%SOCK_STREAM, etc) * @sk_type: socket type (%SOCK_STREAM, etc)
* @sk_protocol: which protocol this socket belongs in this network family * @sk_protocol: which protocol this socket belongs in this network family
* @sk_peercred: %SO_PEERCRED setting * @sk_peer_pid: &struct pid for this socket's peer
* @sk_peer_cred: %SO_PEERCRED setting
* @sk_rcvlowat: %SO_RCVLOWAT setting * @sk_rcvlowat: %SO_RCVLOWAT setting
* @sk_rcvtimeo: %SO_RCVTIMEO setting * @sk_rcvtimeo: %SO_RCVTIMEO setting
* @sk_sndtimeo: %SO_SNDTIMEO setting * @sk_sndtimeo: %SO_SNDTIMEO setting
@ -211,6 +212,7 @@ struct sock_common {
* @sk_send_head: front of stuff to transmit * @sk_send_head: front of stuff to transmit
* @sk_security: used by security modules * @sk_security: used by security modules
* @sk_mark: generic packet mark * @sk_mark: generic packet mark
* @sk_classid: this socket's cgroup classid
* @sk_write_pending: a write to stream socket waits to start * @sk_write_pending: a write to stream socket waits to start
* @sk_state_change: callback to indicate change in the state of the sock * @sk_state_change: callback to indicate change in the state of the sock
* @sk_data_ready: callback to indicate there is data to be processed * @sk_data_ready: callback to indicate there is data to be processed
@ -2705,8 +2705,9 @@ done:
case L2CAP_MODE_ERTM: case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size; pi->remote_tx_win = rfc.txwin_size;
pi->remote_max_tx = rfc.max_transmit; pi->remote_max_tx = rfc.max_transmit;
if (rfc.max_pdu_size > pi->conn->mtu - 10)
rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10); if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
pi->remote_mps = le16_to_cpu(rfc.max_pdu_size); pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
@ -2723,8 +2724,8 @@ done:
break; break;
case L2CAP_MODE_STREAMING: case L2CAP_MODE_STREAMING:
if (rfc.max_pdu_size > pi->conn->mtu - 10) if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10); rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
pi->remote_mps = le16_to_cpu(rfc.max_pdu_size); pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
@ -2806,7 +2807,6 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
if (*result == L2CAP_CONF_SUCCESS) { if (*result == L2CAP_CONF_SUCCESS) {
switch (rfc.mode) { switch (rfc.mode) {
case L2CAP_MODE_ERTM: case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size;
pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
pi->mps = le16_to_cpu(rfc.max_pdu_size); pi->mps = le16_to_cpu(rfc.max_pdu_size);
@ -2862,7 +2862,6 @@ static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
done: done:
switch (rfc.mode) { switch (rfc.mode) {
case L2CAP_MODE_ERTM: case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size;
pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
pi->mps = le16_to_cpu(rfc.max_pdu_size); pi->mps = le16_to_cpu(rfc.max_pdu_size);
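The ERTM and streaming-mode hunks above fix an endianness slip: rfc.max_pdu_size is a little-endian (__le16) field, so it must be converted with le16_to_cpu() before being compared against the host-order MTU, and the clamped value must be stored back with cpu_to_le16(), not the other way round. The same discipline in a userspace sketch, using the glibc <endian.h> helpers as stand-ins for the kernel ones:

#include <endian.h>	/* le16toh()/htole16() */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t max_pdu_le = htole16(2000);	/* little-endian wire-format value */
	int mtu = 672;				/* host-order connection MTU       */

	/* compare in host order, write back in wire order */
	if (le16toh(max_pdu_le) > mtu - 10)
		max_pdu_le = htole16((uint16_t)(mtu - 10));

	printf("clamped MPS = %u\n", (unsigned)le16toh(max_pdu_le));
	return 0;
}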
@ -9,7 +9,7 @@
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <net/caif/cfpkt.h> #include <net/caif/cfpkt.h>
#define PKT_PREFIX 16 #define PKT_PREFIX 48
#define PKT_POSTFIX 2 #define PKT_POSTFIX 2
#define PKT_LEN_WHEN_EXTENDING 128 #define PKT_LEN_WHEN_EXTENDING 128
#define PKT_ERROR(pkt, errmsg) do { \ #define PKT_ERROR(pkt, errmsg) do { \
@ -60,6 +60,13 @@
#include <net/sock.h> #include <net/sock.h>
#include <net/net_namespace.h> #include <net/net_namespace.h>
/*
* To send multiple CAN frame content within TX_SETUP or to filter
* CAN messages with multiplex index within RX_SETUP, the number of
* different filters is limited to 256 due to the one byte index value.
*/
#define MAX_NFRAMES 256
/* use of last_frames[index].can_dlc */ /* use of last_frames[index].can_dlc */
#define RX_RECV 0x40 /* received data for this element */ #define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */ #define RX_THR 0x80 /* element not been sent due to throttle feature */
@ -89,16 +96,16 @@ struct bcm_op {
struct list_head list; struct list_head list;
int ifindex; int ifindex;
canid_t can_id; canid_t can_id;
int flags; u32 flags;
unsigned long frames_abs, frames_filtered; unsigned long frames_abs, frames_filtered;
struct timeval ival1, ival2; struct timeval ival1, ival2;
struct hrtimer timer, thrtimer; struct hrtimer timer, thrtimer;
struct tasklet_struct tsklet, thrtsklet; struct tasklet_struct tsklet, thrtsklet;
ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
int rx_ifindex; int rx_ifindex;
int count; u32 count;
int nframes; u32 nframes;
int currframe; u32 currframe;
struct can_frame *frames; struct can_frame *frames;
struct can_frame *last_frames; struct can_frame *last_frames;
struct can_frame sframe; struct can_frame sframe;
@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
seq_printf(m, "rx_op: %03X %-5s ", seq_printf(m, "rx_op: %03X %-5s ",
op->can_id, bcm_proc_getifname(ifname, op->ifindex)); op->can_id, bcm_proc_getifname(ifname, op->ifindex));
seq_printf(m, "[%d]%c ", op->nframes, seq_printf(m, "[%u]%c ", op->nframes,
(op->flags & RX_CHECK_DLC)?'d':' '); (op->flags & RX_CHECK_DLC)?'d':' ');
if (op->kt_ival1.tv64) if (op->kt_ival1.tv64)
seq_printf(m, "timeo=%lld ", seq_printf(m, "timeo=%lld ",
@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
list_for_each_entry(op, &bo->tx_ops, list) { list_for_each_entry(op, &bo->tx_ops, list) {
seq_printf(m, "tx_op: %03X %s [%d] ", seq_printf(m, "tx_op: %03X %s [%u] ",
op->can_id, op->can_id,
bcm_proc_getifname(ifname, op->ifindex), bcm_proc_getifname(ifname, op->ifindex),
op->nframes); op->nframes);
@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
struct can_frame *firstframe; struct can_frame *firstframe;
struct sockaddr_can *addr; struct sockaddr_can *addr;
struct sock *sk = op->sk; struct sock *sk = op->sk;
int datalen = head->nframes * CFSIZ; unsigned int datalen = head->nframes * CFSIZ;
int err; int err;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@ -468,7 +475,7 @@ rx_changed_settime:
* bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
* received data stored in op->last_frames[] * received data stored in op->last_frames[]
*/ */
static void bcm_rx_cmp_to_index(struct bcm_op *op, int index, static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
const struct can_frame *rxdata) const struct can_frame *rxdata)
{ {
/* /*
@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
/* /*
* bcm_rx_do_flush - helper for bcm_rx_thr_flush * bcm_rx_do_flush - helper for bcm_rx_thr_flush
*/ */
static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index) static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
unsigned int index)
{ {
if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) { if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
if (update) if (update)
@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
int updated = 0; int updated = 0;
if (op->nframes > 1) { if (op->nframes > 1) {
int i; unsigned int i;
/* for MUX filter we start at index 1 */ /* for MUX filter we start at index 1 */
for (i = 1; i < op->nframes; i++) for (i = 1; i < op->nframes; i++)
@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
{ {
struct bcm_op *op = (struct bcm_op *)data; struct bcm_op *op = (struct bcm_op *)data;
const struct can_frame *rxframe = (struct can_frame *)skb->data; const struct can_frame *rxframe = (struct can_frame *)skb->data;
int i; unsigned int i;
/* disable timeout */ /* disable timeout */
hrtimer_cancel(&op->timer); hrtimer_cancel(&op->timer);
@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
{ {
struct bcm_sock *bo = bcm_sk(sk); struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op; struct bcm_op *op;
int i, err; unsigned int i;
int err;
/* we need a real device to send frames */ /* we need a real device to send frames */
if (!ifindex) if (!ifindex)
return -ENODEV; return -ENODEV;
/* we need at least one can_frame */ /* check nframes boundaries - we need at least one can_frame */
if (msg_head->nframes < 1) if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
return -EINVAL; return -EINVAL;
/* check the given can_id */ /* check the given can_id */
@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
msg_head->nframes = 0; msg_head->nframes = 0;
} }
/* the first element contains the mux-mask => MAX_NFRAMES + 1 */
if (msg_head->nframes > MAX_NFRAMES + 1)
return -EINVAL;
if ((msg_head->flags & RX_RTR_FRAME) && if ((msg_head->flags & RX_RTR_FRAME) &&
((msg_head->nframes != 1) || ((msg_head->nframes != 1) ||
(!(msg_head->can_id & CAN_RTR_FLAG)))) (!(msg_head->can_id & CAN_RTR_FLAG))))
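The bcm hunks do two related things: the frame counters (count, nframes, currframe and the loop indexes) become unsigned so they match their use as array indexes, and nframes is checked against MAX_NFRAMES before it is ever multiplied by the frame size, so a hostile value can no longer request an oversized buffer. A hedged userspace sketch of that validation; the limit mirrors MAX_NFRAMES above, everything else (struct, function, sizes) is invented.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_NFRAMES 256			/* one-byte multiplex index => 256 entries */

struct frame { uint8_t data[16]; };

/* validate the user-supplied count before sizing any allocation */
static int setup_frames(uint32_t nframes, struct frame **out)
{
	if (nframes < 1 || nframes > MAX_NFRAMES)
		return -EINVAL;

	*out = calloc(nframes, sizeof(**out));	/* bounded: at most 4 KiB here */
	return *out ? 0 : -ENOMEM;
}

int main(void)
{
	struct frame *frames = NULL;

	printf("nframes=3      -> %d\n", setup_frames(3, &frames));
	printf("nframes=100000 -> %d\n", setup_frames(100000, &frames));
	free(frames);
	return 0;
}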
@ -1,7 +1,7 @@
menuconfig NET_DSA menuconfig NET_DSA
bool "Distributed Switch Architecture support" bool "Distributed Switch Architecture support"
default n default n
depends on EXPERIMENTAL && NET_ETHERNET && !S390 depends on EXPERIMENTAL && NETDEVICES && !S390
select PHYLIB select PHYLIB
---help--- ---help---
This allows you to use hardware switch chips that use This allows you to use hardware switch chips that use
@ -150,22 +150,34 @@ int register_qdisc(struct Qdisc_ops *qops)
if (qops->enqueue == NULL) if (qops->enqueue == NULL)
qops->enqueue = noop_qdisc_ops.enqueue; qops->enqueue = noop_qdisc_ops.enqueue;
if (qops->peek == NULL) { if (qops->peek == NULL) {
if (qops->dequeue == NULL) { if (qops->dequeue == NULL)
qops->peek = noop_qdisc_ops.peek; qops->peek = noop_qdisc_ops.peek;
} else { else
rc = -EINVAL; goto out_einval;
goto out;
}
} }
if (qops->dequeue == NULL) if (qops->dequeue == NULL)
qops->dequeue = noop_qdisc_ops.dequeue; qops->dequeue = noop_qdisc_ops.dequeue;
if (qops->cl_ops) {
const struct Qdisc_class_ops *cops = qops->cl_ops;
if (!(cops->get && cops->put && cops->walk && cops->leaf))
goto out_einval;
if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
goto out_einval;
}
qops->next = NULL; qops->next = NULL;
*qp = qops; *qp = qops;
rc = 0; rc = 0;
out: out:
write_unlock(&qdisc_mod_lock); write_unlock(&qdisc_mod_lock);
return rc; return rc;
out_einval:
rc = -EINVAL;
goto out;
} }
EXPORT_SYMBOL(register_qdisc); EXPORT_SYMBOL(register_qdisc);
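register_qdisc() now rejects a qdisc whose class-ops table is incomplete (get, put, walk and leaf are mandatory, and bind_tcf/unbind_tcf are required whenever tcf_chain is present) instead of leaving a NULL pointer to be dereferenced later; the sch_sfq change further down supplies exactly those missing handlers. The same defend-at-registration idea in a self-contained sketch; the struct and function names are invented, only the validation logic mirrors the hunk.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct class_ops {
	int   (*get)(unsigned long id);
	void  (*put)(unsigned long id);
	void  (*walk)(void);
	void *(*leaf)(unsigned long id);
	void *(*tcf_chain)(unsigned long id);
	unsigned long (*bind_tcf)(unsigned long id);
	void  (*unbind_tcf)(unsigned long id);
};

/* refuse an ops table with holes before it can ever be used */
static int register_ops(const struct class_ops *cops)
{
	if (cops) {
		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			return -EINVAL;
		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			return -EINVAL;
	}
	return 0;
}

static int   noop_get(unsigned long id)   { (void)id; return 0; }
static void  noop_put(unsigned long id)   { (void)id; }
static void  noop_walk(void)              { }
static void *noop_leaf(unsigned long id)  { (void)id; return NULL; }

int main(void)
{
	struct class_ops incomplete = { .get = noop_get };
	struct class_ops complete   = {
		.get = noop_get, .put = noop_put,
		.walk = noop_walk, .leaf = noop_leaf,
	};

	printf("incomplete ops: %d\n", register_ops(&incomplete));	/* -EINVAL */
	printf("complete ops:   %d\n", register_ops(&complete));	/* 0       */
	return 0;
}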
@ -418,7 +418,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
} }
ret = qdisc_enqueue(skb, flow->q); ret = qdisc_enqueue(skb, flow->q);
if (ret != 0) { if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused drop: __maybe_unused
if (net_xmit_drop_count(ret)) { if (net_xmit_drop_count(ret)) {
sch->qstats.drops++; sch->qstats.drops++;
@ -442,7 +442,7 @@ drop: __maybe_unused
*/ */
if (flow == &p->link) { if (flow == &p->link) {
sch->q.qlen++; sch->q.qlen++;
return 0; return NET_XMIT_SUCCESS;
} }
tasklet_schedule(&p->task); tasklet_schedule(&p->task);
return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@ -334,7 +334,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= q->limit) { if (++sch->q.qlen <= q->limit) {
sch->bstats.bytes += qdisc_pkt_len(skb); sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++; sch->bstats.packets++;
return 0; return NET_XMIT_SUCCESS;
} }
sfq_drop(sch); sfq_drop(sch);
@ -508,6 +508,11 @@ nla_put_failure:
return -1; return -1;
} }
static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
return NULL;
}
static unsigned long sfq_get(struct Qdisc *sch, u32 classid) static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{ {
return 0; return 0;
@ -519,6 +524,10 @@ static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
return 0; return 0;
} }
static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}
static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{ {
struct sfq_sched_data *q = qdisc_priv(sch); struct sfq_sched_data *q = qdisc_priv(sch);
@ -571,9 +580,12 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
} }
static const struct Qdisc_class_ops sfq_class_ops = { static const struct Qdisc_class_ops sfq_class_ops = {
.leaf = sfq_leaf,
.get = sfq_get, .get = sfq_get,
.put = sfq_put,
.tcf_chain = sfq_find_tcf, .tcf_chain = sfq_find_tcf,
.bind_tcf = sfq_bind, .bind_tcf = sfq_bind,
.unbind_tcf = sfq_put,
.dump = sfq_dump_class, .dump = sfq_dump_class,
.dump_stats = sfq_dump_class_stats, .dump_stats = sfq_dump_class_stats,
.walk = sfq_walk, .walk = sfq_walk,
@ -127,7 +127,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch); return qdisc_reshape_fail(skb, sch);
ret = qdisc_enqueue(skb, q->qdisc); ret = qdisc_enqueue(skb, q->qdisc);
if (ret != 0) { if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) if (net_xmit_drop_count(ret))
sch->qstats.drops++; sch->qstats.drops++;
return ret; return ret;
@ -136,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
sch->q.qlen++; sch->q.qlen++;
sch->bstats.bytes += qdisc_pkt_len(skb); sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++; sch->bstats.packets++;
return 0; return NET_XMIT_SUCCESS;
} }

@ -85,7 +85,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
__skb_queue_tail(&q->q, skb); __skb_queue_tail(&q->q, skb);
sch->bstats.bytes += qdisc_pkt_len(skb); sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++; sch->bstats.packets++;
return 0; return NET_XMIT_SUCCESS;
} }
kfree_skb(skb); kfree_skb(skb);
@ -843,13 +843,19 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
return -EINVAL; return -EINVAL;
if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) { if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) {
/* Verify that we are associated with the destination AP */ /* Verify that we are associated with the destination AP */
wdev_lock(wdev);
if (!wdev->current_bss || if (!wdev->current_bss ||
memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
ETH_ALEN) != 0 || ETH_ALEN) != 0 ||
(wdev->iftype == NL80211_IFTYPE_STATION && (wdev->iftype == NL80211_IFTYPE_STATION &&
memcmp(wdev->current_bss->pub.bssid, mgmt->da, memcmp(wdev->current_bss->pub.bssid, mgmt->da,
ETH_ALEN) != 0)) ETH_ALEN) != 0)) {
wdev_unlock(wdev);
return -ENOTCONN; return -ENOTCONN;
}
wdev_unlock(wdev);
} }
if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0) if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0)
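The cfg80211 hunk takes wdev_lock() around the current_bss checks and, just as importantly, drops the lock on the early -ENOTCONN return as well as after a successful check. The unlock-on-every-exit shape is easy to get wrong, so here it is in a tiny pthread sketch; the mutex, flag and function are illustrative, not the cfg80211 API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t wdev_mtx = PTHREAD_MUTEX_INITIALIZER;
static int associated;			/* 0 = no current BSS */

static int check_association(void)
{
	pthread_mutex_lock(&wdev_mtx);
	if (!associated) {
		pthread_mutex_unlock(&wdev_mtx);	/* early exit: unlock first */
		return -ENOTCONN;
	}
	pthread_mutex_unlock(&wdev_mtx);		/* normal exit */
	return 0;
}

int main(void)
{
	printf("not associated: %d\n", check_association());
	associated = 1;
	printf("associated:     %d\n", check_association());
	return 0;
}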