1
0
Fork 0

Bug fixes galore, removal of the ntb atom driver, and updates to the ntb

tools and tests to support the multi-port interface
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJadPvQAAoJEG5mS6x6i9Ijht8P/2Y/j+3blqJghxwI6aislC01
 7XuX+Z84a/mLD+WDuaBbn6TGkS1OzRNeKSu3ld5ts6y6Vs/UhtclFEEV45Yhg7H5
 FQXbuDG0o12dnB7Sy7CePbAbxC3lBTyGw7ENyoC8XP57e/aVPIVZCralAmSR+i/D
 Y5eNDG3j5a51Ab/VqK66URJbltbMi3yzi8BWuFEQsGsrST4Rm6zZ/6rLlNFjMNzq
 /H+aXcHSdLJbSUT1d225AjkzsqIGvj1XmHtr+igE7b5LBdGXi1YtadH0vyjBvEQZ
 NZzEzqMIOnmRAznMK5FoeYz/frIQJScQhDA/8y5DsJo9sSgycG6t3F+OLmjSCxua
 tEjD9wfLOfHPfIbEC+kujormoIahM1JRzGymHJf/MkKhoInNS8cYpnw9BBiXQtY8
 CPuoGT4yQRivsSyru1bg1jAzwS7iUec5/wgA6aTHroasiA35kgQ+iHGuNDoloE6T
 /mfReCqQmGOl/XXXG5o7R84z1yXdttf5L8M+ItPM7kaPRncOwFJg6A4Re0AumCMk
 qNOdfjPQXQQAcvwXQRcT6H7kecRnUdGSnxTjGezMwG2ykG5Q8PCXXlXV4Q2Rsjl2
 D97UmMxRLe4PKjt83sXGRR6XdIpORnqFScQgFY861QrWm0gusu3qBRZgMUO01qWU
 7yEPRsKB96SJ2I/8CeFg
 =b7cs
 -----END PGP SIGNATURE-----

Merge tag 'ntb-4.16' of git://github.com/jonmason/ntb

Pull NTB updates from Jon Mason:
 "Bug fixes galore, removal of the ntb atom driver, and updates to the
  ntb tools and tests to support the multi-port interface"

* tag 'ntb-4.16' of git://github.com/jonmason/ntb: (37 commits)
  NTB: ntb_perf: fix cast to restricted __le32
  ntb_perf: Fix an error code in perf_copy_chunk()
  ntb_hw_switchtec: Make function switchtec_ntb_remove() static
  NTB: ntb_tool: fix memory leak on 'buf' on error exit path
  NTB: ntb_perf: fix printing of resource_size_t
  NTB: ntb_hw_idt: Set NTB_TOPO_SWITCH topology
  NTB: ntb_test: Update ntb_perf tests
  NTB: ntb_test: Update ntb_tool MW tests
  NTB: ntb_test: Add ntb_tool Message tests
  NTB: ntb_test: Update ntb_tool Scratchpad tests
  NTB: ntb_test: Update ntb_tool DB tests
  NTB: ntb_test: Update ntb_tool link tests
  NTB: ntb_test: Add ntb_tool port tests
  NTB: ntb_test: Safely use paths with whitespace
  NTB: ntb_perf: Add full multi-port NTB API support
  NTB: ntb_tool: Add full multi-port NTB API support
  NTB: ntb_pp: Add full multi-port NTB API support
  NTB: Fix UB/bug in ntb_mw_get_align()
  NTB: Set dma mask and dma coherent mask to NTB devices
  NTB: Rename NTB messaging API methods
  ...
hifive-unleashed-5.1
Linus Torvalds 2018-02-04 11:13:49 -08:00
commit d3658c2266
14 changed files with 3694 additions and 2098 deletions

View File

@ -9801,7 +9801,7 @@ F: drivers/ntb/hw/amd/
NTB DRIVER CORE
M: Jon Mason <jdmason@kudzu.us>
M: Dave Jiang <dave.jiang@intel.com>
M: Allen Hubbe <Allen.Hubbe@emc.com>
M: Allen Hubbe <allenbh@gmail.com>
L: linux-ntb@googlegroups.com
S: Supported
W: https://github.com/jonmason/ntb/wiki

View File

@ -1020,6 +1020,10 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev,
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
}
rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
dma_get_mask(&pdev->dev));
if (rc)
goto err_dma_mask;
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {

View File

@ -1744,20 +1744,19 @@ static int idt_ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
* idt_ntb_msg_read() - read message register with specified index
* (NTB API callback)
* @ntb: NTB device context.
* @midx: Message register index
* @pidx: OUT - Port index of peer device a message retrieved from
* @msg: OUT - Data
* @midx: Message register index
*
* Read data from the specified message register and source register.
*
* Return: zero on success, negative error if invalid argument passed.
* Return: inbound message register value.
*/
static int idt_ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg)
static u32 idt_ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
if (midx < 0 || IDT_MSG_CNT <= midx)
return -EINVAL;
return ~(u32)0;
/* Retrieve source port index of the message */
if (pidx != NULL) {
@ -1772,18 +1771,15 @@ static int idt_ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg)
}
/* Retrieve data of the corresponding message register */
if (msg != NULL)
*msg = idt_nt_read(ndev, ntdata_tbl.msgs[midx].in);
return 0;
return idt_nt_read(ndev, ntdata_tbl.msgs[midx].in);
}
/*
* idt_ntb_msg_write() - write data to the specified message register
* (NTB API callback)
* idt_ntb_peer_msg_write() - write data to the specified message register
* (NTB API callback)
* @ntb: NTB device context.
* @midx: Message register index
* @pidx: Port index of peer device a message being sent to
* @midx: Message register index
* @msg: Data to send
*
* Just try to send data to a peer. Message status register should be
@ -1791,7 +1787,8 @@ static int idt_ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg)
*
* Return: zero on success, negative error if invalid argument passed.
*/
static int idt_ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx, u32 msg)
static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
u32 msg)
{
struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
unsigned long irqflags;
@ -2058,7 +2055,7 @@ static const struct ntb_dev_ops idt_ntb_ops = {
.msg_set_mask = idt_ntb_msg_set_mask,
.msg_clear_mask = idt_ntb_msg_clear_mask,
.msg_read = idt_ntb_msg_read,
.msg_write = idt_ntb_msg_write
.peer_msg_write = idt_ntb_peer_msg_write
};
/*
@ -2073,7 +2070,7 @@ static int idt_register_device(struct idt_ntb_dev *ndev)
/* Initialize the rest of NTB device structure and register it */
ndev->ntb.ops = &idt_ntb_ops;
ndev->ntb.topo = NTB_TOPO_PRI;
ndev->ntb.topo = NTB_TOPO_SWITCH;
ret = ntb_register_device(&ndev->ntb);
if (ret != 0) {
@ -2269,7 +2266,7 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
"Message data:\n");
for (idx = 0; idx < IDT_MSG_CNT; idx++) {
int src;
(void)idt_ntb_msg_read(&ndev->ntb, idx, &src, &data);
data = idt_ntb_msg_read(&ndev->ntb, &src, idx);
off += scnprintf(strbuf + off, size - off,
"\t%hhu. 0x%08x from peer %hhu (Port %hhu)\n",
idx, data, src, ndev->peers[src].port);
@ -2429,7 +2426,7 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
struct pci_dev *pdev = ndev->ntb.pdev;
int ret;
/* Initialize the bit mask of DMA */
/* Initialize the bit mask of PCI/NTB DMA */
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (ret != 0) {
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
@ -2450,6 +2447,12 @@ static int idt_init_pci(struct idt_ntb_dev *ndev)
dev_warn(&pdev->dev,
"Cannot set consistent DMA highmem bit mask\n");
}
ret = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
dma_get_mask(&pdev->dev));
if (ret != 0) {
dev_err(&pdev->dev, "Failed to set NTB device DMA bit mask\n");
return ret;
}
/*
* Enable the device advanced error reporting. It's not critical to

View File

@ -74,12 +74,6 @@ MODULE_AUTHOR("Intel Corporation");
#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)
static const struct intel_ntb_reg atom_reg;
static const struct intel_ntb_alt_reg atom_pri_reg;
static const struct intel_ntb_alt_reg atom_sec_reg;
static const struct intel_ntb_alt_reg atom_b2b_reg;
static const struct intel_ntb_xlat_reg atom_pri_xlat;
static const struct intel_ntb_xlat_reg atom_sec_xlat;
static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
@ -184,15 +178,6 @@ static inline void _iowrite64(u64 val, void __iomem *mmio)
#endif
#endif
static inline int pdev_is_atom(struct pci_dev *pdev)
{
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
return 1;
}
return 0;
}
static inline int pdev_is_xeon(struct pci_dev *pdev)
{
switch (pdev->device) {
@ -1006,8 +991,7 @@ static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
{
struct intel_ntb_dev *ndev = filp->private_data;
if (pdev_is_xeon(ndev->ntb.pdev) ||
pdev_is_atom(ndev->ntb.pdev))
if (pdev_is_xeon(ndev->ntb.pdev))
return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
else if (pdev_is_skx_xeon(ndev->ntb.pdev))
return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);
@ -1439,242 +1423,6 @@ static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
ndev->peer_reg->spad);
}
/* ATOM */
static u64 atom_db_ioread(void __iomem *mmio)
{
return ioread64(mmio);
}
static void atom_db_iowrite(u64 bits, void __iomem *mmio)
{
iowrite64(bits, mmio);
}
static int atom_poll_link(struct intel_ntb_dev *ndev)
{
u32 ntb_ctl;
ntb_ctl = ioread32(ndev->self_mmio + ATOM_NTBCNTL_OFFSET);
if (ntb_ctl == ndev->ntb_ctl)
return 0;
ndev->ntb_ctl = ntb_ctl;
ndev->lnk_sta = ioread32(ndev->self_mmio + ATOM_LINK_STATUS_OFFSET);
return 1;
}
static int atom_link_is_up(struct intel_ntb_dev *ndev)
{
return ATOM_NTB_CTL_ACTIVE(ndev->ntb_ctl);
}
static int atom_link_is_err(struct intel_ntb_dev *ndev)
{
if (ioread32(ndev->self_mmio + ATOM_LTSSMSTATEJMP_OFFSET)
& ATOM_LTSSMSTATEJMP_FORCEDETECT)
return 1;
if (ioread32(ndev->self_mmio + ATOM_IBSTERRRCRVSTS0_OFFSET)
& ATOM_IBIST_ERR_OFLOW)
return 1;
return 0;
}
static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
struct device *dev = &ndev->ntb.pdev->dev;
switch (ppd & ATOM_PPD_TOPO_MASK) {
case ATOM_PPD_TOPO_B2B_USD:
dev_dbg(dev, "PPD %d B2B USD\n", ppd);
return NTB_TOPO_B2B_USD;
case ATOM_PPD_TOPO_B2B_DSD:
dev_dbg(dev, "PPD %d B2B DSD\n", ppd);
return NTB_TOPO_B2B_DSD;
case ATOM_PPD_TOPO_PRI_USD:
case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
case ATOM_PPD_TOPO_SEC_USD:
case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
dev_dbg(dev, "PPD %d non B2B disabled\n", ppd);
return NTB_TOPO_NONE;
}
dev_dbg(dev, "PPD %d invalid\n", ppd);
return NTB_TOPO_NONE;
}
static void atom_link_hb(struct work_struct *work)
{
struct intel_ntb_dev *ndev = hb_ndev(work);
struct device *dev = &ndev->ntb.pdev->dev;
unsigned long poll_ts;
void __iomem *mmio;
u32 status32;
poll_ts = ndev->last_ts + ATOM_LINK_HB_TIMEOUT;
/* Delay polling the link status if an interrupt was received,
* unless the cached link status says the link is down.
*/
if (time_after(poll_ts, jiffies) && atom_link_is_up(ndev)) {
schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
return;
}
if (atom_poll_link(ndev))
ntb_link_event(&ndev->ntb);
if (atom_link_is_up(ndev) || !atom_link_is_err(ndev)) {
schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
return;
}
/* Link is down with error: recover the link! */
mmio = ndev->self_mmio;
/* Driver resets the NTB ModPhy lanes - magic! */
iowrite8(0xe0, mmio + ATOM_MODPHY_PCSREG6);
iowrite8(0x40, mmio + ATOM_MODPHY_PCSREG4);
iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG4);
iowrite8(0x60, mmio + ATOM_MODPHY_PCSREG6);
/* Driver waits 100ms to allow the NTB ModPhy to settle */
msleep(100);
/* Clear AER Errors, write to clear */
status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET);
dev_dbg(dev, "ERRCORSTS = %x\n", status32);
status32 &= PCI_ERR_COR_REP_ROLL;
iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET);
/* Clear unexpected electrical idle event in LTSSM, write to clear */
status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET);
dev_dbg(dev, "LTSSMERRSTS0 = %x\n", status32);
status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET);
/* Clear DeSkew Buffer error, write to clear */
status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET);
dev_dbg(dev, "DESKEWSTS = %x\n", status32);
status32 |= ATOM_DESKEWSTS_DBERR;
iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET);
status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
dev_dbg(dev, "IBSTERRRCRVSTS0 = %x\n", status32);
status32 &= ATOM_IBIST_ERR_OFLOW;
iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET);
/* Releases the NTB state machine to allow the link to retrain */
status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET);
dev_dbg(dev, "LTSSMSTATEJMP = %x\n", status32);
status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET);
/* There is a potential race between the 2 NTB devices recovering at the
* same time. If the times are the same, the link will not recover and
* the driver will be stuck in this loop forever. Add a random interval
* to the recovery time to prevent this race.
*/
schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_RECOVERY_TIME
+ prandom_u32() % ATOM_LINK_RECOVERY_TIME);
}
static int atom_init_isr(struct intel_ntb_dev *ndev)
{
int rc;
rc = ndev_init_isr(ndev, 1, ATOM_DB_MSIX_VECTOR_COUNT,
ATOM_DB_MSIX_VECTOR_SHIFT, ATOM_DB_TOTAL_SHIFT);
if (rc)
return rc;
/* ATOM doesn't have link status interrupt, poll on that platform */
ndev->last_ts = jiffies;
INIT_DELAYED_WORK(&ndev->hb_timer, atom_link_hb);
schedule_delayed_work(&ndev->hb_timer, ATOM_LINK_HB_TIMEOUT);
return 0;
}
static void atom_deinit_isr(struct intel_ntb_dev *ndev)
{
cancel_delayed_work_sync(&ndev->hb_timer);
ndev_deinit_isr(ndev);
}
static int atom_init_ntb(struct intel_ntb_dev *ndev)
{
ndev->mw_count = ATOM_MW_COUNT;
ndev->spad_count = ATOM_SPAD_COUNT;
ndev->db_count = ATOM_DB_COUNT;
switch (ndev->ntb.topo) {
case NTB_TOPO_B2B_USD:
case NTB_TOPO_B2B_DSD:
ndev->self_reg = &atom_pri_reg;
ndev->peer_reg = &atom_b2b_reg;
ndev->xlat_reg = &atom_sec_xlat;
/* Enable Bus Master and Memory Space on the secondary side */
iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
ndev->self_mmio + ATOM_SPCICMD_OFFSET);
break;
default:
return -EINVAL;
}
ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
return 0;
}
static int atom_init_dev(struct intel_ntb_dev *ndev)
{
u32 ppd;
int rc;
rc = pci_read_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET, &ppd);
if (rc)
return -EIO;
ndev->ntb.topo = atom_ppd_topo(ndev, ppd);
if (ndev->ntb.topo == NTB_TOPO_NONE)
return -EINVAL;
rc = atom_init_ntb(ndev);
if (rc)
return rc;
rc = atom_init_isr(ndev);
if (rc)
return rc;
if (ndev->ntb.topo != NTB_TOPO_SEC) {
/* Initiate PCI-E link training */
rc = pci_write_config_dword(ndev->ntb.pdev, ATOM_PPD_OFFSET,
ppd | ATOM_PPD_INIT_LINK);
if (rc)
return rc;
}
return 0;
}
static void atom_deinit_dev(struct intel_ntb_dev *ndev)
{
atom_deinit_isr(ndev);
}
/* Skylake Xeon NTB */
static int skx_poll_link(struct intel_ntb_dev *ndev)
@ -2586,6 +2334,10 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
goto err_dma_mask;
dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
}
rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
dma_get_mask(&pdev->dev));
if (rc)
goto err_dma_mask;
ndev->self_mmio = pci_iomap(pdev, 0, 0);
if (!ndev->self_mmio) {
@ -2658,24 +2410,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
node = dev_to_node(&pdev->dev);
if (pdev_is_atom(pdev)) {
ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
if (!ndev) {
rc = -ENOMEM;
goto err_ndev;
}
ndev_init_struct(ndev, pdev);
rc = intel_ntb_init_pci(ndev, pdev);
if (rc)
goto err_init_pci;
rc = atom_init_dev(ndev);
if (rc)
goto err_init_dev;
} else if (pdev_is_xeon(pdev)) {
if (pdev_is_xeon(pdev)) {
ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
if (!ndev) {
rc = -ENOMEM;
@ -2731,9 +2466,7 @@ static int intel_ntb_pci_probe(struct pci_dev *pdev,
err_register:
ndev_deinit_debugfs(ndev);
if (pdev_is_atom(pdev))
atom_deinit_dev(ndev);
else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
xeon_deinit_dev(ndev);
err_init_dev:
intel_ntb_deinit_pci(ndev);
@ -2749,41 +2482,12 @@ static void intel_ntb_pci_remove(struct pci_dev *pdev)
ntb_unregister_device(&ndev->ntb);
ndev_deinit_debugfs(ndev);
if (pdev_is_atom(pdev))
atom_deinit_dev(ndev);
else if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
if (pdev_is_xeon(pdev) || pdev_is_skx_xeon(pdev))
xeon_deinit_dev(ndev);
intel_ntb_deinit_pci(ndev);
kfree(ndev);
}
static const struct intel_ntb_reg atom_reg = {
.poll_link = atom_poll_link,
.link_is_up = atom_link_is_up,
.db_ioread = atom_db_ioread,
.db_iowrite = atom_db_iowrite,
.db_size = sizeof(u64),
.ntb_ctl = ATOM_NTBCNTL_OFFSET,
.mw_bar = {2, 4},
};
static const struct intel_ntb_alt_reg atom_pri_reg = {
.db_bell = ATOM_PDOORBELL_OFFSET,
.db_mask = ATOM_PDBMSK_OFFSET,
.spad = ATOM_SPAD_OFFSET,
};
static const struct intel_ntb_alt_reg atom_b2b_reg = {
.db_bell = ATOM_B2B_DOORBELL_OFFSET,
.spad = ATOM_B2B_SPAD_OFFSET,
};
static const struct intel_ntb_xlat_reg atom_sec_xlat = {
/* FIXME : .bar0_base = ATOM_SBAR0BASE_OFFSET, */
/* FIXME : .bar2_limit = ATOM_SBAR2LMT_OFFSET, */
.bar2_xlat = ATOM_SBAR2XLAT_OFFSET,
};
static const struct intel_ntb_reg xeon_reg = {
.poll_link = xeon_poll_link,
.link_is_up = xeon_link_is_up,
@ -2940,7 +2644,6 @@ static const struct file_operations intel_ntb_debugfs_info = {
};
static const struct pci_device_id intel_ntb_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},

View File

@ -66,7 +66,6 @@
#define PCI_DEVICE_ID_INTEL_NTB_B2B_HSX 0x2F0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_HSX 0x2F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_HSX 0x2F0F
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD 0x0C4E
#define PCI_DEVICE_ID_INTEL_NTB_B2B_BDX 0x6F0D
#define PCI_DEVICE_ID_INTEL_NTB_PS_BDX 0x6F0E
#define PCI_DEVICE_ID_INTEL_NTB_SS_BDX 0x6F0F
@ -196,63 +195,6 @@
#define SKX_DB_TOTAL_SHIFT 33
#define SKX_SPAD_COUNT 16
/* Intel Atom hardware */
#define ATOM_SBAR2XLAT_OFFSET 0x0008
#define ATOM_PDOORBELL_OFFSET 0x0020
#define ATOM_PDBMSK_OFFSET 0x0028
#define ATOM_NTBCNTL_OFFSET 0x0060
#define ATOM_SPAD_OFFSET 0x0080
#define ATOM_PPD_OFFSET 0x00d4
#define ATOM_PBAR2XLAT_OFFSET 0x8008
#define ATOM_B2B_DOORBELL_OFFSET 0x8020
#define ATOM_B2B_SPAD_OFFSET 0x8080
#define ATOM_SPCICMD_OFFSET 0xb004
#define ATOM_LINK_STATUS_OFFSET 0xb052
#define ATOM_ERRCORSTS_OFFSET 0xb110
#define ATOM_IP_BASE 0xc000
#define ATOM_DESKEWSTS_OFFSET (ATOM_IP_BASE + 0x3024)
#define ATOM_LTSSMERRSTS0_OFFSET (ATOM_IP_BASE + 0x3180)
#define ATOM_LTSSMSTATEJMP_OFFSET (ATOM_IP_BASE + 0x3040)
#define ATOM_IBSTERRRCRVSTS0_OFFSET (ATOM_IP_BASE + 0x3324)
#define ATOM_MODPHY_PCSREG4 0x1c004
#define ATOM_MODPHY_PCSREG6 0x1c006
#define ATOM_PPD_INIT_LINK 0x0008
#define ATOM_PPD_CONN_MASK 0x0300
#define ATOM_PPD_CONN_TRANSPARENT 0x0000
#define ATOM_PPD_CONN_B2B 0x0100
#define ATOM_PPD_CONN_RP 0x0200
#define ATOM_PPD_DEV_MASK 0x1000
#define ATOM_PPD_DEV_USD 0x0000
#define ATOM_PPD_DEV_DSD 0x1000
#define ATOM_PPD_TOPO_MASK (ATOM_PPD_CONN_MASK | ATOM_PPD_DEV_MASK)
#define ATOM_PPD_TOPO_PRI_USD (ATOM_PPD_CONN_TRANSPARENT | ATOM_PPD_DEV_USD)
#define ATOM_PPD_TOPO_PRI_DSD (ATOM_PPD_CONN_TRANSPARENT | ATOM_PPD_DEV_DSD)
#define ATOM_PPD_TOPO_SEC_USD (ATOM_PPD_CONN_RP | ATOM_PPD_DEV_USD)
#define ATOM_PPD_TOPO_SEC_DSD (ATOM_PPD_CONN_RP | ATOM_PPD_DEV_DSD)
#define ATOM_PPD_TOPO_B2B_USD (ATOM_PPD_CONN_B2B | ATOM_PPD_DEV_USD)
#define ATOM_PPD_TOPO_B2B_DSD (ATOM_PPD_CONN_B2B | ATOM_PPD_DEV_DSD)
#define ATOM_MW_COUNT 2
#define ATOM_DB_COUNT 34
#define ATOM_DB_VALID_MASK (BIT_ULL(ATOM_DB_COUNT) - 1)
#define ATOM_DB_MSIX_VECTOR_COUNT 34
#define ATOM_DB_MSIX_VECTOR_SHIFT 1
#define ATOM_DB_TOTAL_SHIFT 34
#define ATOM_SPAD_COUNT 16
#define ATOM_NTB_CTL_DOWN_BIT BIT(16)
#define ATOM_NTB_CTL_ACTIVE(x) !(x & ATOM_NTB_CTL_DOWN_BIT)
#define ATOM_DESKEWSTS_DBERR BIT(15)
#define ATOM_LTSSMERRSTS0_UNEXPECTEDEI BIT(20)
#define ATOM_LTSSMSTATEJMP_FORCEDETECT BIT(2)
#define ATOM_IBIST_ERR_OFLOW 0x7FFF7FFF
#define ATOM_LINK_HB_TIMEOUT msecs_to_jiffies(1000)
#define ATOM_LINK_RECOVERY_TIME msecs_to_jiffies(500)
/* Ntb control and link status */
#define NTB_CTL_CFG_LOCK BIT(0)

View File

@ -94,6 +94,9 @@ struct switchtec_ntb {
struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;
void __iomem *mmio_xlink_win;
struct shared_mw *self_shared;
struct shared_mw __iomem *peer_shared;
@ -109,6 +112,7 @@ struct switchtec_ntb {
int nr_direct_mw;
int nr_lut_mw;
int nr_rsvd_luts;
int direct_mw_to_bar[MAX_DIRECT_MW];
int peer_nr_direct_mw;
@ -118,6 +122,7 @@ struct switchtec_ntb {
bool link_is_up;
enum ntb_speed link_speed;
enum ntb_width link_width;
struct work_struct link_reinit_work;
};
static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
@ -172,7 +177,7 @@ static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
if (ps == status) {
dev_err(&sndev->stdev->dev,
"Timed out while peforming %s (%d). (%08x)",
"Timed out while performing %s (%d). (%08x)\n",
op_text[op], op,
ioread32(&ctl->partition_status));
@ -185,10 +190,10 @@ static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
u32 val)
{
if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg))
if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
return -EINVAL;
iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg);
iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);
return 0;
}
@ -197,7 +202,7 @@ static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
int nr_direct_mw = sndev->peer_nr_direct_mw;
int nr_lut_mw = sndev->peer_nr_lut_mw - 1;
int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
@ -210,12 +215,12 @@ static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
return mw_idx - sndev->nr_direct_mw + 1;
return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
}
static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
{
return mw_idx - sndev->peer_nr_direct_mw + 1;
return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
}
static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
@ -306,7 +311,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
if (pidx != NTB_DEF_PEER_IDX)
return -EINVAL;
dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap",
dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
widx, pidx, &addr, &size);
if (widx >= switchtec_ntb_mw_count(ntb, pidx))
@ -315,6 +320,19 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
if (xlate_pos < 12)
return -EINVAL;
if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
/*
* In certain circumstances we can get a buffer that is
* not aligned to its size. (Most of the time
* dma_alloc_coherent ensures this). This can happen when
* using large buffers allocated by the CMA
* (see CMA_CONFIG_ALIGNMENT)
*/
dev_err(&sndev->stdev->dev,
"ERROR: Memory window address is not aligned to it's size!\n");
return -EINVAL;
}
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
NTB_CTRL_PART_STATUS_LOCKED);
if (rc)
@ -337,7 +355,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
if (rc == -EIO) {
dev_err(&sndev->stdev->dev,
"Hardware reported an error configuring mw %d: %08x",
"Hardware reported an error configuring mw %d: %08x\n",
widx, ioread32(&ctl->bar_error));
if (widx < nr_direct_mw)
@ -355,8 +373,9 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
return sndev->nr_direct_mw + (use_lut_mws ? sndev->nr_lut_mw - 1 : 0);
return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
}
static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
@ -463,18 +482,69 @@ static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
sndev->link_width = min(self_width, peer_width);
}
enum {
static int crosslink_is_enabled(struct switchtec_ntb *sndev)
{
struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;
return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
}
static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
{
int i;
u32 msg_map = 0;
if (!crosslink_is_enabled(sndev))
return;
for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
int m = i | sndev->self_partition << 2;
msg_map |= m << i * 8;
}
iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
&sndev->mmio_peer_dbmsg->odb_mask);
}
enum switchtec_msg {
LINK_MESSAGE = 0,
MSG_LINK_UP = 1,
MSG_LINK_DOWN = 2,
MSG_CHECK_LINK = 3,
MSG_LINK_FORCE_DOWN = 4,
};
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);
static void link_reinit_work(struct work_struct *work)
{
struct switchtec_ntb *sndev;
sndev = container_of(work, struct switchtec_ntb, link_reinit_work);
switchtec_ntb_reinit_peer(sndev);
}
static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
enum switchtec_msg msg)
{
int link_sta;
int old = sndev->link_is_up;
if (msg == MSG_LINK_FORCE_DOWN) {
schedule_work(&sndev->link_reinit_work);
if (sndev->link_is_up) {
sndev->link_is_up = 0;
ntb_link_event(&sndev->ntb);
dev_info(&sndev->stdev->dev, "ntb link forced down\n");
}
return;
}
link_sta = sndev->self_shared->link_sta;
if (link_sta) {
u64 peer = ioread64(&sndev->peer_shared->magic);
@ -491,8 +561,11 @@ static void switchtec_ntb_check_link(struct switchtec_ntb *sndev)
if (link_sta != old) {
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
ntb_link_event(&sndev->ntb);
dev_info(&sndev->stdev->dev, "ntb link %s",
dev_info(&sndev->stdev->dev, "ntb link %s\n",
link_sta ? "up" : "down");
if (link_sta)
crosslink_init_dbmsgs(sndev);
}
}
@ -500,7 +573,7 @@ static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
{
struct switchtec_ntb *sndev = stdev->sndev;
switchtec_ntb_check_link(sndev);
switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
}
static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
@ -523,12 +596,12 @@ static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
dev_dbg(&sndev->stdev->dev, "enabling link");
dev_dbg(&sndev->stdev->dev, "enabling link\n");
sndev->self_shared->link_sta = 1;
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
switchtec_ntb_check_link(sndev);
switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
return 0;
}
@ -537,12 +610,12 @@ static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
dev_dbg(&sndev->stdev->dev, "disabling link");
dev_dbg(&sndev->stdev->dev, "disabling link\n");
sndev->self_shared->link_sta = 0;
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
switchtec_ntb_check_link(sndev);
switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
return 0;
}
@ -638,7 +711,7 @@ static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
struct switchtec_ntb *sndev = ntb_sndev(ntb);
unsigned long offset;
offset = (unsigned long)sndev->mmio_self_dbmsg->odb -
offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
(unsigned long)sndev->stdev->mmio;
offset += sndev->db_shift / 8;
@ -656,7 +729,7 @@ static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
struct switchtec_ntb *sndev = ntb_sndev(ntb);
iowrite64(db_bits << sndev->db_peer_shift,
&sndev->mmio_self_dbmsg->odb);
&sndev->mmio_peer_dbmsg->odb);
return 0;
}
@ -777,24 +850,63 @@ static const struct ntb_dev_ops switchtec_ntb_ops = {
.peer_spad_addr = switchtec_ntb_peer_spad_addr,
};
static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
{
u64 tpart_vec;
int self;
u64 part_map;
int bit;
sndev->ntb.pdev = sndev->stdev->pdev;
sndev->ntb.topo = NTB_TOPO_SWITCH;
sndev->ntb.ops = &switchtec_ntb_ops;
INIT_WORK(&sndev->link_reinit_work, link_reinit_work);
sndev->self_partition = sndev->stdev->partition;
sndev->mmio_ntb = sndev->stdev->mmio_ntb;
self = sndev->self_partition;
tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
tpart_vec <<= 32;
tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
part_map = ioread64(&sndev->mmio_ntb->ep_map);
part_map &= ~(1 << sndev->self_partition);
sndev->peer_partition = ffs(part_map) - 1;
dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d (%llx)",
sndev->self_partition, sndev->stdev->partition_count,
part_map);
if (!ffs(tpart_vec)) {
if (sndev->stdev->partition_count != 2) {
dev_err(&sndev->stdev->dev,
"ntb target partition not defined\n");
return -ENODEV;
}
bit = ffs(part_map);
if (!bit) {
dev_err(&sndev->stdev->dev,
"peer partition is not NT partition\n");
return -ENODEV;
}
sndev->peer_partition = bit - 1;
} else {
if (ffs(tpart_vec) != fls(tpart_vec)) {
dev_err(&sndev->stdev->dev,
"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
return -ENODEV;
}
sndev->peer_partition = ffs(tpart_vec) - 1;
if (!(part_map & (1 << sndev->peer_partition))) {
dev_err(&sndev->stdev->dev,
"ntb target partition is not NT partition\n");
return -ENODEV;
}
}
dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
sndev->self_partition, sndev->stdev->partition_count);
sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
SWITCHTEC_NTB_REG_CTRL_OFFSET;
@ -804,6 +916,283 @@ static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;
return 0;
}
static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
struct ntb_ctrl_regs __iomem *ctl,
int lut_idx, int partition, u64 addr)
{
int peer_bar = sndev->peer_direct_mw_to_bar[0];
u32 ctl_val;
int rc;
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
NTB_CTRL_PART_STATUS_LOCKED);
if (rc)
return rc;
ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
ctl_val &= 0xFF;
ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
ctl_val |= ilog2(LUT_SIZE) << 8;
ctl_val |= (sndev->nr_lut_mw - 1) << 14;
iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);
iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
&ctl->lut_entry[lut_idx]);
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
NTB_CTRL_PART_STATUS_NORMAL);
if (rc) {
u32 bar_error, lut_error;
bar_error = ioread32(&ctl->bar_error);
lut_error = ioread32(&ctl->lut_error);
dev_err(&sndev->stdev->dev,
"Error setting up reserved lut window: %08x / %08x\n",
bar_error, lut_error);
return rc;
}
return 0;
}
static int config_req_id_table(struct switchtec_ntb *sndev,
struct ntb_ctrl_regs __iomem *mmio_ctrl,
int *req_ids, int count)
{
int i, rc = 0;
u32 error;
u32 proxy_id;
if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
dev_err(&sndev->stdev->dev,
"Not enough requester IDs available.\n");
return -EFAULT;
}
rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
NTB_CTRL_PART_OP_LOCK,
NTB_CTRL_PART_STATUS_LOCKED);
if (rc)
return rc;
iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
&mmio_ctrl->partition_ctrl);
for (i = 0; i < count; i++) {
iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
&mmio_ctrl->req_id_table[i]);
proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
dev_dbg(&sndev->stdev->dev,
"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
(proxy_id >> 1) & 0x7);
}
rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
NTB_CTRL_PART_OP_CFG,
NTB_CTRL_PART_STATUS_NORMAL);
if (rc == -EIO) {
error = ioread32(&mmio_ctrl->req_id_error);
dev_err(&sndev->stdev->dev,
"Error setting up the requester ID table: %08x\n",
error);
}
return 0;
}
/*
 * Set up the memory windows used in crosslink mode.
 *
 * Every LUT entry other than the reserved one at @ntb_lut_idx is pointed
 * at a consecutive LUT_SIZE slice of the first crosslink BAR, and each
 * direct-window BAR is translated to the matching crosslink BAR address.
 * All register updates happen under a partition LOCK/CFG operation pair.
 *
 * Returns zero on success, otherwise a negative error number.
 */
static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
			       u64 *mw_addrs, int mw_count)
{
	int rc, i;
	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
	u64 addr;
	size_t size, offset;
	int bar;
	int xlate_pos;
	u32 ctl_val;

	/* Lock the partition before touching its window configuration */
	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
				   NTB_CTRL_PART_STATUS_LOCKED);
	if (rc)
		return rc;

	/*
	 * Point each LUT entry (except the reserved NTB one) at a
	 * LUT_SIZE slice of the first crosslink BAR in the peer partition.
	 */
	for (i = 0; i < sndev->nr_lut_mw; i++) {
		if (i == ntb_lut_idx)
			continue;

		addr = mw_addrs[0] + LUT_SIZE * i;

		iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
			   addr),
			  &ctl->lut_entry[i]);
	}

	sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);

	for (i = 0; i < sndev->nr_direct_mw; i++) {
		bar = sndev->direct_mw_to_bar[i];
		/* The first BAR shares its space with the LUT windows */
		offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
		addr = mw_addrs[i] + offset;
		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
		xlate_pos = ilog2(size);

		/*
		 * NOTE(review): xlate_pos is derived from the unclamped
		 * size; the clamp below only affects the win_size value
		 * written -- confirm this ordering is intentional.
		 */
		if (offset && size > offset)
			size = offset;

		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;

		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
		iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size);
		iowrite64(sndev->peer_partition | addr,
			  &ctl->bar_entry[bar].xlate_addr);
	}

	/* Commit the configuration and return the partition to normal */
	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
				   NTB_CTRL_PART_STATUS_NORMAL);
	if (rc) {
		u32 bar_error, lut_error;

		bar_error = ioread32(&ctl->bar_error);
		lut_error = ioread32(&ctl->lut_error);
		dev_err(&sndev->stdev->dev,
			"Error setting up cross link windows: %08x / %08x\n",
			bar_error, lut_error);
		return rc;
	}

	return 0;
}
/*
 * Copy the requester IDs programmed into our own partition's table
 * into the control registers given by @mmio_ctrl (the peer partition
 * in crosslink mode).
 */
static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
				   struct ntb_ctrl_regs __iomem *mmio_ctrl)
{
	int ids[16];
	int cnt = 0;
	u32 entry;

	/* Collect every enabled entry from the local requester ID table */
	while (cnt < ARRAY_SIZE(ids)) {
		entry = ioread32(&sndev->mmio_self_ctrl->req_id_table[cnt]);
		if (!(entry & NTB_CTRL_REQ_ID_EN))
			break;

		ids[cnt] = (entry >> 1) & 0xFF;
		cnt++;
	}

	return config_req_id_table(sndev, mmio_ctrl, ids, cnt);
}
/*
* In crosslink configuration there is a virtual partition in the
* middle of the two switches. The BARs in this partition have to be
* enumerated and assigned addresses.
*/
static int crosslink_enum_partition(struct switchtec_ntb *sndev,
				    u64 *bar_addrs)
{
	struct part_cfg_regs __iomem *part_cfg =
		&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
	u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
	struct pff_csr_regs __iomem *mmio_pff =
		&sndev->stdev->mmio_pff_csr[pff];
	const u64 bar_space = 0x1000000000LL;	/* 64 GiB per BAR */
	u64 bar_addr;
	int bar_cnt = 0;
	int i;

	/* PCI command 0x6: memory space enable + bus master enable */
	iowrite16(0x6, &mmio_pff->pcicmd);

	for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
		/* Assign an address, then read it back to probe the BAR */
		iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
		bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
		bar_addr &= ~0xf;	/* mask off the low BAR flag bits */

		dev_dbg(&sndev->stdev->dev,
			"Crosslink BAR%d addr: %llx\n",
			i, bar_addr);

		/* Skip BARs that did not accept the assigned address */
		if (bar_addr != bar_space * i)
			continue;

		bar_addrs[bar_cnt++] = bar_addr;
	}

	/* Number of BARs successfully enumerated and recorded */
	return bar_cnt;
}
/*
 * Configure crosslink operation, if the hardware is wired that way:
 * enumerate the virtual partition's BARs, reserve a LUT entry that
 * reaches the peer's doorbell/message registers, set up the memory
 * windows and requester IDs, and map the reserved window.
 *
 * Returns zero on success (including when crosslink is disabled),
 * otherwise a negative error number.
 */
static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
{
	int rc;
	int bar = sndev->direct_mw_to_bar[0];
	const int ntb_lut_idx = 1;	/* LUT entry reserved for peer DBMSG regs */
	u64 bar_addrs[6];
	u64 addr;
	int offset;
	int bar_cnt;

	/* Nothing to do unless the hardware is in crosslink mode */
	if (!crosslink_is_enabled(sndev))
		return 0;

	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
	sndev->ntb.topo = NTB_TOPO_CROSSLINK;

	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
	if (bar_cnt < sndev->nr_direct_mw + 1) {
		dev_err(&sndev->stdev->dev,
			"Error enumerating crosslink partition\n");
		return -EINVAL;
	}

	/*
	 * Address of the peer partition's doorbell/message registers
	 * inside the first crosslink BAR, rounded down to a LUT_SIZE
	 * boundary; the remainder is added back after mapping below.
	 */
	addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET +
		sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);

	offset = addr & (LUT_SIZE - 1);
	addr -= offset;

	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
				 sndev->peer_partition, addr);
	if (rc)
		return rc;

	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
				 bar_cnt - 1);
	if (rc)
		return rc;

	/* Mirror our local requester IDs into the peer's table */
	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
	if (rc)
		return rc;

	/* Map the reserved LUT window so the peer's DBMSG regs are reachable */
	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
						LUT_SIZE, LUT_SIZE);
	if (!sndev->mmio_xlink_win) {
		rc = -ENOMEM;
		return rc;
	}

	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
	sndev->nr_rsvd_luts++;

	crosslink_init_dbmsgs(sndev);

	return 0;
}
/* Unmap the crosslink window, if one was mapped during initialization */
static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
{
	if (!sndev->mmio_xlink_win)
		return;

	pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
}
static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
@ -829,7 +1218,7 @@ static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut",
dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
sndev->nr_direct_mw, sndev->nr_lut_mw);
sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
@ -839,7 +1228,7 @@ static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut",
dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
}
@ -849,24 +1238,35 @@ static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
* shared among all partitions. So we must split them in half
* (32 for each partition). However, the message interrupts are
* also shared with the top 4 doorbells so we just limit this to
* 28 doorbells per partition
* 28 doorbells per partition.
*
* In crosslink mode, each side has it's own dbmsg register so
* they can each use all 60 of the available doorbells.
*/
static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
{
sndev->db_valid_mask = 0x0FFFFFFF;
sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
if (sndev->self_partition < sndev->peer_partition) {
if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
sndev->db_shift = 0;
sndev->db_peer_shift = 0;
sndev->db_valid_mask = sndev->db_mask;
} else if (sndev->self_partition < sndev->peer_partition) {
sndev->db_shift = 0;
sndev->db_peer_shift = 32;
sndev->db_valid_mask = 0x0FFFFFFF;
} else {
sndev->db_shift = 32;
sndev->db_peer_shift = 0;
sndev->db_valid_mask = 0x0FFFFFFF;
}
sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
&sndev->mmio_self_dbmsg->odb_mask);
&sndev->mmio_peer_dbmsg->odb_mask);
dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
}
static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
@ -887,52 +1287,23 @@ static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
&sndev->mmio_self_dbmsg->imsg[i]);
}
static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
static int
switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
{
int rc = 0;
u16 req_id;
u32 error;
req_id = ioread16(&sndev->mmio_ntb->requester_id);
if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) {
dev_err(&sndev->stdev->dev,
"Not enough requester IDs available.");
return -EFAULT;
}
rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
NTB_CTRL_PART_OP_LOCK,
NTB_CTRL_PART_STATUS_LOCKED);
if (rc)
return rc;
iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
&sndev->mmio_self_ctrl->partition_ctrl);
int req_ids[2];
/*
* Root Complex Requester ID (which is 0:00.0)
*/
iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN,
&sndev->mmio_self_ctrl->req_id_table[0]);
req_ids[0] = 0;
/*
* Host Bridge Requester ID (as read from the mmap address)
*/
iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN,
&sndev->mmio_self_ctrl->req_id_table[1]);
req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);
rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl,
NTB_CTRL_PART_OP_CFG,
NTB_CTRL_PART_STATUS_NORMAL);
if (rc == -EIO) {
error = ioread32(&sndev->mmio_self_ctrl->req_id_error);
dev_err(&sndev->stdev->dev,
"Error setting up the requester ID table: %08x",
error);
}
return rc;
return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
ARRAY_SIZE(req_ids));
}
static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
@ -963,59 +1334,35 @@ static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
{
struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
int bar = sndev->direct_mw_to_bar[0];
u32 ctl_val;
int self_bar = sndev->direct_mw_to_bar[0];
int rc;
sndev->nr_rsvd_luts++;
sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev,
LUT_SIZE,
&sndev->self_shared_dma,
GFP_KERNEL);
if (!sndev->self_shared) {
dev_err(&sndev->stdev->dev,
"unable to allocate memory for shared mw");
"unable to allocate memory for shared mw\n");
return -ENOMEM;
}
switchtec_ntb_init_shared(sndev);
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
NTB_CTRL_PART_STATUS_LOCKED);
rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
sndev->self_partition,
sndev->self_shared_dma);
if (rc)
goto unalloc_and_exit;
ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
ctl_val &= 0xFF;
ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
ctl_val |= ilog2(LUT_SIZE) << 8;
ctl_val |= (sndev->nr_lut_mw - 1) << 14;
iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) |
sndev->self_shared_dma),
&ctl->lut_entry[0]);
rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
NTB_CTRL_PART_STATUS_NORMAL);
if (rc) {
u32 bar_error, lut_error;
bar_error = ioread32(&ctl->bar_error);
lut_error = ioread32(&ctl->lut_error);
dev_err(&sndev->stdev->dev,
"Error setting up shared MW: %08x / %08x",
bar_error, lut_error);
goto unalloc_and_exit;
}
sndev->peer_shared = pci_iomap(sndev->stdev->pdev, bar, LUT_SIZE);
sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
if (!sndev->peer_shared) {
rc = -ENOMEM;
goto unalloc_and_exit;
}
dev_dbg(&sndev->stdev->dev, "Shared MW Ready");
dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
return 0;
unalloc_and_exit:
@ -1034,6 +1381,7 @@ static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
sndev->self_shared,
sndev->self_shared_dma);
sndev->nr_rsvd_luts--;
}
static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
@ -1056,12 +1404,12 @@ static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
if (msg & NTB_DBMSG_IMSG_STATUS) {
dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", i,
(u32)msg);
dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
i, (u32)msg);
iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
if (i == LINK_MESSAGE)
switchtec_ntb_check_link(sndev);
switchtec_ntb_check_link(sndev, msg);
}
}
@ -1085,7 +1433,7 @@ static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
message_irq == event_irq)
message_irq++;
dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d",
dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
event_irq, doorbell_irq, message_irq);
for (i = 0; i < idb_vecs - 4; i++)
@ -1122,6 +1470,14 @@ static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
free_irq(sndev->message_irq, sndev);
}
static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
{
dev_info(&sndev->stdev->dev, "peer reinitialized\n");
switchtec_ntb_deinit_shared_mw(sndev);
switchtec_ntb_init_mw(sndev);
return switchtec_ntb_init_shared_mw(sndev);
}
static int switchtec_ntb_add(struct device *dev,
struct class_interface *class_intf)
{
@ -1134,38 +1490,50 @@ static int switchtec_ntb_add(struct device *dev,
if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE)
return -ENODEV;
if (stdev->partition_count != 2)
dev_warn(dev, "ntb driver only supports 2 partitions");
sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
if (!sndev)
return -ENOMEM;
sndev->stdev = stdev;
switchtec_ntb_init_sndev(sndev);
rc = switchtec_ntb_init_sndev(sndev);
if (rc)
goto free_and_exit;
switchtec_ntb_init_mw(sndev);
switchtec_ntb_init_db(sndev);
switchtec_ntb_init_msgs(sndev);
rc = switchtec_ntb_init_req_id_table(sndev);
if (rc)
goto free_and_exit;
rc = switchtec_ntb_init_shared_mw(sndev);
rc = switchtec_ntb_init_crosslink(sndev);
if (rc)
goto free_and_exit;
switchtec_ntb_init_db(sndev);
switchtec_ntb_init_msgs(sndev);
rc = switchtec_ntb_init_shared_mw(sndev);
if (rc)
goto deinit_crosslink;
rc = switchtec_ntb_init_db_msg_irq(sndev);
if (rc)
goto deinit_shared_and_exit;
/*
* If this host crashed, the other host may think the link is
* still up. Tell them to force it down (it will go back up
* once we register the ntb device).
*/
switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);
rc = ntb_register_device(&sndev->ntb);
if (rc)
goto deinit_and_exit;
stdev->sndev = sndev;
stdev->link_notifier = switchtec_ntb_link_notification;
dev_info(dev, "NTB device registered");
dev_info(dev, "NTB device registered\n");
return 0;
@ -1173,14 +1541,16 @@ deinit_and_exit:
switchtec_ntb_deinit_db_msg_irq(sndev);
deinit_shared_and_exit:
switchtec_ntb_deinit_shared_mw(sndev);
deinit_crosslink:
switchtec_ntb_deinit_crosslink(sndev);
free_and_exit:
kfree(sndev);
dev_err(dev, "failed to register ntb device: %d", rc);
dev_err(dev, "failed to register ntb device: %d\n", rc);
return rc;
}
void switchtec_ntb_remove(struct device *dev,
struct class_interface *class_intf)
static void switchtec_ntb_remove(struct device *dev,
struct class_interface *class_intf)
{
struct switchtec_dev *stdev = to_stdev(dev);
struct switchtec_ntb *sndev = stdev->sndev;
@ -1193,8 +1563,9 @@ void switchtec_ntb_remove(struct device *dev,
ntb_unregister_device(&sndev->ntb);
switchtec_ntb_deinit_db_msg_irq(sndev);
switchtec_ntb_deinit_shared_mw(sndev);
switchtec_ntb_deinit_crosslink(sndev);
kfree(sndev);
dev_info(dev, "ntb device unregistered");
dev_info(dev, "ntb device unregistered\n");
}
static struct class_interface switchtec_interface = {

View File

@ -63,12 +63,11 @@
#define DRIVER_NAME "ntb"
#define DRIVER_DESCRIPTION "PCIe NTB Driver Framework"
#define DRIVER_LICENSE "Dual BSD/GPL"
#define DRIVER_VERSION "1.0"
#define DRIVER_RELDATE "24 March 2015"
#define DRIVER_AUTHOR "Allen Hubbe <Allen.Hubbe@emc.com>"
MODULE_LICENSE(DRIVER_LICENSE);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
@ -112,7 +111,6 @@ int ntb_register_device(struct ntb_dev *ntb)
init_completion(&ntb->released);
memset(&ntb->dev, 0, sizeof(ntb->dev));
ntb->dev.bus = &ntb_bus;
ntb->dev.parent = &ntb->pdev->dev;
ntb->dev.release = ntb_dev_release;

View File

@ -1003,6 +1003,9 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
mw_base = nt->mw_vec[mw_num].phys_addr;
mw_size = nt->mw_vec[mw_num].phys_size;
if (max_mw_size && mw_size > max_mw_size)
mw_size = max_mw_size;
tx_size = (unsigned int)mw_size / num_qps_mw;
qp_offset = tx_size * (qp_num / mw_count);

File diff suppressed because it is too large Load Diff

View File

@ -1,10 +1,11 @@
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
* Copyright (C) 2017 T-Platforms. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -18,6 +19,7 @@
* BSD LICENSE
*
* Copyright (C) 2015 EMC Corporation. All Rights Reserved.
* Copyright (C) 2017 T-Platforms. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -46,37 +48,45 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* PCIe NTB Pingpong Linux driver
*
* Contact Information:
* Allen Hubbe <Allen.Hubbe@emc.com>
*/
/* Note: load this module with option 'dyndbg=+p' */
/*
* How to use this tool, by example.
*
* Assuming $DBG_DIR is something like:
* '/sys/kernel/debug/ntb_perf/0000:00:03.0'
* Suppose aside from local device there is at least one remote device
* connected to NTB with index 0.
*-----------------------------------------------------------------------------
* Eg: install driver with specified delay between doorbell event and response
*
* root@self# insmod ntb_pingpong.ko delay_ms=1000
*-----------------------------------------------------------------------------
* Eg: get number of ping-pong cycles performed
*
* root@self# cat $DBG_DIR/count
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/debugfs.h>
#include <linux/ntb.h>
#define DRIVER_NAME "ntb_pingpong"
#define DRIVER_DESCRIPTION "PCIe NTB Simple Pingpong Client"
#define DRIVER_NAME "ntb_pingpong"
#define DRIVER_VERSION "2.0"
#define DRIVER_LICENSE "Dual BSD/GPL"
#define DRIVER_VERSION "1.0"
#define DRIVER_RELDATE "24 March 2015"
#define DRIVER_AUTHOR "Allen Hubbe <Allen.Hubbe@emc.com>"
MODULE_LICENSE(DRIVER_LICENSE);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_AUTHOR("Allen Hubbe <Allen.Hubbe@emc.com>");
MODULE_DESCRIPTION("PCIe NTB Simple Pingpong Client");
static unsigned int unsafe;
module_param(unsafe, uint, 0644);
@ -86,237 +96,343 @@ static unsigned int delay_ms = 1000;
module_param(delay_ms, uint, 0644);
MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer");
static unsigned long db_init = 0x7;
module_param(db_init, ulong, 0644);
MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer");
/* Only two-ports NTB devices are supported */
#define PIDX NTB_DEF_PEER_IDX
struct pp_ctx {
struct ntb_dev *ntb;
u64 db_bits;
/* synchronize access to db_bits by ping and pong */
spinlock_t db_lock;
struct timer_list db_timer;
unsigned long db_delay;
struct dentry *debugfs_node_dir;
struct dentry *debugfs_count;
atomic_t count;
struct ntb_dev *ntb;
struct hrtimer timer;
u64 in_db;
u64 out_db;
int out_pidx;
u64 nmask;
u64 pmask;
atomic_t count;
spinlock_t lock;
struct dentry *dbgfs_dir;
};
#define to_pp_timer(__timer) \
container_of(__timer, struct pp_ctx, timer)
static struct dentry *pp_debugfs_dir;
static struct dentry *pp_dbgfs_topdir;
static void pp_ping(struct timer_list *t)
static int pp_find_next_peer(struct pp_ctx *pp)
{
struct pp_ctx *pp = from_timer(pp, t, db_timer);
unsigned long irqflags;
u64 db_bits, db_mask;
u32 spad_rd, spad_wr;
u64 link, out_db;
int pidx;
spin_lock_irqsave(&pp->db_lock, irqflags);
{
db_mask = ntb_db_valid_mask(pp->ntb);
db_bits = ntb_db_read(pp->ntb);
link = ntb_link_is_up(pp->ntb, NULL, NULL);
if (db_bits) {
dev_dbg(&pp->ntb->dev,
"Masked pongs %#llx\n",
db_bits);
ntb_db_clear(pp->ntb, db_bits);
}
db_bits = ((pp->db_bits | db_bits) << 1) & db_mask;
if (!db_bits)
db_bits = db_init;
spad_rd = ntb_spad_read(pp->ntb, 0);
spad_wr = spad_rd + 1;
dev_dbg(&pp->ntb->dev,
"Ping bits %#llx read %#x write %#x\n",
db_bits, spad_rd, spad_wr);
ntb_peer_spad_write(pp->ntb, PIDX, 0, spad_wr);
ntb_peer_db_set(pp->ntb, db_bits);
ntb_db_clear_mask(pp->ntb, db_mask);
pp->db_bits = 0;
/* Find next available peer */
if (link & pp->nmask) {
pidx = __ffs64(link & pp->nmask);
out_db = BIT_ULL(pidx + 1);
} else if (link & pp->pmask) {
pidx = __ffs64(link & pp->pmask);
out_db = BIT_ULL(pidx);
} else {
return -ENODEV;
}
spin_unlock_irqrestore(&pp->db_lock, irqflags);
spin_lock(&pp->lock);
pp->out_pidx = pidx;
pp->out_db = out_db;
spin_unlock(&pp->lock);
return 0;
}
/*
 * (Re)start the ping-pong procedure: mask our inbound doorbell, stop
 * any pending timer, pick the next available peer, and schedule the
 * first ping after delay_ms milliseconds.  If no peer is up, the
 * procedure simply stays cancelled.
 */
static void pp_setup(struct pp_ctx *pp)
{
	int ret;

	ntb_db_set_mask(pp->ntb, pp->in_db);

	hrtimer_cancel(&pp->timer);

	ret = pp_find_next_peer(pp);
	if (ret == -ENODEV) {
		dev_dbg(&pp->ntb->dev, "Got no peers, so cancel\n");
		return;
	}

	dev_dbg(&pp->ntb->dev, "Ping-pong started with port %d, db %#llx\n",
		ntb_peer_port_number(pp->ntb, pp->out_pidx), pp->out_db);

	hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
}
/* Stop the ping-pong procedure: cancel the timer and mask our doorbell */
static void pp_clear(struct pp_ctx *pp)
{
	hrtimer_cancel(&pp->timer);

	ntb_db_set_mask(pp->ntb, pp->in_db);

	dev_dbg(&pp->ntb->dev, "Ping-pong cancelled\n");
}
/*
 * Send a ping to the currently selected peer: publish the cycle
 * counter via both the scratchpad and the message register, ring the
 * peer's doorbell, then unmask our own doorbell to await the pong.
 * The spinlock keeps this consistent with peer reselection, which
 * takes the same lock in pp_find_next_peer().
 */
static void pp_ping(struct pp_ctx *pp)
{
	u32 count;

	count = atomic_read(&pp->count);

	spin_lock(&pp->lock);
	ntb_peer_spad_write(pp->ntb, pp->out_pidx, 0, count);
	ntb_peer_msg_write(pp->ntb, pp->out_pidx, 0, count);

	dev_dbg(&pp->ntb->dev, "Ping port %d spad %#x, msg %#x\n",
		ntb_peer_port_number(pp->ntb, pp->out_pidx), count, count);

	ntb_peer_db_set(pp->ntb, pp->out_db);
	/* Unmask only after the ping data is visible to the peer */
	ntb_db_clear_mask(pp->ntb, pp->in_db);
	spin_unlock(&pp->lock);
}
/*
 * Handle an incoming pong: read the peer's counter from both the
 * scratchpad and the message register, bump the local cycle count,
 * re-mask and acknowledge our doorbell, then schedule the next ping.
 */
static void pp_pong(struct pp_ctx *pp)
{
	u32 msg_data = -1, spad_data = -1;
	int pidx = 0;

	/* Read pong data */
	spad_data = ntb_spad_read(pp->ntb, 0);
	msg_data = ntb_msg_read(pp->ntb, &pidx, 0);
	ntb_msg_clear_sts(pp->ntb, -1);

	/*
	 * Scratchpad and message data may differ, since message register can't
	 * be rewritten unless status is cleared. Additionally either of them
	 * might be unsupported
	 */
	dev_dbg(&pp->ntb->dev, "Pong spad %#x, msg %#x (port %d)\n",
		spad_data, msg_data, ntb_peer_port_number(pp->ntb, pidx));

	atomic_inc(&pp->count);

	/* Quiesce our doorbell until the next ping is sent */
	ntb_db_set_mask(pp->ntb, pp->in_db);
	ntb_db_clear(pp->ntb, pp->in_db);

	hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
}
/* hrtimer callback: fire one ping; re-arming happens in pp_pong() */
static enum hrtimer_restart pp_timer_func(struct hrtimer *t)
{
	pp_ping(to_pp_timer(t));

	return HRTIMER_NORESTART;
}
static void pp_link_event(void *ctx)
{
struct pp_ctx *pp = ctx;
if (ntb_link_is_up(pp->ntb, NULL, NULL) == 1) {
dev_dbg(&pp->ntb->dev, "link is up\n");
pp_ping(&pp->db_timer);
} else {
dev_dbg(&pp->ntb->dev, "link is down\n");
del_timer(&pp->db_timer);
}
pp_setup(pp);
}
static void pp_db_event(void *ctx, int vec)
{
struct pp_ctx *pp = ctx;
u64 db_bits, db_mask;
unsigned long irqflags;
spin_lock_irqsave(&pp->db_lock, irqflags);
{
db_mask = ntb_db_vector_mask(pp->ntb, vec);
db_bits = db_mask & ntb_db_read(pp->ntb);
ntb_db_set_mask(pp->ntb, db_mask);
ntb_db_clear(pp->ntb, db_bits);
pp->db_bits |= db_bits;
mod_timer(&pp->db_timer, jiffies + pp->db_delay);
dev_dbg(&pp->ntb->dev,
"Pong vec %d bits %#llx\n",
vec, db_bits);
atomic_inc(&pp->count);
}
spin_unlock_irqrestore(&pp->db_lock, irqflags);
}
static int pp_debugfs_setup(struct pp_ctx *pp)
{
struct pci_dev *pdev = pp->ntb->pdev;
if (!pp_debugfs_dir)
return -ENODEV;
pp->debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
pp_debugfs_dir);
if (!pp->debugfs_node_dir)
return -ENODEV;
pp->debugfs_count = debugfs_create_atomic_t("count", S_IRUSR | S_IWUSR,
pp->debugfs_node_dir,
&pp->count);
if (!pp->debugfs_count)
return -ENODEV;
return 0;
pp_pong(pp);
}
static const struct ntb_ctx_ops pp_ops = {
.link_event = pp_link_event,
.db_event = pp_db_event,
.db_event = pp_db_event
};
static int pp_probe(struct ntb_client *client,
struct ntb_dev *ntb)
static int pp_check_ntb(struct ntb_dev *ntb)
{
struct pp_ctx *pp;
int rc;
u64 pmask;
if (ntb_db_is_unsafe(ntb)) {
dev_dbg(&ntb->dev, "doorbell is unsafe\n");
if (!unsafe) {
rc = -EINVAL;
goto err_pp;
}
}
if (ntb_spad_count(ntb) < 1) {
dev_dbg(&ntb->dev, "no enough scratchpads\n");
rc = -EINVAL;
goto err_pp;
dev_dbg(&ntb->dev, "Doorbell is unsafe\n");
if (!unsafe)
return -EINVAL;
}
if (ntb_spad_is_unsafe(ntb)) {
dev_dbg(&ntb->dev, "scratchpad is unsafe\n");
if (!unsafe) {
rc = -EINVAL;
goto err_pp;
}
dev_dbg(&ntb->dev, "Scratchpad is unsafe\n");
if (!unsafe)
return -EINVAL;
}
if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
dev_warn(&ntb->dev, "multi-port NTB is unsupported\n");
pp = kmalloc(sizeof(*pp), GFP_KERNEL);
if (!pp) {
rc = -ENOMEM;
goto err_pp;
pmask = GENMASK_ULL(ntb_peer_port_count(ntb), 0);
if ((ntb_db_valid_mask(ntb) & pmask) != pmask) {
dev_err(&ntb->dev, "Unsupported DB configuration\n");
return -EINVAL;
}
pp->ntb = ntb;
pp->db_bits = 0;
atomic_set(&pp->count, 0);
spin_lock_init(&pp->db_lock);
timer_setup(&pp->db_timer, pp_ping, 0);
pp->db_delay = msecs_to_jiffies(delay_ms);
rc = ntb_set_ctx(ntb, pp, &pp_ops);
if (rc)
goto err_ctx;
rc = pp_debugfs_setup(pp);
if (rc)
goto err_ctx;
ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
ntb_link_event(ntb);
if (ntb_spad_count(ntb) < 1 && ntb_msg_count(ntb) < 1) {
dev_err(&ntb->dev, "Scratchpads and messages unsupported\n");
return -EINVAL;
} else if (ntb_spad_count(ntb) < 1) {
dev_dbg(&ntb->dev, "Scratchpads unsupported\n");
} else if (ntb_msg_count(ntb) < 1) {
dev_dbg(&ntb->dev, "Messages unsupported\n");
}
return 0;
err_ctx:
kfree(pp);
err_pp:
return rc;
}
static void pp_remove(struct ntb_client *client,
struct ntb_dev *ntb)
/*
 * Allocate and initialize the driver context for @ntb.  The memory is
 * device-managed, so it is released automatically with the device.
 *
 * Returns the new context, or an ERR_PTR() on allocation failure.
 */
static struct pp_ctx *pp_create_data(struct ntb_dev *ntb)
{
	struct pp_ctx *ctx = devm_kzalloc(&ntb->dev, sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->ntb = ntb;
	atomic_set(&ctx->count, 0);
	spin_lock_init(&ctx->lock);
	hrtimer_init(&ctx->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	ctx->timer.function = pp_timer_func;

	return ctx;
}
/*
 * Derive the doorbell layout from the port topology: this port owns
 * the doorbell bit matching its position in the global port order,
 * with pmask/nmask selecting the peers before and after that slot.
 */
static void pp_init_flds(struct pp_ctx *pp)
{
	int pidx, lport, pcnt;

	/* Find global port index */
	lport = ntb_port_number(pp->ntb);
	pcnt = ntb_peer_port_count(pp->ntb);
	for (pidx = 0; pidx < pcnt; pidx++) {
		if (lport < ntb_peer_port_number(pp->ntb, pidx))
			break;
	}

	pp->in_db = BIT_ULL(pidx);
	pp->pmask = GENMASK_ULL(pidx, 0) >> 1;	/* peers before our slot */
	pp->nmask = GENMASK_ULL(pcnt - 1, pidx);	/* peers at/after our slot */

	dev_dbg(&pp->ntb->dev, "Inbound db %#llx, prev %#llx, next %#llx\n",
		pp->in_db, pp->pmask, pp->nmask);
}
/*
 * Mask all doorbell and message events so nothing fires before the
 * context is registered.  Message masking is skipped on hardware
 * without message registers.
 *
 * Returns zero on success, otherwise a negative error number.
 */
static int pp_mask_events(struct pp_ctx *pp)
{
	int rc = ntb_db_set_mask(pp->ntb, ntb_db_valid_mask(pp->ntb));

	if (rc)
		return rc;

	/* No message registers -> nothing more to mask */
	if (ntb_msg_count(pp->ntb) < 1)
		return 0;

	return ntb_msg_set_mask(pp->ntb,
				ntb_msg_outbits(pp->ntb) |
				ntb_msg_inbits(pp->ntb));
}
/*
 * Register the driver context with the NTB core and enable the link.
 *
 * Returns zero on success, otherwise the error from ntb_set_ctx().
 */
static int pp_setup_ctx(struct pp_ctx *pp)
{
	int ret;

	ret = ntb_set_ctx(pp->ntb, pp, &pp_ops);
	if (ret)
		return ret;

	ntb_link_enable(pp->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	/* Might be not necessary */
	ntb_link_event(pp->ntb);

	return 0;
}
/* Undo pp_setup_ctx(): take the link down, then drop the context */
static void pp_clear_ctx(struct pp_ctx *pp)
{
	struct ntb_dev *ntb = pp->ntb;

	ntb_link_disable(ntb);
	ntb_clear_ctx(ntb);
}
/*
 * Create the per-device debugfs directory with a "count" attribute
 * exposing the number of completed ping-pong cycles.  Failure is
 * non-fatal; it only produces a warning.
 */
static void pp_setup_dbgfs(struct pp_ctx *pp)
{
	struct pci_dev *pdev = pp->ntb->pdev;
	void *ret;

	pp->dbgfs_dir = debugfs_create_dir(pci_name(pdev), pp_dbgfs_topdir);

	ret = debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
	if (!ret)
		dev_warn(&pp->ntb->dev, "DebugFS unsupported\n");
}
/* Remove the per-device debugfs directory created by pp_setup_dbgfs() */
static void pp_clear_dbgfs(struct pp_ctx *pp)
{
	debugfs_remove_recursive(pp->dbgfs_dir);
}
/*
 * NTB client probe: validate the device capabilities, allocate the
 * context, mask all events, then register the context and enable the
 * link.  The context is devm-managed, so the error paths need no
 * explicit cleanup.
 *
 * Returns zero on success, otherwise a negative error number.
 */
static int pp_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pp_ctx *pp;
	int ret;

	ret = pp_check_ntb(ntb);
	if (ret)
		return ret;

	pp = pp_create_data(ntb);
	if (IS_ERR(pp))
		return PTR_ERR(pp);

	pp_init_flds(pp);

	ret = pp_mask_events(pp);
	if (ret)
		return ret;

	ret = pp_setup_ctx(pp);
	if (ret)
		return ret;

	/* debugfs is best-effort; pp_setup_dbgfs() warns on failure */
	pp_setup_dbgfs(pp);

	return 0;
}
static void pp_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
struct pp_ctx *pp = ntb->ctx;
debugfs_remove_recursive(pp->debugfs_node_dir);
pp_clear_dbgfs(pp);
ntb_clear_ctx(ntb);
del_timer_sync(&pp->db_timer);
ntb_link_disable(ntb);
pp_clear_ctx(pp);
kfree(pp);
pp_clear(pp);
}
static struct ntb_client pp_client = {
.ops = {
.probe = pp_probe,
.remove = pp_remove,
},
.remove = pp_remove
}
};
static int __init pp_init(void)
{
int rc;
int ret;
if (debugfs_initialized())
pp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
pp_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
rc = ntb_register_client(&pp_client);
if (rc)
goto err_client;
ret = ntb_register_client(&pp_client);
if (ret)
debugfs_remove_recursive(pp_dbgfs_topdir);
return 0;
err_client:
debugfs_remove_recursive(pp_debugfs_dir);
return rc;
return ret;
}
module_init(pp_init);
static void __exit pp_exit(void)
{
ntb_unregister_client(&pp_client);
debugfs_remove_recursive(pp_debugfs_dir);
debugfs_remove_recursive(pp_dbgfs_topdir);
}
module_exit(pp_exit);

File diff suppressed because it is too large Load Diff

View File

@ -71,6 +71,7 @@ struct pci_dev;
* @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb.
* @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb.
* @NTB_TOPO_SWITCH: Connected via a switch which supports ntb.
* @NTB_TOPO_CROSSLINK: Connected via two symmetric switchecs
*/
enum ntb_topo {
NTB_TOPO_NONE = -1,
@ -79,6 +80,7 @@ enum ntb_topo {
NTB_TOPO_B2B_USD,
NTB_TOPO_B2B_DSD,
NTB_TOPO_SWITCH,
NTB_TOPO_CROSSLINK,
};
static inline int ntb_topo_is_b2b(enum ntb_topo topo)
@ -94,12 +96,13 @@ static inline int ntb_topo_is_b2b(enum ntb_topo topo)
static inline char *ntb_topo_string(enum ntb_topo topo)
{
switch (topo) {
case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH";
case NTB_TOPO_NONE: return "NTB_TOPO_NONE";
case NTB_TOPO_PRI: return "NTB_TOPO_PRI";
case NTB_TOPO_SEC: return "NTB_TOPO_SEC";
case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD";
case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD";
case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH";
case NTB_TOPO_CROSSLINK: return "NTB_TOPO_CROSSLINK";
}
return "NTB_TOPO_INVALID";
}
@ -250,7 +253,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops)
* @msg_set_mask: See ntb_msg_set_mask().
* @msg_clear_mask: See ntb_msg_clear_mask().
* @msg_read: See ntb_msg_read().
* @msg_write: See ntb_msg_write().
* @peer_msg_write: See ntb_peer_msg_write().
*/
struct ntb_dev_ops {
int (*port_number)(struct ntb_dev *ntb);
@ -321,8 +324,8 @@ struct ntb_dev_ops {
int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits);
int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits);
int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits);
int (*msg_read)(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg);
int (*msg_write)(struct ntb_dev *ntb, int midx, int pidx, u32 msg);
u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx);
int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg);
};
static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
@ -384,7 +387,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
/* !ops->msg_set_mask == !ops->msg_count && */
/* !ops->msg_clear_mask == !ops->msg_count && */
!ops->msg_read == !ops->msg_count &&
!ops->msg_write == !ops->msg_count &&
!ops->peer_msg_write == !ops->msg_count &&
1;
}
@ -764,7 +767,7 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx,
resource_size_t *size_align,
resource_size_t *size_max)
{
if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx)))
if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT_ULL(pidx)))
return -ENOTCONN;
return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align,
@ -1459,31 +1462,29 @@ static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
}
/**
* ntb_msg_read() - read message register with specified index
* ntb_msg_read() - read inbound message register with specified index
* @ntb: NTB device context.
* @midx: Message register index
* @pidx: OUT - Port index of peer device a message retrieved from
* @msg: OUT - Data
* @midx: Message register index
*
* Read data from the specified message register. Source port index of a
* message is retrieved as well.
*
* Return: Zero on success, otherwise a negative error number.
* Return: The value of the inbound message register.
*/
static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx,
u32 *msg)
static inline u32 ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx)
{
if (!ntb->ops->msg_read)
return -EINVAL;
return ~(u32)0;
return ntb->ops->msg_read(ntb, midx, pidx, msg);
return ntb->ops->msg_read(ntb, pidx, midx);
}
/**
* ntb_msg_write() - write data to the specified message register
* ntb_peer_msg_write() - write data to the specified peer message register
* @ntb: NTB device context.
* @midx: Message register index
* @pidx: Port index of peer device a message being sent to
* @midx: Message register index
* @msg: Data to send
*
* Send data to a specified peer device using the defined message register.
@ -1492,13 +1493,13 @@ static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx,
*
* Return: Zero on success, otherwise a negative error number.
*/
static inline int ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx,
u32 msg)
static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
u32 msg)
{
if (!ntb->ops->msg_write)
if (!ntb->ops->peer_msg_write)
return -EINVAL;
return ntb->ops->msg_write(ntb, midx, pidx, msg);
return ntb->ops->peer_msg_write(ntb, pidx, midx, msg);
}
#endif

View File

@ -168,6 +168,14 @@ struct ntb_info_regs {
u16 reserved1;
u64 ep_map;
u16 requester_id;
u16 reserved2;
u32 reserved3[4];
struct nt_partition_info {
u32 xlink_enabled;
u32 target_part_low;
u32 target_part_high;
u32 reserved;
} ntp_info[48];
} __packed;
struct part_cfg_regs {
@ -284,7 +292,20 @@ enum {
struct pff_csr_regs {
u16 vendor_id;
u16 device_id;
u32 pci_cfg_header[15];
u16 pcicmd;
u16 pcists;
u32 pci_class;
u32 pci_opts;
union {
u32 pci_bar[6];
u64 pci_bar64[3];
};
u32 pci_cardbus;
u32 pci_subsystem_id;
u32 pci_expansion_rom;
u32 pci_cap_ptr;
u32 reserved1;
u32 pci_irq;
u32 pci_cap_region[48];
u32 pcie_cap_region[448];
u32 indirect_gas_window[128];

View File

@ -18,7 +18,6 @@ LIST_DEVS=FALSE
DEBUGFS=${DEBUGFS-/sys/kernel/debug}
DB_BITMASK=0x7FFF
PERF_RUN_ORDER=32
MAX_MW_SIZE=0
RUN_DMA_TESTS=
@ -39,15 +38,17 @@ function show_help()
echo "be highly recommended."
echo
echo "Options:"
echo " -b BITMASK doorbell clear bitmask for ntb_tool"
echo " -C don't cleanup ntb modules on exit"
echo " -d run dma tests"
echo " -h show this help message"
echo " -l list available local and remote PCI ids"
echo " -r REMOTE_HOST specify the remote's hostname to connect"
echo " to for the test (using ssh)"
echo " -p NUM ntb_perf run order (default: $PERF_RUN_ORDER)"
echo " -w max_mw_size maxmium memory window size"
echo " to for the test (using ssh)"
echo " -m MW_SIZE memory window size for ntb_tool"
echo " (default: $MW_SIZE)"
echo " -d run dma tests for ntb_perf"
echo " -p ORDER total data order for ntb_perf"
echo " (default: $PERF_RUN_ORDER)"
echo " -w MAX_MW_SIZE maxmium memory window size for ntb_perf"
echo
}
@ -56,7 +57,6 @@ function parse_args()
OPTIND=0
while getopts "b:Cdhlm:r:p:w:" opt; do
case "$opt" in
b) DB_BITMASK=${OPTARG} ;;
C) DONT_CLEANUP=1 ;;
d) RUN_DMA_TESTS=1 ;;
h) show_help; exit 0 ;;
@ -87,7 +87,7 @@ set -e
function _modprobe()
{
modprobe "$@"
modprobe "$@"
if [[ "$REMOTE_HOST" != "" ]]; then
ssh "$REMOTE_HOST" modprobe "$@"
@ -127,15 +127,70 @@ function write_file()
fi
}
function check_file()
{
split_remote $1
if [[ "$REMOTE" != "" ]]; then
ssh "$REMOTE" "[[ -e ${VPATH} ]]"
else
[[ -e ${VPATH} ]]
fi
}
function subdirname()
{
echo $(basename $(dirname $1)) 2> /dev/null
}
function find_pidx()
{
PORT=$1
PPATH=$2
for ((i = 0; i < 64; i++)); do
PEER_DIR="$PPATH/peer$i"
check_file ${PEER_DIR} || break
PEER_PORT=$(read_file "${PEER_DIR}/port")
if [[ ${PORT} -eq $PEER_PORT ]]; then
echo $i
return 0
fi
done
return 1
}
function port_test()
{
LOC=$1
REM=$2
echo "Running port tests on: $(basename $LOC) / $(basename $REM)"
LOCAL_PORT=$(read_file "$LOC/port")
REMOTE_PORT=$(read_file "$REM/port")
LOCAL_PIDX=$(find_pidx ${REMOTE_PORT} "$LOC")
REMOTE_PIDX=$(find_pidx ${LOCAL_PORT} "$REM")
echo "Local port ${LOCAL_PORT} with index ${REMOTE_PIDX} on remote host"
echo "Peer port ${REMOTE_PORT} with index ${LOCAL_PIDX} on local host"
echo " Passed"
}
function link_test()
{
LOC=$1
REM=$2
EXP=0
echo "Running link tests on: $(basename $LOC) / $(basename $REM)"
echo "Running link tests on: $(subdirname $LOC) / $(subdirname $REM)"
if ! write_file "N" "$LOC/link" 2> /dev/null; then
if ! write_file "N" "$LOC/../link" 2> /dev/null; then
echo " Unsupported"
return
fi
@ -143,12 +198,11 @@ function link_test()
write_file "N" "$LOC/link_event"
if [[ $(read_file "$REM/link") != "N" ]]; then
echo "Expected remote link to be down in $REM/link" >&2
echo "Expected link to be down in $REM/link" >&2
exit -1
fi
write_file "Y" "$LOC/link"
write_file "Y" "$LOC/link_event"
write_file "Y" "$LOC/../link"
echo " Passed"
}
@ -161,58 +215,136 @@ function doorbell_test()
echo "Running db tests on: $(basename $LOC) / $(basename $REM)"
write_file "c $DB_BITMASK" "$REM/db"
DB_VALID_MASK=$(read_file "$LOC/db_valid_mask")
for ((i=1; i <= 8; i++)); do
let DB=$(read_file "$REM/db") || true
if [[ "$DB" != "$EXP" ]]; then
write_file "c $DB_VALID_MASK" "$REM/db"
for ((i = 0; i < 64; i++)); do
DB=$(read_file "$REM/db")
if [[ "$DB" -ne "$EXP" ]]; then
echo "Doorbell doesn't match expected value $EXP " \
"in $REM/db" >&2
exit -1
fi
let "MASK=1 << ($i-1)" || true
let "EXP=$EXP | $MASK" || true
let "MASK = (1 << $i) & $DB_VALID_MASK" || true
let "EXP = $EXP | $MASK" || true
write_file "s $MASK" "$LOC/peer_db"
done
write_file "c $DB_VALID_MASK" "$REM/db_mask"
write_file $DB_VALID_MASK "$REM/db_event"
write_file "s $DB_VALID_MASK" "$REM/db_mask"
write_file "c $DB_VALID_MASK" "$REM/db"
echo " Passed"
}
function read_spad()
function get_files_count()
{
VPATH=$1
IDX=$2
NAME=$1
LOC=$2
ROW=($(read_file "$VPATH" | grep -e "^$IDX"))
let VAL=${ROW[1]} || true
echo $VAL
split_remote $LOC
if [[ "$REMOTE" == "" ]]; then
echo $(ls -1 "$LOC"/${NAME}* 2>/dev/null | wc -l)
else
echo $(ssh "$REMOTE" "ls -1 \"$VPATH\"/${NAME}* | \
wc -l" 2> /dev/null)
fi
}
function scratchpad_test()
{
LOC=$1
REM=$2
CNT=$(read_file "$LOC/spad" | wc -l)
echo "Running spad tests on: $(basename $LOC) / $(basename $REM)"
echo "Running spad tests on: $(subdirname $LOC) / $(subdirname $REM)"
CNT=$(get_files_count "spad" "$LOC")
if [[ $CNT -eq 0 ]]; then
echo " Unsupported"
return
fi
for ((i = 0; i < $CNT; i++)); do
VAL=$RANDOM
write_file "$i $VAL" "$LOC/peer_spad"
RVAL=$(read_spad "$REM/spad" $i)
write_file "$VAL" "$LOC/spad$i"
RVAL=$(read_file "$REM/../spad$i")
if [[ "$VAL" != "$RVAL" ]]; then
echo "Scratchpad doesn't match expected value $VAL " \
"in $REM/spad, got $RVAL" >&2
if [[ "$VAL" -ne "$RVAL" ]]; then
echo "Scratchpad $i value $RVAL doesn't match $VAL" >&2
exit -1
fi
done
echo " Passed"
}
function message_test()
{
LOC=$1
REM=$2
echo "Running msg tests on: $(subdirname $LOC) / $(subdirname $REM)"
CNT=$(get_files_count "msg" "$LOC")
if [[ $CNT -eq 0 ]]; then
echo " Unsupported"
return
fi
MSG_OUTBITS_MASK=$(read_file "$LOC/../msg_inbits")
MSG_INBITS_MASK=$(read_file "$REM/../msg_inbits")
write_file "c $MSG_OUTBITS_MASK" "$LOC/../msg_sts"
write_file "c $MSG_INBITS_MASK" "$REM/../msg_sts"
for ((i = 0; i < $CNT; i++)); do
VAL=$RANDOM
write_file "$VAL" "$LOC/msg$i"
RVAL=$(read_file "$REM/../msg$i")
if [[ "$VAL" -ne "${RVAL%%<-*}" ]]; then
echo "Message $i value $RVAL doesn't match $VAL" >&2
exit -1
fi
done
echo " Passed"
}
function get_number()
{
KEY=$1
sed -n "s/^\(${KEY}\)[ \t]*\(0x[0-9a-fA-F]*\)\(\[p\]\)\?$/\2/p"
}
function mw_alloc()
{
IDX=$1
LOC=$2
REM=$3
write_file $MW_SIZE "$LOC/mw_trans$IDX"
INB_MW=$(read_file "$LOC/mw_trans$IDX")
MW_ALIGNED_SIZE=$(echo "$INB_MW" | get_number "Window Size")
MW_DMA_ADDR=$(echo "$INB_MW" | get_number "DMA Address")
write_file "$MW_DMA_ADDR:$(($MW_ALIGNED_SIZE))" "$REM/peer_mw_trans$IDX"
if [[ $MW_SIZE -ne $MW_ALIGNED_SIZE ]]; then
echo "MW $IDX size aligned to $MW_ALIGNED_SIZE"
fi
}
function write_mw()
{
split_remote $2
@ -225,17 +357,15 @@ function write_mw()
fi
}
function mw_test()
function mw_check()
{
IDX=$1
LOC=$2
REM=$3
echo "Running $IDX tests on: $(basename $LOC) / $(basename $REM)"
write_mw "$LOC/mw$IDX"
write_mw "$LOC/$IDX"
split_remote "$LOC/$IDX"
split_remote "$LOC/mw$IDX"
if [[ "$REMOTE" == "" ]]; then
A=$VPATH
else
@ -243,7 +373,7 @@ function mw_test()
ssh "$REMOTE" cat "$VPATH" > "$A"
fi
split_remote "$REM/peer_$IDX"
split_remote "$REM/peer_mw$IDX"
if [[ "$REMOTE" == "" ]]; then
B=$VPATH
else
@ -251,7 +381,7 @@ function mw_test()
ssh "$REMOTE" cat "$VPATH" > "$B"
fi
cmp -n $MW_SIZE "$A" "$B"
cmp -n $MW_ALIGNED_SIZE "$A" "$B"
if [[ $? != 0 ]]; then
echo "Memory window $MW did not match!" >&2
fi
@ -263,8 +393,39 @@ function mw_test()
if [[ "$B" == "/tmp/*" ]]; then
rm "$B"
fi
}
function mw_free()
{
IDX=$1
LOC=$2
REM=$3
write_file "$MW_DMA_ADDR:0" "$REM/peer_mw_trans$IDX"
write_file 0 "$LOC/mw_trans$IDX"
}
function mw_test()
{
LOC=$1
REM=$2
CNT=$(get_files_count "mw_trans" "$LOC")
for ((i = 0; i < $CNT; i++)); do
echo "Running mw$i tests on: $(subdirname $LOC) / " \
"$(subdirname $REM)"
mw_alloc $i $LOC $REM
mw_check $i $LOC $REM
mw_free $i $LOC $REM
echo " Passed"
done
echo " Passed"
}
function pingpong_test()
@ -274,13 +435,13 @@ function pingpong_test()
echo "Running ping pong tests on: $(basename $LOC) / $(basename $REM)"
LOC_START=$(read_file $LOC/count)
REM_START=$(read_file $REM/count)
LOC_START=$(read_file "$LOC/count")
REM_START=$(read_file "$REM/count")
sleep 7
LOC_END=$(read_file $LOC/count)
REM_END=$(read_file $REM/count)
LOC_END=$(read_file "$LOC/count")
REM_END=$(read_file "$REM/count")
if [[ $LOC_START == $LOC_END ]] || [[ $REM_START == $REM_END ]]; then
echo "Ping pong counter not incrementing!" >&2
@ -300,19 +461,19 @@ function perf_test()
WITH="without"
fi
_modprobe ntb_perf run_order=$PERF_RUN_ORDER \
_modprobe ntb_perf total_order=$PERF_RUN_ORDER \
max_mw_size=$MAX_MW_SIZE use_dma=$USE_DMA
echo "Running local perf test $WITH DMA"
write_file "" $LOCAL_PERF/run
write_file "$LOCAL_PIDX" "$LOCAL_PERF/run"
echo -n " "
read_file $LOCAL_PERF/run
read_file "$LOCAL_PERF/run"
echo " Passed"
echo "Running remote perf test $WITH DMA"
write_file "" $REMOTE_PERF/run
write_file "$REMOTE_PIDX" "$REMOTE_PERF/run"
echo -n " "
read_file $REMOTE_PERF/run
read_file "$REMOTE_PERF/run"
echo " Passed"
_modprobe -r ntb_perf
@ -320,48 +481,44 @@ function perf_test()
function ntb_tool_tests()
{
LOCAL_TOOL=$DEBUGFS/ntb_tool/$LOCAL_DEV
REMOTE_TOOL=$REMOTE_HOST:$DEBUGFS/ntb_tool/$REMOTE_DEV
LOCAL_TOOL="$DEBUGFS/ntb_tool/$LOCAL_DEV"
REMOTE_TOOL="$REMOTE_HOST:$DEBUGFS/ntb_tool/$REMOTE_DEV"
echo "Starting ntb_tool tests..."
_modprobe ntb_tool
write_file Y $LOCAL_TOOL/link_event
write_file Y $REMOTE_TOOL/link_event
port_test "$LOCAL_TOOL" "$REMOTE_TOOL"
link_test $LOCAL_TOOL $REMOTE_TOOL
link_test $REMOTE_TOOL $LOCAL_TOOL
LOCAL_PEER_TOOL="$LOCAL_TOOL/peer$LOCAL_PIDX"
REMOTE_PEER_TOOL="$REMOTE_TOOL/peer$REMOTE_PIDX"
link_test "$LOCAL_PEER_TOOL" "$REMOTE_PEER_TOOL"
link_test "$REMOTE_PEER_TOOL" "$LOCAL_PEER_TOOL"
#Ensure the link is up on both sides before continuing
write_file Y $LOCAL_TOOL/link_event
write_file Y $REMOTE_TOOL/link_event
write_file "Y" "$LOCAL_PEER_TOOL/link_event"
write_file "Y" "$REMOTE_PEER_TOOL/link_event"
for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
PT=$(basename $PEER_TRANS)
write_file $MW_SIZE $LOCAL_TOOL/$PT
write_file $MW_SIZE $REMOTE_TOOL/$PT
done
doorbell_test "$LOCAL_TOOL" "$REMOTE_TOOL"
doorbell_test "$REMOTE_TOOL" "$LOCAL_TOOL"
doorbell_test $LOCAL_TOOL $REMOTE_TOOL
doorbell_test $REMOTE_TOOL $LOCAL_TOOL
scratchpad_test $LOCAL_TOOL $REMOTE_TOOL
scratchpad_test $REMOTE_TOOL $LOCAL_TOOL
scratchpad_test "$LOCAL_PEER_TOOL" "$REMOTE_PEER_TOOL"
scratchpad_test "$REMOTE_PEER_TOOL" "$LOCAL_PEER_TOOL"
for MW in $(ls $LOCAL_TOOL/mw*); do
MW=$(basename $MW)
message_test "$LOCAL_PEER_TOOL" "$REMOTE_PEER_TOOL"
message_test "$REMOTE_PEER_TOOL" "$LOCAL_PEER_TOOL"
mw_test $MW $LOCAL_TOOL $REMOTE_TOOL
mw_test $MW $REMOTE_TOOL $LOCAL_TOOL
done
mw_test "$LOCAL_PEER_TOOL" "$REMOTE_PEER_TOOL"
mw_test "$REMOTE_PEER_TOOL" "$LOCAL_PEER_TOOL"
_modprobe -r ntb_tool
}
function ntb_pingpong_tests()
{
LOCAL_PP=$DEBUGFS/ntb_pingpong/$LOCAL_DEV
REMOTE_PP=$REMOTE_HOST:$DEBUGFS/ntb_pingpong/$REMOTE_DEV
LOCAL_PP="$DEBUGFS/ntb_pingpong/$LOCAL_DEV"
REMOTE_PP="$REMOTE_HOST:$DEBUGFS/ntb_pingpong/$REMOTE_DEV"
echo "Starting ntb_pingpong tests..."
@ -374,8 +531,8 @@ function ntb_pingpong_tests()
function ntb_perf_tests()
{
LOCAL_PERF=$DEBUGFS/ntb_perf/$LOCAL_DEV
REMOTE_PERF=$REMOTE_HOST:$DEBUGFS/ntb_perf/$REMOTE_DEV
LOCAL_PERF="$DEBUGFS/ntb_perf/$LOCAL_DEV"
REMOTE_PERF="$REMOTE_HOST:$DEBUGFS/ntb_perf/$REMOTE_DEV"
echo "Starting ntb_perf tests..."