Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "The last two weeks have been quiet here, just the usual smattering of
  long standing bug fixes.

  A collection of error case bug fixes:

   - Improper nesting of spinlock types in cm

   - Missing error codes and kfree()

   - Ensure dma_virt_ops users have the right kconfig symbols to work
     properly

   - Compilation failure of tools/testing"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  tools/testing/scatterlist: Fix test to compile and run
  IB/hfi1: Fix error return code in hfi1_init_dd()
  RMDA/sw: Don't allow drivers using dma_virt_ops on highmem configs
  RDMA/pvrdma: Fix missing kfree() in pvrdma_register_device()
  RDMA/cm: Make the local_id_table xarray non-irq
Linus Torvalds committed 2020-11-19 13:01:53 -08:00
9 changed files with 19 additions and 12 deletions
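
The dma_virt_ops item above comes down to one new Kconfig guard that the software transports (rdmavt, rxe, siw) depend on. A minimal sketch of the pattern, with the symbol and dependency taken from the hunks below and the siw entry abridged for illustration:

    # dma_virt_ops maps kernel virtual addresses directly, so it cannot
    # cover highmem pages; the guard is only true on !HIGHMEM configs.
    config INFINIBAND_VIRT_DMA
            def_bool !HIGHMEM

    # Each software transport gains the dependency (siw shown, abridged):
    config RDMA_SIW
            tristate "Software RDMA over TCP/IP (iWARP) driver"
            depends on INFINIBAND_VIRT_DMA
            select DMA_VIRT_OPS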


@@ -73,6 +73,9 @@ config INFINIBAND_ADDR_TRANS_CONFIGFS
           This allows the user to config the default GID type that the CM
           uses for each device, when initiaing new connections.
 
+config INFINIBAND_VIRT_DMA
+        def_bool !HIGHMEM
+
 if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
 source "drivers/infiniband/hw/mthca/Kconfig"
 source "drivers/infiniband/hw/qib/Kconfig"


@@ -859,8 +859,8 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
         atomic_set(&cm_id_priv->work_count, -1);
         refcount_set(&cm_id_priv->refcount, 1);
 
-        ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
-                                  &cm.local_id_next, GFP_KERNEL);
+        ret = xa_alloc_cyclic(&cm.local_id_table, &id, NULL, xa_limit_32b,
+                              &cm.local_id_next, GFP_KERNEL);
         if (ret < 0)
                 goto error;
         cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
@@ -878,8 +878,8 @@ error:
  */
 static void cm_finalize_id(struct cm_id_private *cm_id_priv)
 {
-        xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
-                     cm_id_priv, GFP_KERNEL);
+        xa_store(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
+                 cm_id_priv, GFP_ATOMIC);
 }
 
 struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
@@ -1169,7 +1169,7 @@ retest:
                 spin_unlock(&cm.lock);
                 spin_unlock_irq(&cm_id_priv->lock);
 
-        xa_erase_irq(&cm.local_id_table, cm_local_id(cm_id->local_id));
+        xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
         cm_deref_id(cm_id_priv);
         wait_for_completion(&cm_id_priv->comp);
         while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
@@ -4482,7 +4482,7 @@ static int __init ib_cm_init(void)
         cm.remote_id_table = RB_ROOT;
         cm.remote_qp_table = RB_ROOT;
         cm.remote_sidr_table = RB_ROOT;
-        xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
+        xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC);
         get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
         INIT_LIST_HEAD(&cm.timewait_list);
 


@@ -15245,7 +15245,8 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
                     & CCE_REVISION_SW_MASK);
 
         /* alloc netdev data */
-        if (hfi1_netdev_alloc(dd))
+        ret = hfi1_netdev_alloc(dd);
+        if (ret)
                 goto bail_cleanup;
 
         ret = set_up_context_variables(dd);


@@ -266,7 +266,7 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
         }
         ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
         if (ret)
-                return ret;
+                goto err_srq_free;
 
         spin_lock_init(&dev->srq_tbl_lock);
         rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);


@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_RDMAVT
         tristate "RDMA verbs transport library"
-        depends on X86_64 && ARCH_DMA_ADDR_T_64BIT
+        depends on INFINIBAND_VIRT_DMA
+        depends on X86_64
         depends on PCI
         select DMA_VIRT_OPS
         help


@@ -2,7 +2,7 @@
 config RDMA_RXE
         tristate "Software RDMA over Ethernet (RoCE) driver"
         depends on INET && PCI && INFINIBAND
-        depends on !64BIT || ARCH_DMA_ADDR_T_64BIT
+        depends on INFINIBAND_VIRT_DMA
         select NET_UDP_TUNNEL
         select CRYPTO_CRC32
         select DMA_VIRT_OPS


@@ -1,6 +1,7 @@
 config RDMA_SIW
         tristate "Software RDMA over TCP/IP (iWARP) driver"
         depends on INET && INFINIBAND && LIBCRC32C
+        depends on INFINIBAND_VIRT_DMA
         select DMA_VIRT_OPS
         help
           This driver implements the iWARP RDMA transport over


@@ -33,6 +33,7 @@ typedef unsigned long dma_addr_t;
 #define __ALIGN_KERNEL(x, a)            __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
 #define __ALIGN_KERNEL_MASK(x, mask)    (((x) + (mask)) & ~(mask))
 #define ALIGN(x, a)                     __ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a)                __ALIGN_KERNEL((x) - ((a) - 1), (a))
 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)


@@ -52,9 +52,9 @@ int main(void)
 {
         const unsigned int sgmax = SCATTERLIST_MAX_SEGMENT;
         struct test *test, tests[] = {
-                { -EINVAL, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
                 { -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 },
-                { -EINVAL, 1, pfn(0), PAGE_SIZE, sgmax + 1, 1 },
+                { 0, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
+                { 0, 1, pfn(0), PAGE_SIZE, sgmax + 1, 1 },
                 { 0, 1, pfn(0), PAGE_SIZE, sgmax, 1 },
                 { 0, 1, pfn(0), 1, sgmax, 1 },
                 { 0, 2, pfn(0, 1), 2 * PAGE_SIZE, sgmax, 1 },