Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
51 changes: 51 additions & 0 deletions drivers/infiniband/core/umem_dmabuf.c
Original file line number Diff line number Diff line change
Expand Up @@ -163,12 +163,63 @@ struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);

/*
 * move_notify handler for pinned dma-buf attachments.  A pinned buffer
 * must never be moved by its exporter, so this callback firing means the
 * exporter is misbehaving; emit a rate-limited warning rather than
 * attempting any invalidation.
 */
static void
ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem = attach->importer_priv;

	ibdev_warn_ratelimited(umem->umem.ibdev,
			       "Invalidate callback should not be called when memory is pinned\n");
}

static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
.allow_peer2peer = true,
.move_notify = ib_umem_dmabuf_unsupported_move_notify,
};

/*
 * ib_umem_dmabuf_get_pinned() - import a dma-buf as an IB umem, pinned
 * @device: IB device the memory will be registered against
 * @offset: byte offset into the dma-buf
 * @size:   number of bytes to import
 * @fd:     file descriptor of the dma-buf to import
 * @access: IB access flags (IB_ACCESS_*)
 *
 * Like ib_umem_dmabuf_get(), but the attachment is pinned via
 * dma_buf_pin() and its pages are mapped immediately, so the exporter may
 * not move the buffer afterwards and no move_notify invalidation is
 * expected (the ops table routes move_notify to a warning-only handler).
 *
 * Return: the new umem on success, ERR_PTR() on failure.
 */
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
					 &ib_umem_dmabuf_attach_pinned_ops);
	if (IS_ERR(umem_dmabuf))
		return umem_dmabuf;

	/* Pin and map under the dma-buf's reservation lock. */
	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = dma_buf_pin(umem_dmabuf->attach);
	if (err)
		goto err_release;
	/* Recorded so that release knows it must dma_buf_unpin(). */
	umem_dmabuf->pinned = 1;

	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	if (err)
		goto err_unpin;
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

	return umem_dmabuf;

err_unpin:
	dma_buf_unpin(umem_dmabuf->attach);
err_release:
	/* Unlock before releasing the umem, which tears down the attachment. */
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
	ib_umem_release(&umem_dmabuf->umem);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);

void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

dma_resv_lock(dmabuf->resv, NULL);
ib_umem_dmabuf_unmap_pages(umem_dmabuf);
if (umem_dmabuf->pinned)
dma_buf_unpin(umem_dmabuf->attach);
dma_resv_unlock(dmabuf->resv);

dma_buf_detach(dmabuf, umem_dmabuf->attach);
Expand Down
8 changes: 8 additions & 0 deletions drivers/infiniband/core/uverbs_ioctl.c
Original file line number Diff line number Diff line change
Expand Up @@ -337,6 +337,14 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,

break;

case UVERBS_ATTR_TYPE_RAW_FD:
if (uattr->attr_data.reserved || uattr->len != 0 ||
uattr->data_s64 < INT_MIN || uattr->data_s64 > INT_MAX)
return -EINVAL;
/* _uverbs_get_const_signed() is the accessor */
e->ptr_attr.data = uattr->data_s64;
break;

case UVERBS_ATTR_TYPE_IDRS_ARRAY:
return uverbs_process_idrs_array(pbundle, attr_uapi,
&e->objs_arr_attr, uattr,
Expand Down
4 changes: 4 additions & 0 deletions drivers/infiniband/hw/efa/efa.h
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,10 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
u64 length, u64 virt_addr,
int fd, int access_flags,
struct ib_udata *udata);
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
Expand Down
1 change: 1 addition & 0 deletions drivers/infiniband/hw/efa/efa_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -266,6 +266,7 @@ static const struct ib_device_ops efa_dev_ops = {
.query_port = efa_query_port,
.query_qp = efa_query_qp,
.reg_user_mr = efa_reg_mr,
.reg_user_mr_dmabuf = efa_reg_user_mr_dmabuf,

INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
Expand Down
127 changes: 96 additions & 31 deletions drivers/infiniband/hw/efa/efa_verbs.c
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@
* Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>

Expand Down Expand Up @@ -1490,26 +1492,18 @@ static int efa_create_pbl(struct efa_dev *dev,
return 0;
}

struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibpd->device);
struct efa_com_reg_mr_params params = {};
struct efa_com_reg_mr_result result = {};
struct pbl_context pbl;
int supp_access_flags;
unsigned int pg_sz;
struct efa_mr *mr;
int inline_size;
int err;

if (udata && udata->inlen &&
!ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
ibdev_dbg(&dev->ibdev,
"Incompatible ABI params, udata not cleared\n");
err = -EINVAL;
goto err_out;
return ERR_PTR(-EINVAL);
}

supp_access_flags =
Expand All @@ -1521,23 +1515,26 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
ibdev_dbg(&dev->ibdev,
"Unsupported access flags[%#x], supported[%#x]\n",
access_flags, supp_access_flags);
err = -EOPNOTSUPP;
goto err_out;
return ERR_PTR(-EOPNOTSUPP);
}

mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) {
err = -ENOMEM;
goto err_out;
}
if (!mr)
return ERR_PTR(-ENOMEM);

mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
"Failed to pin and map user space memory[%d]\n", err);
goto err_free;
}
return mr;
}

static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
u64 length, u64 virt_addr, int access_flags)
{
struct efa_dev *dev = to_edev(ibpd->device);
struct efa_com_reg_mr_params params = {};
struct efa_com_reg_mr_result result = {};
struct pbl_context pbl;
unsigned int pg_sz;
int inline_size;
int err;

params.pd = to_epd(ibpd)->pdn;
params.iova = virt_addr;
Expand All @@ -1548,10 +1545,9 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
dev->dev_attr.page_size_cap,
virt_addr);
if (!pg_sz) {
err = -EOPNOTSUPP;
ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
dev->dev_attr.page_size_cap);
goto err_unmap;
return -EOPNOTSUPP;
}

params.page_shift = order_base_2(pg_sz);
Expand All @@ -1565,31 +1561,100 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
if (params.page_num <= inline_size) {
err = efa_create_inline_pbl(dev, mr, &params);
if (err)
goto err_unmap;
return err;

err = efa_com_register_mr(&dev->edev, &params, &result);
if (err)
goto err_unmap;
return err;
} else {
err = efa_create_pbl(dev, &pbl, mr, &params);
if (err)
goto err_unmap;
return err;

err = efa_com_register_mr(&dev->edev, &params, &result);
pbl_destroy(dev, &pbl);

if (err)
goto err_unmap;
return err;
}

mr->ibmr.lkey = result.l_key;
mr->ibmr.rkey = result.r_key;
mr->ibmr.length = length;
ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);

return 0;
}

/*
 * efa_reg_user_mr_dmabuf() - register a memory region backed by a dma-buf.
 * @ibpd:         protection domain to associate the MR with
 * @start:        byte offset into the dma-buf
 * @length:       length of the region in bytes
 * @virt_addr:    I/O virtual address the MR will be accessed at
 * @fd:           dma-buf file descriptor
 * @access_flags: requested IB access flags (validated in efa_alloc_mr())
 * @udata:        user-space command buffer (must be cleared; checked in
 *                efa_alloc_mr())
 *
 * The dma-buf is imported through the pinned path
 * (ib_umem_dmabuf_get_pinned()), so the driver never has to service
 * move_notify invalidations for this MR.
 *
 * Return: the new ib_mr on success, ERR_PTR() on failure (with the
 * device's reg_mr_err counter incremented).
 */
struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
				     u64 length, u64 virt_addr,
				     int fd, int access_flags,
				     struct ib_udata *udata)
{
	struct efa_dev *dev = to_edev(ibpd->device);
	struct ib_umem_dmabuf *umem_dmabuf;
	struct efa_mr *mr;
	int err;

	/* Validates udata and access_flags, allocates the efa_mr shell. */
	mr = efa_alloc_mr(ibpd, access_flags, udata);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto err_out;
	}

	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
						access_flags);
	if (IS_ERR(umem_dmabuf)) {
		err = PTR_ERR(umem_dmabuf);
		ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
		goto err_free;
	}

	mr->umem = &umem_dmabuf->umem;
	err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
	if (err)
		goto err_release;

	return &mr->ibmr;

err_release:
	/* Unmaps, unpins and detaches the dma-buf umem. */
	ib_umem_release(mr->umem);
err_free:
	kfree(mr);
err_out:
	atomic64_inc(&dev->stats.reg_mr_err);
	return ERR_PTR(err);
}

struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibpd->device);
struct efa_mr *mr;
int err;

mr = efa_alloc_mr(ibpd, access_flags, udata);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto err_out;
}

mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
"Failed to pin and map user space memory[%d]\n", err);
goto err_free;
}

err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
if (err)
goto err_release;

return &mr->ibmr;

err_unmap:
err_release:
ib_umem_release(mr->umem);
err_free:
kfree(mr);
Expand Down
Loading