Description: register a user memory region (via IB_USER_VERBS_CMD_REG_MR); what the driver does with the region depends on the registration type. A user-space sketch of how this command gets issued follows the step list below.
Flow:
a) Convert the ibverbs objects to the rxe types (to_rdev()/to_rpd())
b) Validate the mr_type
c) Allocate an rxe_mem: mr = rxe_alloc(&rxe->mr_pool);
d) Assign an index to the mr: rxe_add_index(mr)
e) Take a reference on the pd object: rxe_add_ref(pd)
f) Build the mr: rxe_mem_init_user(pd, start, length, iova, access, udata, mr)
g) Initialize the mr: rxe_mem_init(access, mem) generates the lkey and rkey
h) Return the newly allocated mr
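A minimal user-space sketch of how this path is triggered (assumptions: libibverbs is installed and at least one RDMA device, e.g. a soft-RoCE rxe device, is configured; this snippet is only an illustration and is not part of the kernel code discussed below):
#include <stdio.h>
#include <stdlib.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);
	if (!dev_list || !dev_list[0]) {
		fprintf(stderr, "no RDMA device found\n");
		return 1;
	}

	struct ibv_context *ctx = ibv_open_device(dev_list[0]);
	struct ibv_pd *pd = ctx ? ibv_alloc_pd(ctx) : NULL;
	size_t len = 4096;
	void *buf = aligned_alloc(4096, len);
	if (!pd || !buf) {
		fprintf(stderr, "setup failed\n");
		return 1;
	}

	/* This call issues IB_USER_VERBS_CMD_REG_MR and ends up in the
	 * driver's reg_user_mr op (e.g. rxe_reg_user_mr / i40iw_reg_user_mr). */
	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
				       IBV_ACCESS_LOCAL_WRITE |
				       IBV_ACCESS_REMOTE_READ |
				       IBV_ACCESS_REMOTE_WRITE);
	if (!mr) {
		perror("ibv_reg_mr");
		return 1;
	}
	printf("lkey=0x%x rkey=0x%x\n", mr->lkey, mr->rkey);

	ibv_dereg_mr(mr);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	free(buf);
	return 0;
}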
Ops_rxe:
/**
 * rxe_reg_user_mr - Register a user memory region
 * @ibpd: ptr of pd
 * @start: virtual start address
 * @length: length of mr
 * @iova: virtual address of mr
 * @access: access of mr
 * @udata: user data
 */
static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);	/* a) convert to rxe types */
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);			/* c) allocate the rxe_mem */
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);				/* d) assign an index to the mr */
	rxe_add_ref(pd);				/* e) take a reference on the pd */

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);	/* f) build the mr */
	if (err)
		goto err3;

	return &mr->ibmr;				/* h) return the new mr */

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}
rxe_mem_init_user()
int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
		      u64 length, u64 iova, int access, struct ib_udata *udata,
		      struct rxe_mem *mem)
{
	struct rxe_map		**map;
	struct rxe_phys_buf	*buf = NULL;
	struct ib_umem		*umem;
	struct sg_page_iter	sg_iter;
	int			num_buf;
	void			*vaddr;
	int err;

	/* pin the user pages and build the scatter list */
	umem = ib_umem_get(udata, start, length, access, 0);
	if (IS_ERR(umem)) {
		pr_warn("err %d from rxe_umem_get\n",
			(int)PTR_ERR(umem));
		err = -EINVAL;
		goto err1;
	}

	mem->umem = umem;
	num_buf = ib_umem_num_pages(umem);

	/* g) generate lkey/rkey and set the initial state */
	rxe_mem_init(access, mem);

	/* allocate the map table that will hold one entry per page */
	err = rxe_mem_alloc(mem, num_buf);
	if (err) {
		pr_warn("err %d from rxe_mem_alloc\n", err);
		ib_umem_release(umem);
		goto err1;
	}

	mem->page_shift = PAGE_SHIFT;
	mem->page_mask = PAGE_SIZE - 1;

	num_buf = 0;
	map = mem->map;
	if (length > 0) {
		buf = map[0]->buf;

		/* walk the pinned pages and record their kernel virtual addresses */
		for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
			if (num_buf >= RXE_BUF_PER_MAP) {
				map++;
				buf = map[0]->buf;
				num_buf = 0;
			}

			vaddr = page_address(sg_page_iter_page(&sg_iter));
			if (!vaddr) {
				pr_warn("null vaddr\n");
				err = -ENOMEM;
				goto err1;
			}

			buf->addr = (uintptr_t)vaddr;
			buf->size = PAGE_SIZE;
			num_buf++;
			buf++;
		}
	}

	mem->pd = pd;
	mem->umem = umem;
	mem->access = access;
	mem->length = length;
	mem->iova = iova;
	mem->va = start;
	mem->offset = ib_umem_offset(umem);
	mem->state = RXE_MEM_STATE_VALID;
	mem->type = RXE_MEM_TYPE_MR;

	return 0;

err1:
	return err;
}
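Step g) refers to rxe_mem_init(), which rxe_mem_init_user() calls above before filling the map table. A simplified sketch of it is shown below (not the verbatim kernel function; the exact field layout varies a little across kernel versions): the lkey is the object's pool index shifted left by 8 bits OR'd with a rolling 8-bit key, and the rkey is only non-zero when a remote access flag was requested.
/* Simplified sketch of rxe_mem_init(access, mem) from rxe_mr.c.
 * IB_ACCESS_REMOTE is a driver-local mask of the IB_ACCESS_REMOTE_* flags.
 */
static void rxe_mem_init(int access, struct rxe_mem *mem)
{
	/* pool index in the upper bits, rolling 8-bit key in the lower bits */
	u32 lkey = mem->pelem.index << 8 | rxe_get_key();
	/* hand out an rkey only when remote access was requested */
	u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;

	mem->lkey = lkey;
	mem->rkey = rkey;
	mem->state = RXE_MEM_STATE_INVALID;
	mem->type = RXE_MEM_TYPE_NONE;
	mem->map_shift = ilog2(RXE_BUF_PER_MAP);
}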
Ops_i40iw:
/**
* i40iw_reg_user_mr - Register a user memory region
* @pd: ptr of pd
* @start: virtual start address
* @length: length of mr
* @virt: virtual address
* @acc: access of mr
* @udata: user data
*/
static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
				       u64 start,
				       u64 length,
				       u64 virt,
				       int acc,
				       struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct i40iw_ucontext, ibucontext);
	struct i40iw_pble_alloc *palloc;
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	struct ib_umem *region;
	struct i40iw_mem_reg_req req;
	u64 pbl_depth = 0;
	u32 stag = 0;
	u16 access;
	u64 region_length;
	bool use_pbles = false;
	unsigned long flags;
	int err = -ENOSYS;
	int ret;
	int pg_shift;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (length > I40IW_MAX_MR_SIZE)
		return ERR_PTR(-EINVAL);

	/* pin the user pages */
	region = ib_umem_get(udata, start, length, acc, 0);
	if (IS_ERR(region))
		return (struct ib_mr *)region;

	/* the registration type (MEM/QP/CQ) comes in through udata */
	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
		ib_umem_release(region);
		return ERR_PTR(-EFAULT);
	}

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr) {
		ib_umem_release(region);
		return ERR_PTR(-ENOMEM);
	}

	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->region = region;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;

	iwmr->page_size = PAGE_SIZE;
	if (req.reg_type == IW_MEMREG_TYPE_MEM)
		iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
							 virt);

	region_length = region->length + (start & (iwmr->page_size - 1));
	pg_shift = ffs(iwmr->page_size) - 1;
	pbl_depth = region_length >> pg_shift;
	pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
	iwmr->length = region->length;

	iwpbl->user_base = virt;
	palloc = &iwpbl->pble_alloc;

	iwmr->type = req.reg_type;
	iwmr->page_cnt = (u32)pbl_depth;

	switch (req.reg_type) {
	case IW_MEMREG_TYPE_QP:
		use_pbles = ((req.sq_pages + req.rq_pages) > 2);
		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
		if (err)
			goto error;
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
		iwpbl->on_list = true;
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_CQ:
		use_pbles = (req.cq_pages > 1);
		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
		if (err)
			goto error;

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
		iwpbl->on_list = true;
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_MEM:
		/* ordinary user MR: allocate PBLEs, create an STag, register with HW */
		use_pbles = (iwmr->page_cnt != 1);
		access = I40IW_ACCESS_FLAGS_LOCALREAD;

		err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
		if (err)
			goto error;

		if (use_pbles) {
			ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
			if (ret) {
				i40iw_free_pble(iwdev->pble_rsrc, palloc);
				iwpbl->pbl_allocated = false;
			}
		}

		access |= i40iw_get_user_access(acc);
		stag = i40iw_create_stag(iwdev);
		if (!stag) {
			err = -ENOMEM;
			goto error;
		}

		iwmr->stag = stag;
		iwmr->ibmr.rkey = stag;
		iwmr->ibmr.lkey = stag;
		err = i40iw_hwreg_mr(iwdev, iwmr, access);
		if (err) {
			i40iw_free_stag(iwdev, stag);
			goto error;
		}

		break;
	default:
		goto error;
	}

	iwmr->type = req.reg_type;
	if (req.reg_type == IW_MEMREG_TYPE_MEM)
		i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;

error:
	if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
	ib_umem_release(region);
	kfree(iwmr);
	return ERR_PTR(err);
}
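For reference, the reg_type values that the switch above dispatches on are defined in the i40iw user ABI header (i40iw-abi.h); the values below are quoted for context and should be checked against the kernel version in use.
enum i40iw_memreg_type {
	IW_MEMREG_TYPE_MEM = 0,	/* plain user MR - the normal ibv_reg_mr path */
	IW_MEMREG_TYPE_QP = 1,	/* QP work-queue memory registered at create_qp */
	IW_MEMREG_TYPE_CQ = 2,	/* CQ memory registered at create_cq */
};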