Skip to content

Commit ffdf504

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe: "Small bug fixes: - Prevent a crash in bnxt if the en and rdma drivers disagree on the MSI vectors - Have rxe memcpy inline data from the correct address - Fix rxe's validation of UD packets - Several mlx5 mr cache issues: bad lock balancing on error, missing propagation of the ATS property to the HW, wrong bucketing of freed mrs in some cases - Incorrect goto error unwind in mlx5 driver probe - Missed userspace input validation in mlx5 SRQ create - Incorrect uABI in MANA rejecting valid optional MR creation flags" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: RDMA/mana_ib: Ignore optional access flags for MRs RDMA/mlx5: Add check for srq max_sge attribute RDMA/mlx5: Fix unwind flow as part of mlx5_ib_stage_init_init RDMA/mlx5: Ensure created mkeys always have a populated rb_key RDMA/mlx5: Follow rb_key.ats when creating new mkeys RDMA/mlx5: Remove extra unlock on error path RDMA/rxe: Fix responder length checking for UD request packets RDMA/rxe: Fix data copy for IB_SEND_INLINE RDMA/bnxt_re: Fix the max msix vectors macro
2 parents 4545981 + 82a5cc7 commit ffdf504

File tree

7 files changed

+30
-15
lines changed

7 files changed

+30
-15
lines changed

drivers/infiniband/hw/bnxt_re/bnxt_re.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -107,8 +107,6 @@ struct bnxt_re_gsi_context {
107107
struct bnxt_re_sqp_entries *sqp_tbl;
108108
};
109109

110-
#define BNXT_RE_MIN_MSIX 2
111-
#define BNXT_RE_MAX_MSIX 9
112110
#define BNXT_RE_AEQ_IDX 0
113111
#define BNXT_RE_NQ_IDX 1
114112
#define BNXT_RE_GEN_P5_MAX_VF 64
@@ -168,7 +166,7 @@ struct bnxt_re_dev {
168166
struct bnxt_qplib_rcfw rcfw;
169167

170168
/* NQ */
171-
struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
169+
struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX];
172170

173171
/* Device Resources */
174172
struct bnxt_qplib_dev_attr dev_attr;

drivers/infiniband/hw/mana/mr.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
112112
"start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
113113
start, iova, length, access_flags);
114114

115+
access_flags &= ~IB_ACCESS_OPTIONAL;
115116
if (access_flags & ~VALID_MR_FLAGS)
116117
return ERR_PTR(-EINVAL);
117118

drivers/infiniband/hw/mlx5/main.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3759,10 +3759,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
37593759
spin_lock_init(&dev->dm.lock);
37603760
dev->dm.dev = mdev;
37613761
return 0;
3762-
err:
3763-
mlx5r_macsec_dealloc_gids(dev);
37643762
err_mp:
37653763
mlx5_ib_cleanup_multiport_master(dev);
3764+
err:
3765+
mlx5r_macsec_dealloc_gids(dev);
37663766
return err;
37673767
}
37683768

drivers/infiniband/hw/mlx5/mr.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
246246
MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
247247
MLX5_SET(mkc, mkc, access_mode_4_2,
248248
(ent->rb_key.access_mode >> 2) & 0x7);
249+
MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);
249250

250251
MLX5_SET(mkc, mkc, translations_octword_size,
251252
get_mkc_octo_size(ent->rb_key.access_mode,
@@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
641642
new = &((*new)->rb_left);
642643
if (cmp < 0)
643644
new = &((*new)->rb_right);
644-
if (cmp == 0) {
645-
mutex_unlock(&cache->rb_lock);
645+
if (cmp == 0)
646646
return -EEXIST;
647-
}
648647
}
649648

650649
/* Add new node and rebalance tree. */
@@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
719718
}
720719
mr->mmkey.cache_ent = ent;
721720
mr->mmkey.type = MLX5_MKEY_MR;
721+
mr->mmkey.rb_key = ent->rb_key;
722+
mr->mmkey.cacheable = true;
722723
init_waitqueue_head(&mr->mmkey.wait);
723724
return mr;
724725
}
@@ -1169,7 +1170,6 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
11691170
mr->ibmr.pd = pd;
11701171
mr->umem = umem;
11711172
mr->page_shift = order_base_2(page_size);
1172-
mr->mmkey.cacheable = true;
11731173
set_mr_fields(dev, mr, umem->length, access_flags, iova);
11741174

11751175
return mr;

drivers/infiniband/hw/mlx5/srq.c

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
199199
int err;
200200
struct mlx5_srq_attr in = {};
201201
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
202+
__u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
203+
sizeof(struct mlx5_wqe_data_seg);
202204

203205
if (init_attr->srq_type != IB_SRQT_BASIC &&
204206
init_attr->srq_type != IB_SRQT_XRC &&
205207
init_attr->srq_type != IB_SRQT_TM)
206208
return -EOPNOTSUPP;
207209

208-
/* Sanity check SRQ size before proceeding */
209-
if (init_attr->attr.max_wr >= max_srq_wqes) {
210-
mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
211-
init_attr->attr.max_wr,
212-
max_srq_wqes);
210+
/* Sanity check SRQ and sge size before proceeding */
211+
if (init_attr->attr.max_wr >= max_srq_wqes ||
212+
init_attr->attr.max_sge > max_sge_sz) {
213+
mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
214+
init_attr->attr.max_wr, max_srq_wqes,
215+
init_attr->attr.max_sge, max_sge_sz);
213216
return -EINVAL;
214217
}
215218

drivers/infiniband/sw/rxe/rxe_resp.c

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -344,6 +344,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
344344
* receive buffer later. For rmda operations additional
345345
* length checks are performed in check_rkey.
346346
*/
347+
if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
348+
unsigned int payload = payload_size(pkt);
349+
unsigned int recv_buffer_len = 0;
350+
int i;
351+
352+
for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
353+
recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
354+
if (payload + 40 > recv_buffer_len) {
355+
rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
356+
return RESPST_ERR_LENGTH;
357+
}
358+
}
359+
347360
if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
348361
(qp_type(qp) == IB_QPT_UC))) {
349362
unsigned int mtu = qp->mtu;

drivers/infiniband/sw/rxe/rxe_verbs.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
812812
int i;
813813

814814
for (i = 0; i < ibwr->num_sge; i++, sge++) {
815-
memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
815+
memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
816816
p += sge->length;
817817
}
818818
}

0 commit comments

Comments (0)