Skip to content

Commit dea3165

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe: "A lot of fixes accumulated over the holiday break: - Static tool fixes, value is already proven to be NULL, possible integer overflow - Many bnxt_re fixes: - Crashes due to a mismatch in the maximum SGE list size - Don't waste memory for user QPs by creating kernel-only structures - Fix compatibility issues with older HW in some of the new HW features recently introduced: RTS->RTS feature, work around 9060 - Do not allow destroy_qp to fail - Validate QP MTU against device limits - Add missing validation on mandatory QP attributes for RTR->RTS - Report port_num in query_qp as required by the spec - Fix creation of QPs of the maximum queue size, and in the variable mode - Allow all QPs to be used on newer HW by limiting a work around only to HW it affects - Use the correct MSN table size for variable mode QPs - Add missing locking in create_qp() accessing the qp_tbl - Form WQE buffers correctly when some of the buffers are 0 hop - Don't crash on QP destroy if the userspace doesn't setup the dip_ctx - Add the missing QP flush handler call on the DWQE path to avoid hanging on error recovery - Consistently use ENXIO for return codes if the device is fatally errored - Try again to fix VLAN support on iwarp, previous fix was reverted due to breaking other cards - Correct error path return code for rdma netlink events - Remove the separate net_device pointer in siw and rxe which syzkaller found a way to UAF - Fix a UAF of a stack ib_sge in rtrs - Fix a regression where old mlx5 devices and FW were wrongly activating new device features and failing" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (28 commits) RDMA/mlx5: Enable multiplane mode only when it is supported RDMA/bnxt_re: Fix error recovery sequence RDMA/rtrs: Ensure 'ib_sge list' is accessible RDMA/rxe: Remove the direct link to net_device RDMA/hns: Fix missing flush CQE for DWQE RDMA/hns: Fix warning storm caused by invalid input in IO path 
RDMA/hns: Fix accessing invalid dip_ctx during destroying QP RDMA/hns: Fix mapping error of zero-hop WQE buffer RDMA/bnxt_re: Fix the locking while accessing the QP table RDMA/bnxt_re: Fix MSN table size for variable wqe mode RDMA/bnxt_re: Add send queue size check for variable wqe RDMA/bnxt_re: Disable use of reserved wqes RDMA/bnxt_re: Fix max_qp_wrs reported RDMA/siw: Remove direct link to net_device RDMA/nldev: Set error code in rdma_nl_notify_event RDMA/bnxt_re: Fix reporting hw_ver in query_device RDMA/bnxt_re: Fix to export port num to ib_query_qp RDMA/bnxt_re: Fix setting mandatory attributes for modify_qp RDMA/bnxt_re: Add check for path mtu in modify_qp RDMA/bnxt_re: Fix the check for 9060 condition ...
2 parents f274fff + 45d339f commit dea3165

29 files changed

+321
-159
lines changed

drivers/infiniband/core/cma.c

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -690,6 +690,7 @@ cma_validate_port(struct ib_device *device, u32 port,
690690
int bound_if_index = dev_addr->bound_dev_if;
691691
int dev_type = dev_addr->dev_type;
692692
struct net_device *ndev = NULL;
693+
struct net_device *pdev = NULL;
693694

694695
if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
695696
goto out;
@@ -714,6 +715,21 @@ cma_validate_port(struct ib_device *device, u32 port,
714715

715716
rcu_read_lock();
716717
ndev = rcu_dereference(sgid_attr->ndev);
718+
if (ndev->ifindex != bound_if_index) {
719+
pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
720+
if (pdev) {
721+
if (is_vlan_dev(pdev)) {
722+
pdev = vlan_dev_real_dev(pdev);
723+
if (ndev->ifindex == pdev->ifindex)
724+
bound_if_index = pdev->ifindex;
725+
}
726+
if (is_vlan_dev(ndev)) {
727+
pdev = vlan_dev_real_dev(ndev);
728+
if (bound_if_index == pdev->ifindex)
729+
bound_if_index = ndev->ifindex;
730+
}
731+
}
732+
}
717733
if (!net_eq(dev_net(ndev), dev_addr->net) ||
718734
ndev->ifindex != bound_if_index) {
719735
rdma_put_gid_attr(sgid_attr);

drivers/infiniband/core/nldev.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2833,8 +2833,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
28332833
enum rdma_nl_notify_event_type type)
28342834
{
28352835
struct sk_buff *skb;
2836+
int ret = -EMSGSIZE;
28362837
struct net *net;
2837-
int ret = 0;
28382838
void *nlh;
28392839

28402840
net = read_pnet(&device->coredev.rdma_net);

drivers/infiniband/core/uverbs_cmd.c

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
161161
{
162162
const void __user *res = iter->cur;
163163

164-
if (iter->cur + len > iter->end)
164+
if (len > iter->end - iter->cur)
165165
return (void __force __user *)ERR_PTR(-ENOSPC);
166166
iter->cur += len;
167167
return res;
@@ -2008,11 +2008,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
20082008
ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
20092009
if (ret)
20102010
return ret;
2011-
wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
2011+
wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
2012+
cmd.wr_count));
20122013
if (IS_ERR(wqes))
20132014
return PTR_ERR(wqes);
2014-
sgls = uverbs_request_next_ptr(
2015-
&iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
2015+
sgls = uverbs_request_next_ptr(&iter,
2016+
size_mul(cmd.sge_count,
2017+
sizeof(struct ib_uverbs_sge)));
20162018
if (IS_ERR(sgls))
20172019
return PTR_ERR(sgls);
20182020
ret = uverbs_request_finish(&iter);
@@ -2198,11 +2200,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
21982200
if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
21992201
return ERR_PTR(-EINVAL);
22002202

2201-
wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
2203+
wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
22022204
if (IS_ERR(wqes))
22032205
return ERR_CAST(wqes);
2204-
sgls = uverbs_request_next_ptr(
2205-
iter, sge_count * sizeof(struct ib_uverbs_sge));
2206+
sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
2207+
sizeof(struct ib_uverbs_sge)));
22062208
if (IS_ERR(sgls))
22072209
return ERR_CAST(sgls);
22082210
ret = uverbs_request_finish(iter);

drivers/infiniband/hw/bnxt_re/ib_verbs.c

Lines changed: 25 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -199,7 +199,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
199199

200200
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
201201
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
202-
ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
202+
ib_attr->hw_ver = rdev->en_dev->pdev->revision;
203203
ib_attr->max_qp = dev_attr->max_qp;
204204
ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
205205
ib_attr->device_cap_flags =
@@ -967,13 +967,13 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
967967
unsigned int flags;
968968
int rc;
969969

970+
bnxt_re_debug_rem_qpinfo(rdev, qp);
971+
970972
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
971973

972974
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
973-
if (rc) {
975+
if (rc)
974976
ibdev_err(&rdev->ibdev, "Failed to destroy HW QP");
975-
return rc;
976-
}
977977

978978
if (rdma_is_kernel_res(&qp->ib_qp.res)) {
979979
flags = bnxt_re_lock_cqs(qp);
@@ -983,11 +983,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
983983

984984
bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
985985

986-
if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
987-
rc = bnxt_re_destroy_gsi_sqp(qp);
988-
if (rc)
989-
return rc;
990-
}
986+
if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp)
987+
bnxt_re_destroy_gsi_sqp(qp);
991988

992989
mutex_lock(&rdev->qp_lock);
993990
list_del(&qp->list);
@@ -998,8 +995,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
998995
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
999996
atomic_dec(&rdev->stats.res.ud_qp_count);
1000997

1001-
bnxt_re_debug_rem_qpinfo(rdev, qp);
1002-
1003998
ib_umem_release(qp->rumem);
1004999
ib_umem_release(qp->sumem);
10051000

@@ -2167,18 +2162,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
21672162
}
21682163
}
21692164

2170-
if (qp_attr_mask & IB_QP_PATH_MTU) {
2171-
qp->qplib_qp.modify_flags |=
2172-
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2173-
qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
2174-
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
2175-
} else if (qp_attr->qp_state == IB_QPS_RTR) {
2176-
qp->qplib_qp.modify_flags |=
2177-
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2178-
qp->qplib_qp.path_mtu =
2179-
__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
2180-
qp->qplib_qp.mtu =
2181-
ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
2165+
if (qp_attr->qp_state == IB_QPS_RTR) {
2166+
enum ib_mtu qpmtu;
2167+
2168+
qpmtu = iboe_get_mtu(rdev->netdev->mtu);
2169+
if (qp_attr_mask & IB_QP_PATH_MTU) {
2170+
if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
2171+
ib_mtu_enum_to_int(qpmtu))
2172+
return -EINVAL;
2173+
qpmtu = qp_attr->path_mtu;
2174+
}
2175+
2176+
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
2177+
qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
2178+
qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
21822179
}
21832180

21842181
if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -2328,6 +2325,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
23282325
qp_attr->retry_cnt = qplib_qp->retry_cnt;
23292326
qp_attr->rnr_retry = qplib_qp->rnr_retry;
23302327
qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2328+
qp_attr->port_num = __to_ib_port_num(qplib_qp->port_id);
23312329
qp_attr->rq_psn = qplib_qp->rq.psn;
23322330
qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
23332331
qp_attr->sq_psn = qplib_qp->sq.psn;
@@ -2824,7 +2822,8 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
28242822
wr = wr->next;
28252823
}
28262824
bnxt_qplib_post_send_db(&qp->qplib_qp);
2827-
bnxt_ud_qp_hw_stall_workaround(qp);
2825+
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2826+
bnxt_ud_qp_hw_stall_workaround(qp);
28282827
spin_unlock_irqrestore(&qp->sq_lock, flags);
28292828
return rc;
28302829
}
@@ -2936,7 +2935,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
29362935
wr = wr->next;
29372936
}
29382937
bnxt_qplib_post_send_db(&qp->qplib_qp);
2939-
bnxt_ud_qp_hw_stall_workaround(qp);
2938+
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
2939+
bnxt_ud_qp_hw_stall_workaround(qp);
29402940
spin_unlock_irqrestore(&qp->sq_lock, flags);
29412941

29422942
return rc;

drivers/infiniband/hw/bnxt_re/ib_verbs.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -268,6 +268,10 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
268268
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
269269
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
270270

271+
static inline u32 __to_ib_port_num(u16 port_id)
272+
{
273+
return (u32)port_id + 1;
274+
}
271275

272276
unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
273277
void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);

drivers/infiniband/hw/bnxt_re/main.c

Lines changed: 1 addition & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1715,24 +1715,18 @@ static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
17151715

17161716
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
17171717
{
1718-
int mask = IB_QP_STATE;
1719-
struct ib_qp_attr qp_attr;
17201718
struct bnxt_re_qp *qp;
17211719

1722-
qp_attr.qp_state = IB_QPS_ERR;
17231720
mutex_lock(&rdev->qp_lock);
17241721
list_for_each_entry(qp, &rdev->qp_list, list) {
17251722
/* Modify the state of all QPs except QP1/Shadow QP */
17261723
if (!bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
17271724
if (qp->qplib_qp.state !=
17281725
CMDQ_MODIFY_QP_NEW_STATE_RESET &&
17291726
qp->qplib_qp.state !=
1730-
CMDQ_MODIFY_QP_NEW_STATE_ERR) {
1727+
CMDQ_MODIFY_QP_NEW_STATE_ERR)
17311728
bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
17321729
1, IB_EVENT_QP_FATAL);
1733-
bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
1734-
NULL);
1735-
}
17361730
}
17371731
}
17381732
mutex_unlock(&rdev->qp_lock);

drivers/infiniband/hw/bnxt_re/qplib_fp.c

Lines changed: 50 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -659,13 +659,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
659659
rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
660660
if (rc)
661661
return rc;
662-
663-
srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
664-
GFP_KERNEL);
665-
if (!srq->swq) {
666-
rc = -ENOMEM;
667-
goto fail;
668-
}
669662
srq->dbinfo.flags = 0;
670663
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
671664
CMDQ_BASE_OPCODE_CREATE_SRQ,
@@ -694,9 +687,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
694687
spin_lock_init(&srq->lock);
695688
srq->start_idx = 0;
696689
srq->last_idx = srq->hwq.max_elements - 1;
697-
for (idx = 0; idx < srq->hwq.max_elements; idx++)
698-
srq->swq[idx].next_idx = idx + 1;
699-
srq->swq[srq->last_idx].next_idx = -1;
690+
if (!srq->hwq.is_user) {
691+
srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
692+
GFP_KERNEL);
693+
if (!srq->swq) {
694+
rc = -ENOMEM;
695+
goto fail;
696+
}
697+
for (idx = 0; idx < srq->hwq.max_elements; idx++)
698+
srq->swq[idx].next_idx = idx + 1;
699+
srq->swq[srq->last_idx].next_idx = -1;
700+
}
700701

701702
srq->id = le32_to_cpu(resp.xid);
702703
srq->dbinfo.hwq = &srq->hwq;
@@ -1000,9 +1001,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
10001001
u32 tbl_indx;
10011002
u16 nsge;
10021003

1003-
if (res->dattr)
1004-
qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
1005-
1004+
qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
10061005
sq->dbinfo.flags = 0;
10071006
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
10081007
CMDQ_BASE_OPCODE_CREATE_QP,
@@ -1034,7 +1033,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
10341033
: 0;
10351034
/* Update msn tbl size */
10361035
if (qp->is_host_msn_tbl && psn_sz) {
1037-
hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1036+
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1037+
hwq_attr.aux_depth =
1038+
roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
1039+
else
1040+
hwq_attr.aux_depth =
1041+
roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
10381042
qp->msn_tbl_sz = hwq_attr.aux_depth;
10391043
qp->msn = 0;
10401044
}
@@ -1044,13 +1048,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
10441048
if (rc)
10451049
return rc;
10461050

1047-
rc = bnxt_qplib_alloc_init_swq(sq);
1048-
if (rc)
1049-
goto fail_sq;
1050-
1051-
if (psn_sz)
1052-
bnxt_qplib_init_psn_ptr(qp, psn_sz);
1051+
if (!sq->hwq.is_user) {
1052+
rc = bnxt_qplib_alloc_init_swq(sq);
1053+
if (rc)
1054+
goto fail_sq;
10531055

1056+
if (psn_sz)
1057+
bnxt_qplib_init_psn_ptr(qp, psn_sz);
1058+
}
10541059
req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
10551060
pbl = &sq->hwq.pbl[PBL_LVL_0];
10561061
req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
@@ -1076,9 +1081,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
10761081
rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
10771082
if (rc)
10781083
goto sq_swq;
1079-
rc = bnxt_qplib_alloc_init_swq(rq);
1080-
if (rc)
1081-
goto fail_rq;
1084+
if (!rq->hwq.is_user) {
1085+
rc = bnxt_qplib_alloc_init_swq(rq);
1086+
if (rc)
1087+
goto fail_rq;
1088+
}
10821089

10831090
req.rq_size = cpu_to_le32(rq->max_wqe);
10841091
pbl = &rq->hwq.pbl[PBL_LVL_0];
@@ -1174,9 +1181,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
11741181
rq->dbinfo.db = qp->dpi->dbr;
11751182
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
11761183
}
1184+
spin_lock_bh(&rcfw->tbl_lock);
11771185
tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
11781186
rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
11791187
rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
1188+
spin_unlock_bh(&rcfw->tbl_lock);
11801189

11811190
return 0;
11821191
fail:
@@ -1283,7 +1292,8 @@ static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
12831292
}
12841293
}
12851294

1286-
static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
1295+
static void bnxt_set_mandatory_attributes(struct bnxt_qplib_res *res,
1296+
struct bnxt_qplib_qp *qp,
12871297
struct cmdq_modify_qp *req)
12881298
{
12891299
u32 mandatory_flags = 0;
@@ -1298,6 +1308,14 @@ static void bnxt_set_mandatory_attributes(struct bnxt_qplib_qp *qp,
12981308
mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
12991309
}
13001310

1311+
if (_is_min_rnr_in_rtr_rts_mandatory(res->dattr->dev_cap_flags2) &&
1312+
(qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RTR &&
1313+
qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTS)) {
1314+
if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_RC)
1315+
mandatory_flags |=
1316+
CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
1317+
}
1318+
13011319
if (qp->type == CMDQ_MODIFY_QP_QP_TYPE_UD ||
13021320
qp->type == CMDQ_MODIFY_QP_QP_TYPE_GSI)
13031321
mandatory_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
@@ -1338,7 +1356,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
13381356
/* Set mandatory attributes for INIT -> RTR and RTR -> RTS transition */
13391357
if (_is_optimize_modify_qp_supported(res->dattr->dev_cap_flags2) &&
13401358
is_optimized_state_transition(qp))
1341-
bnxt_set_mandatory_attributes(qp, &req);
1359+
bnxt_set_mandatory_attributes(res, qp, &req);
13421360
}
13431361
bmask = qp->modify_flags;
13441362
req.modify_mask = cpu_to_le32(qp->modify_flags);
@@ -1521,6 +1539,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
15211539
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
15221540
memcpy(qp->smac, sb->src_mac, 6);
15231541
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
1542+
qp->port_id = le16_to_cpu(sb->port_id);
15241543
bail:
15251544
dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
15261545
sbuf.sb, sbuf.dma_addr);
@@ -2667,10 +2686,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
26672686
bnxt_qplib_add_flush_qp(qp);
26682687
} else {
26692688
/* Before we complete, do WA 9060 */
2670-
if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2671-
cqe_sq_cons)) {
2672-
*lib_qp = qp;
2673-
goto out;
2689+
if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
2690+
if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
2691+
cqe_sq_cons)) {
2692+
*lib_qp = qp;
2693+
goto out;
2694+
}
26742695
}
26752696
if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
26762697
cqe->status = CQ_REQ_STATUS_OK;

0 commit comments

Comments
 (0)