
Commit c08cfd6

Merge tag 'cxl-fixes-6.3-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl
Pull compute express link (cxl) fixes from Dan Williams:
 "Several fixes for driver startup regressions that landed during the
  merge window as well as some older bugs.

  The regressions were due to a lack of testing with what the CXL
  specification calls Restricted CXL Host (RCH) topologies compared to
  the testing with Virtual Host (VH) CXL topologies. A VH topology is
  typical PCIe while RCH topologies map CXL endpoints as Root Complex
  Integrated endpoints. The impact is some driver crashes on startup.

  This merge window also added compatibility for range registers (the
  mechanism that CXL 1.1 defined for mapping memory) to treat them like
  HDM decoders (the mechanism that CXL 2.0 defined for mapping
  Host-managed Device Memory). That work collided with the new region
  enumeration code that was tested with CXL 2.0 setups, and fails with
  crashes at startup.

  Lastly, the DOE (Data Object Exchange) implementation for retrieving
  an ACPI-like data table from CXL devices is being reworked for v6.4.
  Several fixes fell out of that work that are suitable for v6.3.

  All of this has been in linux-next for a while, and all reported
  issues [1] have been addressed.

  Summary:

   - Fix several issues with region enumeration in RCH topologies that
     can trigger crashes on driver startup or shutdown

   - Fix CXL DVSEC range register compatibility versus region
     enumeration that leads to startup crashes

   - Fix CDAT endianness handling

   - Fix multiple buffer handling boundary conditions

   - Fix Data Object Exchange (DOE) workqueue usage vs
     CONFIG_DEBUG_OBJECTS warn splats"

Link: http://lore.kernel.org/r/20230405075704.33de8121@canb.auug.org.au [1]

* tag 'cxl-fixes-6.3-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
  cxl/hdm: Extend DVSEC range register emulation for region enumeration
  cxl/hdm: Limit emulation to the number of range registers
  cxl/region: Move coherence tracking into cxl_region_attach()
  cxl/region: Fix region setup/teardown for RCDs
  cxl/port: Fix find_cxl_root() for RCDs and simplify it
  cxl/hdm: Skip emulation when driver manages mem_enable
  cxl/hdm: Fix double allocation of @cxlhdm
  PCI/DOE: Fix memory leak with CONFIG_DEBUG_OBJECTS=y
  PCI/DOE: Silence WARN splat with CONFIG_DEBUG_OBJECTS=y
  cxl/pci: Handle excessive CDAT length
  cxl/pci: Handle truncated CDAT entries
  cxl/pci: Handle truncated CDAT header
  cxl/pci: Fix CDAT retrieval on big endian
2 parents cdc9718 + ca712e4 commit c08cfd6
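A note on the "cxl/pci: Fix CDAT retrieval on big endian" entry: the CDAT table arrives through DOE as a stream of little-endian 32-bit words, so the driver now declares the payload buffers as __le32 and converts each word with le32_to_cpu()/le16_to_cpu() before use (see the drivers/cxl/core/pci.c hunks below). The following stand-alone user-space sketch illustrates the same rule with the glibc le32toh() helper; the buffer contents are made up for illustration and this is not the kernel code itself.

/* Hypothetical example: decode one little-endian DOE response word portably. */
#define _DEFAULT_SOURCE
#include <endian.h>   /* le32toh() */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Raw bytes as a device would return them: least-significant byte first. */
	const uint8_t raw[8] = {
		0x01, 0x00, 0x00, 0x00,	/* word 0: table access response header */
		0x44, 0x33, 0x22, 0x11,	/* word 1: first CDAT data word */
	};
	uint32_t words[2];

	memcpy(words, raw, sizeof(raw));

	/* Reading words[1] directly is only correct on little-endian hosts;
	 * converting with le32toh() gives 0x11223344 on either byte order. */
	printf("converted word 1 = 0x%08" PRIx32 "\n", (uint32_t)le32toh(words[1]));
	return 0;
}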

File tree

10 files changed: +175 -130 lines


drivers/cxl/core/hdm.c

Lines changed: 68 additions & 58 deletions
@@ -101,25 +101,40 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
 				  BIT(CXL_CM_CAP_CAP_ID_HDM));
 }
 
-static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port,
-						   struct cxl_endpoint_dvsec_info *info)
+static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
 {
-	struct device *dev = &port->dev;
 	struct cxl_hdm *cxlhdm;
+	void __iomem *hdm;
+	u32 ctrl;
+	int i;
 
-	if (!info->mem_enabled)
-		return ERR_PTR(-ENODEV);
+	if (!info)
+		return false;
 
-	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
-	if (!cxlhdm)
-		return ERR_PTR(-ENOMEM);
+	cxlhdm = dev_get_drvdata(&info->port->dev);
+	hdm = cxlhdm->regs.hdm_decoder;
 
-	cxlhdm->port = port;
-	cxlhdm->decoder_count = info->ranges;
-	cxlhdm->target_count = info->ranges;
-	dev_set_drvdata(&port->dev, cxlhdm);
+	if (!hdm)
+		return true;
 
-	return cxlhdm;
+	/*
+	 * If HDM decoders are present and the driver is in control of
+	 * Mem_Enable skip DVSEC based emulation
+	 */
+	if (!info->mem_enabled)
+		return false;
+
+	/*
+	 * If any decoders are committed already, there should not be any
+	 * emulated DVSEC decoders.
+	 */
+	for (i = 0; i < cxlhdm->decoder_count; i++) {
+		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
+		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
+			return false;
+	}
+
+	return true;
 }
 
 /**
@@ -138,13 +153,14 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
 	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
 	if (!cxlhdm)
 		return ERR_PTR(-ENOMEM);
-
 	cxlhdm->port = port;
-	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
-	if (!crb) {
-		if (info && info->mem_enabled)
-			return devm_cxl_setup_emulated_hdm(port, info);
+	dev_set_drvdata(dev, cxlhdm);
 
+	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
+	if (!crb && info && info->mem_enabled) {
+		cxlhdm->decoder_count = info->ranges;
+		return cxlhdm;
+	} else if (!crb) {
 		dev_err(dev, "No component registers mapped\n");
 		return ERR_PTR(-ENXIO);
 	}
@@ -160,7 +176,15 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
 		return ERR_PTR(-ENXIO);
 	}
 
-	dev_set_drvdata(dev, cxlhdm);
+	/*
+	 * Now that the hdm capability is parsed, decide if range
+	 * register emulation is needed and fixup cxlhdm accordingly.
+	 */
+	if (should_emulate_decoders(info)) {
+		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
+			info->ranges > 1 ? "s" : "");
+		cxlhdm->decoder_count = info->ranges;
+	}
 
 	return cxlhdm;
 }
@@ -714,14 +738,20 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld)
 	return 0;
 }
 
-static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
-					    struct cxl_decoder *cxld, int which,
-					    struct cxl_endpoint_dvsec_info *info)
+static int cxl_setup_hdm_decoder_from_dvsec(
+	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
+	int which, struct cxl_endpoint_dvsec_info *info)
 {
+	struct cxl_endpoint_decoder *cxled;
+	u64 len;
+	int rc;
+
 	if (!is_cxl_endpoint(port))
 		return -EOPNOTSUPP;
 
-	if (!range_len(&info->dvsec_range[which]))
+	cxled = to_cxl_endpoint_decoder(&cxld->dev);
+	len = range_len(&info->dvsec_range[which]);
+	if (!len)
 		return -ENOENT;
 
 	cxld->target_type = CXL_DECODER_EXPANDER;
@@ -736,40 +766,24 @@ static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port,
 	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
 	port->commit_end = cxld->id;
 
-	return 0;
-}
-
-static bool should_emulate_decoders(struct cxl_port *port)
-{
-	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
-	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
-	u32 ctrl;
-	int i;
-
-	if (!is_cxl_endpoint(cxlhdm->port))
-		return false;
-
-	if (!hdm)
-		return true;
-
-	/*
-	 * If any decoders are committed already, there should not be any
-	 * emulated DVSEC decoders.
-	 */
-	for (i = 0; i < cxlhdm->decoder_count; i++) {
-		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
-		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
-			return false;
+	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
+	if (rc) {
+		dev_err(&port->dev,
+			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)",
+			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
+		return rc;
 	}
+	*dpa_base += len;
+	cxled->state = CXL_DECODER_STATE_AUTO;
 
-	return true;
+	return 0;
 }
 
 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 			    int *target_map, void __iomem *hdm, int which,
 			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
 {
-	struct cxl_endpoint_decoder *cxled = NULL;
+	struct cxl_endpoint_decoder *cxled;
 	u64 size, base, skip, dpa_size;
 	bool committed;
 	u32 remainder;
@@ -780,11 +794,9 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 		unsigned char target_id[8];
 	} target_list;
 
-	if (should_emulate_decoders(port))
-		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
-
-	if (is_endpoint_decoder(&cxld->dev))
-		cxled = to_cxl_endpoint_decoder(&cxld->dev);
+	if (should_emulate_decoders(info))
+		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
+							which, info);
 
 	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
 	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
@@ -806,9 +818,6 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 		.end = base + size - 1,
 	};
 
-	if (cxled && !committed && range_len(&info->dvsec_range[which]))
-		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info);
-
 	/* decoders are enabled if committed */
 	if (committed) {
 		cxld->flags |= CXL_DECODER_F_ENABLE;
@@ -846,7 +855,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 	if (rc)
 		return rc;
 
-	if (!cxled) {
+	if (!info) {
 		target_list.value =
 			ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
 		for (i = 0; i < cxld->interleave_ways; i++)
@@ -866,6 +875,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
 		return -ENXIO;
 	}
 	skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
+	cxled = to_cxl_endpoint_decoder(&cxld->dev);
 	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
 	if (rc) {
 		dev_err(&port->dev,

drivers/cxl/core/pci.c

Lines changed: 23 additions & 15 deletions
@@ -462,7 +462,7 @@ static struct pci_doe_mb *find_cdat_doe(struct device *uport)
 	return NULL;
 }
 
-#define CDAT_DOE_REQ(entry_handle) \
+#define CDAT_DOE_REQ(entry_handle) cpu_to_le32 \
 	(FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \
 		    CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \
 	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \
@@ -475,8 +475,8 @@ static void cxl_doe_task_complete(struct pci_doe_task *task)
 }
 
 struct cdat_doe_task {
-	u32 request_pl;
-	u32 response_pl[32];
+	__le32 request_pl;
+	__le32 response_pl[32];
 	struct completion c;
 	struct pci_doe_task task;
 };
@@ -510,10 +510,10 @@ static int cxl_cdat_get_length(struct device *dev,
 		return rc;
 	}
 	wait_for_completion(&t.c);
-	if (t.task.rv < sizeof(u32))
+	if (t.task.rv < 2 * sizeof(__le32))
 		return -EIO;
 
-	*length = t.response_pl[1];
+	*length = le32_to_cpu(t.response_pl[1]);
 	dev_dbg(dev, "CDAT length %zu\n", *length);
 
 	return 0;
@@ -524,13 +524,13 @@ static int cxl_cdat_read_table(struct device *dev,
 			       struct cxl_cdat *cdat)
 {
 	size_t length = cdat->length;
-	u32 *data = cdat->table;
+	__le32 *data = cdat->table;
 	int entry_handle = 0;
 
 	do {
 		DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t);
+		struct cdat_entry_header *entry;
 		size_t entry_dw;
-		u32 *entry;
 		int rc;
 
 		rc = pci_doe_submit_task(cdat_doe, &t.task);
@@ -539,26 +539,34 @@ static int cxl_cdat_read_table(struct device *dev,
 			return rc;
 		}
 		wait_for_completion(&t.c);
-		/* 1 DW header + 1 DW data min */
-		if (t.task.rv < (2 * sizeof(u32)))
+
+		/* 1 DW Table Access Response Header + CDAT entry */
+		entry = (struct cdat_entry_header *)(t.response_pl + 1);
+		if ((entry_handle == 0 &&
+		     t.task.rv != sizeof(__le32) + sizeof(struct cdat_header)) ||
+		    (entry_handle > 0 &&
+		     (t.task.rv < sizeof(__le32) + sizeof(*entry) ||
+		      t.task.rv != sizeof(__le32) + le16_to_cpu(entry->length))))
 			return -EIO;
 
 		/* Get the CXL table access header entry handle */
 		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
-					 t.response_pl[0]);
-		entry = t.response_pl + 1;
-		entry_dw = t.task.rv / sizeof(u32);
+					 le32_to_cpu(t.response_pl[0]));
+		entry_dw = t.task.rv / sizeof(__le32);
 		/* Skip Header */
 		entry_dw -= 1;
-		entry_dw = min(length / sizeof(u32), entry_dw);
+		entry_dw = min(length / sizeof(__le32), entry_dw);
 		/* Prevent length < 1 DW from causing a buffer overflow */
 		if (entry_dw) {
-			memcpy(data, entry, entry_dw * sizeof(u32));
-			length -= entry_dw * sizeof(u32);
+			memcpy(data, entry, entry_dw * sizeof(__le32));
+			length -= entry_dw * sizeof(__le32);
 			data += entry_dw;
 		}
 	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
 
+	/* Length in CDAT header may exceed concatenation of CDAT entries */
+	cdat->length -= length;
+
 	return 0;
 }

drivers/cxl/core/pmem.c

Lines changed: 3 additions & 3 deletions
@@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
 	return is_cxl_nvdimm_bridge(dev);
 }
 
-struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start)
+struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
 {
-	struct cxl_port *port = find_cxl_root(start);
+	struct cxl_port *port = find_cxl_root(dev_get_drvdata(&cxlmd->dev));
 	struct device *dev;
 
 	if (!port)
@@ -253,7 +253,7 @@ int devm_cxl_add_nvdimm(struct cxl_memdev *cxlmd)
 	struct device *dev;
 	int rc;
 
-	cxl_nvb = cxl_find_nvdimm_bridge(&cxlmd->dev);
+	cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
 	if (!cxl_nvb)
 		return -ENODEV;
 
drivers/cxl/core/port.c

Lines changed: 7 additions & 31 deletions
@@ -823,41 +823,17 @@ static bool dev_is_cxl_root_child(struct device *dev)
 	return false;
 }
 
-/* Find a 2nd level CXL port that has a dport that is an ancestor of @match */
-static int match_root_child(struct device *dev, const void *match)
+struct cxl_port *find_cxl_root(struct cxl_port *port)
 {
-	const struct device *iter = NULL;
-	struct cxl_dport *dport;
-	struct cxl_port *port;
-
-	if (!dev_is_cxl_root_child(dev))
-		return 0;
-
-	port = to_cxl_port(dev);
-	iter = match;
-	while (iter) {
-		dport = cxl_find_dport_by_dev(port, iter);
-		if (dport)
-			break;
-		iter = iter->parent;
-	}
-
-	return !!iter;
-}
+	struct cxl_port *iter = port;
 
-struct cxl_port *find_cxl_root(struct device *dev)
-{
-	struct device *port_dev;
-	struct cxl_port *root;
+	while (iter && !is_cxl_root(iter))
+		iter = to_cxl_port(iter->dev.parent);
 
-	port_dev = bus_find_device(&cxl_bus_type, NULL, dev, match_root_child);
-	if (!port_dev)
+	if (!iter)
 		return NULL;
-
-	root = to_cxl_port(port_dev->parent);
-	get_device(&root->dev);
-	put_device(port_dev);
-	return root;
+	get_device(&iter->dev);
+	return iter;
 }
 EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);