mirror of
https://github.com/torvalds/linux.git
synced 2024-11-12 23:23:03 +00:00
cxl/mem: Move devm_cxl_add_endpoint() from cxl_core to cxl_mem
tl;dr: Clean up an unnecessary export and enable cxl_test. An RCD (Restricted CXL Device), in contrast to a typical CXL device in a VH topology, obtains its component registers from the bottom half of the associated CXL host bridge RCRB (Root Complex Register Block). In turn this means that cxl_rcrb_to_component() needs to be called from devm_cxl_add_endpoint(). Presently devm_cxl_add_endpoint() is part of the CXL core, but the only user is the CXL mem module. Move it from cxl_core to cxl_mem to not only get rid of an unnecessary export, but to also enable its call out to cxl_rcrb_to_component(), in a subsequent patch, to be mocked by cxl_test. Recall that cxl_test can only mock exported symbols, and since cxl_rcrb_to_component() is itself inside the core, all callers must be outside of cxl_core to allow cxl_test to mock it. Reviewed-by: Robert Richter <rrichter@amd.com> Link: https://lore.kernel.org/r/166993045072.1882361.13944923741276843683.stgit@dwillia2-xfh.jf.intel.com Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
parent
d5b1a27143
commit
7592d935b7
@ -58,14 +58,6 @@ extern struct rw_semaphore cxl_dpa_rwsem;
|
||||
|
||||
bool is_switch_decoder(struct device *dev);
|
||||
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
|
||||
static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
|
||||
struct cxl_memdev *cxlmd)
|
||||
{
|
||||
if (!port)
|
||||
return NULL;
|
||||
|
||||
return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
|
||||
}
|
||||
|
||||
int cxl_memdev_init(void);
|
||||
void cxl_memdev_exit(void);
|
||||
|
@ -1212,45 +1212,6 @@ static void reap_dports(struct cxl_port *port)
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * devm_cxl_add_endpoint() - register the terminal (endpoint) port for @cxlmd
 * @cxlmd: memory device that terminates this branch of the port topology
 * @parent_dport: downstream port of @cxlmd's parent port
 *
 * Links the per-port struct cxl_ep records from @parent_dport's port up to
 * the CXL root into a chain (ep->next points toward the endpoint), then
 * registers the endpoint port itself via devm_cxl_add_port(). The endpoint
 * is devm-autoremoved relative to @cxlmd's lifetime.
 *
 * Returns 0 on success or a negative errno.
 */
int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
			  struct cxl_dport *parent_dport)
{
	struct cxl_port *parent_port = parent_dport->port;
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct cxl_port *endpoint, *iter, *down;
	int rc;

	/*
	 * Now that the path to the root is established record all the
	 * intervening ports in the chain.
	 */
	for (iter = parent_port, down = NULL; !is_cxl_root(iter);
	     down = iter, iter = to_cxl_port(iter->dev.parent)) {
		struct cxl_ep *ep;

		/*
		 * NOTE(review): no NULL check on the xa_load() result —
		 * presumably the preceding port enumeration guarantees an ep
		 * exists on every intervening port; confirm against callers.
		 */
		ep = cxl_ep_load(iter, cxlmd);
		ep->next = down;
	}

	/* Register the endpoint port; devm-owned by the parent port device. */
	endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
				     cxlds->component_reg_phys, parent_dport);
	if (IS_ERR(endpoint))
		return PTR_ERR(endpoint);

	/* Tie endpoint teardown to the memdev's lifetime. */
	rc = cxl_endpoint_autoremove(cxlmd, endpoint);
	if (rc)
		return rc;

	/*
	 * devm_cxl_add_port() returned successfully, but the endpoint port
	 * driver must also have bound synchronously; otherwise fail.
	 */
	if (!endpoint->dev.driver) {
		dev_err(&cxlmd->dev, "%s failed probe\n",
			dev_name(&endpoint->dev));
		return -ENXIO;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL);
|
||||
|
||||
static void cxl_detach_ep(void *data)
|
||||
{
|
||||
struct cxl_memdev *cxlmd = data;
|
||||
|
@ -560,8 +560,6 @@ struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
|
||||
struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
|
||||
resource_size_t component_reg_phys,
|
||||
struct cxl_dport *parent_dport);
|
||||
int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
|
||||
struct cxl_dport *parent_dport);
|
||||
struct cxl_port *find_cxl_root(struct device *dev);
|
||||
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
|
||||
void cxl_bus_rescan(void);
|
||||
|
@ -80,6 +80,15 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
|
||||
|
||||
struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
|
||||
|
||||
/*
 * cxl_ep_load() - look up the struct cxl_ep for @cxlmd on @port
 * @port: port whose endpoint xarray is consulted (may be NULL)
 * @cxlmd: memory device whose device address keys the lookup
 *
 * Returns the registered ep, or NULL if @port is NULL or no entry exists.
 * The xarray is keyed by the address of @cxlmd's embedded struct device.
 */
static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port,
					 struct cxl_memdev *cxlmd)
{
	if (!port)
		return NULL;

	return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev);
}
|
||||
|
||||
/**
|
||||
* struct cxl_mbox_cmd - A command to be submitted to hardware.
|
||||
* @opcode: (input) The command set and command submitted to hardware.
|
||||
|
@ -45,6 +45,44 @@ static int cxl_mem_dpa_show(struct seq_file *file, void *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
|
||||
struct cxl_dport *parent_dport)
|
||||
{
|
||||
struct cxl_port *parent_port = parent_dport->port;
|
||||
struct cxl_dev_state *cxlds = cxlmd->cxlds;
|
||||
struct cxl_port *endpoint, *iter, *down;
|
||||
int rc;
|
||||
|
||||
/*
|
||||
* Now that the path to the root is established record all the
|
||||
* intervening ports in the chain.
|
||||
*/
|
||||
for (iter = parent_port, down = NULL; !is_cxl_root(iter);
|
||||
down = iter, iter = to_cxl_port(iter->dev.parent)) {
|
||||
struct cxl_ep *ep;
|
||||
|
||||
ep = cxl_ep_load(iter, cxlmd);
|
||||
ep->next = down;
|
||||
}
|
||||
|
||||
endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
|
||||
cxlds->component_reg_phys, parent_dport);
|
||||
if (IS_ERR(endpoint))
|
||||
return PTR_ERR(endpoint);
|
||||
|
||||
rc = cxl_endpoint_autoremove(cxlmd, endpoint);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
if (!endpoint->dev.driver) {
|
||||
dev_err(&cxlmd->dev, "%s failed probe\n",
|
||||
dev_name(&endpoint->dev));
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cxl_mem_probe(struct device *dev)
|
||||
{
|
||||
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
|
||||
|
Loading…
Reference in New Issue
Block a user