2018-05-06 21:58:06 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0+
|
2015-03-05 19:25:25 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2014 Google, Inc
|
|
|
|
* Written by Simon Glass <sjg@chromium.org>
|
|
|
|
*/
|
|
|
|
|
2021-04-27 09:02:19 +00:00
|
|
|
#define LOG_CATEGORY UCLASS_PCI
|
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
#include <common.h>
|
|
|
|
#include <dm.h>
|
|
|
|
#include <errno.h>
|
2020-05-10 17:40:02 +00:00
|
|
|
#include <init.h>
|
2020-05-10 17:40:05 +00:00
|
|
|
#include <log.h>
|
2020-02-03 14:36:16 +00:00
|
|
|
#include <malloc.h>
|
2015-03-05 19:25:25 +00:00
|
|
|
#include <pci.h>
|
2020-10-31 03:38:53 +00:00
|
|
|
#include <asm/global_data.h>
|
2015-11-29 20:18:03 +00:00
|
|
|
#include <asm/io.h>
|
2015-03-05 19:25:25 +00:00
|
|
|
#include <dm/device-internal.h>
|
2017-05-19 02:09:51 +00:00
|
|
|
#include <dm/lists.h>
|
2020-12-17 04:20:18 +00:00
|
|
|
#include <dm/uclass-internal.h>
|
2015-08-20 13:40:23 +00:00
|
|
|
#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
|
2019-08-24 20:19:05 +00:00
|
|
|
#include <asm/fsp/fsp_support.h>
|
2015-08-20 13:40:23 +00:00
|
|
|
#endif
|
2021-06-27 23:50:57 +00:00
|
|
|
#include <dt-bindings/pci/pci.h>
|
2020-05-10 17:40:11 +00:00
|
|
|
#include <linux/delay.h>
|
2015-11-29 20:17:49 +00:00
|
|
|
#include "pci_internal.h"
|
2015-03-05 19:25:25 +00:00
|
|
|
|
|
|
|
DECLARE_GLOBAL_DATA_PTR;
|
|
|
|
|
2016-01-19 03:19:14 +00:00
|
|
|
int pci_get_bus(int busnum, struct udevice **busp)
|
2015-09-01 00:55:35 +00:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
|
|
|
|
|
|
|
|
/* Since buses may not be numbered yet try a little harder with bus 0 */
|
|
|
|
if (ret == -ENODEV) {
|
2016-02-11 20:23:26 +00:00
|
|
|
ret = uclass_first_device_err(UCLASS_PCI, busp);
|
2015-09-01 00:55:35 +00:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
ret = uclass_get_device_by_seq(UCLASS_PCI, busnum, busp);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2015-11-20 03:27:00 +00:00
|
|
|
struct udevice *pci_get_controller(struct udevice *dev)
|
|
|
|
{
|
|
|
|
while (device_is_on_pci_bus(dev))
|
|
|
|
dev = dev->parent;
|
|
|
|
|
|
|
|
return dev;
|
|
|
|
}
|
|
|
|
|
2020-01-27 15:49:38 +00:00
|
|
|
pci_dev_t dm_pci_get_bdf(const struct udevice *dev)
|
2015-07-06 22:47:46 +00:00
|
|
|
{
|
2020-12-03 23:55:23 +00:00
|
|
|
struct pci_child_plat *pplat = dev_get_parent_plat(dev);
|
2015-07-06 22:47:46 +00:00
|
|
|
struct udevice *bus = dev->parent;
|
|
|
|
|
2019-12-30 04:19:14 +00:00
|
|
|
/*
|
|
|
|
* This error indicates that @dev is a device on an unprobed PCI bus.
|
|
|
|
* The bus likely has bus=seq == -1, so the PCI_ADD_BUS() macro below
|
|
|
|
* will produce a bad BDF>
|
|
|
|
*
|
|
|
|
* A common cause of this problem is that this function is called in the
|
2020-12-03 23:55:21 +00:00
|
|
|
* of_to_plat() method of @dev. Accessing the PCI bus in that
|
2019-12-30 04:19:14 +00:00
|
|
|
* method is not allowed, since it has not yet been probed. To fix this,
|
|
|
|
* move that access to the probe() method of @dev instead.
|
|
|
|
*/
|
|
|
|
if (!device_active(bus))
|
|
|
|
log_err("PCI: Device '%s' on unprobed bus '%s'\n", dev->name,
|
|
|
|
bus->name);
|
2020-12-17 04:20:07 +00:00
|
|
|
return PCI_ADD_BUS(dev_seq(bus), pplat->devfn);
|
2015-07-06 22:47:46 +00:00
|
|
|
}
|
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
/**
|
|
|
|
* pci_get_bus_max() - returns the bus number of the last active bus
|
|
|
|
*
|
2022-01-19 17:05:50 +00:00
|
|
|
* Return: last bus number, or -1 if no active buses
|
2015-03-05 19:25:25 +00:00
|
|
|
*/
|
|
|
|
static int pci_get_bus_max(void)
|
|
|
|
{
|
|
|
|
struct udevice *bus;
|
|
|
|
struct uclass *uc;
|
|
|
|
int ret = -1;
|
|
|
|
|
|
|
|
ret = uclass_get(UCLASS_PCI, &uc);
|
|
|
|
uclass_foreach_dev(bus, uc) {
|
2020-12-17 04:20:07 +00:00
|
|
|
if (dev_seq(bus) > ret)
|
|
|
|
ret = dev_seq(bus);
|
2015-03-05 19:25:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
debug("%s: ret=%d\n", __func__, ret);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the number of the last (highest-numbered) active PCI bus */
int pci_last_busno(void)
{
	return pci_get_bus_max();
}
|
|
|
|
|
|
|
|
int pci_get_ff(enum pci_size_t size)
|
|
|
|
{
|
|
|
|
switch (size) {
|
|
|
|
case PCI_SIZE_8:
|
|
|
|
return 0xff;
|
|
|
|
case PCI_SIZE_16:
|
|
|
|
return 0xffff;
|
|
|
|
default:
|
|
|
|
return 0xffffffff;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-10 19:27:06 +00:00
|
|
|
static void pci_dev_find_ofnode(struct udevice *bus, phys_addr_t bdf,
|
|
|
|
ofnode *rnode)
|
|
|
|
{
|
|
|
|
struct fdt_pci_addr addr;
|
|
|
|
ofnode node;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
dev_for_each_subnode(node, bus) {
|
|
|
|
ret = ofnode_read_pci_addr(node, FDT_PCI_SPACE_CONFIG, "reg",
|
|
|
|
&addr);
|
|
|
|
if (ret)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (PCI_MASK_BUS(addr.phys_hi) != PCI_MASK_BUS(bdf))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
*rnode = node;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2020-01-27 15:49:37 +00:00
|
|
|
int pci_bus_find_devfn(const struct udevice *bus, pci_dev_t find_devfn,
		       struct udevice **devp)
{
	struct udevice *child;

	/* Walk the bound children of @bus looking for a devfn match */
	device_find_first_child(bus, &child);
	while (child) {
		struct pci_child_plat *pplat = dev_get_parent_plat(child);

		if (pplat && pplat->devfn == find_devfn) {
			*devp = child;
			return 0;
		}
		device_find_next_child(&child);
	}

	return -ENODEV;
}
|
|
|
|
|
2015-11-29 20:17:48 +00:00
|
|
|
int dm_pci_bus_find_bdf(pci_dev_t bdf, struct udevice **devp)
{
	struct udevice *bus;
	int ret = pci_get_bus(PCI_BUS(bdf), &bus);

	/* Locate the bus first, then search it for the device/function */
	return ret ? ret : pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), devp);
}
|
|
|
|
|
|
|
|
static int pci_device_matches_ids(struct udevice *dev,
|
2021-06-27 23:50:56 +00:00
|
|
|
const struct pci_device_id *ids)
|
2015-03-05 19:25:25 +00:00
|
|
|
{
|
2020-12-03 23:55:23 +00:00
|
|
|
struct pci_child_plat *pplat;
|
2015-03-05 19:25:25 +00:00
|
|
|
int i;
|
|
|
|
|
2020-12-03 23:55:18 +00:00
|
|
|
pplat = dev_get_parent_plat(dev);
|
2015-03-05 19:25:25 +00:00
|
|
|
if (!pplat)
|
|
|
|
return -EINVAL;
|
|
|
|
for (i = 0; ids[i].vendor != 0; i++) {
|
|
|
|
if (pplat->vendor == ids[i].vendor &&
|
|
|
|
pplat->device == ids[i].device)
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2021-06-27 23:50:56 +00:00
|
|
|
int pci_bus_find_devices(struct udevice *bus, const struct pci_device_id *ids,
|
2015-03-05 19:25:25 +00:00
|
|
|
int *indexp, struct udevice **devp)
|
|
|
|
{
|
|
|
|
struct udevice *dev;
|
|
|
|
|
|
|
|
/* Scan all devices on this bus */
|
|
|
|
for (device_find_first_child(bus, &dev);
|
|
|
|
dev;
|
|
|
|
device_find_next_child(&dev)) {
|
|
|
|
if (pci_device_matches_ids(dev, ids) >= 0) {
|
|
|
|
if ((*indexp)-- <= 0) {
|
|
|
|
*devp = dev;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
2021-06-27 23:50:56 +00:00
|
|
|
int pci_find_device_id(const struct pci_device_id *ids, int index,
|
2015-03-05 19:25:25 +00:00
|
|
|
struct udevice **devp)
|
|
|
|
{
|
|
|
|
struct udevice *bus;
|
|
|
|
|
|
|
|
/* Scan all known buses */
|
|
|
|
for (uclass_first_device(UCLASS_PCI, &bus);
|
|
|
|
bus;
|
|
|
|
uclass_next_device(&bus)) {
|
|
|
|
if (!pci_bus_find_devices(bus, ids, &index, devp))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
*devp = NULL;
|
|
|
|
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
2015-11-29 20:17:50 +00:00
|
|
|
static int dm_pci_bus_find_device(struct udevice *bus, unsigned int vendor,
|
|
|
|
unsigned int device, int *indexp,
|
|
|
|
struct udevice **devp)
|
|
|
|
{
|
2020-12-03 23:55:23 +00:00
|
|
|
struct pci_child_plat *pplat;
|
2015-11-29 20:17:50 +00:00
|
|
|
struct udevice *dev;
|
|
|
|
|
|
|
|
for (device_find_first_child(bus, &dev);
|
|
|
|
dev;
|
|
|
|
device_find_next_child(&dev)) {
|
2020-12-03 23:55:18 +00:00
|
|
|
pplat = dev_get_parent_plat(dev);
|
2015-11-29 20:17:50 +00:00
|
|
|
if (pplat->vendor == vendor && pplat->device == device) {
|
|
|
|
if (!(*indexp)--) {
|
|
|
|
*devp = dev;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
int dm_pci_find_device(unsigned int vendor, unsigned int device, int index,
|
|
|
|
struct udevice **devp)
|
|
|
|
{
|
|
|
|
struct udevice *bus;
|
|
|
|
|
|
|
|
/* Scan all known buses */
|
|
|
|
for (uclass_first_device(UCLASS_PCI, &bus);
|
|
|
|
bus;
|
|
|
|
uclass_next_device(&bus)) {
|
|
|
|
if (!dm_pci_bus_find_device(bus, vendor, device, &index, devp))
|
|
|
|
return device_probe(*devp);
|
|
|
|
}
|
|
|
|
*devp = NULL;
|
|
|
|
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
2015-11-29 20:17:52 +00:00
|
|
|
int dm_pci_find_class(uint find_class, int index, struct udevice **devp)
{
	struct udevice *dev;

	/* Walk every device on every known bus */
	pci_find_first_device(&dev);
	while (dev) {
		struct pci_child_plat *pplat = dev_get_parent_plat(dev);

		/* Return the @index'th device of the requested class */
		if (pplat->class == find_class && !index--) {
			*devp = dev;
			return device_probe(*devp);
		}
		pci_find_next_device(&dev);
	}
	*devp = NULL;

	return -ENODEV;
}
|
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
int pci_bus_write_config(struct udevice *bus, pci_dev_t bdf, int offset,
			 unsigned long value, enum pci_size_t size)
{
	struct dm_pci_ops *ops = pci_get_ops(bus);

	/* The controller driver must implement config-space writes */
	if (!ops->write_config)
		return -ENOSYS;

	return ops->write_config(bus, bdf, offset, value, size);
}
|
|
|
|
|
2016-03-07 02:27:52 +00:00
|
|
|
int pci_bus_clrset_config32(struct udevice *bus, pci_dev_t bdf, int offset,
			    u32 clr, u32 set)
{
	ulong val;
	int ret;

	/* Read-modify-write: clear @clr bits, then set @set bits */
	ret = pci_bus_read_config(bus, bdf, offset, &val, PCI_SIZE_32);
	if (ret)
		return ret;
	val = (val & ~clr) | set;

	return pci_bus_write_config(bus, bdf, offset, val, PCI_SIZE_32);
}
|
|
|
|
|
2021-09-17 12:11:25 +00:00
|
|
|
/* Write to config space of the device identified by @bdf (bus looked up) */
static int pci_write_config(pci_dev_t bdf, int offset, unsigned long value,
			    enum pci_size_t size)
{
	struct udevice *bus;
	int ret = pci_get_bus(PCI_BUS(bdf), &bus);

	if (ret)
		return ret;

	return pci_bus_write_config(bus, bdf, offset, value, size);
}
|
|
|
|
|
2015-08-10 13:05:03 +00:00
|
|
|
int dm_pci_write_config(struct udevice *dev, int offset, unsigned long value,
|
|
|
|
enum pci_size_t size)
|
|
|
|
{
|
|
|
|
struct udevice *bus;
|
|
|
|
|
2015-09-11 10:24:34 +00:00
|
|
|
for (bus = dev; device_is_on_pci_bus(bus);)
|
2015-08-10 13:05:03 +00:00
|
|
|
bus = bus->parent;
|
2015-11-29 20:17:47 +00:00
|
|
|
return pci_bus_write_config(bus, dm_pci_get_bdf(dev), offset, value,
|
|
|
|
size);
|
2015-08-10 13:05:03 +00:00
|
|
|
}
|
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
/* Write a 32-bit value to the config space of the device at @bdf */
int pci_write_config32(pci_dev_t bdf, int offset, u32 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_32);
}
|
|
|
|
|
|
|
|
/* Write a 16-bit value to the config space of the device at @bdf */
int pci_write_config16(pci_dev_t bdf, int offset, u16 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_16);
}
|
|
|
|
|
|
|
|
/* Write an 8-bit value to the config space of the device at @bdf */
int pci_write_config8(pci_dev_t bdf, int offset, u8 value)
{
	return pci_write_config(bdf, offset, value, PCI_SIZE_8);
}
|
|
|
|
|
2015-08-10 13:05:03 +00:00
|
|
|
/* Write an 8-bit value to @dev's config space */
int dm_pci_write_config8(struct udevice *dev, int offset, u8 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_8);
}
|
|
|
|
|
|
|
|
/* Write a 16-bit value to @dev's config space */
int dm_pci_write_config16(struct udevice *dev, int offset, u16 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_16);
}
|
|
|
|
|
|
|
|
/* Write a 32-bit value to @dev's config space */
int dm_pci_write_config32(struct udevice *dev, int offset, u32 value)
{
	return dm_pci_write_config(dev, offset, value, PCI_SIZE_32);
}
|
|
|
|
|
2020-01-27 15:49:38 +00:00
|
|
|
int pci_bus_read_config(const struct udevice *bus, pci_dev_t bdf, int offset,
			unsigned long *valuep, enum pci_size_t size)
{
	struct dm_pci_ops *ops = pci_get_ops(bus);

	/* The controller driver must implement config-space reads */
	if (!ops->read_config)
		return -ENOSYS;

	return ops->read_config(bus, bdf, offset, valuep, size);
}
|
|
|
|
|
2021-09-17 12:11:26 +00:00
|
|
|
/* Read from config space of the device identified by @bdf (bus looked up) */
static int pci_read_config(pci_dev_t bdf, int offset, unsigned long *valuep,
			   enum pci_size_t size)
{
	struct udevice *bus;
	int ret = pci_get_bus(PCI_BUS(bdf), &bus);

	if (ret)
		return ret;

	return pci_bus_read_config(bus, bdf, offset, valuep, size);
}
|
|
|
|
|
2020-01-27 15:49:38 +00:00
|
|
|
int dm_pci_read_config(const struct udevice *dev, int offset,
|
|
|
|
unsigned long *valuep, enum pci_size_t size)
|
2015-08-10 13:05:03 +00:00
|
|
|
{
|
2020-01-27 15:49:38 +00:00
|
|
|
const struct udevice *bus;
|
2015-08-10 13:05:03 +00:00
|
|
|
|
2015-09-11 10:24:34 +00:00
|
|
|
for (bus = dev; device_is_on_pci_bus(bus);)
|
2015-08-10 13:05:03 +00:00
|
|
|
bus = bus->parent;
|
2015-11-29 20:17:47 +00:00
|
|
|
return pci_bus_read_config(bus, dm_pci_get_bdf(dev), offset, valuep,
|
2015-08-10 13:05:03 +00:00
|
|
|
size);
|
|
|
|
}
|
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
int pci_read_config32(pci_dev_t bdf, int offset, u32 *valuep)
{
	unsigned long val;
	int ret;

	/* Only store to *valuep on success */
	ret = pci_read_config(bdf, offset, &val, PCI_SIZE_32);
	if (!ret)
		*valuep = val;

	return ret;
}
|
|
|
|
|
|
|
|
int pci_read_config16(pci_dev_t bdf, int offset, u16 *valuep)
{
	unsigned long val;
	int ret;

	/* Only store to *valuep on success */
	ret = pci_read_config(bdf, offset, &val, PCI_SIZE_16);
	if (!ret)
		*valuep = val;

	return ret;
}
|
|
|
|
|
|
|
|
int pci_read_config8(pci_dev_t bdf, int offset, u8 *valuep)
{
	unsigned long val;
	int ret;

	/* Only store to *valuep on success */
	ret = pci_read_config(bdf, offset, &val, PCI_SIZE_8);
	if (!ret)
		*valuep = val;

	return ret;
}
|
|
|
|
|
2020-01-27 15:49:38 +00:00
|
|
|
int dm_pci_read_config8(const struct udevice *dev, int offset, u8 *valuep)
{
	unsigned long val;
	int ret;

	/* Only store to *valuep on success */
	ret = dm_pci_read_config(dev, offset, &val, PCI_SIZE_8);
	if (!ret)
		*valuep = val;

	return ret;
}
|
|
|
|
|
2020-01-27 15:49:38 +00:00
|
|
|
int dm_pci_read_config16(const struct udevice *dev, int offset, u16 *valuep)
{
	unsigned long val;
	int ret;

	/* Only store to *valuep on success */
	ret = dm_pci_read_config(dev, offset, &val, PCI_SIZE_16);
	if (!ret)
		*valuep = val;

	return ret;
}
|
|
|
|
|
2020-01-27 15:49:38 +00:00
|
|
|
int dm_pci_read_config32(const struct udevice *dev, int offset, u32 *valuep)
{
	unsigned long val;
	int ret;

	/* Only store to *valuep on success */
	ret = dm_pci_read_config(dev, offset, &val, PCI_SIZE_32);
	if (!ret)
		*valuep = val;

	return ret;
}
|
|
|
|
|
2016-03-07 02:27:52 +00:00
|
|
|
int dm_pci_clrset_config8(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u8 val;
	int ret;

	/* Read-modify-write: clear @clr bits, then set @set bits */
	ret = dm_pci_read_config8(dev, offset, &val);
	if (ret)
		return ret;
	val = (val & ~clr) | set;

	return dm_pci_write_config8(dev, offset, val);
}
|
|
|
|
|
|
|
|
int dm_pci_clrset_config16(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u16 val;
	int ret;

	/* Read-modify-write: clear @clr bits, then set @set bits */
	ret = dm_pci_read_config16(dev, offset, &val);
	if (ret)
		return ret;
	val = (val & ~clr) | set;

	return dm_pci_write_config16(dev, offset, val);
}
|
|
|
|
|
|
|
|
int dm_pci_clrset_config32(struct udevice *dev, int offset, u32 clr, u32 set)
{
	u32 val;
	int ret;

	/* Read-modify-write: clear @clr bits, then set @set bits */
	ret = dm_pci_read_config32(dev, offset, &val);
	if (ret)
		return ret;
	val = (val & ~clr) | set;

	return dm_pci_write_config32(dev, offset, val);
}
|
|
|
|
|
2015-10-01 07:36:02 +00:00
|
|
|
static void set_vga_bridge_bits(struct udevice *dev)
|
|
|
|
{
|
|
|
|
struct udevice *parent = dev->parent;
|
|
|
|
u16 bc;
|
|
|
|
|
2020-12-17 04:20:07 +00:00
|
|
|
while (dev_seq(parent) != 0) {
|
2015-10-01 07:36:02 +00:00
|
|
|
dm_pci_read_config16(parent, PCI_BRIDGE_CONTROL, &bc);
|
|
|
|
bc |= PCI_BRIDGE_CTL_VGA;
|
|
|
|
dm_pci_write_config16(parent, PCI_BRIDGE_CONTROL, bc);
|
|
|
|
parent = parent->parent;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
int pci_auto_config_devices(struct udevice *bus)
|
|
|
|
{
|
2020-12-23 02:30:28 +00:00
|
|
|
struct pci_controller *hose = dev_get_uclass_priv(bus);
|
2020-12-03 23:55:23 +00:00
|
|
|
struct pci_child_plat *pplat;
|
2015-03-05 19:25:25 +00:00
|
|
|
unsigned int sub_bus;
|
|
|
|
struct udevice *dev;
|
|
|
|
int ret;
|
|
|
|
|
2020-12-17 04:20:07 +00:00
|
|
|
sub_bus = dev_seq(bus);
|
2015-03-05 19:25:25 +00:00
|
|
|
debug("%s: start\n", __func__);
|
|
|
|
pciauto_config_init(hose);
|
|
|
|
for (ret = device_find_first_child(bus, &dev);
|
|
|
|
!ret && dev;
|
|
|
|
ret = device_find_next_child(&dev)) {
|
|
|
|
unsigned int max_bus;
|
2015-09-08 23:52:47 +00:00
|
|
|
int ret;
|
2015-03-05 19:25:25 +00:00
|
|
|
|
|
|
|
debug("%s: device %s\n", __func__, dev->name);
|
2020-12-19 17:40:13 +00:00
|
|
|
if (dev_has_ofnode(dev) &&
|
2020-05-05 04:25:25 +00:00
|
|
|
dev_read_bool(dev, "pci,no-autoconfig"))
|
2020-04-08 22:57:26 +00:00
|
|
|
continue;
|
2015-11-29 20:17:49 +00:00
|
|
|
ret = dm_pciauto_config_device(dev);
|
2015-09-08 23:52:47 +00:00
|
|
|
if (ret < 0)
|
2020-12-17 04:20:18 +00:00
|
|
|
return log_msg_ret("auto", ret);
|
2015-09-08 23:52:47 +00:00
|
|
|
max_bus = ret;
|
2015-03-05 19:25:25 +00:00
|
|
|
sub_bus = max(sub_bus, max_bus);
|
2015-10-01 07:36:02 +00:00
|
|
|
|
dm: pci: Skip setting VGA bridge bits if parent device is the host bus
Commit bbbcb5262839 ("dm: pci: Enable VGA address forwarding on bridges")
sets the VGA bridge bits by checking pplat->class, but if the parent
device is the pci host bus device, it can be skipped. Moreover, it
shouldn't access the pplat because the parent has different plat data.
Without this fix, "pci enum" command cause a synchronous abort.
pci_auto_config_devices: start
PCI Autoconfig: Bus Memory region: [78000000-7fffffff],
Physical Memory [78000000-7fffffffx]
PCI Autoconfig: Bus I/O region: [0-ffff],
Physical Memory [77f00000-77f0ffffx]
pci_auto_config_devices: device pci_6:0.0
PCI Autoconfig: BAR 0, Mem, size=0x1000000, address=0x78000000 bus_lower=0x79000000
PCI Autoconfig: BAR 1, Mem, size=0x8000000, No room in resource, avail start=79000000 / size=8000000, need=8000000
PCI: Failed autoconfig bar 14
PCI Autoconfig: BAR 2, I/O, size=0x4, address=0x1000 bus_lower=0x1004
PCI Autoconfig: BAR 3, Mem, size=0x2000000, address=0x7a000000 bus_lower=0x7c000000
PCI Autoconfig: BAR 4, I/O, size=0x80, address=0x1080 bus_lower=0x1100
PCI Autoconfig: ROM, size=0x80000, address=0x7c000000 bus_lower=0x7c080000
"Synchronous Abort" handler, esr 0x96000006
elr: 00000000e002bd28 lr : 00000000e002bce8 (reloc)
elr: 00000000fff6fd28 lr : 00000000fff6fce8
x0 : 0000000000001041 x1 : 000000000000003e
x2 : 00000000ffb0f8c8 x3 : 0000000000000001
x4 : 0000000000000080 x5 : 0000000000000000
x6 : 00000000fff718fc x7 : 000000000000000f
x8 : 00000000ffb0f238 x9 : 0000000000000008
x10: 0000000000000000 x11: 0000000000000010
x12: 0000000000000006 x13: 000000000001869f
x14: 00000000ffb0fcd0 x15: 0000000000000020
x16: 00000000fff71cc4 x17: 0000000000000000
x18: 00000000ffb13d90 x19: 00000000ffb14320
x20: 0000000000000000 x21: 00000000ffb14090
x22: 00000000ffb0f8c8 x23: 0000000000000001
x24: 00000000ffb14c10 x25: 0000000000000000
x26: 0000000000000000 x27: 0000000000000000
x28: 00000000ffb14c70 x29: 00000000ffb0f830
Code: 52800843 52800061 52800e00 97ffcf65 (b9400280)
Resetting CPU ...
Signed-off-by: Masami Hiramatsu <masami.hiramatsu@linaro.org>
Reviewed-by: Simon Glass <sjg@chromium.org>
2021-06-04 09:43:34 +00:00
|
|
|
if (dev_get_parent(dev) == bus)
|
|
|
|
continue;
|
|
|
|
|
2020-12-03 23:55:18 +00:00
|
|
|
pplat = dev_get_parent_plat(dev);
|
2015-10-01 07:36:02 +00:00
|
|
|
if (pplat->class == (PCI_CLASS_DISPLAY_VGA << 8))
|
|
|
|
set_vga_bridge_bits(dev);
|
2015-03-05 19:25:25 +00:00
|
|
|
}
|
2022-01-17 15:38:37 +00:00
|
|
|
if (hose->last_busno < sub_bus)
|
|
|
|
hose->last_busno = sub_bus;
|
2015-03-05 19:25:25 +00:00
|
|
|
debug("%s: done\n", __func__);
|
|
|
|
|
2020-12-17 04:20:18 +00:00
|
|
|
return log_msg_ret("sub", sub_bus);
|
2015-03-05 19:25:25 +00:00
|
|
|
}
|
|
|
|
|
2017-09-19 20:18:03 +00:00
|
|
|
int pci_generic_mmap_write_config(
|
2020-01-27 15:49:37 +00:00
|
|
|
const struct udevice *bus,
|
|
|
|
int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset,
|
|
|
|
void **addrp),
|
2017-09-19 20:18:03 +00:00
|
|
|
pci_dev_t bdf,
|
|
|
|
uint offset,
|
|
|
|
ulong value,
|
|
|
|
enum pci_size_t size)
|
|
|
|
{
|
|
|
|
void *address;
|
|
|
|
|
|
|
|
if (addr_f(bus, bdf, offset, &address) < 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
switch (size) {
|
|
|
|
case PCI_SIZE_8:
|
|
|
|
writeb(value, address);
|
|
|
|
return 0;
|
|
|
|
case PCI_SIZE_16:
|
|
|
|
writew(value, address);
|
|
|
|
return 0;
|
|
|
|
case PCI_SIZE_32:
|
|
|
|
writel(value, address);
|
|
|
|
return 0;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int pci_generic_mmap_read_config(
|
2020-01-27 15:49:37 +00:00
|
|
|
const struct udevice *bus,
|
|
|
|
int (*addr_f)(const struct udevice *bus, pci_dev_t bdf, uint offset,
|
|
|
|
void **addrp),
|
2017-09-19 20:18:03 +00:00
|
|
|
pci_dev_t bdf,
|
|
|
|
uint offset,
|
|
|
|
ulong *valuep,
|
|
|
|
enum pci_size_t size)
|
|
|
|
{
|
|
|
|
void *address;
|
|
|
|
|
|
|
|
if (addr_f(bus, bdf, offset, &address) < 0) {
|
|
|
|
*valuep = pci_get_ff(size);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (size) {
|
|
|
|
case PCI_SIZE_8:
|
|
|
|
*valuep = readb(address);
|
|
|
|
return 0;
|
|
|
|
case PCI_SIZE_16:
|
|
|
|
*valuep = readw(address);
|
|
|
|
return 0;
|
|
|
|
case PCI_SIZE_32:
|
|
|
|
*valuep = readl(address);
|
|
|
|
return 0;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-11-29 20:17:49 +00:00
|
|
|
/*
 * dm_pci_hose_probe_bus() - set up and probe a bridge device's secondary bus
 *
 * Verifies that @bus has a bridge header, picks a secondary bus number
 * (from the Enhanced Allocation capability if present, otherwise the next
 * free bus number), programs the bridge, probes it and then completes the
 * bridge setup with the final subordinate bus number.
 *
 * Return: the sub-bus number on success, or -ve error (via log_msg_ret)
 */
int dm_pci_hose_probe_bus(struct udevice *bus)
{
	u8 header_type;
	int sub_bus;
	int ret;
	int ea_pos;
	u8 reg;

	debug("%s\n", __func__);

	/* Only bridge header types (bit 7 = multifunction flag masked off) */
	dm_pci_read_config8(bus, PCI_HEADER_TYPE, &header_type);
	header_type &= 0x7f;
	if (header_type != PCI_HEADER_TYPE_BRIDGE) {
		debug("%s: Skipping PCI device %d with Non-Bridge Header Type 0x%x\n",
		      __func__, PCI_DEV(dm_pci_get_bdf(bus)), header_type);
		return log_msg_ret("probe", -EINVAL);
	}

	/*
	 * With an Enhanced Allocation capability the fixed secondary bus
	 * number is read from the capability (byte after the first dword);
	 * otherwise allocate the next bus number after the current maximum.
	 */
	ea_pos = dm_pci_find_capability(bus, PCI_CAP_ID_EA);
	if (ea_pos) {
		dm_pci_read_config8(bus, ea_pos + sizeof(u32) + sizeof(u8),
				    &reg);
		sub_bus = reg;
	} else {
		sub_bus = pci_get_bus_max() + 1;
	}
	debug("%s: bus = %d/%s\n", __func__, sub_bus, bus->name);
	dm_pciauto_prescan_setup_bridge(bus, sub_bus);

	/* Probing the bridge scans and binds the devices behind it */
	ret = device_probe(bus);
	if (ret) {
		debug("%s: Cannot probe bus %s: %d\n", __func__, bus->name,
		      ret);
		return log_msg_ret("probe", ret);
	}

	/* Without EA, the scan may have consumed more bus numbers */
	if (!ea_pos)
		sub_bus = pci_get_bus_max();

	dm_pciauto_postscan_setup_bridge(bus, sub_bus);

	return sub_bus;
}
|
|
|
|
|
2015-07-06 22:47:44 +00:00
|
|
|
/**
|
|
|
|
* pci_match_one_device - Tell if a PCI device structure has a matching
|
|
|
|
* PCI device id structure
|
|
|
|
* @id: single PCI device id structure to match
|
2017-03-22 08:07:24 +00:00
|
|
|
* @find: the PCI device id structure to match against
|
2015-07-06 22:47:44 +00:00
|
|
|
*
|
2017-03-22 08:07:24 +00:00
|
|
|
* Returns true if the finding pci_device_id structure matched or false if
|
|
|
|
* there is no match.
|
2015-07-06 22:47:44 +00:00
|
|
|
*/
|
|
|
|
static bool pci_match_one_id(const struct pci_device_id *id,
|
|
|
|
const struct pci_device_id *find)
|
|
|
|
{
|
|
|
|
if ((id->vendor == PCI_ANY_ID || id->vendor == find->vendor) &&
|
|
|
|
(id->device == PCI_ANY_ID || id->device == find->device) &&
|
|
|
|
(id->subvendor == PCI_ANY_ID || id->subvendor == find->subvendor) &&
|
|
|
|
(id->subdevice == PCI_ANY_ID || id->subdevice == find->subdevice) &&
|
|
|
|
!((id->class ^ find->class) & id->class_mask))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-06-27 23:50:57 +00:00
|
|
|
/**
|
|
|
|
* pci_need_device_pre_reloc() - Check if a device should be bound
|
|
|
|
*
|
|
|
|
* This checks a list of vendor/device-ID values indicating devices that should
|
|
|
|
* be bound before relocation.
|
|
|
|
*
|
|
|
|
* @bus: Bus to check
|
|
|
|
* @vendor: Vendor ID to check
|
|
|
|
* @device: Device ID to check
|
2022-01-19 17:05:50 +00:00
|
|
|
* Return: true if the vendor/device is in the list, false if not
|
2021-06-27 23:50:57 +00:00
|
|
|
*/
|
|
|
|
static bool pci_need_device_pre_reloc(struct udevice *bus, uint vendor,
|
|
|
|
uint device)
|
|
|
|
{
|
|
|
|
u32 vendev;
|
|
|
|
int index;
|
|
|
|
|
|
|
|
for (index = 0;
|
|
|
|
!dev_read_u32_index(bus, "u-boot,pci-pre-reloc", index,
|
|
|
|
&vendev);
|
|
|
|
index++) {
|
|
|
|
if (vendev == PCI_VENDEV(vendor, device))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-07-06 22:47:44 +00:00
|
|
|
/**
 * pci_find_and_bind_driver() - Find and bind the right PCI driver
 *
 * This only looks at certain fields in the descriptor.
 *
 * @parent: Parent bus
 * @find_id: Specification of the driver to find
 * @bdf: Bus/device/function addreess - see PCI_BDF()
 * @devp: Returns a pointer to the device created
 * Return: 0 if OK, -EPERM if the device is not needed before relocation and
 * therefore was not created, other -ve value on error
 */
static int pci_find_and_bind_driver(struct udevice *parent,
				    struct pci_device_id *find_id,
				    pci_dev_t bdf, struct udevice **devp)
{
	struct pci_driver_entry *start, *entry;
	ofnode node = ofnode_null();
	const char *drv;
	int n_ents;
	int ret;
	char name[30], *str;
	bool bridge;

	*devp = NULL;

	debug("%s: Searching for driver: vendor=%x, device=%x\n", __func__,
	      find_id->vendor, find_id->device);

	/* Determine optional OF node */
	if (ofnode_valid(dev_ofnode(parent)))
		pci_dev_find_ofnode(parent, bdf, &node);

	/* A device marked disabled in the devicetree is never bound */
	if (ofnode_valid(node) && !ofnode_is_available(node)) {
		debug("%s: Ignoring disabled device\n", __func__);
		return log_msg_ret("dis", -EPERM);
	}

	/* Scan the linker-list of U_BOOT_PCI_DEVICE() driver entries */
	start = ll_entry_start(struct pci_driver_entry, pci_driver_entry);
	n_ents = ll_entry_count(struct pci_driver_entry, pci_driver_entry);
	for (entry = start; entry != start + n_ents; entry++) {
		const struct pci_device_id *id;
		struct udevice *dev;
		const struct driver *drv;

		/* Each entry carries an ID table terminated by zeros */
		for (id = entry->match;
		     id->vendor || id->subvendor || id->class_mask;
		     id++) {
			if (!pci_match_one_id(id, find_id))
				continue;

			drv = entry->driver;

			/*
			 * In the pre-relocation phase, we only bind devices
			 * whose driver has the DM_FLAG_PRE_RELOC set, to save
			 * precious memory space as on some platforms as that
			 * space is pretty limited (ie: using Cache As RAM).
			 */
			if (!(gd->flags & GD_FLG_RELOC) &&
			    !(drv->flags & DM_FLAG_PRE_RELOC))
				return log_msg_ret("pre", -EPERM);

			/*
			 * We could pass the descriptor to the driver as
			 * plat (instead of NULL) and allow its bind()
			 * method to return -ENOENT if it doesn't support this
			 * device. That way we could continue the search to
			 * find another driver. For now this doesn't seem
			 * necesssary, so just bind the first match.
			 */
			ret = device_bind(parent, drv, drv->name, NULL, node,
					  &dev);
			if (ret)
				goto error;
			debug("%s: Match found: %s\n", __func__, drv->name);
			dev->driver_data = id->driver_data;
			*devp = dev;
			return 0;
		}
	}

	bridge = (find_id->class >> 8) == PCI_CLASS_BRIDGE_PCI;
	/*
	 * In the pre-relocation phase, we only bind bridge devices to save
	 * precious memory space as on some platforms as that space is pretty
	 * limited (ie: using Cache As RAM).
	 */
	if (!(gd->flags & GD_FLG_RELOC) && !bridge &&
	    !pci_need_device_pre_reloc(parent, find_id->vendor,
				       find_id->device))
		return log_msg_ret("notbr", -EPERM);

	/* Bind a generic driver so that the device can be used */
	sprintf(name, "pci_%x:%x.%x", dev_seq(parent), PCI_DEV(bdf),
		PCI_FUNC(bdf));
	/* The bound device keeps a reference to this name, so duplicate it */
	str = strdup(name);
	if (!str)
		return -ENOMEM;
	drv = bridge ? "pci_bridge_drv" : "pci_generic_drv";

	ret = device_bind_driver_to_node(parent, drv, str, node, devp);
	if (ret) {
		debug("%s: Failed to bind generic driver: %d\n", __func__, ret);
		free(str);
		return ret;
	}
	debug("%s: No match found: bound generic driver instead\n", __func__);

	return 0;

error:
	debug("%s: No match found: error %d\n", __func__, ret);
	return ret;
}
|
|
|
|
|
2021-04-16 21:53:47 +00:00
|
|
|
__weak extern void board_pci_fixup_dev(struct udevice *bus, struct udevice *dev)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
int pci_bind_bus_devices(struct udevice *bus)
|
|
|
|
{
|
|
|
|
ulong vendor, device;
|
|
|
|
ulong header_type;
|
2015-07-18 16:20:04 +00:00
|
|
|
pci_dev_t bdf, end;
|
2015-03-05 19:25:25 +00:00
|
|
|
bool found_multi;
|
2019-10-24 01:40:36 +00:00
|
|
|
int ari_off;
|
2015-03-05 19:25:25 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
found_multi = false;
|
2020-12-17 04:20:07 +00:00
|
|
|
end = PCI_BDF(dev_seq(bus), PCI_MAX_PCI_DEVICES - 1,
|
2015-07-18 16:20:04 +00:00
|
|
|
PCI_MAX_PCI_FUNCTIONS - 1);
|
2020-12-17 04:20:07 +00:00
|
|
|
for (bdf = PCI_BDF(dev_seq(bus), 0, 0); bdf <= end;
|
2015-07-18 16:20:04 +00:00
|
|
|
bdf += PCI_BDF(0, 0, 1)) {
|
2020-12-03 23:55:23 +00:00
|
|
|
struct pci_child_plat *pplat;
|
2015-03-05 19:25:25 +00:00
|
|
|
struct udevice *dev;
|
|
|
|
ulong class;
|
|
|
|
|
2018-08-03 08:14:37 +00:00
|
|
|
if (!PCI_FUNC(bdf))
|
|
|
|
found_multi = false;
|
2015-07-18 16:20:04 +00:00
|
|
|
if (PCI_FUNC(bdf) && !found_multi)
|
2015-03-05 19:25:25 +00:00
|
|
|
continue;
|
2018-10-08 08:35:47 +00:00
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
/* Check only the first access, we don't expect problems */
|
2018-10-08 08:35:47 +00:00
|
|
|
ret = pci_bus_read_config(bus, bdf, PCI_VENDOR_ID, &vendor,
|
|
|
|
PCI_SIZE_16);
|
2021-09-07 16:07:08 +00:00
|
|
|
if (ret || vendor == 0xffff || vendor == 0x0000)
|
2015-03-05 19:25:25 +00:00
|
|
|
continue;
|
|
|
|
|
2018-10-08 08:35:47 +00:00
|
|
|
pci_bus_read_config(bus, bdf, PCI_HEADER_TYPE,
|
|
|
|
&header_type, PCI_SIZE_8);
|
|
|
|
|
2015-07-18 16:20:04 +00:00
|
|
|
if (!PCI_FUNC(bdf))
|
2015-03-05 19:25:25 +00:00
|
|
|
found_multi = header_type & 0x80;
|
|
|
|
|
2019-09-25 14:56:12 +00:00
|
|
|
debug("%s: bus %d/%s: found device %x, function %d", __func__,
|
2020-12-17 04:20:07 +00:00
|
|
|
dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
|
2015-07-18 16:20:04 +00:00
|
|
|
pci_bus_read_config(bus, bdf, PCI_DEVICE_ID, &device,
|
2015-03-05 19:25:25 +00:00
|
|
|
PCI_SIZE_16);
|
2015-07-18 16:20:04 +00:00
|
|
|
pci_bus_read_config(bus, bdf, PCI_CLASS_REVISION, &class,
|
2015-07-06 22:47:44 +00:00
|
|
|
PCI_SIZE_32);
|
|
|
|
class >>= 8;
|
2015-03-05 19:25:25 +00:00
|
|
|
|
|
|
|
/* Find this device in the device tree */
|
2015-07-18 16:20:04 +00:00
|
|
|
ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);
|
2019-09-25 14:56:12 +00:00
|
|
|
debug(": find ret=%d\n", ret);
|
2015-03-05 19:25:25 +00:00
|
|
|
|
2015-11-29 20:18:09 +00:00
|
|
|
/* If nothing in the device tree, bind a device */
|
2015-03-05 19:25:25 +00:00
|
|
|
if (ret == -ENODEV) {
|
2015-07-06 22:47:44 +00:00
|
|
|
struct pci_device_id find_id;
|
|
|
|
ulong val;
|
|
|
|
|
|
|
|
memset(&find_id, '\0', sizeof(find_id));
|
|
|
|
find_id.vendor = vendor;
|
|
|
|
find_id.device = device;
|
|
|
|
find_id.class = class;
|
|
|
|
if ((header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL) {
|
2015-07-18 16:20:04 +00:00
|
|
|
pci_bus_read_config(bus, bdf,
|
2015-07-06 22:47:44 +00:00
|
|
|
PCI_SUBSYSTEM_VENDOR_ID,
|
|
|
|
&val, PCI_SIZE_32);
|
|
|
|
find_id.subvendor = val & 0xffff;
|
|
|
|
find_id.subdevice = val >> 16;
|
|
|
|
}
|
2015-07-18 16:20:04 +00:00
|
|
|
ret = pci_find_and_bind_driver(bus, &find_id, bdf,
|
2015-07-06 22:47:44 +00:00
|
|
|
&dev);
|
2015-03-05 19:25:25 +00:00
|
|
|
}
|
2015-09-08 23:52:49 +00:00
|
|
|
if (ret == -EPERM)
|
|
|
|
continue;
|
|
|
|
else if (ret)
|
2015-03-05 19:25:25 +00:00
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Update the platform data */
|
2020-12-03 23:55:18 +00:00
|
|
|
pplat = dev_get_parent_plat(dev);
|
2015-09-08 23:52:49 +00:00
|
|
|
pplat->devfn = PCI_MASK_BUS(bdf);
|
|
|
|
pplat->vendor = vendor;
|
|
|
|
pplat->device = device;
|
|
|
|
pplat->class = class;
|
2019-10-24 01:40:36 +00:00
|
|
|
|
|
|
|
if (IS_ENABLED(CONFIG_PCI_ARID)) {
|
|
|
|
ari_off = dm_pci_find_ext_capability(dev,
|
|
|
|
PCI_EXT_CAP_ID_ARI);
|
|
|
|
if (ari_off) {
|
|
|
|
u16 ari_cap;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read Next Function number in ARI Cap
|
|
|
|
* Register
|
|
|
|
*/
|
|
|
|
dm_pci_read_config16(dev, ari_off + 4,
|
|
|
|
&ari_cap);
|
|
|
|
/*
|
|
|
|
* Update next scan on this function number,
|
|
|
|
* subtract 1 in BDF to satisfy loop increment.
|
|
|
|
*/
|
|
|
|
if (ari_cap & 0xff00) {
|
|
|
|
bdf = PCI_BDF(PCI_BUS(bdf),
|
|
|
|
PCI_DEV(ari_cap),
|
|
|
|
PCI_FUNC(ari_cap));
|
|
|
|
bdf = bdf - 0x100;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-04-16 21:53:47 +00:00
|
|
|
|
|
|
|
board_pci_fixup_dev(bus, dev);
|
2015-03-05 19:25:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-06-10 13:25:05 +00:00
|
|
|
/*
 * decode_regions() - Parse the "ranges" property into controller regions
 *
 * Decodes the PCI "ranges" property of @node into hose->regions. Entries of
 * the same type are merged into one region unless
 * CONFIG_PCI_REGION_MULTI_ENTRY is enabled. Afterwards one
 * PCI_REGION_SYS_MEMORY region is appended per populated DRAM bank so that
 * inbound (DMA) accesses to local memory can be translated.
 *
 * Fixes vs. previous version:
 *  - the type-merge scan used the *outer* loop counter 'i', so finding (or
 *    scanning for) a duplicate corrupted the walk over the ranges records;
 *    a separate index 'j' is used now
 *  - the calloc() result is checked before use, and the overflow-checked
 *    calloc(n, size) form is used instead of calloc(1, n * size)
 *
 * @hose: controller whose region table is filled in
 * @parent_node: parent DT node (supplies the parent address-cell count)
 * @node: DT node carrying the "ranges" property
 */
static void decode_regions(struct pci_controller *hose, ofnode parent_node,
			   ofnode node)
{
	int pci_addr_cells, addr_cells, size_cells;
	int cells_per_record;
	struct bd_info *bd;
	const u32 *prop;
	int max_regions;
	int len;
	int i;

	prop = ofnode_get_property(node, "ranges", &len);
	if (!prop) {
		debug("%s: Cannot decode regions\n", __func__);
		return;
	}

	pci_addr_cells = ofnode_read_simple_addr_cells(node);
	addr_cells = ofnode_read_simple_addr_cells(parent_node);
	size_cells = ofnode_read_simple_size_cells(node);

	/* PCI addresses are always 3-cells */
	len /= sizeof(u32);
	cells_per_record = pci_addr_cells + addr_cells + size_cells;
	hose->region_count = 0;
	debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
	      cells_per_record);

	/* Dynamically allocate the regions array */
	max_regions = len / cells_per_record + CONFIG_NR_DRAM_BANKS;
	hose->regions = calloc(max_regions, sizeof(struct pci_region));
	if (!hose->regions) {
		debug("%s: Cannot allocate %d regions\n", __func__,
		      max_regions);
		return;
	}

	for (i = 0; i < max_regions; i++, len -= cells_per_record) {
		u64 pci_addr, addr, size;
		int space_code;
		u32 flags;
		int type;
		int pos;

		if (len < cells_per_record)
			break;
		flags = fdt32_to_cpu(prop[0]);
		space_code = (flags >> 24) & 3;
		pci_addr = fdtdec_get_number(prop + 1, 2);
		prop += pci_addr_cells;
		addr = fdtdec_get_number(prop, addr_cells);
		prop += addr_cells;
		size = fdtdec_get_number(prop, size_cells);
		prop += size_cells;
		debug("%s: region %d, pci_addr=%llx, addr=%llx, size=%llx, space_code=%d\n",
		      __func__, hose->region_count, pci_addr, addr, size, space_code);
		if (space_code & 2) {
			type = flags & (1U << 30) ? PCI_REGION_PREFETCH :
				PCI_REGION_MEM;
		} else if (space_code & 1) {
			type = PCI_REGION_IO;
		} else {
			continue;
		}

		if (!IS_ENABLED(CONFIG_SYS_PCI_64BIT) &&
		    type == PCI_REGION_MEM && upper_32_bits(pci_addr)) {
			debug(" - beyond the 32-bit boundary, ignoring\n");
			continue;
		}

		pos = -1;
		if (!IS_ENABLED(CONFIG_PCI_REGION_MULTI_ENTRY)) {
			int j;

			/*
			 * Merge into an existing region of the same type.
			 * Must not reuse 'i' here: it indexes the outer walk
			 * over the ranges records.
			 */
			for (j = 0; j < hose->region_count; j++) {
				if (hose->regions[j].flags == type)
					pos = j;
			}
		}

		if (pos == -1)
			pos = hose->region_count++;
		debug(" - type=%d, pos=%d\n", type, pos);
		pci_set_region(hose->regions + pos, pci_addr, addr, size, type);
	}

	/* Add a region for our local memory */
	bd = gd->bd;
	if (!bd)
		return;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; ++i) {
		if (bd->bi_dram[i].size) {
			phys_addr_t start = bd->bi_dram[i].start;

			if (IS_ENABLED(CONFIG_PCI_MAP_SYSTEM_MEMORY))
				start = virt_to_phys((void *)(uintptr_t)bd->bi_dram[i].start);

			pci_set_region(hose->regions + hose->region_count++,
				       start, start, bd->bi_dram[i].size,
				       PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);
		}
	}

	return;
}
|
|
|
|
|
|
|
|
static int pci_uclass_pre_probe(struct udevice *bus)
|
|
|
|
{
|
|
|
|
struct pci_controller *hose;
|
2020-12-17 04:20:18 +00:00
|
|
|
struct uclass *uc;
|
|
|
|
int ret;
|
2015-03-05 19:25:25 +00:00
|
|
|
|
2020-12-17 04:20:07 +00:00
|
|
|
debug("%s, bus=%d/%s, parent=%s\n", __func__, dev_seq(bus), bus->name,
|
2015-03-05 19:25:25 +00:00
|
|
|
bus->parent->name);
|
2020-12-23 02:30:28 +00:00
|
|
|
hose = dev_get_uclass_priv(bus);
|
2015-03-05 19:25:25 +00:00
|
|
|
|
2020-12-17 04:20:18 +00:00
|
|
|
/*
|
|
|
|
* Set the sequence number, if device_bind() doesn't. We want control
|
|
|
|
* of this so that numbers are allocated as devices are probed. That
|
|
|
|
* ensures that sub-bus numbered is correct (sub-buses must get numbers
|
|
|
|
* higher than their parents)
|
|
|
|
*/
|
|
|
|
if (dev_seq(bus) == -1) {
|
|
|
|
ret = uclass_get(UCLASS_PCI, &uc);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
2020-12-19 17:40:09 +00:00
|
|
|
bus->seq_ = uclass_find_next_free_seq(uc);
|
2020-12-17 04:20:18 +00:00
|
|
|
}
|
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
/* For bridges, use the top-level PCI controller */
|
2016-09-08 06:47:32 +00:00
|
|
|
if (!device_is_on_pci_bus(bus)) {
|
2015-03-05 19:25:25 +00:00
|
|
|
hose->ctlr = bus;
|
2018-06-10 13:25:05 +00:00
|
|
|
decode_regions(hose, dev_ofnode(bus->parent), dev_ofnode(bus));
|
2015-03-05 19:25:25 +00:00
|
|
|
} else {
|
|
|
|
struct pci_controller *parent_hose;
|
|
|
|
|
|
|
|
parent_hose = dev_get_uclass_priv(bus->parent);
|
|
|
|
hose->ctlr = parent_hose->bus;
|
|
|
|
}
|
2020-12-17 04:20:18 +00:00
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
hose->bus = bus;
|
2020-12-17 04:20:07 +00:00
|
|
|
hose->first_busno = dev_seq(bus);
|
|
|
|
hose->last_busno = dev_seq(bus);
|
2020-12-19 17:40:13 +00:00
|
|
|
if (dev_has_ofnode(bus)) {
|
2020-05-05 04:25:25 +00:00
|
|
|
hose->skip_auto_config_until_reloc =
|
|
|
|
dev_read_bool(bus,
|
|
|
|
"u-boot,skip-auto-config-until-reloc");
|
|
|
|
}
|
2015-03-05 19:25:25 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pci_uclass_post_probe(struct udevice *bus)
|
|
|
|
{
|
2019-12-07 04:41:37 +00:00
|
|
|
struct pci_controller *hose = dev_get_uclass_priv(bus);
|
2015-03-05 19:25:25 +00:00
|
|
|
int ret;
|
|
|
|
|
2020-12-17 04:20:07 +00:00
|
|
|
debug("%s: probing bus %d\n", __func__, dev_seq(bus));
|
2015-03-05 19:25:25 +00:00
|
|
|
ret = pci_bind_bus_devices(bus);
|
|
|
|
if (ret)
|
2020-12-17 04:20:18 +00:00
|
|
|
return log_msg_ret("bind", ret);
|
2015-03-05 19:25:25 +00:00
|
|
|
|
2020-04-26 15:12:56 +00:00
|
|
|
if (CONFIG_IS_ENABLED(PCI_PNP) && ll_boot_init() &&
|
2019-12-07 04:41:37 +00:00
|
|
|
(!hose->skip_auto_config_until_reloc ||
|
|
|
|
(gd->flags & GD_FLG_RELOC))) {
|
|
|
|
ret = pci_auto_config_devices(bus);
|
|
|
|
if (ret < 0)
|
2020-12-17 04:20:18 +00:00
|
|
|
return log_msg_ret("cfg", ret);
|
2019-12-07 04:41:37 +00:00
|
|
|
}
|
2015-03-05 19:25:25 +00:00
|
|
|
|
2015-08-20 13:40:23 +00:00
|
|
|
#if defined(CONFIG_X86) && defined(CONFIG_HAVE_FSP)
|
|
|
|
/*
|
|
|
|
* Per Intel FSP specification, we should call FSP notify API to
|
|
|
|
* inform FSP that PCI enumeration has been done so that FSP will
|
|
|
|
* do any necessary initialization as required by the chipset's
|
|
|
|
* BIOS Writer's Guide (BWG).
|
|
|
|
*
|
|
|
|
* Unfortunately we have to put this call here as with driver model,
|
|
|
|
* the enumeration is all done on a lazy basis as needed, so until
|
|
|
|
* something is touched on PCI it won't happen.
|
|
|
|
*
|
|
|
|
* Note we only call this 1) after U-Boot is relocated, and 2)
|
|
|
|
* root bus has finished probing.
|
|
|
|
*/
|
2020-12-17 04:20:07 +00:00
|
|
|
if ((gd->flags & GD_FLG_RELOC) && dev_seq(bus) == 0 && ll_boot_init()) {
|
2015-08-20 13:40:23 +00:00
|
|
|
ret = fsp_init_phase_pci();
|
2015-09-08 23:52:47 +00:00
|
|
|
if (ret)
|
2020-12-17 04:20:18 +00:00
|
|
|
return log_msg_ret("fsp", ret);
|
2015-09-08 23:52:47 +00:00
|
|
|
}
|
2015-08-20 13:40:23 +00:00
|
|
|
#endif
|
|
|
|
|
2015-09-08 23:52:47 +00:00
|
|
|
return 0;
|
2015-03-05 19:25:25 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int pci_uclass_child_post_bind(struct udevice *dev)
|
|
|
|
{
|
2020-12-03 23:55:23 +00:00
|
|
|
struct pci_child_plat *pplat;
|
2015-03-05 19:25:25 +00:00
|
|
|
|
2020-12-19 17:40:13 +00:00
|
|
|
if (!dev_has_ofnode(dev))
|
2015-03-05 19:25:25 +00:00
|
|
|
return 0;
|
|
|
|
|
2020-12-03 23:55:18 +00:00
|
|
|
pplat = dev_get_parent_plat(dev);
|
2018-08-03 08:14:36 +00:00
|
|
|
|
|
|
|
/* Extract vendor id and device id if available */
|
|
|
|
ofnode_read_pci_vendev(dev_ofnode(dev), &pplat->vendor, &pplat->device);
|
|
|
|
|
|
|
|
/* Extract the devfn from fdt_pci_addr */
|
2019-01-25 10:52:42 +00:00
|
|
|
pplat->devfn = pci_get_devfn(dev);
|
2015-03-05 19:25:25 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-01-27 15:49:37 +00:00
|
|
|
static int pci_bridge_read_config(const struct udevice *bus, pci_dev_t bdf,
|
2015-07-18 16:20:04 +00:00
|
|
|
uint offset, ulong *valuep,
|
|
|
|
enum pci_size_t size)
|
2015-03-05 19:25:25 +00:00
|
|
|
{
|
2020-12-23 02:30:28 +00:00
|
|
|
struct pci_controller *hose = dev_get_uclass_priv(bus);
|
2015-03-05 19:25:25 +00:00
|
|
|
|
|
|
|
return pci_bus_read_config(hose->ctlr, bdf, offset, valuep, size);
|
|
|
|
}
|
|
|
|
|
2015-07-18 16:20:04 +00:00
|
|
|
static int pci_bridge_write_config(struct udevice *bus, pci_dev_t bdf,
|
|
|
|
uint offset, ulong value,
|
|
|
|
enum pci_size_t size)
|
2015-03-05 19:25:25 +00:00
|
|
|
{
|
2020-12-23 02:30:28 +00:00
|
|
|
struct pci_controller *hose = dev_get_uclass_priv(bus);
|
2015-03-05 19:25:25 +00:00
|
|
|
|
|
|
|
return pci_bus_write_config(hose->ctlr, bdf, offset, value, size);
|
|
|
|
}
|
|
|
|
|
2015-08-10 13:05:04 +00:00
|
|
|
/*
 * skip_to_next_device() - Find the first PCI device from @bus onwards
 *
 * Returns in @devp the first child of @bus, or of any subsequent bus in the
 * uclass if @bus has none. *devp is left untouched when no device exists.
 *
 * Return: 0 on success (including "no device found"), -ve on error
 */
static int skip_to_next_device(struct udevice *bus, struct udevice **devp)
{
	int ret;

	/*
	 * Scan through all the PCI controllers. On x86 there will only be one
	 * but that is not necessarily true on other hardware.
	 */
	do {
		struct udevice *child;

		device_find_first_child(bus, &child);
		if (child) {
			*devp = child;
			return 0;
		}
		ret = uclass_next_device(&bus);
		if (ret)
			return ret;
	} while (bus);

	return 0;
}
|
|
|
|
|
|
|
|
int pci_find_next_device(struct udevice **devp)
|
|
|
|
{
|
|
|
|
struct udevice *child = *devp;
|
|
|
|
struct udevice *bus = child->parent;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* First try all the siblings */
|
|
|
|
*devp = NULL;
|
|
|
|
while (child) {
|
|
|
|
device_find_next_child(&child);
|
|
|
|
if (child) {
|
|
|
|
*devp = child;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We ran out of siblings. Try the next bus */
|
|
|
|
ret = uclass_next_device(&bus);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
return bus ? skip_to_next_device(bus, devp) : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int pci_find_first_device(struct udevice **devp)
|
|
|
|
{
|
|
|
|
struct udevice *bus;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
*devp = NULL;
|
|
|
|
ret = uclass_first_device(UCLASS_PCI, &bus);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
return skip_to_next_device(bus, devp);
|
|
|
|
}
|
|
|
|
|
2015-11-20 03:26:59 +00:00
|
|
|
/*
 * pci_conv_32_to_size() - Extract an 8/16-bit field from a 32-bit read
 *
 * Picks the byte or half-word selected by @offset out of the 32-bit
 * @value; 32-bit requests are returned unchanged.
 */
ulong pci_conv_32_to_size(ulong value, uint offset, enum pci_size_t size)
{
	if (size == PCI_SIZE_8)
		return (value >> ((offset & 3) * 8)) & 0xff;
	if (size == PCI_SIZE_16)
		return (value >> ((offset & 2) * 8)) & 0xffff;

	return value;
}
|
|
|
|
|
|
|
|
/*
 * pci_conv_size_to_32() - Merge an 8/16-bit write into a 32-bit word
 *
 * Replaces the byte or half-word selected by @offset within @old with
 * @value; 32-bit writes simply return @value.
 */
ulong pci_conv_size_to_32(ulong old, ulong value, uint offset,
			  enum pci_size_t size)
{
	uint off_mask, val_mask;
	uint shift;
	ulong ldata, mask;

	if (size == PCI_SIZE_8) {
		off_mask = 3;
		val_mask = 0xff;
	} else if (size == PCI_SIZE_16) {
		off_mask = 2;
		val_mask = 0xffff;
	} else {
		return value;
	}

	shift = (offset & off_mask) * 8;
	ldata = (value & val_mask) << shift;
	mask = val_mask << shift;

	return (old & ~mask) | ldata;
}
|
|
|
|
|
2020-05-12 07:59:49 +00:00
|
|
|
int pci_get_dma_regions(struct udevice *dev, struct pci_region *memp, int index)
|
|
|
|
{
|
|
|
|
int pci_addr_cells, addr_cells, size_cells;
|
|
|
|
int cells_per_record;
|
|
|
|
const u32 *prop;
|
|
|
|
int len;
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
prop = ofnode_get_property(dev_ofnode(dev), "dma-ranges", &len);
|
|
|
|
if (!prop) {
|
|
|
|
log_err("PCI: Device '%s': Cannot decode dma-ranges\n",
|
|
|
|
dev->name);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
pci_addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev));
|
|
|
|
addr_cells = ofnode_read_simple_addr_cells(dev_ofnode(dev->parent));
|
|
|
|
size_cells = ofnode_read_simple_size_cells(dev_ofnode(dev));
|
|
|
|
|
|
|
|
/* PCI addresses are always 3-cells */
|
|
|
|
len /= sizeof(u32);
|
|
|
|
cells_per_record = pci_addr_cells + addr_cells + size_cells;
|
|
|
|
debug("%s: len=%d, cells_per_record=%d\n", __func__, len,
|
|
|
|
cells_per_record);
|
|
|
|
|
|
|
|
while (len) {
|
|
|
|
memp->bus_start = fdtdec_get_number(prop + 1, 2);
|
|
|
|
prop += pci_addr_cells;
|
|
|
|
memp->phys_start = fdtdec_get_number(prop, addr_cells);
|
|
|
|
prop += addr_cells;
|
|
|
|
memp->size = fdtdec_get_number(prop, size_cells);
|
|
|
|
prop += size_cells;
|
|
|
|
|
|
|
|
if (i == index)
|
|
|
|
return 0;
|
|
|
|
i++;
|
|
|
|
len -= cells_per_record;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-11-20 03:27:01 +00:00
|
|
|
int pci_get_regions(struct udevice *dev, struct pci_region **iop,
|
|
|
|
struct pci_region **memp, struct pci_region **prefp)
|
|
|
|
{
|
|
|
|
struct udevice *bus = pci_get_controller(dev);
|
|
|
|
struct pci_controller *hose = dev_get_uclass_priv(bus);
|
|
|
|
int i;
|
|
|
|
|
|
|
|
*iop = NULL;
|
|
|
|
*memp = NULL;
|
|
|
|
*prefp = NULL;
|
|
|
|
for (i = 0; i < hose->region_count; i++) {
|
|
|
|
switch (hose->regions[i].flags) {
|
|
|
|
case PCI_REGION_IO:
|
|
|
|
if (!*iop || (*iop)->size < hose->regions[i].size)
|
|
|
|
*iop = hose->regions + i;
|
|
|
|
break;
|
|
|
|
case PCI_REGION_MEM:
|
|
|
|
if (!*memp || (*memp)->size < hose->regions[i].size)
|
|
|
|
*memp = hose->regions + i;
|
|
|
|
break;
|
|
|
|
case (PCI_REGION_MEM | PCI_REGION_PREFETCH):
|
|
|
|
if (!*prefp || (*prefp)->size < hose->regions[i].size)
|
|
|
|
*prefp = hose->regions + i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (*iop != NULL) + (*memp != NULL) + (*prefp != NULL);
|
|
|
|
}
|
|
|
|
|
2020-01-27 15:49:38 +00:00
|
|
|
u32 dm_pci_read_bar32(const struct udevice *dev, int barnum)
|
2015-11-29 20:17:53 +00:00
|
|
|
{
|
|
|
|
u32 addr;
|
|
|
|
int bar;
|
|
|
|
|
|
|
|
bar = PCI_BASE_ADDRESS_0 + barnum * 4;
|
|
|
|
dm_pci_read_config32(dev, bar, &addr);
|
2020-04-09 16:27:36 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we get an invalid address, return this so that comparisons with
|
|
|
|
* FDT_ADDR_T_NONE work correctly
|
|
|
|
*/
|
|
|
|
if (addr == 0xffffffff)
|
|
|
|
return addr;
|
|
|
|
else if (addr & PCI_BASE_ADDRESS_SPACE_IO)
|
2015-11-29 20:17:53 +00:00
|
|
|
return addr & PCI_BASE_ADDRESS_IO_MASK;
|
|
|
|
else
|
|
|
|
return addr & PCI_BASE_ADDRESS_MEM_MASK;
|
|
|
|
}
|
|
|
|
|
2016-01-19 03:19:15 +00:00
|
|
|
/*
 * dm_pci_write_bar32() - Write a 32-bit value to BAR @barnum of @dev
 */
void dm_pci_write_bar32(struct udevice *dev, int barnum, u32 addr)
{
	dm_pci_write_config32(dev, PCI_BASE_ADDRESS_0 + barnum * 4, addr);
}
|
|
|
|
|
2015-11-29 20:18:03 +00:00
|
|
|
static int _dm_pci_bus_to_phys(struct udevice *ctlr,
|
|
|
|
pci_addr_t bus_addr, unsigned long flags,
|
|
|
|
unsigned long skip_mask, phys_addr_t *pa)
|
|
|
|
{
|
|
|
|
struct pci_controller *hose = dev_get_uclass_priv(ctlr);
|
|
|
|
struct pci_region *res;
|
|
|
|
int i;
|
|
|
|
|
2018-06-10 13:25:06 +00:00
|
|
|
if (hose->region_count == 0) {
|
|
|
|
*pa = bus_addr;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-11-29 20:18:03 +00:00
|
|
|
for (i = 0; i < hose->region_count; i++) {
|
|
|
|
res = &hose->regions[i];
|
|
|
|
|
|
|
|
if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (res->flags & skip_mask)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (bus_addr >= res->bus_start &&
|
|
|
|
(bus_addr - res->bus_start) < res->size) {
|
|
|
|
*pa = (bus_addr - res->bus_start + res->phys_start);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * dm_pci_bus_to_phys() - Convert a PCI bus address to a CPU physical address
 *
 * For memory addresses a two-pass search is done, preferring regions that
 * are not marked PCI_REGION_SYS_MEMORY. Returns 0 (and warns) when no
 * region matches.
 */
phys_addr_t dm_pci_bus_to_phys(struct udevice *dev, pci_addr_t bus_addr,
			       unsigned long flags)
{
	/* The root controller has the region information */
	struct udevice *ctlr = pci_get_controller(dev);
	phys_addr_t phys_addr = 0;

	/* First pass for memory: skip system-memory regions */
	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM &&
	    !_dm_pci_bus_to_phys(ctlr, bus_addr, flags,
				 PCI_REGION_SYS_MEMORY, &phys_addr))
		return phys_addr;

	/* Second pass: accept any matching region */
	if (_dm_pci_bus_to_phys(ctlr, bus_addr, flags, 0, &phys_addr))
		puts("pci_hose_bus_to_phys: invalid physical address\n");

	return phys_addr;
}
|
|
|
|
|
2021-09-17 12:11:27 +00:00
|
|
|
static int _dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
|
|
|
|
unsigned long flags, unsigned long skip_mask,
|
|
|
|
pci_addr_t *ba)
|
2015-11-29 20:18:03 +00:00
|
|
|
{
|
|
|
|
struct pci_region *res;
|
|
|
|
struct udevice *ctlr;
|
|
|
|
pci_addr_t bus_addr;
|
|
|
|
int i;
|
|
|
|
struct pci_controller *hose;
|
|
|
|
|
|
|
|
/* The root controller has the region information */
|
|
|
|
ctlr = pci_get_controller(dev);
|
|
|
|
hose = dev_get_uclass_priv(ctlr);
|
|
|
|
|
2018-06-10 13:25:06 +00:00
|
|
|
if (hose->region_count == 0) {
|
|
|
|
*ba = phys_addr;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-11-29 20:18:03 +00:00
|
|
|
for (i = 0; i < hose->region_count; i++) {
|
|
|
|
res = &hose->regions[i];
|
|
|
|
|
|
|
|
if (((res->flags ^ flags) & PCI_REGION_TYPE) != 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (res->flags & skip_mask)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
bus_addr = phys_addr - res->phys_start + res->bus_start;
|
|
|
|
|
|
|
|
if (bus_addr >= res->bus_start &&
|
|
|
|
(bus_addr - res->bus_start) < res->size) {
|
|
|
|
*ba = bus_addr;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * dm_pci_phys_to_bus() - Convert a CPU physical address to a PCI bus address
 *
 * For memory addresses a two-pass search is done, preferring regions that
 * are not marked PCI_REGION_SYS_MEMORY. Returns 0 (and warns) when no
 * region matches.
 */
pci_addr_t dm_pci_phys_to_bus(struct udevice *dev, phys_addr_t phys_addr,
			      unsigned long flags)
{
	pci_addr_t bus_addr = 0;

	/* First pass for memory: skip system-memory regions */
	if ((flags & PCI_REGION_TYPE) == PCI_REGION_MEM &&
	    !_dm_pci_phys_to_bus(dev, phys_addr, flags,
				 PCI_REGION_SYS_MEMORY, &bus_addr))
		return bus_addr;

	/* Second pass: accept any matching region */
	if (_dm_pci_phys_to_bus(dev, phys_addr, flags, 0, &bus_addr))
		puts("pci_hose_phys_to_bus: invalid physical address\n");

	return bus_addr;
}
|
|
|
|
|
2019-10-19 23:34:16 +00:00
|
|
|
static phys_addr_t dm_pci_map_ea_virt(struct udevice *dev, int ea_off,
|
2020-12-03 23:55:23 +00:00
|
|
|
struct pci_child_plat *pdata)
|
2019-10-19 23:34:16 +00:00
|
|
|
{
|
|
|
|
phys_addr_t addr = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In the case of a Virtual Function device using BAR
|
|
|
|
* base and size, add offset for VFn BAR(1, 2, 3...n)
|
|
|
|
*/
|
|
|
|
if (pdata->is_virtfn) {
|
|
|
|
size_t sz;
|
|
|
|
u32 ea_entry;
|
|
|
|
|
|
|
|
/* MaxOffset, 1st DW */
|
|
|
|
dm_pci_read_config32(dev, ea_off + 8, &ea_entry);
|
|
|
|
sz = ea_entry & PCI_EA_FIELD_MASK;
|
|
|
|
/* Fill up lower 2 bits */
|
|
|
|
sz |= (~PCI_EA_FIELD_MASK);
|
|
|
|
|
|
|
|
if (ea_entry & PCI_EA_IS_64) {
|
|
|
|
/* MaxOffset 2nd DW */
|
|
|
|
dm_pci_read_config32(dev, ea_off + 16, &ea_entry);
|
|
|
|
sz |= ((u64)ea_entry) << 32;
|
|
|
|
}
|
|
|
|
|
|
|
|
addr = (pdata->virtid - 1) * (sz + 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
|
2019-06-07 08:24:23 +00:00
|
|
|
static void *dm_pci_map_ea_bar(struct udevice *dev, int bar, int flags,
|
2020-12-03 23:55:23 +00:00
|
|
|
int ea_off, struct pci_child_plat *pdata)
|
2019-06-07 08:24:23 +00:00
|
|
|
{
|
|
|
|
int ea_cnt, i, entry_size;
|
|
|
|
int bar_id = (bar - PCI_BASE_ADDRESS_0) >> 2;
|
|
|
|
u32 ea_entry;
|
|
|
|
phys_addr_t addr;
|
|
|
|
|
2019-10-19 23:34:16 +00:00
|
|
|
if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
|
|
|
|
/*
|
|
|
|
* In the case of a Virtual Function device, device is
|
|
|
|
* Physical function, so pdata will point to required VF
|
|
|
|
* specific data.
|
|
|
|
*/
|
|
|
|
if (pdata->is_virtfn)
|
|
|
|
bar_id += PCI_EA_BEI_VF_BAR0;
|
|
|
|
}
|
|
|
|
|
2019-06-07 08:24:23 +00:00
|
|
|
/* EA capability structure header */
|
|
|
|
dm_pci_read_config32(dev, ea_off, &ea_entry);
|
|
|
|
ea_cnt = (ea_entry >> 16) & PCI_EA_NUM_ENT_MASK;
|
|
|
|
ea_off += PCI_EA_FIRST_ENT;
|
|
|
|
|
|
|
|
for (i = 0; i < ea_cnt; i++, ea_off += entry_size) {
|
|
|
|
/* Entry header */
|
|
|
|
dm_pci_read_config32(dev, ea_off, &ea_entry);
|
|
|
|
entry_size = ((ea_entry & PCI_EA_ES) + 1) << 2;
|
|
|
|
|
|
|
|
if (((ea_entry & PCI_EA_BEI) >> 4) != bar_id)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Base address, 1st DW */
|
|
|
|
dm_pci_read_config32(dev, ea_off + 4, &ea_entry);
|
|
|
|
addr = ea_entry & PCI_EA_FIELD_MASK;
|
|
|
|
if (ea_entry & PCI_EA_IS_64) {
|
|
|
|
/* Base address, 2nd DW, skip over 4B MaxOffset */
|
|
|
|
dm_pci_read_config32(dev, ea_off + 12, &ea_entry);
|
|
|
|
addr |= ((u64)ea_entry) << 32;
|
|
|
|
}
|
|
|
|
|
2019-10-19 23:34:16 +00:00
|
|
|
if (IS_ENABLED(CONFIG_PCI_SRIOV))
|
|
|
|
addr += dm_pci_map_ea_virt(dev, ea_off, pdata);
|
|
|
|
|
2019-06-07 08:24:23 +00:00
|
|
|
/* size ignored for now */
|
2019-10-19 23:44:35 +00:00
|
|
|
return map_physmem(addr, 0, flags);
|
2019-06-07 08:24:23 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-11-29 20:18:03 +00:00
|
|
|
void *dm_pci_map_bar(struct udevice *dev, int bar, int flags)
|
|
|
|
{
|
2020-12-03 23:55:23 +00:00
|
|
|
struct pci_child_plat *pdata = dev_get_parent_plat(dev);
|
2019-10-19 23:34:16 +00:00
|
|
|
struct udevice *udev = dev;
|
2015-11-29 20:18:03 +00:00
|
|
|
pci_addr_t pci_bus_addr;
|
|
|
|
u32 bar_response;
|
2019-06-07 08:24:23 +00:00
|
|
|
int ea_off;
|
|
|
|
|
2019-10-19 23:34:16 +00:00
|
|
|
if (IS_ENABLED(CONFIG_PCI_SRIOV)) {
|
|
|
|
/*
|
|
|
|
* In case of Virtual Function devices, use PF udevice
|
|
|
|
* as EA capability is defined in Physical Function
|
|
|
|
*/
|
|
|
|
if (pdata->is_virtfn)
|
|
|
|
udev = pdata->pfdev;
|
|
|
|
}
|
|
|
|
|
2019-06-07 08:24:23 +00:00
|
|
|
/*
|
|
|
|
* if the function supports Enhanced Allocation use that instead of
|
|
|
|
* BARs
|
2019-10-19 23:34:16 +00:00
|
|
|
* Incase of virtual functions, pdata will help read VF BEI
|
|
|
|
* and EA entry size.
|
2019-06-07 08:24:23 +00:00
|
|
|
*/
|
2019-10-19 23:34:16 +00:00
|
|
|
ea_off = dm_pci_find_capability(udev, PCI_CAP_ID_EA);
|
2019-06-07 08:24:23 +00:00
|
|
|
if (ea_off)
|
2019-10-19 23:34:16 +00:00
|
|
|
return dm_pci_map_ea_bar(udev, bar, flags, ea_off, pdata);
|
2015-11-29 20:18:03 +00:00
|
|
|
|
|
|
|
/* read BAR address */
|
2019-10-19 23:34:16 +00:00
|
|
|
dm_pci_read_config32(udev, bar, &bar_response);
|
2015-11-29 20:18:03 +00:00
|
|
|
pci_bus_addr = (pci_addr_t)(bar_response & ~0xf);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Pass "0" as the length argument to pci_bus_to_virt. The arg
|
2019-10-19 23:44:35 +00:00
|
|
|
* isn't actually used on any platform because U-Boot assumes a static
|
2015-11-29 20:18:03 +00:00
|
|
|
* linear mapping. In the future, this could read the BAR size
|
|
|
|
* and pass that as the size if needed.
|
|
|
|
*/
|
2019-10-19 23:34:16 +00:00
|
|
|
return dm_pci_bus_to_virt(udev, pci_bus_addr, flags, 0, MAP_NOCACHE);
|
2015-11-29 20:18:03 +00:00
|
|
|
}
|
|
|
|
|
2018-10-15 09:21:21 +00:00
|
|
|
/*
 * _dm_pci_find_next_capability() - Walk the standard capability list
 *
 * Follows the capability chain starting at the "next" pointer stored at
 * config offset @pos, bounded by PCI_FIND_CAP_TTL to guard against loops.
 *
 * Return: config offset of capability @cap, or 0 when not found
 */
static int _dm_pci_find_next_capability(struct udevice *dev, u8 pos, int cap)
{
	u8 next;
	int ttl;

	dm_pci_read_config8(dev, pos, &next);

	for (ttl = PCI_FIND_CAP_TTL; ttl > 0; ttl--) {
		u16 ent;
		u8 id;

		if (next < PCI_STD_HEADER_SIZEOF)
			break;
		next &= ~3;	/* capability pointers are dword-aligned */
		dm_pci_read_config16(dev, next, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return next;
		next = ent >> 8;
	}

	return 0;
}
|
|
|
|
|
2018-10-15 09:21:21 +00:00
|
|
|
/*
 * dm_pci_find_next_capability() - Find @cap after the capability at @start
 *
 * Return: config offset of the capability, or 0 when not found
 */
int dm_pci_find_next_capability(struct udevice *dev, u8 start, int cap)
{
	u8 pos = start + PCI_CAP_LIST_NEXT;

	return _dm_pci_find_next_capability(dev, pos, cap);
}
|
|
|
|
|
|
|
|
int dm_pci_find_capability(struct udevice *dev, int cap)
|
|
|
|
{
|
|
|
|
u16 status;
|
|
|
|
u8 header_type;
|
|
|
|
u8 pos;
|
|
|
|
|
|
|
|
dm_pci_read_config16(dev, PCI_STATUS, &status);
|
|
|
|
if (!(status & PCI_STATUS_CAP_LIST))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
dm_pci_read_config8(dev, PCI_HEADER_TYPE, &header_type);
|
|
|
|
if ((header_type & 0x7f) == PCI_HEADER_TYPE_CARDBUS)
|
|
|
|
pos = PCI_CB_CAPABILITY_LIST;
|
|
|
|
else
|
|
|
|
pos = PCI_CAPABILITY_LIST;
|
|
|
|
|
|
|
|
return _dm_pci_find_next_capability(dev, pos, cap);
|
|
|
|
}
|
|
|
|
|
|
|
|
int dm_pci_find_next_ext_capability(struct udevice *dev, int start, int cap)
|
2018-08-03 08:14:52 +00:00
|
|
|
{
|
|
|
|
u32 header;
|
|
|
|
int ttl;
|
|
|
|
int pos = PCI_CFG_SPACE_SIZE;
|
|
|
|
|
|
|
|
/* minimum 8 bytes per capability */
|
|
|
|
ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
|
|
|
|
|
2018-10-15 09:21:21 +00:00
|
|
|
if (start)
|
|
|
|
pos = start;
|
|
|
|
|
2018-08-03 08:14:52 +00:00
|
|
|
dm_pci_read_config32(dev, pos, &header);
|
|
|
|
/*
|
|
|
|
* If we have no capabilities, this is indicated by cap ID,
|
|
|
|
* cap version and next pointer all being 0.
|
|
|
|
*/
|
|
|
|
if (header == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
while (ttl--) {
|
|
|
|
if (PCI_EXT_CAP_ID(header) == cap)
|
|
|
|
return pos;
|
|
|
|
|
|
|
|
pos = PCI_EXT_CAP_NEXT(header);
|
|
|
|
if (pos < PCI_CFG_SPACE_SIZE)
|
|
|
|
break;
|
|
|
|
|
|
|
|
dm_pci_read_config32(dev, pos, &header);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-10-15 09:21:21 +00:00
|
|
|
/*
 * dm_pci_find_ext_capability() - Find an extended capability from the start
 *
 * Return: config offset of capability @cap, or 0 when not found
 */
int dm_pci_find_ext_capability(struct udevice *dev, int cap)
{
	return dm_pci_find_next_ext_capability(dev, 0, cap);
}
|
|
|
|
|
2019-06-07 08:24:25 +00:00
|
|
|
int dm_pci_flr(struct udevice *dev)
|
|
|
|
{
|
|
|
|
int pcie_off;
|
|
|
|
u32 cap;
|
|
|
|
|
|
|
|
/* look for PCI Express Capability */
|
|
|
|
pcie_off = dm_pci_find_capability(dev, PCI_CAP_ID_EXP);
|
|
|
|
if (!pcie_off)
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
/* check FLR capability */
|
|
|
|
dm_pci_read_config32(dev, pcie_off + PCI_EXP_DEVCAP, &cap);
|
|
|
|
if (!(cap & PCI_EXP_DEVCAP_FLR))
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
dm_pci_clrset_config16(dev, pcie_off + PCI_EXP_DEVCTL, 0,
|
|
|
|
PCI_EXP_DEVCTL_BCR_FLR);
|
|
|
|
|
|
|
|
/* wait 100ms, per PCI spec */
|
|
|
|
mdelay(100);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-10-19 23:07:20 +00:00
|
|
|
#if defined(CONFIG_PCI_SRIOV)
|
|
|
|
int pci_sriov_init(struct udevice *pdev, int vf_en)
|
|
|
|
{
|
|
|
|
u16 vendor, device;
|
|
|
|
struct udevice *bus;
|
|
|
|
struct udevice *dev;
|
|
|
|
pci_dev_t bdf;
|
|
|
|
u16 ctrl;
|
|
|
|
u16 num_vfs;
|
|
|
|
u16 total_vf;
|
|
|
|
u16 vf_offset;
|
|
|
|
u16 vf_stride;
|
|
|
|
int vf, ret;
|
|
|
|
int pos;
|
|
|
|
|
|
|
|
pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
|
|
|
|
if (!pos) {
|
|
|
|
debug("Error: SRIOV capability not found\n");
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
dm_pci_read_config16(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
|
|
|
|
|
|
|
|
dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
|
|
|
|
if (vf_en > total_vf)
|
|
|
|
vf_en = total_vf;
|
|
|
|
dm_pci_write_config16(pdev, pos + PCI_SRIOV_NUM_VF, vf_en);
|
|
|
|
|
|
|
|
ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
|
|
|
|
dm_pci_write_config16(pdev, pos + PCI_SRIOV_CTRL, ctrl);
|
|
|
|
|
|
|
|
dm_pci_read_config16(pdev, pos + PCI_SRIOV_NUM_VF, &num_vfs);
|
|
|
|
if (num_vfs > vf_en)
|
|
|
|
num_vfs = vf_en;
|
|
|
|
|
|
|
|
dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_OFFSET, &vf_offset);
|
|
|
|
dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_STRIDE, &vf_stride);
|
|
|
|
|
|
|
|
dm_pci_read_config16(pdev, PCI_VENDOR_ID, &vendor);
|
|
|
|
dm_pci_read_config16(pdev, pos + PCI_SRIOV_VF_DID, &device);
|
|
|
|
|
|
|
|
bdf = dm_pci_get_bdf(pdev);
|
|
|
|
|
|
|
|
pci_get_bus(PCI_BUS(bdf), &bus);
|
|
|
|
|
|
|
|
if (!bus)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
bdf += PCI_BDF(0, 0, vf_offset);
|
|
|
|
|
|
|
|
for (vf = 0; vf < num_vfs; vf++) {
|
2020-12-03 23:55:23 +00:00
|
|
|
struct pci_child_plat *pplat;
|
2019-10-19 23:07:20 +00:00
|
|
|
ulong class;
|
|
|
|
|
|
|
|
pci_bus_read_config(bus, bdf, PCI_CLASS_DEVICE,
|
|
|
|
&class, PCI_SIZE_16);
|
|
|
|
|
|
|
|
debug("%s: bus %d/%s: found VF %x:%x\n", __func__,
|
2020-12-17 04:20:07 +00:00
|
|
|
dev_seq(bus), bus->name, PCI_DEV(bdf), PCI_FUNC(bdf));
|
2019-10-19 23:07:20 +00:00
|
|
|
|
|
|
|
/* Find this device in the device tree */
|
|
|
|
ret = pci_bus_find_devfn(bus, PCI_MASK_BUS(bdf), &dev);
|
|
|
|
|
|
|
|
if (ret == -ENODEV) {
|
|
|
|
struct pci_device_id find_id;
|
|
|
|
|
|
|
|
memset(&find_id, '\0', sizeof(find_id));
|
|
|
|
find_id.vendor = vendor;
|
|
|
|
find_id.device = device;
|
|
|
|
find_id.class = class;
|
|
|
|
|
|
|
|
ret = pci_find_and_bind_driver(bus, &find_id,
|
|
|
|
bdf, &dev);
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update the platform data */
|
2020-12-03 23:55:18 +00:00
|
|
|
pplat = dev_get_parent_plat(dev);
|
2019-10-19 23:07:20 +00:00
|
|
|
pplat->devfn = PCI_MASK_BUS(bdf);
|
|
|
|
pplat->vendor = vendor;
|
|
|
|
pplat->device = device;
|
|
|
|
pplat->class = class;
|
|
|
|
pplat->is_virtfn = true;
|
|
|
|
pplat->pfdev = pdev;
|
|
|
|
pplat->virtid = vf * vf_stride + vf_offset;
|
|
|
|
|
|
|
|
debug("%s: bus %d/%s: found VF %x:%x %x:%x class %lx id %x\n",
|
2020-12-17 04:20:07 +00:00
|
|
|
__func__, dev_seq(dev), dev->name, PCI_DEV(bdf),
|
2019-10-19 23:07:20 +00:00
|
|
|
PCI_FUNC(bdf), vendor, device, class, pplat->virtid);
|
|
|
|
bdf += PCI_BDF(0, 0, vf_stride);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int pci_sriov_get_totalvfs(struct udevice *pdev)
|
|
|
|
{
|
|
|
|
u16 total_vf;
|
|
|
|
int pos;
|
|
|
|
|
|
|
|
pos = dm_pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
|
|
|
|
if (!pos) {
|
|
|
|
debug("Error: SRIOV capability not found\n");
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
|
|
|
dm_pci_read_config16(pdev, pos + PCI_SRIOV_TOTAL_VF, &total_vf);
|
|
|
|
|
|
|
|
return total_vf;
|
|
|
|
}
|
|
|
|
#endif /* SRIOV */
|
|
|
|
|
2015-03-05 19:25:25 +00:00
|
|
|
/* Uclass for PCI buses/controllers themselves */
UCLASS_DRIVER(pci) = {
	.id		= UCLASS_PCI,
	.name		= "pci",
	/* Bus numbers may come from DT aliases; no automatic sequencing */
	.flags		= DM_UC_FLAG_SEQ_ALIAS | DM_UC_FLAG_NO_AUTO_SEQ,
	.post_bind	= dm_scan_fdt_dev,
	.pre_probe	= pci_uclass_pre_probe,
	.post_probe	= pci_uclass_post_probe,
	.child_post_bind = pci_uclass_child_post_bind,
	/* Per-controller state */
	.per_device_auto	= sizeof(struct pci_controller),
	/* Per-child (PCI device) platform data: BDF, IDs, VF info */
	.per_child_plat_auto	= sizeof(struct pci_child_plat),
};
|
|
|
|
|
|
|
|
/* Config-space access ops used by the generic "pci-bridge" driver below */
static const struct dm_pci_ops pci_bridge_ops = {
	.read_config	= pci_bridge_read_config,
	.write_config	= pci_bridge_write_config,
};
|
|
|
|
|
|
|
|
/* Device tree compatible strings matched by the generic bridge driver */
static const struct udevice_id pci_bridge_ids[] = {
	{ .compatible = "pci-bridge" },
	{ }	/* sentinel */
};
|
|
|
|
|
|
|
|
/* Generic driver bound to "pci-bridge" nodes in the device tree */
U_BOOT_DRIVER(pci_bridge_drv) = {
	.name		= "pci_bridge_drv",
	.id		= UCLASS_PCI,
	.of_match	= pci_bridge_ids,
	.ops		= &pci_bridge_ops,
};
|
|
|
|
|
|
|
|
/* Uclass for PCI devices that have no more specific uclass of their own */
UCLASS_DRIVER(pci_generic) = {
	.id		= UCLASS_PCI_GENERIC,
	.name		= "pci_generic",
};
|
|
|
|
|
|
|
|
/* Device tree compatible strings matched by the generic PCI device driver */
static const struct udevice_id pci_generic_ids[] = {
	{ .compatible = "pci-generic" },
	{ }	/* sentinel */
};
|
|
|
|
|
|
|
|
/* Generic driver bound to "pci-generic" nodes in the device tree */
U_BOOT_DRIVER(pci_generic_drv) = {
	.name		= "pci_generic_drv",
	.id		= UCLASS_PCI_GENERIC,
	.of_match	= pci_generic_ids,
};
|
Implement "pci enum" command for CONFIG_DM_PCI
With CONFIG_DM_PCI enabled, PCI buses are not enumerated at boot, as they
are without that config option enabled. No command exists to enumerate the
PCI buses. Hence, unless some board-specific code causes PCI enumeration,
PCI-based Ethernet devices are not detected, and network access is not
available.
This patch implements "pci enum" in the CONFIG_DM_PCI case, thus giving a
mechanism whereby PCI can be enumerated.
do_pci()'s handling of case 'e' is moved into a single location before the
dev variable is assigned, in order to skip calculation of dev. The enum
sub-command doesn't need the dev value, and skipping its calculation
avoids an irrelevant error being printed.
Using a command to initialize PCI like this has a disadvantage relative to
enumerating PCI at boot. In particular, Ethernet devices are not probed
during PCI enumeration, but only when used. This defers setting variables
such as ethact, ethaddr, etc. until the first network-related command is
executed. Hopefully this will not cause further issues. Perhaps in the
long term, we need a "net start/enum" command too?
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
2016-01-26 18:10:11 +00:00
|
|
|
|
2020-11-28 08:43:12 +00:00
|
|
|
int pci_init(void)
|
Implement "pci enum" command for CONFIG_DM_PCI
With CONFIG_DM_PCI enabled, PCI buses are not enumerated at boot, as they
are without that config option enabled. No command exists to enumerate the
PCI buses. Hence, unless some board-specific code causes PCI enumeration,
PCI-based Ethernet devices are not detected, and network access is not
available.
This patch implements "pci enum" in the CONFIG_DM_PCI case, thus giving a
mechanism whereby PCI can be enumerated.
do_pci()'s handling of case 'e' is moved into a single location before the
dev variable is assigned, in order to skip calculation of dev. The enum
sub-command doesn't need the dev value, and skipping its calculation
avoids an irrelevant error being printed.
Using a command to initialize PCI like this has a disadvantage relative to
enumerating PCI at boot. In particular, Ethernet devices are not probed
during PCI enumeration, but only when used. This defers setting variables
such as ethact, ethaddr, etc. until the first network-related command is
executed. Hopefully this will not cause further issues. Perhaps in the
long term, we need a "net start/enum" command too?
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
2016-01-26 18:10:11 +00:00
|
|
|
{
|
|
|
|
struct udevice *bus;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Enumerate all known controller devices. Enumeration has the side-
|
|
|
|
* effect of probing them, so PCIe devices will be enumerated too.
|
|
|
|
*/
|
2019-05-21 10:04:31 +00:00
|
|
|
for (uclass_first_device_check(UCLASS_PCI, &bus);
|
Implement "pci enum" command for CONFIG_DM_PCI
With CONFIG_DM_PCI enabled, PCI buses are not enumerated at boot, as they
are without that config option enabled. No command exists to enumerate the
PCI buses. Hence, unless some board-specific code causes PCI enumeration,
PCI-based Ethernet devices are not detected, and network access is not
available.
This patch implements "pci enum" in the CONFIG_DM_PCI case, thus giving a
mechanism whereby PCI can be enumerated.
do_pci()'s handling of case 'e' is moved into a single location before the
dev variable is assigned, in order to skip calculation of dev. The enum
sub-command doesn't need the dev value, and skipping its calculation
avoids an irrelevant error being printed.
Using a command to initialize PCI like this has a disadvantage relative to
enumerating PCI at boot. In particular, Ethernet devices are not probed
during PCI enumeration, but only when used. This defers setting variables
such as ethact, ethaddr, etc. until the first network-related command is
executed. Hopefully this will not cause further issues. Perhaps in the
long term, we need a "net start/enum" command too?
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
2016-01-26 18:10:11 +00:00
|
|
|
bus;
|
2019-05-21 10:04:31 +00:00
|
|
|
uclass_next_device_check(&bus)) {
|
Implement "pci enum" command for CONFIG_DM_PCI
With CONFIG_DM_PCI enabled, PCI buses are not enumerated at boot, as they
are without that config option enabled. No command exists to enumerate the
PCI buses. Hence, unless some board-specific code causes PCI enumeration,
PCI-based Ethernet devices are not detected, and network access is not
available.
This patch implements "pci enum" in the CONFIG_DM_PCI case, thus giving a
mechanism whereby PCI can be enumerated.
do_pci()'s handling of case 'e' is moved into a single location before the
dev variable is assigned, in order to skip calculation of dev. The enum
sub-command doesn't need the dev value, and skipping its calculation
avoids an irrelevant error being printed.
Using a command to initialize PCI like this has a disadvantage relative to
enumerating PCI at boot. In particular, Ethernet devices are not probed
during PCI enumeration, but only when used. This defers setting variables
such as ethact, ethaddr, etc. until the first network-related command is
executed. Hopefully this will not cause further issues. Perhaps in the
long term, we need a "net start/enum" command too?
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
2016-01-26 18:10:11 +00:00
|
|
|
;
|
|
|
|
}
|
2020-11-28 08:43:12 +00:00
|
|
|
|
|
|
|
return 0;
|
Implement "pci enum" command for CONFIG_DM_PCI
With CONFIG_DM_PCI enabled, PCI buses are not enumerated at boot, as they
are without that config option enabled. No command exists to enumerate the
PCI buses. Hence, unless some board-specific code causes PCI enumeration,
PCI-based Ethernet devices are not detected, and network access is not
available.
This patch implements "pci enum" in the CONFIG_DM_PCI case, thus giving a
mechanism whereby PCI can be enumerated.
do_pci()'s handling of case 'e' is moved into a single location before the
dev variable is assigned, in order to skip calculation of dev. The enum
sub-command doesn't need the dev value, and skipping its calculation
avoids an irrelevant error being printed.
Using a command to initialize PCI like this has a disadvantage relative to
enumerating PCI at boot. In particular, Ethernet devices are not probed
during PCI enumeration, but only when used. This defers setting variables
such as ethact, ethaddr, etc. until the first network-related command is
executed. Hopefully this will not cause further issues. Perhaps in the
long term, we need a "net start/enum" command too?
Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
2016-01-26 18:10:11 +00:00
|
|
|
}
|