// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 */

#define LOG_CATEGORY UCLASS_RESET

#include <common.h>
#include <dm.h>
#include <fdtdec.h>
#include <log.h>
#include <malloc.h>
#include <reset.h>
#include <reset-uclass.h>
#include <dm/devres.h>
#include <dm/lists.h>
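
/*
 * Illustrative consumer usage (the device tree snippet and the "ahb" name
 * below are hypothetical, shown only to demonstrate the API in this file):
 *
 *	device {
 *		resets = <&rstc 5>;
 *		reset-names = "ahb";
 *	};
 *
 *	struct reset_ctl rst;
 *	int ret;
 *
 *	ret = reset_get_by_name(dev, "ahb", &rst);
 *	if (!ret)
 *		ret = reset_deassert(&rst);
 */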

static inline struct reset_ops *reset_dev_ops(struct udevice *dev)
{
	return (struct reset_ops *)dev->driver->ops;
}
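
/*
 * Default phandle translation, used when a provider does not implement
 * of_xlate(): exactly one cell is expected and is taken as the reset ID.
 */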
static int reset_of_xlate_default(struct reset_ctl *reset_ctl,
				  struct ofnode_phandle_args *args)
{
	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	if (args->args_count != 1) {
		debug("Invalid args_count: %d\n", args->args_count);
		return -EINVAL;
	}

	reset_ctl->id = args->args[0];

	return 0;
}
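
/*
 * Common tail of reset_get_by_index() and reset_get_by_index_nodev(): look up
 * the provider device behind the phandle, translate the cell arguments into a
 * reset ID and request the control from the provider.
 */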
static int reset_get_by_index_tail(int ret, ofnode node,
				   struct ofnode_phandle_args *args,
				   const char *list_name, int index,
				   struct reset_ctl *reset_ctl)
{
	struct udevice *dev_reset;
	struct reset_ops *ops;

	assert(reset_ctl);
	reset_ctl->dev = NULL;
	if (ret)
		return ret;

	ret = uclass_get_device_by_ofnode(UCLASS_RESET, args->node,
					  &dev_reset);
	if (ret) {
		debug("%s: uclass_get_device_by_ofnode() failed: %d\n",
		      __func__, ret);
		debug("%s %d\n", ofnode_get_name(args->node), args->args[0]);
		return ret;
	}
	ops = reset_dev_ops(dev_reset);

	reset_ctl->dev = dev_reset;
	if (ops->of_xlate)
		ret = ops->of_xlate(reset_ctl, args);
	else
		ret = reset_of_xlate_default(reset_ctl, args);
	if (ret) {
		debug("of_xlate() failed: %d\n", ret);
		return ret;
	}

	ret = ops->request(reset_ctl);
	if (ret) {
		debug("ops->request() failed: %d\n", ret);
		return ret;
	}

	return 0;
}
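
/* Get and request the reset at @index of the "resets" property of @dev's node */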
int reset_get_by_index(struct udevice *dev, int index,
		       struct reset_ctl *reset_ctl)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = dev_read_phandle_with_args(dev, "resets", "#reset-cells", 0,
					 index, &args);

	return reset_get_by_index_tail(ret, dev_ofnode(dev), &args, "resets",
				       index > 0, reset_ctl);
}
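
/* As reset_get_by_index(), but operating on an ofnode rather than a device */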
int reset_get_by_index_nodev(ofnode node, int index,
			     struct reset_ctl *reset_ctl)
{
	struct ofnode_phandle_args args;
	int ret;

	ret = ofnode_parse_phandle_with_args(node, "resets", "#reset-cells", 0,
					     index, &args);

	return reset_get_by_index_tail(ret, node, &args, "resets",
				       index > 0, reset_ctl);
}
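
/*
 * Request every reset listed in the "resets" property of @node into @bulk;
 * on failure, release any controls that were already obtained.
 */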
static int __reset_get_bulk(struct udevice *dev, ofnode node,
			    struct reset_ctl_bulk *bulk)
{
	int i, ret, err, count;

	bulk->count = 0;

	count = ofnode_count_phandle_with_args(node, "resets", "#reset-cells",
					       0);
	if (count < 1)
		return count;

	bulk->resets = devm_kcalloc(dev, count, sizeof(struct reset_ctl),
				    GFP_KERNEL);
	if (!bulk->resets)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		ret = reset_get_by_index_nodev(node, i, &bulk->resets[i]);
		if (ret < 0)
			goto bulk_get_err;

		++bulk->count;
	}

	return 0;

bulk_get_err:
	err = reset_release_all(bulk->resets, bulk->count);
	if (err)
		debug("%s: could not release all resets for %p\n",
		      __func__, dev);

	return ret;
}

int reset_get_bulk(struct udevice *dev, struct reset_ctl_bulk *bulk)
{
	return __reset_get_bulk(dev, dev_ofnode(dev), bulk);
}

int reset_get_by_name(struct udevice *dev, const char *name,
		      struct reset_ctl *reset_ctl)
{
	int index;

	debug("%s(dev=%p, name=%s, reset_ctl=%p)\n", __func__, dev, name,
	      reset_ctl);
	reset_ctl->dev = NULL;

	index = dev_read_stringlist_search(dev, "reset-names", name);
	if (index < 0) {
		debug("dev_read_stringlist_search() failed: %d\n", index);
		return index;
	}

	return reset_get_by_index(dev, index, reset_ctl);
}

int reset_request(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->request(reset_ctl);
}
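
/* Free a previously requested reset control via the provider's rfree() op */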
int reset_free(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rfree(reset_ctl);
}
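
/* Assert the reset line, i.e. hold the peripheral in its reset state */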
int reset_assert(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_assert(reset_ctl);
}

int reset_assert_bulk(struct reset_ctl_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = reset_assert(&bulk->resets[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}
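
/* De-assert the reset line, releasing the peripheral from its reset state */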
int reset_deassert(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_deassert(reset_ctl);
}

int reset_deassert_bulk(struct reset_ctl_bulk *bulk)
{
	int i, ret;

	for (i = 0; i < bulk->count; i++) {
		ret = reset_deassert(&bulk->resets[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

int reset_status(struct reset_ctl *reset_ctl)
{
	struct reset_ops *ops = reset_dev_ops(reset_ctl->dev);

	debug("%s(reset_ctl=%p)\n", __func__, reset_ctl);

	return ops->rst_status(reset_ctl);
}
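
/*
 * Assert and free each previously requested control in the @reset_ctl array;
 * entries that were never requested (dev == NULL) are skipped.
 */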
int reset_release_all(struct reset_ctl *reset_ctl, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		debug("%s(reset_ctl[%d]=%p)\n", __func__, i, &reset_ctl[i]);

		/* check if reset has been previously requested */
		if (!reset_ctl[i].dev)
			continue;

		ret = reset_assert(&reset_ctl[i]);
		if (ret)
			return ret;

		ret = reset_free(&reset_ctl[i]);
		if (ret)
			return ret;
	}

	return 0;
}
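
/* devres destructor: free the reset control when the owning device goes away */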
static void devm_reset_release(struct udevice *dev, void *res)
{
	reset_free(res);
}

struct reset_ctl *devm_reset_control_get_by_index(struct udevice *dev,
						  int index)
{
	int rc;
	struct reset_ctl *reset_ctl;

	reset_ctl = devres_alloc(devm_reset_release, sizeof(struct reset_ctl),
				 __GFP_ZERO);
	if (unlikely(!reset_ctl))
		return ERR_PTR(-ENOMEM);

	rc = reset_get_by_index(dev, index, reset_ctl);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, reset_ctl);
	return reset_ctl;
}

struct reset_ctl *devm_reset_control_get(struct udevice *dev, const char *id)
{
	int rc;
	struct reset_ctl *reset_ctl;

	reset_ctl = devres_alloc(devm_reset_release, sizeof(struct reset_ctl),
				 __GFP_ZERO);
	if (unlikely(!reset_ctl))
		return ERR_PTR(-ENOMEM);

	rc = reset_get_by_name(dev, id, reset_ctl);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, reset_ctl);
	return reset_ctl;
}

struct reset_ctl *devm_reset_control_get_optional(struct udevice *dev,
						  const char *id)
{
	struct reset_ctl *r = devm_reset_control_get(dev, id);

	if (IS_ERR(r))
		return NULL;

	return r;
}
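
/* devres destructor: release every reset control held by the bulk structure */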
static void devm_reset_bulk_release(struct udevice *dev, void *res)
{
	struct reset_ctl_bulk *bulk = res;

	reset_release_all(bulk->resets, bulk->count);
}

struct reset_ctl_bulk *devm_reset_bulk_get_by_node(struct udevice *dev,
						   ofnode node)
{
	int rc;
	struct reset_ctl_bulk *bulk;

	bulk = devres_alloc(devm_reset_bulk_release,
			    sizeof(struct reset_ctl_bulk),
			    __GFP_ZERO);

	/* this looks like a leak, but devres takes care of it */
	if (unlikely(!bulk))
		return ERR_PTR(-ENOMEM);

	rc = __reset_get_bulk(dev, node, bulk);
	if (rc)
		return ERR_PTR(rc);

	devres_add(dev, bulk);
	return bulk;
}

struct reset_ctl_bulk *devm_reset_bulk_get_optional_by_node(struct udevice *dev,
							     ofnode node)
{
	struct reset_ctl_bulk *bulk;

	bulk = devm_reset_bulk_get_by_node(dev, node);

	if (IS_ERR(bulk))
		return NULL;

	return bulk;
}

struct reset_ctl_bulk *devm_reset_bulk_get(struct udevice *dev)
{
	return devm_reset_bulk_get_by_node(dev, dev_ofnode(dev));
}

struct reset_ctl_bulk *devm_reset_bulk_get_optional(struct udevice *dev)
{
	return devm_reset_bulk_get_optional_by_node(dev, dev_ofnode(dev));
}

UCLASS_DRIVER(reset) = {
	.id		= UCLASS_RESET,
	.name		= "reset",
};