u-boot/drivers/block/blkcache.c
Ovidiu Panait 365af27f0d blkcache: Extend blkcache_init to cover CONFIG_NEEDS_MANUAL_RELOC
Extend manual relocation of block_cache list pointers to all platforms that
enable CONFIG_NEEDS_MANUAL_RELOC. Remove m68k-specific checks and provide a
single implementation that adds gd->reloc_off to the pre-relocation
pointers.

Acked-by: Angelo Dureghello <angelo.dureghello@timesys.com>
Tested-by: Angelo Dureghello <angelo.dureghello@timesys.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Reviewed-by: Eric Nelson <eric@nelint.com>
Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
[trini: Add guard around DECLARE_GLOBAL_DATA_PTR to avoid size growth]
Signed-off-by: Tom Rini <trini@konsulko.com>
2020-08-06 18:08:22 -04:00

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson <eric@nelint.com>
 */
#include <common.h>
#include <blk.h>
#include <log.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>

#ifdef CONFIG_NEEDS_MANUAL_RELOC
DECLARE_GLOBAL_DATA_PTR;
#endif
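
/* One cache entry: a contiguous run of blocks from a single block device */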
struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};

static LIST_HEAD(block_cache);

static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};
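
/*
 * With CONFIG_NEEDS_MANUAL_RELOC, the statically initialized list head
 * still contains pre-relocation pointers, so rebase both links by
 * gd->reloc_off before the cache is first used.
 */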
#ifdef CONFIG_NEEDS_MANUAL_RELOC
int blkcache_init(void)
{
	struct list_head *head = &block_cache;

	head->next = (struct list_head *)((uintptr_t)head->next +
					  gd->reloc_off);
	head->prev = (struct list_head *)((uintptr_t)head->prev +
					  gd->reloc_off);

	return 0;
}
#endif
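
/*
 * Find a cache entry that fully covers the requested range on the given
 * device.  On a hit the entry is moved to the head of the list to
 * maintain MRU ordering.
 */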
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}

	return NULL;
}
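
/*
 * Satisfy a read from the cache if possible.  Returns 1 and copies
 * blkcnt blocks into @buffer on a hit, 0 on a miss.  A typical caller
 * falls back to the device and then populates the cache, e.g.:
 *
 *	if (!blkcache_read(iftype, devnum, start, blkcnt, blksz, buf)) {
 *		... read from the underlying device into buf ...
 *		blkcache_fill(iftype, devnum, start, blkcnt, blksz, buf);
 *	}
 */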
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);

	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;

		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}
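
/*
 * Insert a block range into the cache.  Ranges larger than
 * max_blocks_per_entry are not cached; when the cache is full, the
 * least-recently-used entry is evicted and its buffer reused if it is
 * large enough.
 */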
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU */
		node = (struct block_cache_node *)block_cache.prev;
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}
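
/* Drop all cache entries belonging to one device */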
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = (struct block_cache_node *)entry;
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}
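
/*
 * Change the cache geometry.  Any change to the limits empties the
 * cache; the hit/miss counters are reset either way.
 */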
void blkcache_configure(unsigned blocks, unsigned entries)
{
	struct block_cache_node *node;

	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries)) {
		/* invalidate cache */
		while (!list_empty(&block_cache)) {
			node = (struct block_cache_node *)block_cache.next;
			list_del(&node->lh);
			free(node->cache);
			free(node);
		}
		_stats.entries = 0;
	}

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}
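
/* Copy out the current statistics, then reset the hit/miss counters */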
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}