Merge branch '2022-04-11-initial-valgrind-support'

To quote the author:
This series adds support for running valgrind against U-Boot's internal
malloc. This allows for much more useful reports to be generated.

Some example output of valgrind run against u-boot/master with this
branch applied may be found at [1]. Note that valgrind gives up around
acpi.  This feature still needs a lot of work on suppressions/hints to
filter out the noise properly.

[1] 2a2f99108e/gistfile1.txt
Tom Rini 2022-04-11 10:15:06 -04:00
commit c45d38d651
10 changed files with 7509 additions and 5 deletions

14
Kconfig

@@ -297,6 +297,20 @@ config TPL_SYS_MALLOC_F_LEN
particular needs this to operate, so that it can allocate the
initial serial device and any others that are needed.
config VALGRIND
bool "Inform valgrind about memory allocations"
help
Valgrind is an instrumentation framework for building dynamic analysis
tools. In particular, it may be used to detect memory management bugs
in U-Boot. It relies on knowing when heap blocks are allocated in
order to give accurate results. This happens automatically for
standard allocator functions provided by the host OS. However, this
doesn't automatically happen for U-Boot's malloc implementation.
Enable this option to annotate U-Boot's malloc implementation so that
it can be handled accurately by Valgrind. If you aren't planning on
using valgrind to debug U-Boot, say 'n'.
menuconfig EXPERT
bool "Configure standard U-Boot features (expert users)"
default y
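The CONFIG_VALGRIND help above amounts to this: every block handed out by U-Boot's own allocator is announced to memcheck with a "malloc-like" client request, and every free with a "free-like" one. A minimal sketch of the idea for a made-up bump allocator (illustrative only, not U-Boot code; all pool_* names are hypothetical):

/* Minimal sketch (not U-Boot code): annotating a toy bump allocator so
 * memcheck can track blocks it would otherwise know nothing about. */
#include <stdbool.h>
#include <stddef.h>
#include <valgrind/valgrind.h>

static char pool[4096];
static size_t pool_used;

void *pool_alloc(size_t bytes)
{
	void *ptr;

	if (pool_used + bytes > sizeof(pool))
		return NULL;
	ptr = &pool[pool_used];
	pool_used += bytes;
	/* Register a heap-like block at ptr: 'bytes' long, no redzone,
	 * contents not zeroed (reads before a write are uninitialised). */
	VALGRIND_MALLOCLIKE_BLOCK(ptr, bytes, 0, false);
	return ptr;
}

void pool_free(void *ptr)
{
	/* Nothing is actually reclaimed, but memcheck now flags any
	 * later access to the block as a use after free. */
	VALGRIND_FREELIKE_BLOCK(ptr, 0);
}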


@@ -139,6 +139,7 @@ License identifier syntax
Full name                                        SPDX Identifier   OSI Approved   File name         URI
========================================================================================================================================
bzip2 and libbzip2 License v1.0.6                bzip2-1.0.6                      bzip2-1.0.6.txt   https://spdx.org/licenses/bzip2-1.0.6.html
GNU General Public License v2.0 only             GPL-2.0           Y              gpl-2.0.txt       http://www.gnu.org/licenses/gpl-2.0.txt
GNU General Public License v2.0 or later         GPL-2.0+          Y              gpl-2.0.txt       http://www.gnu.org/licenses/gpl-2.0.txt
GNU Library General Public License v2 or later   LGPL-2.0+         Y              lgpl-2.0.txt      http://www.gnu.org/licenses/old-licenses/lgpl-2.0.txt

30
Licenses/bzip2-1.0.6.txt Normal file

@@ -0,0 +1,30 @@
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -18,6 +18,7 @@
#include <malloc.h>
#include <asm/io.h>
#include <valgrind/memcheck.h>
#ifdef DEBUG
#if __STD_C
@@ -1339,6 +1340,7 @@ Void_t* mALLOc(bytes) size_t bytes;
unlink(victim, bck, fwd);
set_inuse_bit_at_offset(victim, victim_size);
check_malloced_chunk(victim, nb);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
return chunk2mem(victim);
}
@@ -1366,6 +1368,7 @@ Void_t* mALLOc(bytes) size_t bytes;
unlink(victim, bck, fwd);
set_inuse_bit_at_offset(victim, victim_size);
check_malloced_chunk(victim, nb);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
return chunk2mem(victim);
}
}
@@ -1389,6 +1392,7 @@ Void_t* mALLOc(bytes) size_t bytes;
set_head(remainder, remainder_size | PREV_INUSE);
set_foot(remainder, remainder_size);
check_malloced_chunk(victim, nb);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
return chunk2mem(victim);
}
@@ -1398,6 +1402,7 @@ Void_t* mALLOc(bytes) size_t bytes;
{
set_inuse_bit_at_offset(victim, victim_size);
check_malloced_chunk(victim, nb);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
return chunk2mem(victim);
}
@@ -1453,6 +1458,7 @@ Void_t* mALLOc(bytes) size_t bytes;
set_head(remainder, remainder_size | PREV_INUSE);
set_foot(remainder, remainder_size);
check_malloced_chunk(victim, nb);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
return chunk2mem(victim);
}
@@ -1461,6 +1467,7 @@ Void_t* mALLOc(bytes) size_t bytes;
set_inuse_bit_at_offset(victim, victim_size);
unlink(victim, bck, fwd);
check_malloced_chunk(victim, nb);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
return chunk2mem(victim);
}
@@ -1509,6 +1516,7 @@ Void_t* mALLOc(bytes) size_t bytes;
/* If big and would otherwise need to extend, try to use mmap instead */
if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
(victim = mmap_chunk(nb)))
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
return chunk2mem(victim);
#endif
@@ -1523,6 +1531,7 @@ Void_t* mALLOc(bytes) size_t bytes;
top = chunk_at_offset(victim, nb);
set_head(top, remainder_size | PREV_INUSE);
check_malloced_chunk(victim, nb);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
return chunk2mem(victim);
}
@@ -1571,8 +1580,10 @@ void fREe(mem) Void_t* mem;
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
/* free() is a no-op - all the memory will be freed on relocation */
if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ);
return;
}
#endif
if (mem == NULL) /* free(0) has no effect */
@@ -1594,6 +1605,7 @@ void fREe(mem) Void_t* mem;
sz = hd & ~PREV_INUSE;
next = chunk_at_offset(p, sz);
nextsz = chunksize(next);
VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ);
if (next == top) /* merge with top */
{
@@ -1782,6 +1794,8 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
top = chunk_at_offset(oldp, nb);
set_head(top, (newsize - nb) | PREV_INUSE);
set_head_size(oldp, nb);
VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ);
VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes);
return chunk2mem(oldp);
}
}
@@ -1791,6 +1805,8 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
{
unlink(next, bck, fwd);
newsize += nextsize;
VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ);
VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes);
goto split;
}
}
@@ -1820,10 +1836,12 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
newp = prev;
newsize += prevsize + nextsize;
newmem = chunk2mem(newp);
VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
top = chunk_at_offset(newp, nb);
set_head(top, (newsize - nb) | PREV_INUSE);
set_head_size(newp, nb);
VALGRIND_FREELIKE_BLOCK(oldmem, SIZE_SZ);
return newmem;
}
}
@@ -1836,6 +1854,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
newp = prev;
newsize += nextsize + prevsize;
newmem = chunk2mem(newp);
VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
goto split;
}
@@ -1848,6 +1867,7 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
newp = prev;
newsize += prevsize;
newmem = chunk2mem(newp);
VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
goto split;
}
@@ -1874,6 +1894,9 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
fREe(oldmem);
return newmem;
} else {
VALGRIND_RESIZEINPLACE_BLOCK(oldmem, 0, bytes, SIZE_SZ);
VALGRIND_MAKE_MEM_DEFINED(oldmem, bytes);
}
@@ -1886,6 +1909,8 @@ Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
set_head_size(newp, nb);
set_head(remainder, remainder_size | PREV_INUSE);
set_inuse_bit_at_offset(remainder, remainder_size);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
false);
fREe(chunk2mem(remainder)); /* let free() deal with it */
}
else
@@ -2043,6 +2068,7 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
set_head_size(p, leadsize);
fREe(chunk2mem(p));
p = newp;
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(p), bytes, SIZE_SZ, false);
assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
}
@@ -2056,6 +2082,8 @@ Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
remainder = chunk_at_offset(p, nb);
set_head(remainder, remainder_size | PREV_INUSE);
set_head_size(p, nb);
VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
false);
fREe(chunk2mem(remainder));
}
@@ -2159,6 +2187,7 @@ Void_t* cALLOc(n, elem_size) size_t n; size_t elem_size;
#endif
MALLOC_ZERO(mem, csz - SIZE_SZ);
VALGRIND_MAKE_MEM_DEFINED(mem, sz);
return mem;
}
}
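The rEALLOc hunks above follow one pattern: if the chunk can grow in place, the existing registration is resized and the block is re-marked as defined; if the data has to move, the destination gets a fresh malloc-like registration before the copy and the old block is released with a free-like request. A rough sketch of that pattern outside dlmalloc (grow_in_place, raw_alloc and raw_free are hypothetical helpers, declared only so the sketch is complete):

/* Illustrative sketch only, not the dlmalloc code above: the two realloc
 * outcomes and the matching memcheck annotations. */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <valgrind/memcheck.h>

/* Hypothetical allocator primitives. */
bool grow_in_place(void *blk, size_t new_size);
void *raw_alloc(size_t size);
void raw_free(void *blk);

void *annotated_realloc(void *oldmem, size_t old_size, size_t new_size)
{
	void *newmem;

	if (grow_in_place(oldmem, new_size)) {
		/* Same address: only the recorded block size changes... */
		VALGRIND_RESIZEINPLACE_BLOCK(oldmem, old_size, new_size, 0);
		/* ...and the whole block is treated as defined, as the
		 * dlmalloc hunks above do with 'bytes'. */
		VALGRIND_MAKE_MEM_DEFINED(oldmem, new_size);
		return oldmem;
	}

	newmem = raw_alloc(new_size);
	if (!newmem)
		return NULL;
	/* Register the new block first, then copy, then drop the old one. */
	VALGRIND_MALLOCLIKE_BLOCK(newmem, new_size, 0, false);
	memcpy(newmem, oldmem, old_size < new_size ? old_size : new_size);
	raw_free(oldmem);
	VALGRIND_FREELIKE_BLOCK(oldmem, 0);
	return newmem;
}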


@@ -13,6 +13,7 @@
#include <mapmem.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <valgrind/valgrind.h>
DECLARE_GLOBAL_DATA_PTR;
@@ -45,6 +46,7 @@ void *malloc_simple(size_t bytes)
return ptr;
log_debug("%lx\n", (ulong)ptr);
VALGRIND_MALLOCLIKE_BLOCK(ptr, bytes, 0, false);
return ptr;
}
@@ -57,6 +59,7 @@ void *memalign_simple(size_t align, size_t bytes)
if (!ptr)
return ptr;
log_debug("aligned to %lx\n", (ulong)ptr);
VALGRIND_MALLOCLIKE_BLOCK(ptr, bytes, 0, false);
return ptr;
}
@@ -74,6 +77,13 @@ void *calloc(size_t nmemb, size_t elem_size)
return ptr;
}
#if IS_ENABLED(CONFIG_VALGRIND)
void free_simple(void *ptr)
{
VALGRIND_FREELIKE_BLOCK(ptr, 0);
}
#endif
#endif
void malloc_simple_info(void)
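free_simple() above releases nothing; it only tells memcheck that the block is gone, since the whole simple heap is discarded at relocation anyway. A hypothetical usage sketch of what that buys, assuming CONFIG_VALGRIND and the pre-relocation simple malloc are in effect:

/* Hypothetical sketch: even though malloc_simple never reuses memory,
 * the free-like annotation lets memcheck flag stale accesses. */
#include <malloc.h>

void valgrind_demo(void)
{
	char *s = malloc(16);	/* registered via VALGRIND_MALLOCLIKE_BLOCK */

	if (!s)
		return;
	s[0] = 'h';
	free(s);		/* maps to free_simple(): free-like request only */
	s[1] = 'i';		/* memcheck: invalid write of size 1 */
}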


@@ -477,14 +477,20 @@ Using valgrind / memcheck
It is possible to run U-Boot under valgrind to check memory allocations::
valgrind u-boot
valgrind ./u-boot
For more detailed results, enable `CONFIG_VALGRIND`. There are many false
positives due to `malloc` itself. Suppress these with::
valgrind --suppressions=scripts/u-boot.supp ./u-boot
If you are running sandbox SPL or TPL, then valgrind will not by default
notice when U-Boot jumps from TPL to SPL, or from SPL to U-Boot proper. To
fix this, use::
valgrind --trace-children=yes u-boot
fix this, use `--trace-children=yes`. To show who alloc'd some troublesome
memory, use `--track-origins=yes`. To uncover possible errors, try running all
unit tests with::
valgrind --track-origins=yes --suppressions=scripts/u-boot.supp ./u-boot -Tc 'ut all'
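All of the options above are given on the valgrind command line; code can also ask at run time whether it is being instrumented. A hedged illustration using RUNNING_ON_VALGRIND from the bundled valgrind.h (the timeout tweak is invented for the example, not something U-Boot does):

/* Illustrative only: detect valgrind at run time and adapt behaviour,
 * for example to relax a timeout that instrumentation overhead would break. */
#include <valgrind/valgrind.h>

unsigned int pick_timeout_ms(void)
{
	/* RUNNING_ON_VALGRIND is 0 natively and non-zero under valgrind. */
	if (RUNNING_ON_VALGRIND)
		return 10000;	/* instrumented runs are many times slower */

	return 1000;
}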
Testing
-------


@@ -887,7 +887,11 @@ void malloc_simple_info(void);
#define malloc malloc_simple
#define realloc realloc_simple
#define memalign memalign_simple
#if IS_ENABLED(CONFIG_VALGRIND)
#define free free_simple
#else
static inline void free(void *ptr) {}
#endif
void *calloc(size_t nmemb, size_t size);
void *realloc_simple(void *ptr, size_t size);
#else

251
include/valgrind/memcheck.h Normal file

@@ -0,0 +1,251 @@
/* SPDX-License-Identifier: bzip2-1.0.6 */
/*
This file is part of MemCheck, a heavyweight Valgrind tool for
detecting memory errors.
Copyright (C) 2000-2017 Julian Seward. All rights reserved.
*/
#ifndef __MEMCHECK_H
#define __MEMCHECK_H
/* This file is for inclusion into client (your!) code.
You can use these macros to manipulate and query memory permissions
inside your own programs.
See comment near the top of valgrind.h on how to use them.
*/
#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
enum {
VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
VG_USERREQ__MAKE_MEM_UNDEFINED,
VG_USERREQ__MAKE_MEM_DEFINED,
VG_USERREQ__DISCARD,
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
VG_USERREQ__CHECK_MEM_IS_DEFINED,
VG_USERREQ__DO_LEAK_CHECK,
VG_USERREQ__COUNT_LEAKS,
VG_USERREQ__GET_VBITS,
VG_USERREQ__SET_VBITS,
VG_USERREQ__CREATE_BLOCK,
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
/* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
VG_USERREQ__COUNT_LEAK_BLOCKS,
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,
/* This is just for memcheck's internal use - don't use it */
_VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
= VG_USERREQ_TOOL_BASE('M','C') + 256
} Vg_MemCheckClientRequest;
/* Client-code macros to manipulate the state of memory. */
/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__MAKE_MEM_NOACCESS, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable but undefined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__MAKE_MEM_UNDEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable and defined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__MAKE_MEM_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
not altered: bytes which are addressable are marked as defined,
but those which are not addressable are left unchanged. */
#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Create a block-description handle. The description is an ascii
string which is included in any messages pertaining to addresses
within the specified memory range. Has no other effect on the
properties of the memory range. */
#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__CREATE_BLOCK, \
(_qzz_addr), (_qzz_len), (_qzz_desc), \
0, 0)
/* Discard a block-description-handle. Returns 1 for an
invalid handle, 0 for a valid handle. */
#define VALGRIND_DISCARD(_qzz_blkindex) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISCARD, \
0, (_qzz_blkindex), 0, 0, 0)
/* Client-code macros to check the state of memory. */
/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
If suitable addressibility is not established, Valgrind prints an
error message and returns the address of the first offending byte.
Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Check that memory at _qzz_addr is addressable and defined for
_qzz_len bytes. If suitable addressibility and definedness are not
established, Valgrind prints an error message and returns the
address of the first offending byte. Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Use this macro to force the definedness and addressibility of an
lvalue to be checked. If suitable addressibility and definedness
are not established, Valgrind prints an error message and returns
the address of the first offending byte. Otherwise it returns
zero. */
#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
VALGRIND_CHECK_MEM_IS_DEFINED( \
(volatile unsigned char *)&(__lvalue), \
(unsigned long)(sizeof (__lvalue)))
/* Do a full memory leak check (like --leak-check=full) mid-execution. */
#define VALGRIND_DO_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 0, 0, 0, 0)
/* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for
which there was an increase in leaked bytes or leaked nr of blocks
since the previous leak search. */
#define VALGRIND_DO_ADDED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 1, 0, 0, 0)
/* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with
increased or decreased leaked bytes/blocks since previous leak
search. */
#define VALGRIND_DO_CHANGED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 2, 0, 0, 0)
/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
#define VALGRIND_DO_QUICK_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
1, 0, 0, 0, 0)
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAK_BLOCKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
into the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zzsrc/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__GET_VBITS, \
(const char*)(zza), \
(char*)(zzvbits), \
(zznbytes), 0, 0)
/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
from the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zza/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__SET_VBITS, \
(const char*)(zza), \
(const char*)(zzvbits), \
(zznbytes), 0, 0 )
/* Disable and re-enable reporting of addressing errors in the
specified address range. */
#define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#endif
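The client requests above are not just for the allocator; ordinary code can use them to assert properties of its own data. A small illustrative sketch (the struct and the check are invented for the example):

/* Illustrative sketch: using memcheck client requests directly.
 * The requests are harmless no-ops at run time when not under valgrind. */
#include <valgrind/memcheck.h>

struct reply {
	int status;
	char payload[32];
};

int check_reply(const struct reply *r)
{
	/* Complain if 'status' contains any uninitialised bits. */
	VALGRIND_CHECK_VALUE_IS_DEFINED(r->status);
	/* Make sure the whole payload is addressable and defined. */
	VALGRIND_CHECK_MEM_IS_DEFINED(r->payload, sizeof(r->payload));

	return r->status;
}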

7106
include/valgrind/valgrind.h Normal file

File diff suppressed because it is too large

53
scripts/u-boot.supp Normal file

@@ -0,0 +1,53 @@
{
dlmalloc
Memcheck:Addr1
src:dlmalloc.c
}
{
dlmalloc
Memcheck:Addr4
src:dlmalloc.c
}
{
dlmalloc
Memcheck:Addr8
src:dlmalloc.c
}
{
dlmalloc
Memcheck:Addr1
fun:*
src:dlmalloc.c
}
{
dlmalloc
Memcheck:Addr4
fun:*
src:dlmalloc.c
}
{
dlmalloc
Memcheck:Addr8
fun:*
src:dlmalloc.c
}
{
dlmalloc
Memcheck:Value4
src:dlmalloc.c
}
{
dlmalloc
Memcheck:Value8
src:dlmalloc.c
}
{
dlmalloc
Memcheck:Cond
src:dlmalloc.c
}
{
dlmalloc
Memcheck:Free
src:dlmalloc.c
}