mirror of https://github.com/AsahiLinux/u-boot (synced 2024-12-13)

commit 3c1e6a6c23: Merge tag 'dm-pull-22sep20' of git://git.denx.de/u-boot-dm into next

binman enhancements for FIT
sunxi64 conversion to use binman
New 'no-map' property for reserved memory

-----BEGIN PGP SIGNATURE-----
iQFFBAABCgAvFiEEslwAIq+Gp8wWVbYnfxc6PpAIreYFAl9qZDsRHHNqZ0BjaHJv
bWl1bS5vcmcACgkQfxc6PpAIreZSngf/UHS8BWPy1cLHUBB1RlwH212qQu+zvome
yh9VjY9C6nC6CSQhZUuwMqk3HACcjSmXG8JP6UY7WnvYmuKGaYtpwvAk04llG5Da
5+srmmrMtAMsILtVS82je3aLzHoXFiGex0xmxGrkVvGY60HglMkEliQlFG3WnlS0
A0YRWhDuDMKTCg1FLimnFX2j7HjmigDZcUZ0MdhWSbxtvqPnK7y89FjPeMa25CWH
PxQ0vQXcWazoRqZcMCAx8fyq+Fi1htk/176LN6Y91Xhtfhufmx5GKtfYnfR24MY5
6p+cfE+Tfk8DwB0Hy9eWEjUwjFhOovYcopjynEXdn/lXBKi/hjMhXQ==
=n3yY
-----END PGP SIGNATURE-----

47 changed files with 1298 additions and 228 deletions

Kconfig (3 changes)

@@ -659,12 +659,11 @@ config SPL_FIT_SOURCE
 config USE_SPL_FIT_GENERATOR
 	bool "Use a script to generate the .its script"
-	default y if SPL_FIT
+	default y if SPL_FIT && !ARCH_SUNXI
 
 config SPL_FIT_GENERATOR
 	string ".its file generator script for U-Boot FIT image"
 	depends on USE_SPL_FIT_GENERATOR
-	default "board/sunxi/mksunxi_fit_atf.sh" if SPL_LOAD_FIT && ARCH_SUNXI
 	default "arch/arm/mach-rockchip/make_fit_atf.py" if SPL_LOAD_FIT && ARCH_ROCKCHIP
 	default "arch/arm/mach-zynqmp/mkimage_fit_atf.sh" if SPL_LOAD_FIT && ARCH_ZYNQMP
 	default "arch/riscv/lib/mkimage_fit_opensbi.sh" if SPL_LOAD_FIT && RISCV

Makefile (23 changes)

@@ -923,11 +923,6 @@ INPUTS-$(CONFIG_REMAKE_ELF) += u-boot.elf
 INPUTS-$(CONFIG_EFI_APP) += u-boot-app.efi
 INPUTS-$(CONFIG_EFI_STUB) += u-boot-payload.efi
 
-# Build a combined spl + u-boot image for sunxi
-ifeq ($(CONFIG_ARCH_SUNXI)$(CONFIG_ARM64)$(CONFIG_SPL),yyy)
-INPUTS-y += u-boot-sunxi-with-spl.bin
-endif
-
 # Generate this input file for binman
 ifeq ($(CONFIG_SPL),)
 INPUTS-$(CONFIG_ARCH_MEDIATEK) += u-boot-mtk.bin

@@ -1024,13 +1019,9 @@ PHONY += inputs
 inputs: $(INPUTS-y)
 
 all: .binman_stamp inputs
-# Hack for sunxi which doesn't have a proper binman definition for
-# 64-bit boards
-ifneq ($(CONFIG_ARCH_SUNXI)$(CONFIG_ARM64),yy)
 ifeq ($(CONFIG_BINMAN),y)
	$(call if_changed,binman)
 endif
-endif
 
 # Timestamp file to make sure that binman always runs
 .binman_stamp: FORCE

@@ -1330,13 +1321,16 @@ u-boot.ldr: u-boot
 # binman
 # ---------------------------------------------------------------------------
 # Use 'make BINMAN_DEBUG=1' to enable debugging
+default_dt := $(if $(DEVICE_TREE),$(DEVICE_TREE),$(CONFIG_DEFAULT_DEVICE_TREE))
 quiet_cmd_binman = BINMAN  $@
 cmd_binman = $(srctree)/tools/binman/binman $(if $(BINMAN_DEBUG),-D) \
		--toolpath $(objtree)/tools \
		$(if $(BINMAN_VERBOSE),-v$(BINMAN_VERBOSE)) \
-		build -u -d u-boot.dtb -O . -m --allow-missing \
+		build -u -d u-boot.dtb -O . \
+		$(if $(BUILD_ROM),,-m --allow-missing) \
		-I . -I $(srctree) -I $(srctree)/board/$(BOARDDIR) \
		-I arch/$(ARCH)/dts -a of-list=$(CONFIG_OF_LIST) \
+		-a atf-bl31-path=${BL31} \
+		-a default-dt=$(default_dt) \
		$(BINMAN_$(@F))
 
 OBJCOPYFLAGS_u-boot.ldr.hex := -I binary -O ihex

@@ -1626,13 +1620,6 @@ u-boot-x86-reset16.bin: u-boot FORCE
 
 endif # CONFIG_X86
 
-ifneq ($(CONFIG_ARCH_SUNXI),)
-ifeq ($(CONFIG_ARM64),y)
-u-boot-sunxi-with-spl.bin: spl/sunxi-spl.bin u-boot.itb FORCE
-	$(call if_changed,cat)
-endif
-endif
-
 OBJCOPYFLAGS_u-boot-app.efi := $(OBJCOPYFLAGS_EFI)
 u-boot-app.efi: u-boot FORCE
	$(call if_changed,zobjcopy)

@@ -5,14 +5,74 @@
 		mmc1 = &mmc2;
 	};
 
-	binman {
+	binman: binman {
+		multiple-images;
+	};
+};
+
+&binman {
+	u-boot-sunxi-with-spl {
 		filename = "u-boot-sunxi-with-spl.bin";
 		pad-byte = <0xff>;
 		blob {
 			filename = "spl/sunxi-spl.bin";
 		};
+#ifdef CONFIG_ARM64
+		fit {
+			description = "Configuration to load ATF before U-Boot";
+			#address-cells = <1>;
+			fit,fdt-list = "of-list";
+
+			images {
+				uboot {
+					description = "U-Boot (64-bit)";
+					type = "standalone";
+					arch = "arm64";
+					compression = "none";
+					load = <0x4a000000>;
+
+					u-boot-nodtb {
+					};
+				};
+				atf {
+					description = "ARM Trusted Firmware";
+					type = "firmware";
+					arch = "arm64";
+					compression = "none";
+					/* TODO: Do this with an overwrite in this board's dtb? */
+#ifdef CONFIG_MACH_SUN50I_H6
+					load = <0x104000>;
+					entry = <0x104000>;
+#else
+					load = <0x44000>;
+					entry = <0x44000>;
+#endif
+					atf-bl31 {
+						missing-msg = "atf-bl31-sunxi";
+					};
+				};
+
+				@fdt-SEQ {
+					description = "NAME";
+					type = "flat_dt";
+					compression = "none";
+				};
+			};
+
+			configurations {
+				default = "config-1";
+				@config-SEQ {
+					description = "NAME";
+					firmware = "uboot";
+					loadables = "atf";
+					fdt = "fdt-SEQ";
+				};
+			};
+		};
+#else
 		u-boot-img {
 			offset = <CONFIG_SPL_PAD_TO>;
 		};
+#endif
+	};
 };

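A minimal sketch, in Python and with illustrative names only, of the expansion
binman applies to the @fdt-SEQ and @config-SEQ generator nodes in the sunxi
binman description above (the real implementation is in the fit.py hunks later
in this diff): SEQ becomes a 1-based sequence number, NAME the dtb's basename.

    of_list = ['sun50i-a64-pine64-lts', 'sun50i-a64-pine64']  # from -a of-list

    def expand(template_name, props, fdts):
        """Expand one '@...SEQ' template node into per-devicetree nodes."""
        nodes = {}
        for seq, fdt_name in enumerate(fdts, start=1):
            name = template_name[1:].replace('SEQ', str(seq))
            nodes[name] = {p: v.replace('SEQ', str(seq)).replace('NAME', fdt_name)
                           for p, v in props.items()}
        return nodes

    print(expand('@fdt-SEQ', {'description': 'NAME', 'type': 'flat_dt'}, of_list))
    # {'fdt-1': {'description': 'sun50i-a64-pine64-lts', 'type': 'flat_dt'},
    #  'fdt-2': {'description': 'sun50i-a64-pine64', 'type': 'flat_dt'}}
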
@@ -75,7 +75,7 @@ int riscv_fdt_copy_resv_mem_node(const void *src, void *dst)
 		pmp_mem.start = addr;
 		pmp_mem.end = addr + size - 1;
 		err = fdtdec_add_reserved_memory(dst, basename, &pmp_mem,
-						 &phandle);
+						 &phandle, false);
 		if (err < 0 && err != -FDT_ERR_EXISTS) {
 			log_err("failed to add reserved memory: %d\n", err);
 			return err;

@@ -1,87 +0,0 @@
-#!/bin/sh
-#
-# script to generate FIT image source for 64-bit sunxi boards with
-# ARM Trusted Firmware and multiple device trees (given on the command line)
-#
-# usage: $0 <dt_name> [<dt_name> [<dt_name] ...]
-
-[ -z "$BL31" ] && BL31="bl31.bin"
-
-if [ ! -f $BL31 ]; then
-	echo "WARNING: BL31 file $BL31 NOT found, resulting binary is non-functional" >&2
-	echo "Please read the section on ARM Trusted Firmware (ATF) in board/sunxi/README.sunxi64" >&2
-	BL31=/dev/null
-fi
-
-if grep -q "^CONFIG_MACH_SUN50I_H6=y" .config; then
-	BL31_ADDR=0x104000
-else
-	BL31_ADDR=0x44000
-fi
-
-cat << __HEADER_EOF
-/dts-v1/;
-
-/ {
-	description = "Configuration to load ATF before U-Boot";
-	#address-cells = <1>;
-
-	images {
-		uboot {
-			description = "U-Boot (64-bit)";
-			data = /incbin/("u-boot-nodtb.bin");
-			type = "standalone";
-			arch = "arm64";
-			compression = "none";
-			load = <0x4a000000>;
-		};
-		atf {
-			description = "ARM Trusted Firmware";
-			data = /incbin/("$BL31");
-			type = "firmware";
-			arch = "arm64";
-			compression = "none";
-			load = <$BL31_ADDR>;
-			entry = <$BL31_ADDR>;
-		};
-__HEADER_EOF
-
-cnt=1
-for dtname in $*
-do
-	cat << __FDT_IMAGE_EOF
-		fdt_$cnt {
-			description = "$(basename $dtname .dtb)";
-			data = /incbin/("$dtname");
-			type = "flat_dt";
-			compression = "none";
-		};
-__FDT_IMAGE_EOF
-	cnt=$((cnt+1))
-done
-
-cat << __CONF_HEADER_EOF
-	};
-	configurations {
-		default = "config_1";
-
-__CONF_HEADER_EOF
-
-cnt=1
-for dtname in $*
-do
-	cat << __CONF_SECTION_EOF
-		config_$cnt {
-			description = "$(basename $dtname .dtb)";
-			firmware = "uboot";
-			loadables = "atf";
-			fdt = "fdt_$cnt";
-		};
-__CONF_SECTION_EOF
-	cnt=$((cnt+1))
-done
-
-cat << __ITS_EOF
-	};
-};
-__ITS_EOF

common/log.c (13 changes)

@@ -191,12 +191,23 @@ static bool log_passes_filters(struct log_device *ldev, struct log_rec *rec)
 static int log_dispatch(struct log_rec *rec)
 {
 	struct log_device *ldev;
+	static int processing_msg;
+
+	/*
+	 * When a log driver writes messages (e.g. via the network stack) this
+	 * may result in further generated messages. We cannot process them here
+	 * as this might result in infinite recursion.
+	 */
+	if (processing_msg)
+		return 0;
 
+	/* Emit message */
+	processing_msg = 1;
 	list_for_each_entry(ldev, &gd->log_head, sibling_node) {
 		if (log_passes_filters(ldev, rec))
 			ldev->drv->emit(ldev, rec);
 	}
+	processing_msg = 0;
+
 	return 0;
 }

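The guard moved into log_dispatch() above is a plain reentrancy latch; a sketch
of the same pattern in Python (illustrative only, names made up): messages
generated while one is already being emitted are dropped instead of recursing.

    processing_msg = False

    def log_dispatch(rec, emit):
        """Drop any message generated while another is being emitted."""
        global processing_msg
        if processing_msg:          # e.g. a log driver's own network traffic
            return 0
        processing_msg = True
        try:
            emit(rec)               # may itself trigger more log messages
        finally:
            processing_msg = False
        return 0
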
@@ -35,16 +35,9 @@ static int log_syslog_emit(struct log_device *ldev, struct log_rec *rec)
 	char *log_msg;
 	int eth_hdr_size;
 	struct in_addr bcast_ip;
-	static int processing_msg;
 	unsigned int log_level;
 	char *log_hostname;
 
-	/* Fend off messages from the network stack while writing a message */
-	if (processing_msg)
-		return 0;
-
-	processing_msg = 1;
-
 	/* Setup packet buffers */
 	net_init();
 	/* Disable hardware and put it into the reset state */

@@ -108,7 +101,6 @@ static int log_syslog_emit(struct log_device *ldev, struct log_rec *rec)
 	net_send_packet((uchar *)msg, ptr - msg);
 
 out:
-	processing_msg = 0;
 	return ret;
 }

@@ -745,13 +745,14 @@ struct device_node *of_parse_phandle(const struct device_node *np,
 
 int of_parse_phandle_with_args(const struct device_node *np,
 			       const char *list_name, const char *cells_name,
-			       int index, struct of_phandle_args *out_args)
+			       int cell_count, int index,
+			       struct of_phandle_args *out_args)
 {
 	if (index < 0)
 		return -EINVAL;
 
-	return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
-					    index, out_args);
+	return __of_parse_phandle_with_args(np, list_name, cells_name,
+					    cell_count, index, out_args);
 }
 
 int of_count_phandle_with_args(const struct device_node *np,

@@ -409,7 +409,8 @@ int ofnode_parse_phandle_with_args(ofnode node, const char *list_name,
 	int ret;
 
 	ret = of_parse_phandle_with_args(ofnode_to_np(node),
-					 list_name, cells_name, index,
+					 list_name, cells_name,
+					 cell_count, index,
 					 &args);
 	if (ret)
 		return ret;

@@ -18,11 +18,15 @@
 
 /*
  * Caution:
- * This API requires the given device has alerady been bound to syscon driver.
- * For example,
+ * This API requires the given device has already been bound to the syscon
+ * driver. For example,
+ *
+ *    compatible = "syscon", "simple-mfd";
+ *
+ * works, but
  *
  *    compatible = "simple-mfd", "syscon";
  *
  * does not. The behavior is different from Linux.
  */
 struct regmap *syscon_get_regmap(struct udevice *dev)

@@ -197,7 +197,7 @@ struct udevice_id {
 	ulong data;
 };
 
-#if CONFIG_IS_ENABLED(OF_CONTROL)
+#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
 #define of_match_ptr(_ptr)	(_ptr)
 #else
 #define of_match_ptr(_ptr)	NULL

@@ -407,6 +407,7 @@ struct device_node *of_parse_phandle(const struct device_node *np,
  * @np:		pointer to a device tree node containing a list
  * @list_name:	property name that contains a list
  * @cells_name:	property name that specifies phandles' arguments count
+ * @cells_count: Cell count to use if @cells_name is NULL
  * @index:	index of a phandle to parse out
  * @out_args:	optional pointer to output arguments structure (will be filled)
  * @return 0 on success (with @out_args filled out if not NULL), -ENOENT if

@@ -440,7 +441,8 @@ struct device_node *of_parse_phandle(const struct device_node *np,
  */
 int of_parse_phandle_with_args(const struct device_node *np,
 			       const char *list_name, const char *cells_name,
-			       int index, struct of_phandle_args *out_args);
+			       int cells_count, int index,
+			       struct of_phandle_args *out_args);
 
 /**
  * of_count_phandle_with_args() - Count the number of phandle in a list

@@ -1029,7 +1029,7 @@ static inline int fdtdec_set_phandle(void *blob, int node, uint32_t phandle)
  *	};
  *	uint32_t phandle;
  *
- *	fdtdec_add_reserved_memory(fdt, "framebuffer", &fb, &phandle);
+ *	fdtdec_add_reserved_memory(fdt, "framebuffer", &fb, &phandle, false);
  *
  * This results in the following subnode being added to the top-level
  * /reserved-memory node:

@@ -1056,11 +1056,12 @@ static inline int fdtdec_set_phandle(void *blob, int node, uint32_t phandle)
  * @param carveout	information about the carveout region
  * @param phandlep	return location for the phandle of the carveout region
  *			can be NULL if no phandle should be added
+ * @param no_map	add "no-map" property if true
  * @return 0 on success or a negative error code on failure
  */
 int fdtdec_add_reserved_memory(void *blob, const char *basename,
 			       const struct fdt_memory *carveout,
-			       uint32_t *phandlep);
+			       uint32_t *phandlep, bool no_map);
 
 /**
  * fdtdec_get_carveout() - reads a carveout from an FDT

lib/fdtdec.c (10 changes)

@@ -1316,7 +1316,7 @@ static int fdtdec_init_reserved_memory(void *blob)
 
 int fdtdec_add_reserved_memory(void *blob, const char *basename,
 			       const struct fdt_memory *carveout,
-			       uint32_t *phandlep)
+			       uint32_t *phandlep, bool no_map)
 {
 	fdt32_t cells[4] = {}, *ptr = cells;
 	uint32_t upper, lower, phandle;

@@ -1416,6 +1416,12 @@ int fdtdec_add_reserved_memory(void *blob, const char *basename,
 	if (err < 0)
 		return err;
 
+	if (no_map) {
+		err = fdt_setprop(blob, node, "no-map", NULL, 0);
+		if (err < 0)
+			return err;
+	}
+
 	/* return the phandle for the new node for the caller to use */
 	if (phandlep)
 		*phandlep = phandle;

@@ -1481,7 +1487,7 @@ int fdtdec_set_carveout(void *blob, const char *node, const char *prop_name,
 	fdt32_t value;
 	void *prop;
 
-	err = fdtdec_add_reserved_memory(blob, name, carveout, &phandle);
+	err = fdtdec_add_reserved_memory(blob, name, carveout, &phandle, false);
 	if (err < 0) {
 		debug("failed to add reserved memory: %d\n", err);
 		return err;

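For illustration, the effect of the new no_map flag can be reproduced from
Python with the pylibfdt bindings (also touched later in this diff); the dtb
path and node name below are hypothetical:

    import libfdt

    with open('test.dtb', 'rb') as f:                # hypothetical input file
        fdt = libfdt.Fdt(bytearray(f.read()))
    node = fdt.path_offset('/reserved-memory/framebuffer')  # hypothetical node
    # 'no-map' is a boolean property: present and empty means "do not map";
    # this may fail with -NOSPACE if the blob is full (resize first if so)
    fdt.setprop(node, 'no-map', b'')
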
@@ -192,7 +192,7 @@ int optee_copy_fdt_nodes(const void *old_blob, void *new_blob)
 			ret = fdtdec_add_reserved_memory(new_blob,
 							 nodename,
 							 &carveout,
-							 NULL);
+							 NULL, true);
 			free(oldname);
 
 			if (ret < 0)

@@ -786,7 +786,8 @@ class FdtSw(FdtRo):
             Fdt object allowing access to the newly created device tree
         """
         fdtsw = bytearray(self._fdt)
-        check_err(fdt_finish(fdtsw))
+        while self.check_space(fdt_finish(fdtsw)):
+            fdtsw = bytearray(self._fdt)
         return Fdt(fdtsw)
 
     def check_space(self, val):

@@ -80,7 +80,7 @@ static int dm_test_fdtdec_add_reserved_memory(struct unit_test_state *uts)
 	resv.start = 0x1000;
 	resv.end = 0x1fff;
 	ut_assertok(fdtdec_add_reserved_memory(blob, "rsvd_region",
-					       &resv, &phandle));
+					       &resv, &phandle, false));
 
 	/* Test that /reserved-memory and its subnode exist */
 	parent = fdt_path_offset(blob, "/reserved-memory");

@@ -101,10 +101,13 @@ static int dm_test_fdtdec_add_reserved_memory(struct unit_test_state *uts)
 	resv.start = 0x2000;
 	resv.end = 0x2fff;
 	ut_assertok(fdtdec_add_reserved_memory(blob, "rsvd_region1",
-					       &resv, &phandle1));
+					       &resv, &phandle1, true));
 	subnode = fdt_path_offset(blob, "/reserved-memory/rsvd_region1");
 	ut_assert(subnode > 0);
 
+	/* check that the no-map property is present */
+	ut_assert(fdt_getprop(blob, subnode, "no-map", NULL) > 0);
+
 	/* phandles must be different */
 	ut_assert(phandle != phandle1);
 
@@ -115,7 +118,7 @@ static int dm_test_fdtdec_add_reserved_memory(struct unit_test_state *uts)
 	resv.start = 0x1000;
 	resv.end = 0x1fff;
 	ut_assertok(fdtdec_add_reserved_memory(blob, "rsvd_region2",
-					       &resv, &phandle1));
+					       &resv, &phandle1, false));
 	subnode = fdt_path_offset(blob, "/reserved-memory/rsvd_region2");
 	ut_assert(subnode < 0);

@@ -343,6 +343,12 @@ compress:
 	Sets the compression algorithm to use (for blobs only). See the entry
 	documentation for details.
 
+missing-msg:
+	Sets the tag of the message to show if this entry is missing. This is
+	used for external blobs. When they are missing it is helpful to show
+	information about what needs to be fixed. See missing-blob-help for the
+	message for each tag.
+
 The attributes supported for images and sections are described below. Several
 are similar to those for entries.

@@ -884,6 +890,12 @@ the 'tools' module's Run() method. The tools generally must exist on the PATH,
 but the --toolpath option can be used to specify additional search paths to
 use. This option can be specified multiple times to add more than one path.
 
+For some compile tools binman will use the versions specified by commonly-used
+environment variables like CC and HOSTCC for the C compiler, based on whether
+the tool's output will be used for the target or for the host machine. If those
+aren't given, it will also try to derive target-specific versions from the
+CROSS_COMPILE environment variable during a cross-compilation.
+
 Code coverage
 -------------

@@ -918,6 +930,24 @@ directories so they can be examined later. Use -X or --test-preserve-dirs for
 this.
 
 
+Running tests on non-x86 architectures
+--------------------------------------
+
+Binman's tests have been written under the assumption that they'll be run on a
+x86-like host and there hasn't been an attempt to make them portable yet.
+However, it's possible to run the tests by cross-compiling to x86.
+
+To install an x86 cross-compiler on Debian-type distributions (e.g. Ubuntu):
+
+  $ sudo apt-get install gcc-x86-64-linux-gnu
+
+Then, you can run the tests under cross-compilation:
+
+  $ CROSS_COMPILE=x86_64-linux-gnu- binman test -T
+
+You can also use gcc-i686-linux-gnu similarly to the above.
+
+
 Advanced Features / Technical docs
 ----------------------------------

@@ -11,6 +11,20 @@ features to produce new behaviours.
 
 
 
+Entry: atf-bl31: Entry containing an ARM Trusted Firmware (ATF) BL31 blob
+-------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - atf-bl31-path: Filename of file to read into entry. This is typically
+        called bl31.bin or bl31.elf
+
+This entry holds the run-time firmware, typically started by U-Boot SPL.
+See the U-Boot README for your architecture or board for how to use it. See
+https://github.com/ARM-software/arm-trusted-firmware for more information
+about ATF.
+
+
+
 Entry: blob: Entry containing an arbitrary binary blob
 ------------------------------------------------------

@@ -60,7 +74,7 @@ Entry: blob-named-by-arg: A blob entry which gets its filename property from its
 
 Properties / Entry arguments:
     - <xxx>-path: Filename containing the contents of this entry (optional,
-        defaults to 0)
+        defaults to None)
 
 where <xxx> is the blob_fname argument to the constructor.

@@ -325,6 +339,7 @@ For example, this creates an image containing a FIT with U-Boot SPL:
     binman {
         fit {
             description = "Test FIT";
+            fit,fdt-list = "of-list";
 
             images {
                 kernel@1 {

@@ -343,7 +358,56 @@ For example, this creates an image containing a FIT with U-Boot SPL:
         };
     };
 
+Properties:
+    U-Boot supports creating fdt and config nodes automatically. To do this,
+    pass an of-list property (e.g. -a of-list=file1 file2). This tells binman
+    that you want to generate nodes for two files: file1.dtb and file2.dtb.
+    The fit,fdt-list property (see above) indicates that of-list should be
+    used. If the property is missing you will get an error.
+
+    Then add a 'generator node', a node with a name starting with '@':
+
+        images {
+            @fdt-SEQ {
+                description = "fdt-NAME";
+                type = "flat_dt";
+                compression = "none";
+            };
+        };
+
+    This tells binman to create nodes fdt-1 and fdt-2 for each of your two
+    files. All the properties you specify will be included in the node. This
+    node acts like a template to generate the nodes. The generator node itself
+    does not appear in the output - it is replaced with what binman generates.
+
+    You can create config nodes in a similar way:
+
+        configurations {
+            default = "@config-DEFAULT-SEQ";
+            @config-SEQ {
+                description = "NAME";
+                firmware = "uboot";
+                loadables = "atf";
+                fdt = "fdt-SEQ";
+            };
+        };
+
+    This tells binman to create nodes config-1 and config-2, i.e. a config for
+    each of your two files.
+
+    Available substitutions for '@' nodes are:
+
+        SEQ    Sequence number of the generated fdt (1, 2, ...)
+        NAME   Name of the dtb as provided (i.e. without adding '.dtb')
+
+    Note that if no devicetree files are provided (with '-a of-list' as above)
+    then no nodes will be generated.
+
+    The 'default' property, if present, will be automatically set to the name
+    of the configuration whose devicetree matches the 'default-dt' entry
+    argument, e.g. with '-a default-dt=sun50i-a64-pine64-lts'.
+
+Properties (in the 'fit' node itself):
+    fit,external-offset: Indicates that the contents of the FIT are external
+        and provides the external offset. This is passed to mkimage via
+        the -E and -p flags.

@@ -691,6 +755,11 @@ Properties / Entry arguments: (see binman README for more information)
     name-prefix: Adds a prefix to the name of every entry in the section
         when writing out the map
 
+Properties:
+    allow_missing: True if this section permits external blobs to be
+        missing their contents; the image will still be produced but of
+        course it will not work.
+
 Since a section is also an entry, it inherits all the properties of entries
 too.

@@ -8,6 +8,9 @@
 from collections import OrderedDict
 import glob
 import os
+import pkg_resources
+import re
+
 import sys
 from patman import tools

@@ -20,6 +23,11 @@ from patman import tout
 # Make this global so that it can be referenced from tests
 images = OrderedDict()
 
+# Help text for each type of missing blob, dict:
+#    key: Value of the entry's 'missing-msg' or entry name
+#    value: Text for the help
+missing_blob_help = {}
+
 def _ReadImageDesc(binman_node):
     """Read the image descriptions from the /binman node

@@ -52,14 +60,74 @@ def _FindBinmanNode(dtb):
             return node
     return None
 
+def _ReadMissingBlobHelp():
+    """Read the missing-blob-help file
+
+    This file contains help messages explaining what to do when external blobs
+    are missing.
+
+    Returns:
+        Dict:
+            key: Message tag (str)
+            value: Message text (str)
+    """
+
+    def _FinishTag(tag, msg, result):
+        if tag:
+            result[tag] = msg.rstrip()
+            tag = None
+            msg = ''
+        return tag, msg
+
+    my_data = pkg_resources.resource_string(__name__, 'missing-blob-help')
+    re_tag = re.compile('^([-a-z0-9]+):$')
+    result = {}
+    tag = None
+    msg = ''
+    for line in my_data.decode('utf-8').splitlines():
+        if not line.startswith('#'):
+            m_tag = re_tag.match(line)
+            if m_tag:
+                _, msg = _FinishTag(tag, msg, result)
+                tag = m_tag.group(1)
+            elif tag:
+                msg += line + '\n'
+    _FinishTag(tag, msg, result)
+    return result
+
+def _ShowBlobHelp(path, text):
+    tout.Warning('\n%s:' % path)
+    for line in text.splitlines():
+        tout.Warning(' %s' % line)
+
+def _ShowHelpForMissingBlobs(missing_list):
+    """Show help for each missing blob to help the user take action
+
+    Args:
+        missing_list: List of Entry objects to show help for
+    """
+    global missing_blob_help
+
+    if not missing_blob_help:
+        missing_blob_help = _ReadMissingBlobHelp()
+
+    for entry in missing_list:
+        tags = entry.GetHelpTags()
+
+        # Show the first matching help message
+        for tag in tags:
+            if tag in missing_blob_help:
+                _ShowBlobHelp(entry._node.path, missing_blob_help[tag])
+                break
+
 def GetEntryModules(include_testing=True):
     """Get a set of entry class implementations
 
     Returns:
         Set of paths to entry class filenames
     """
-    our_path = os.path.dirname(os.path.realpath(__file__))
-    glob_list = glob.glob(os.path.join(our_path, 'etype/*.py'))
+    glob_list = pkg_resources.resource_listdir(__name__, 'etype')
+    glob_list = [fname for fname in glob_list if fname.endswith('.py')]
     return set([os.path.splitext(os.path.basename(item))[0]
                 for item in glob_list
                 if include_testing or '_testing' not in item])

@@ -344,6 +412,11 @@ def PrepareImagesAndDtbs(dtb_fname, select_images, update_fdt):
         dtb_fname: Filename of the device tree file to use (.dts or .dtb)
         select_images: List of images to output, or None for all
         update_fdt: True to update the FDT with entry offsets, etc.
+
+    Returns:
+        OrderedDict of images:
+            key: Image name (str)
+            value: Image object
     """
     # Import these here in case libfdt.py is not available, in which case
     # the above help option still works.

@@ -471,6 +544,7 @@ def ProcessImage(image, update_fdt, write_map, get_contents=True,
     if missing_list:
         tout.Warning("Image '%s' is missing external blobs and is non-functional: %s" %
                      (image.name, ' '.join([e.name for e in missing_list])))
+        _ShowHelpForMissingBlobs(missing_list)
     return bool(missing_list)

@@ -556,7 +630,7 @@ def Binman(args):
                     tools.WriteFile(dtb_item._fname, dtb_item.GetContents())
 
             if missing:
-                tout.Warning("Some images are invalid")
+                tout.Warning("\nSome images are invalid")
         finally:
             tools.FinaliseOutputDir()
     finally:

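A compact, runnable sketch of the missing-blob-help format that
_ReadMissingBlobHelp() above parses; the tag and message here are made up:

    import re

    sample = '# comments are ignored\nwibble:\nWibble test help text\n'
    re_tag = re.compile('^([-a-z0-9]+):$')
    result, tag, msg = {}, None, ''
    for line in sample.splitlines():
        if line.startswith('#'):
            continue
        m = re_tag.match(line)
        if m:                        # a new tag finishes the previous message
            if tag:
                result[tag] = msg.rstrip()
            tag, msg = m.group(1), ''
        elif tag:
            msg += line + '\n'
    if tag:
        result[tag] = msg.rstrip()
    print(result)                    # {'wibble': 'Wibble test help text'}
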
@@ -234,8 +234,10 @@ SECTIONS
     #    text section at the start
     # -m32: Build for 32-bit x86
     # -T...: Specifies the link script, which sets the start address
-    stdout = command.Output('cc', '-static', '-nostdlib', '-Wl,--build-id=none',
-                            '-m32','-T', lds_file, '-o', elf_fname, s_file)
+    cc, args = tools.GetTargetCompileTool('cc')
+    args += ['-static', '-nostdlib', '-Wl,--build-id=none', '-m32', '-T',
+             lds_file, '-o', elf_fname, s_file]
+    stdout = command.Output(cc, *args)
     shutil.rmtree(outdir)
 
 def DecodeElf(data, location):

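The README hunk earlier describes how binman now picks compile tools; a hedged
sketch of the resolution order GetTargetCompileTool('cc') is meant to follow,
assuming only the environment variables the README mentions (this is not the
exact implementation):

    import os

    def target_cc():
        # Sketch only: prefer an explicit CC, else derive from CROSS_COMPILE,
        # else fall back to plain 'cc'.
        if os.environ.get('CC'):
            return os.environ['CC'].split()      # may carry extra arguments
        cross = os.environ.get('CROSS_COMPILE', '')
        return [cross + 'gcc'] if cross else ['cc']

    print(target_cc())  # e.g. ['x86_64-linux-gnu-gcc'] when cross-compiling
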
@@ -186,7 +186,9 @@ class TestElf(unittest.TestCase):
         # Make an Elf file and then convert it to a flat binary file. This
         # should produce the original data.
         elf.MakeElf(elf_fname, expected_text, expected_data)
-        stdout = command.Output('objcopy', '-O', 'binary', elf_fname, bin_fname)
+        objcopy, args = tools.GetTargetCompileTool('objcopy')
+        args += ['-O', 'binary', elf_fname, bin_fname]
+        stdout = command.Output(objcopy, *args)
         with open(bin_fname, 'rb') as fd:
             data = fd.read()
         self.assertEqual(expected_text + expected_data, data)

@@ -16,8 +16,6 @@ from patman import tout
 
 modules = {}
 
-our_path = os.path.dirname(os.path.realpath(__file__))
-
 
 # An argument which can be passed to entries on the command line, in lieu of
 # device-tree properties.

@@ -59,6 +57,10 @@ class Entry(object):
         compress: Compression algorithm used (e.g. 'lz4'), 'none' if none
         orig_offset: Original offset value read from node
         orig_size: Original size value read from node
+        missing: True if this entry is missing its contents
+        allow_missing: Allow children of this entry to be missing (used by
+            subclasses such as Entry_section)
+        external: True if this entry contains an external binary blob
     """
     def __init__(self, section, etype, node, name_prefix=''):
         # Put this here to allow entry-docs and help to work without libfdt

@@ -85,6 +87,8 @@ class Entry(object):
         self._expand_size = False
         self.compress = 'none'
         self.missing = False
+        self.external = False
+        self.allow_missing = False
 
     @staticmethod
     def Lookup(node_path, etype):

@@ -174,6 +178,7 @@ class Entry(object):
         self.align_end = fdt_util.GetInt(self._node, 'align-end')
         self.offset_unset = fdt_util.GetBool(self._node, 'offset-unset')
         self.expand_size = fdt_util.GetBool(self._node, 'expand-size')
+        self.missing_msg = fdt_util.GetString(self._node, 'missing-msg')
 
     def GetDefaultFilename(self):
         return None

@@ -815,3 +820,19 @@ features to produce new behaviours.
         """
         if self.missing:
             missing_list.append(self)
+
+    def GetAllowMissing(self):
+        """Get whether a section allows missing external blobs
+
+        Returns:
+            True if allowed, False if not allowed
+        """
+        return self.allow_missing
+
+    def GetHelpTags(self):
+        """Get the tags used for missing-blob help
+
+        Returns:
+            list of possible tags, most desirable first
+        """
+        return list(filter(None, [self.missing_msg, self.name, self.etype]))

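GetHelpTags() above feeds _ShowHelpForMissingBlobs() in control.py; the
fallback order is simply missing-msg property, then entry name, then entry
type, with unset (None) values filtered out. For example:

    # The first tag that exists in missing_blob_help wins.
    missing_msg, name, etype = None, 'atf-bl31', 'atf-bl31'
    tags = list(filter(None, [missing_msg, name, etype]))
    print(tags)   # ['atf-bl31', 'atf-bl31']
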
tools/binman/etype/atf_bl31.py (new file, 24 lines)

@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2020 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for ARM Trusted Firmware BL31 binary blob
+#
+
+from binman.etype.blob_named_by_arg import Entry_blob_named_by_arg
+
+class Entry_atf_bl31(Entry_blob_named_by_arg):
+    """Entry containing an ARM Trusted Firmware (ATF) BL31 blob
+
+    Properties / Entry arguments:
+        - atf-bl31-path: Filename of file to read into entry. This is typically
+            called bl31.bin or bl31.elf
+
+    This entry holds the run-time firmware, typically started by U-Boot SPL.
+    See the U-Boot README for your architecture or board for how to use it. See
+    https://github.com/ARM-software/arm-trusted-firmware for more information
+    about ATF.
+    """
+    def __init__(self, section, etype, node):
+        super().__init__(section, etype, node, 'atf-bl31')
+        self.external = True

@@ -37,7 +37,13 @@ class Entry_blob(Entry):
 
     def ObtainContents(self):
         self._filename = self.GetDefaultFilename()
-        self._pathname = tools.GetInputFilename(self._filename)
+        self._pathname = tools.GetInputFilename(self._filename,
+                                                self.section.GetAllowMissing())
+        # Allow the file to be missing
+        if self.external and not self._pathname:
+            self.SetContents(b'')
+            self.missing = True
+            return True
         self.ReadBlobContents()
         return True

@@ -26,14 +26,3 @@ class Entry_blob_ext(Entry_blob):
     def __init__(self, section, etype, node):
         Entry_blob.__init__(self, section, etype, node)
         self.external = True
-
-    def ObtainContents(self):
-        self._filename = self.GetDefaultFilename()
-        self._pathname = tools.GetInputFilename(self._filename,
-                                                self.section.GetAllowMissing())
-        # Allow the file to be missing
-        if not self._pathname:
-            self.SetContents(b'')
-            self.missing = True
-            return True
-        return super().ObtainContents()

@@ -17,7 +17,7 @@ class Entry_blob_named_by_arg(Entry_blob):
 
     Properties / Entry arguments:
         - <xxx>-path: Filename containing the contents of this entry (optional,
-            defaults to 0)
+            defaults to None)
 
     where <xxx> is the blob_fname argument to the constructor.

@@ -28,7 +28,9 @@ class Entry_blob_named_by_arg(Entry_blob):
 
     See cros_ec_rw for an example of this.
     """
-    def __init__(self, section, etype, node, blob_fname):
+    def __init__(self, section, etype, node, blob_fname, required=False):
         super().__init__(section, etype, node)
-        self._filename, = self.GetEntryArgsOrProps(
-            [EntryArg('%s-path' % blob_fname, str)])
+        filename, = self.GetEntryArgsOrProps(
+            [EntryArg('%s-path' % blob_fname, str)], required=required)
+        if filename:
+            self._filename = filename

@@ -7,7 +7,6 @@
 
 from binman.etype.blob_named_by_arg import Entry_blob_named_by_arg
 
-
 class Entry_cros_ec_rw(Entry_blob_named_by_arg):
     """A blob entry which contains a Chromium OS read-write EC image

@@ -18,5 +17,5 @@ class Entry_cros_ec_rw(Entry_blob_named_by_arg):
     updating the EC on startup via software sync.
     """
     def __init__(self, section, etype, node):
-        super().__init__(section, etype, node, 'cros-ec-rw')
+        super().__init__(section, etype, node, 'cros-ec-rw', required=True)
         self.external = True

@@ -8,7 +8,7 @@
 from collections import defaultdict, OrderedDict
 import libfdt
 
-from binman.entry import Entry
+from binman.entry import Entry, EntryArg
 from dtoc import fdt_util
 from dtoc.fdt import Fdt
 from patman import tools

@@ -27,6 +27,7 @@ class Entry_fit(Entry):
     binman {
         fit {
             description = "Test FIT";
+            fit,fdt-list = "of-list";
 
             images {
                 kernel@1 {

@@ -45,7 +46,60 @@ class Entry_fit(Entry):
         };
     };
 
+    Properties:
+        U-Boot supports creating fdt and config nodes automatically. To do
+        this, pass an of-list property (e.g. -a of-list=file1 file2). This
+        tells binman that you want to generate nodes for two files: file1.dtb
+        and file2.dtb. The fit,fdt-list property (see above) indicates that
+        of-list should be used. If the property is missing you will get an
+        error.
+
+        Then add a 'generator node', a node with a name starting with '@':
+
+            images {
+                @fdt-SEQ {
+                    description = "fdt-NAME";
+                    type = "flat_dt";
+                    compression = "none";
+                };
+            };
+
+        This tells binman to create nodes fdt-1 and fdt-2 for each of your two
+        files. All the properties you specify will be included in the node.
+        This node acts like a template to generate the nodes. The generator
+        node itself does not appear in the output - it is replaced with what
+        binman generates.
+
+        You can create config nodes in a similar way:
+
+            configurations {
+                default = "@config-DEFAULT-SEQ";
+                @config-SEQ {
+                    description = "NAME";
+                    firmware = "uboot";
+                    loadables = "atf";
+                    fdt = "fdt-SEQ";
+                };
+            };
+
+        This tells binman to create nodes config-1 and config-2, i.e. a config
+        for each of your two files.
+
+        Available substitutions for '@' nodes are:
+
+            SEQ    Sequence number of the generated fdt (1, 2, ...)
+            NAME   Name of the dtb as provided (i.e. without adding '.dtb')
+
+        Note that if no devicetree files are provided (with '-a of-list' as
+        above) then no nodes will be generated.
+
+        The 'default' property, if present, will be automatically set to the
+        name of the configuration whose devicetree matches the 'default-dt'
+        entry argument, e.g. with '-a default-dt=sun50i-a64-pine64-lts'.
+
+        Available substitutions for '@' property values are:
+
+            DEFAULT-SEQ    Sequence number of the default fdt, as provided by
+                the 'default-dt' entry argument
+
+    Properties (in the 'fit' node itself):
+        fit,external-offset: Indicates that the contents of the FIT are
+            external and provides the external offset. This is passed to
+            mkimage via the -E and -p flags.

@@ -55,15 +109,28 @@ class Entry_fit(Entry):
         """
         Members:
             _fit: FIT file being built
-            _fit_content: dict:
+            _fit_sections: dict:
                 key: relative path to entry Node (from the base of the FIT)
-                value: List of Entry objects comprising the contents of this
+                value: Entry_section object comprising the contents of this
                     node
         """
         super().__init__(section, etype, node)
         self._fit = None
-        self._fit_content = defaultdict(list)
+        self._fit_sections = {}
         self._fit_props = {}
+        for pname, prop in self._node.props.items():
+            if pname.startswith('fit,'):
+                self._fit_props[pname] = prop
+
+        self._fdts = None
+        self._fit_list_prop = self._fit_props.get('fit,fdt-list')
+        if self._fit_list_prop:
+            fdts, = self.GetEntryArgsOrProps(
+                [EntryArg(self._fit_list_prop.value, str)])
+            if fdts is not None:
+                self._fdts = fdts.split()
+        self._fit_default_dt = self.GetEntryArgsOrProps([EntryArg('default-dt',
+                                                                  str)])[0]
 
     def ReadNode(self):
         self._ReadSubnodes()

@@ -84,22 +151,71 @@ class Entry_fit(Entry):
                 image
             """
             for pname, prop in node.props.items():
-                if pname.startswith('fit,'):
-                    self._fit_props[pname] = prop
-                else:
+                if not pname.startswith('fit,'):
+                    if pname == 'default':
+                        val = prop.value
+                        # Handle the 'default' property
+                        if val.startswith('@'):
+                            if not self._fdts:
+                                continue
+                            if not self._fit_default_dt:
+                                self.Raise("Generated 'default' node requires default-dt entry argument")
+                            if self._fit_default_dt not in self._fdts:
+                                self.Raise("default-dt entry argument '%s' not found in fdt list: %s" %
+                                           (self._fit_default_dt,
+                                            ', '.join(self._fdts)))
+                            seq = self._fdts.index(self._fit_default_dt)
+                            val = val[1:].replace('DEFAULT-SEQ', str(seq + 1))
+                            fsw.property_string(pname, val)
+                            continue
                     fsw.property(pname, prop.bytes)
 
             rel_path = node.path[len(base_node.path):]
-            has_images = depth == 2 and rel_path.startswith('/images/')
+            in_images = rel_path.startswith('/images')
+            has_images = depth == 2 and in_images
             if has_images:
+                # This node is a FIT subimage node (e.g. "/images/kernel")
+                # containing content nodes. We collect the subimage nodes and
+                # section entries for them here to merge the content subnodes
+                # together and put the merged contents in the subimage node's
+                # 'data' property later.
+                entry = Entry.Create(self.section, node, etype='section')
+                entry.ReadNode()
+                self._fit_sections[rel_path] = entry
 
             for subnode in node.subnodes:
                 if has_images and not (subnode.name.startswith('hash') or
                                        subnode.name.startswith('signature')):
-                    # This is a content node. We collect all of these together
-                    # and put them in the 'data' property. They do not appear
-                    # in the FIT.
-                    entry = Entry.Create(self.section, subnode)
-                    entry.ReadNode()
-                    self._fit_content[rel_path].append(entry)
+                    # This subnode is a content node not meant to appear in
+                    # the FIT (e.g. "/images/kernel/u-boot"), so don't call
+                    # fsw.add_node() or _AddNode() for it.
+                    pass
+                elif subnode.name.startswith('@'):
+                    if self._fdts:
+                        # Generate nodes for each FDT
+                        for seq, fdt_fname in enumerate(self._fdts):
+                            node_name = subnode.name[1:].replace('SEQ',
+                                                                 str(seq + 1))
+                            fname = tools.GetInputFilename(fdt_fname + '.dtb')
+                            with fsw.add_node(node_name):
+                                for pname, prop in subnode.props.items():
+                                    val = prop.bytes.replace(
+                                        b'NAME', tools.ToBytes(fdt_fname))
+                                    val = val.replace(
+                                        b'SEQ', tools.ToBytes(str(seq + 1)))
+                                    fsw.property(pname, val)
+
+                                # Add data for 'fdt' nodes (but not 'config')
+                                if depth == 1 and in_images:
+                                    fsw.property('data',
+                                                 tools.ReadFile(fname))
+                    else:
+                        if self._fdts is None:
+                            if self._fit_list_prop:
+                                self.Raise("Generator node requires '%s' entry argument" %
+                                           self._fit_list_prop.value)
+                            else:
+                                self.Raise("Generator node requires 'fit,fdt-list' property")
                 else:
                     with fsw.add_node(subnode.name):
                         _AddNode(base_node, depth + 1, subnode)

@@ -123,9 +239,8 @@ class Entry_fit(Entry):
         This adds the 'data' properties to the input ITB (Image-tree Binary)
         then runs mkimage to process it.
         """
+        # self._BuildInput() either returns bytes or raises an exception.
         data = self._BuildInput(self._fdt)
-        if data == False:
-            return False
         uniq = self.GetUniqueName()
         input_fname = tools.GetOutputFilename('%s.itb' % uniq)
         output_fname = tools.GetOutputFilename('%s.fit' % uniq)

@@ -150,15 +265,30 @@ class Entry_fit(Entry):
         Returns:
             New fdt contents (bytes)
         """
-        for path, entries in self._fit_content.items():
+        for path, section in self._fit_sections.items():
             node = fdt.GetNode(path)
-            data = b''
-            for entry in entries:
-                if not entry.ObtainContents():
-                    return False
-                data += entry.GetData()
+            # Entry_section.ObtainContents() either returns True or
+            # raises an exception.
+            section.ObtainContents()
+            section.Pack(0)
+            data = section.GetData()
             node.AddData('data', data)
 
         fdt.Sync(auto_resize=True)
         data = fdt.GetContents()
         return data
+
+    def CheckMissing(self, missing_list):
+        """Check if any entries in this FIT have missing external blobs
+
+        If there are missing blobs, the entries are added to the list
+
+        Args:
+            missing_list: List of Entry objects to be added to
+        """
+        for path, section in self._fit_sections.items():
+            section.CheckMissing(missing_list)
+
+    def SetAllowMissing(self, allow_missing):
+        for section in self._fit_sections.values():
+            section.SetAllowMissing(allow_missing)

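Worked example of the DEFAULT-SEQ handling added above, using the same values
as the testFitFdt test later in this diff:

    fdts = ['test-fdt1', 'test-fdt2']
    fit_default_dt = 'test-fdt2'         # the '-a default-dt=...' value
    val = '@config-DEFAULT-SEQ'          # the 'default' property as written
    seq = fdts.index(fit_default_dt)
    print(val[1:].replace('DEFAULT-SEQ', str(seq + 1)))   # config-2
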
@@ -35,7 +35,7 @@ class Entry_section(Entry):
             when writing out the map
 
     Properties:
-        _allow_missing: True if this section permits external blobs to be
+        allow_missing: True if this section permits external blobs to be
             missing their contents; the image will still be produced but of
            course it will not work.

@@ -54,8 +54,6 @@ class Entry_section(Entry):
         self._sort = False
         self._skip_at_start = None
         self._end_4gb = False
-        self._allow_missing = False
-        self.missing = False
 
     def ReadNode(self):
         """Read properties from the image node"""

@@ -83,7 +81,7 @@ class Entry_section(Entry):
 
     def _ReadEntries(self):
         for node in self._node.subnodes:
-            if node.name == 'hash':
+            if node.name.startswith('hash') or node.name.startswith('signature'):
                 continue
             entry = Entry.Create(self, node)
             entry.ReadNode()

@@ -152,7 +150,7 @@ class Entry_section(Entry):
         for entry in self._entries.values():
             data = entry.GetData()
             base = self.pad_before + (entry.offset or 0) - self._skip_at_start
-            pad = base - len(section_data)
+            pad = base - len(section_data) + (entry.pad_before or 0)
             if pad > 0:
                 section_data += tools.GetBytes(self._pad_byte, pad)
             section_data += data

@@ -549,18 +547,10 @@ class Entry_section(Entry):
         Args:
             allow_missing: True if allowed, False if not allowed
         """
-        self._allow_missing = allow_missing
+        self.allow_missing = allow_missing
         for entry in self._entries.values():
             entry.SetAllowMissing(allow_missing)
 
-    def GetAllowMissing(self):
-        """Get whether a section allows missing external blobs
-
-        Returns:
-            True if allowed, False if not allowed
-        """
-        return self._allow_missing
-
     def CheckMissing(self, missing_list):
         """Check if any entries in this section have missing external blobs

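A worked example of the pad computation fixed above (all numbers made up): an
entry at offset 16 with pad-before = 4, in a section that has already emitted
12 bytes, now gets its pad-before counted in the gap:

    section_pad_before, skip_at_start = 0, 0
    entry_offset, entry_pad_before = 16, 4
    section_len = 12                 # bytes emitted so far
    base = section_pad_before + (entry_offset or 0) - skip_at_start
    pad = base - section_len + (entry_pad_before or 0)
    print(pad)   # 8: padded up to offset 16, plus the 4 pad-before bytes
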
@@ -74,6 +74,12 @@ REFCODE_DATA = b'refcode'
 FSP_M_DATA = b'fsp_m'
 FSP_S_DATA = b'fsp_s'
 FSP_T_DATA = b'fsp_t'
+ATF_BL31_DATA = b'bl31'
+TEST_FDT1_DATA = b'fdt1'
+TEST_FDT2_DATA = b'test-fdt2'
+
+# Subdirectory of the input dir to use to put test FDTs
+TEST_FDT_SUBDIR = 'fdts'
 
 # The expected size for the device tree in some tests
 EXTRACT_DTB_SIZE = 0x3c9

@@ -167,6 +173,13 @@ class TestFunctional(unittest.TestCase):
                           os.path.join(cls._indir, 'files'))
 
         TestFunctional._MakeInputFile('compress', COMPRESS_DATA)
+        TestFunctional._MakeInputFile('bl31.bin', ATF_BL31_DATA)
+
+        # Add a few .dtb files for testing
+        TestFunctional._MakeInputFile('%s/test-fdt1.dtb' % TEST_FDT_SUBDIR,
+                                      TEST_FDT1_DATA)
+        TestFunctional._MakeInputFile('%s/test-fdt2.dtb' % TEST_FDT_SUBDIR,
+                                      TEST_FDT2_DATA)
 
         # Travis-CI may have an old lz4
         cls.have_lz4 = True

@@ -285,7 +298,7 @@ class TestFunctional(unittest.TestCase):
 
     def _DoTestFile(self, fname, debug=False, map=False, update_dtb=False,
                     entry_args=None, images=None, use_real_dtb=False,
-                    verbosity=None, allow_missing=False):
+                    verbosity=None, allow_missing=False, extra_indirs=None):
         """Run binman with a given test file
 
         Args:

@@ -298,6 +311,14 @@ class TestFunctional(unittest.TestCase):
                 key: arg name
                 value: value of that arg
             images: List of image names to build
+            use_real_dtb: True to use the test file as the contents of
+                the u-boot-dtb entry. Normally this is not needed and the
+                test contents (the U_BOOT_DTB_DATA string) can be used.
+                But in some tests we need the real contents.
+            verbosity: Verbosity level to use (0-3, None=don't set it)
+            allow_missing: Set the '--allow-missing' flag so that missing
+                external binaries just produce a warning instead of an error
+            extra_indirs: Extra input directories to add using -I
         """
         args = []
         if debug:

@@ -324,6 +345,9 @@ class TestFunctional(unittest.TestCase):
         if images:
             for image in images:
                 args += ['-i', image]
+        if extra_indirs:
+            for indir in extra_indirs:
+                args += ['-I', indir]
         return self._DoBinman(*args)
 
     def _SetupDtb(self, fname, outfile='u-boot.dtb'):

@@ -357,6 +381,13 @@ class TestFunctional(unittest.TestCase):
         We still want the DTBs for SPL and TPL to be different though, since
         otherwise it is confusing to know which one we are looking at. So add
         an 'spl' or 'tpl' property to the top-level node.
+
+        Args:
+            dtb_data: dtb data to modify (this should be a valid devicetree)
+            name: Name of a new property to add
+
+        Returns:
+            New dtb data with the property added
         """
         dtb = fdt.Fdt.FromData(dtb_data)
         dtb.Scan()

@@ -366,7 +397,8 @@ class TestFunctional(unittest.TestCase):
         return dtb.GetContents()
 
     def _DoReadFileDtb(self, fname, use_real_dtb=False, map=False,
-                       update_dtb=False, entry_args=None, reset_dtbs=True):
+                       update_dtb=False, entry_args=None, reset_dtbs=True,
+                       extra_indirs=None):
         """Run binman and return the resulting image
 
         This runs binman with a given test file and then reads the resulting

@@ -384,6 +416,13 @@ class TestFunctional(unittest.TestCase):
             map: True to output map files for the images
             update_dtb: Update the offset and size of each entry in the device
                 tree before packing it into the image
+            entry_args: Dict of entry args to supply to binman
+                key: arg name
+                value: value of that arg
+            reset_dtbs: With use_real_dtb the test dtb is overwritten by this
+                function. If reset_dtbs is True, then the original test dtb
+                is written back before this function finishes
+            extra_indirs: Extra input directories to add using -I
 
         Returns:
             Tuple:

@@ -407,7 +446,8 @@ class TestFunctional(unittest.TestCase):
 
         try:
             retcode = self._DoTestFile(fname, map=map, update_dtb=update_dtb,
-                                       entry_args=entry_args, use_real_dtb=use_real_dtb)
+                                       entry_args=entry_args, use_real_dtb=use_real_dtb,
+                                       extra_indirs=extra_indirs)
             self.assertEqual(0, retcode)
             out_dtb_fname = tools.GetOutputFilename('u-boot.dtb.out')

@@ -1382,8 +1422,9 @@ class TestFunctional(unittest.TestCase):
         }
         with self.assertRaises(ValueError) as e:
             self._DoReadFileDtb('064_entry_args_required.dts')
-        self.assertIn("Node '/binman/_testing': Missing required "
-            'properties/entry args: test-str-arg, test-int-fdt, test-int-arg',
+        self.assertIn("Node '/binman/_testing': "
+            'Missing required properties/entry args: test-str-arg, '
+            'test-int-fdt, test-int-arg',
             str(e.exception))
 
     def testEntryArgsInvalidFormat(self):

@@ -1487,8 +1528,7 @@ class TestFunctional(unittest.TestCase):
         entry_args = {
             'cros-ec-rw-path': 'ecrw.bin',
         }
-        data, _, _, _ = self._DoReadFileDtb('068_blob_named_by_arg.dts',
-                                            entry_args=entry_args)
+        self._DoReadFileDtb('068_blob_named_by_arg.dts', entry_args=entry_args)
 
     def testFill(self):
         """Test for a fill entry type"""

@@ -3467,7 +3507,7 @@ class TestFunctional(unittest.TestCase):
         self.assertEqual(len(U_BOOT_SPL_DTB_DATA), int(data_sizes[1].split()[0]))
 
     def testFitExternal(self):
-        """Test an image with an FIT"""
+        """Test an image with an FIT with external images"""
         data = self._DoReadFile('162_fit_external.dts')
         fit_data = data[len(U_BOOT_DATA):-2]  # _testing is 2 bytes

@@ -3477,5 +3517,196 @@ class TestFunctional(unittest.TestCase):
         fnode = dtb.GetNode('/images/kernel')
         self.assertNotIn('data', fnode.props)
 
+    def testSectionIgnoreHashSignature(self):
+        """Test that sections ignore hash, signature nodes for its data"""
+        data = self._DoReadFile('165_section_ignore_hash_signature.dts')
+        expected = (U_BOOT_DATA + U_BOOT_DATA)
+        self.assertEqual(expected, data)
+
+    def testPadInSections(self):
+        """Test pad-before, pad-after for entries in sections"""
+        data = self._DoReadFile('166_pad_in_sections.dts')
+        expected = (U_BOOT_DATA + tools.GetBytes(ord('!'), 12) +
+                    U_BOOT_DATA + tools.GetBytes(ord('!'), 6) +
+                    U_BOOT_DATA)
+        self.assertEqual(expected, data)
+
+    def testFitImageSubentryAlignment(self):
+        """Test relative alignment of FIT image subentries"""
+        entry_args = {
+            'test-id': TEXT_DATA,
+        }
+        data, _, _, _ = self._DoReadFileDtb('167_fit_image_subentry_alignment.dts',
+                                            entry_args=entry_args)
+        dtb = fdt.Fdt.FromData(data)
+        dtb.Scan()
+
+        node = dtb.GetNode('/images/kernel')
+        data = dtb.GetProps(node)["data"].bytes
+        align_pad = 0x10 - (len(U_BOOT_SPL_DATA) % 0x10)
+        expected = (tools.GetBytes(0, 0x20) + U_BOOT_SPL_DATA +
+                    tools.GetBytes(0, align_pad) + U_BOOT_DATA)
+        self.assertEqual(expected, data)
+
+        node = dtb.GetNode('/images/fdt-1')
+        data = dtb.GetProps(node)["data"].bytes
+        expected = (U_BOOT_SPL_DTB_DATA + tools.GetBytes(0, 20) +
+                    tools.ToBytes(TEXT_DATA) + tools.GetBytes(0, 30) +
+                    U_BOOT_DTB_DATA)
+        self.assertEqual(expected, data)
+
+    def testFitExtblobMissingOk(self):
+        """Test a FIT with a missing external blob that is allowed"""
+        with test_util.capture_sys_output() as (stdout, stderr):
+            self._DoTestFile('168_fit_missing_blob.dts',
+                             allow_missing=True)
+        err = stderr.getvalue()
+        self.assertRegex(err, "Image 'main-section'.*missing.*: atf-bl31")
+
+    def testBlobNamedByArgMissing(self):
+        """Test handling of a missing entry arg"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('068_blob_named_by_arg.dts')
+        self.assertIn("Missing required properties/entry args: cros-ec-rw-path",
+                      str(e.exception))
+
+    def testPackBl31(self):
+        """Test that an image with an ATF BL31 binary can be created"""
+        data = self._DoReadFile('169_atf_bl31.dts')
+        self.assertEqual(ATF_BL31_DATA, data[:len(ATF_BL31_DATA)])
+
+    def testFitFdt(self):
+        """Test an image with an FIT with multiple FDT images"""
+        def _CheckFdt(seq, expected_data):
+            """Check the FDT nodes
+
+            Args:
+                seq: Sequence number to check (0 or 1)
+                expected_data: Expected contents of 'data' property
+            """
+            name = 'fdt-%d' % seq
+            fnode = dtb.GetNode('/images/%s' % name)
+            self.assertIsNotNone(fnode)
+            self.assertEqual({'description', 'type', 'compression', 'data'},
+                             set(fnode.props.keys()))
+            self.assertEqual(expected_data, fnode.props['data'].bytes)
+            self.assertEqual('fdt-test-fdt%d.dtb' % seq,
+                             fnode.props['description'].value)
+
+        def _CheckConfig(seq, expected_data):
+            """Check the configuration nodes
+
+            Args:
+                seq: Sequence number to check (0 or 1)
+                expected_data: Expected contents of 'data' property
+            """
+            cnode = dtb.GetNode('/configurations')
+            self.assertIn('default', cnode.props)
+            self.assertEqual('config-2', cnode.props['default'].value)
+
+            name = 'config-%d' % seq
+            fnode = dtb.GetNode('/configurations/%s' % name)
+            self.assertIsNotNone(fnode)
+            self.assertEqual({'description', 'firmware', 'loadables', 'fdt'},
+                             set(fnode.props.keys()))
+            self.assertEqual('conf-test-fdt%d.dtb' % seq,
+                             fnode.props['description'].value)
+            self.assertEqual('fdt-%d' % seq, fnode.props['fdt'].value)
+
+        entry_args = {
+            'of-list': 'test-fdt1 test-fdt2',
+            'default-dt': 'test-fdt2',
+        }
+        data = self._DoReadFileDtb(
+            '172_fit_fdt.dts',
+            entry_args=entry_args,
+            extra_indirs=[os.path.join(self._indir, TEST_FDT_SUBDIR)])[0]
+        self.assertEqual(U_BOOT_NODTB_DATA, data[-len(U_BOOT_NODTB_DATA):])
+        fit_data = data[len(U_BOOT_DATA):-len(U_BOOT_NODTB_DATA)]
+
+        dtb = fdt.Fdt.FromData(fit_data)
+        dtb.Scan()
+        fnode = dtb.GetNode('/images/kernel')
+        self.assertIn('data', fnode.props)
+
+        # Check all the properties in fdt-1 and fdt-2
+        _CheckFdt(1, TEST_FDT1_DATA)
+        _CheckFdt(2, TEST_FDT2_DATA)
+
+        # Check configurations
+        _CheckConfig(1, TEST_FDT1_DATA)
+        _CheckConfig(2, TEST_FDT2_DATA)
+
+    def testFitFdtMissingList(self):
+        """Test handling of a missing 'of-list' entry arg"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('172_fit_fdt.dts')
+        self.assertIn("Generator node requires 'of-list' entry argument",
+                      str(e.exception))
+
+    def testFitFdtEmptyList(self):
+        """Test handling of an empty 'of-list' entry arg"""
+        entry_args = {
+            'of-list': '',
+        }
+        data = self._DoReadFileDtb('170_fit_fdt.dts', entry_args=entry_args)[0]
+
+    def testFitFdtMissingProp(self):
+        """Test handling of a missing 'fit,fdt-list' property"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('171_fit_fdt_missing_prop.dts')
+        self.assertIn("Generator node requires 'fit,fdt-list' property",
+                      str(e.exception))
+
+    def testFitFdtEmptyList(self):
+        """Test handling of an empty 'of-list' entry arg"""
+        entry_args = {
+            'of-list': '',
+        }
+        data = self._DoReadFileDtb('172_fit_fdt.dts', entry_args=entry_args)[0]
+
+    def testFitFdtMissing(self):
+        """Test handling of a missing 'default-dt' entry arg"""
+        entry_args = {
+            'of-list': 'test-fdt1 test-fdt2',
+        }
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb(
+                '172_fit_fdt.dts',
+                entry_args=entry_args,
+                extra_indirs=[os.path.join(self._indir, TEST_FDT_SUBDIR)])[0]
+        self.assertIn("Generated 'default' node requires default-dt entry argument",
+                      str(e.exception))
+
+    def testFitFdtNotInList(self):
+        """Test handling of a default-dt that is not in the of-list"""
+        entry_args = {
+            'of-list': 'test-fdt1 test-fdt2',
+            'default-dt': 'test-fdt3',
+        }
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFileDtb(
+                '172_fit_fdt.dts',
+                entry_args=entry_args,
+                extra_indirs=[os.path.join(self._indir, TEST_FDT_SUBDIR)])[0]
+        self.assertIn("default-dt entry argument 'test-fdt3' not found in fdt list: test-fdt1, test-fdt2",
+                      str(e.exception))
+
+    def testFitExtblobMissingHelp(self):
+        """Test display of help messages when an external blob is missing"""
+        control.missing_blob_help = control._ReadMissingBlobHelp()
+        control.missing_blob_help['wibble'] = 'Wibble test'
+        control.missing_blob_help['another'] = 'Another test'
+        with test_util.capture_sys_output() as (stdout, stderr):
+            self._DoTestFile('168_fit_missing_blob.dts',
+                             allow_missing=True)
+        err = stderr.getvalue()
+
+        # We can get the tag from the name, the type or the missing-msg
+        # property. Check all three.
+        self.assertIn('You may need to build ARM Trusted', err)
+        self.assertIn('Wibble test', err)
+        self.assertIn('Another test', err)
+
 if __name__ == "__main__":
     unittest.main()

15
tools/binman/missing-blob-help
Normal file

@ -0,0 +1,15 @@
# This file contains help messages for missing external blobs. Each message has
# a tag (MUST be just lower-case text, digits and hyphens) starting in column 1,
# followed by a colon (:) to indicate its start. The message can include any
# number of lines, including blank lines.
#
# When looking for a tag, Binman uses the value of 'missing-msg' for the entry,
# the entry name or the entry type, in that order

atf-bl31:
See the documentation for your board. You may need to build ARM Trusted
Firmware and build with BL31=/path/to/bl31.bin

atf-bl31-sunxi:
Please read the section on ARM Trusted Firmware (ATF) in
board/sunxi/README.sunxi64

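The lookup order described in the header comment above can be illustrated with
a small Python sketch. This is not binman's actual implementation; the 'entry'
dict and the fall-through behaviour when a tag has no message are assumptions:

# Try the entry's 'missing-msg' value, then its name, then its type,
# returning the first tag that has a help message in the table.
def lookup_blob_help(missing_blob_help, entry):
    for tag in (entry.get('missing-msg'), entry.get('name'),
                entry.get('type')):
        if tag and tag in missing_blob_help:
            return missing_blob_help[tag]
    return None

help_msgs = {'atf-bl31': 'See the documentation for your board...'}
entry = {'name': 'another', 'type': 'atf-bl31'}
print(lookup_blob_help(help_msgs, entry))  # matches on the entry type
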
40
tools/binman/test/165_section_ignore_hash_signature.dts
Normal file

@ -0,0 +1,40 @@
// SPDX-License-Identifier: GPL-2.0+

/dts-v1/;

/ {
	#address-cells = <1>;
	#size-cells = <1>;

	binman {
		section@0 {
			u-boot {
			};
			hash {
				algo = "sha256";
			};
			signature {
				algo = "sha256,rsa2048";
				key-name-hint = "dev";
			};
		};
		section@1 {
			u-boot {
			};
			hash-1 {
				algo = "sha1";
			};
			hash-2 {
				algo = "sha256";
			};
			signature-1 {
				algo = "sha1,rsa2048";
				key-name-hint = "dev";
			};
			signature-2 {
				algo = "sha256,rsa2048";
				key-name-hint = "dev";
			};
		};
	};
};

26
tools/binman/test/166_pad_in_sections.dts
Normal file

@ -0,0 +1,26 @@
// SPDX-License-Identifier: GPL-2.0+

/dts-v1/;

/ {
	#address-cells = <1>;
	#size-cells = <1>;

	binman {
		pad-byte = <0x26>;
		section {
			pad-byte = <0x21>;

			before {
				type = "u-boot";
			};
			u-boot {
				pad-before = <12>;
				pad-after = <6>;
			};
			after {
				type = "u-boot";
			};
		};
	};
};

57
tools/binman/test/167_fit_image_subentry_alignment.dts
Normal file

@ -0,0 +1,57 @@
// SPDX-License-Identifier: GPL-2.0+

/dts-v1/;

/ {
	#address-cells = <1>;
	#size-cells = <1>;

	binman {
		fit {
			description = "test-desc";
			#address-cells = <1>;

			images {
				kernel {
					description = "Offset-Align Test";
					type = "kernel";
					arch = "arm64";
					os = "linux";
					compression = "none";
					load = <00000000>;
					entry = <00000000>;
					u-boot-spl {
						offset = <0x20>;
					};
					u-boot {
						align = <0x10>;
					};
				};
				fdt-1 {
					description = "Pad-Before-After Test";
					type = "flat_dt";
					arch = "arm64";
					compression = "none";
					u-boot-spl-dtb {
					};
					text {
						text-label = "test-id";
						pad-before = <20>;
						pad-after = <30>;
					};
					u-boot-dtb {
					};
				};
			};

			configurations {
				default = "conf-1";
				conf-1 {
					description = "Kernel with FDT blob";
					kernel = "kernel";
					fdt = "fdt-1";
				};
			};
		};
	};
};

48
tools/binman/test/168_fit_missing_blob.dts
Normal file

@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0+

/dts-v1/;

/ {
	#address-cells = <1>;
	#size-cells = <1>;

	binman {
		u-boot {
		};
		fit {
			description = "test-desc";
			#address-cells = <1>;
			fit,fdt-list = "of-list";

			images {
				kernel {
					description = "ATF BL31";
					type = "kernel";
					arch = "ppc";
					os = "linux";
					compression = "gzip";
					load = <00000000>;
					entry = <00000000>;
					hash-1 {
						algo = "crc32";
					};
					hash-2 {
						algo = "sha1";
					};
					atf-bl31 {
						filename = "missing";
					};
					cros-ec-rw {
						type = "atf-bl31";
						missing-msg = "wibble";
					};
					another {
						type = "atf-bl31";
					};
				};
			};
		};
		u-boot-nodtb {
		};
	};
};

16
tools/binman/test/169_atf_bl31.dts
Normal file

@ -0,0 +1,16 @@
// SPDX-License-Identifier: GPL-2.0+

/dts-v1/;

/ {
	#address-cells = <1>;
	#size-cells = <1>;

	binman {
		size = <16>;

		atf-bl31 {
			filename = "bl31.bin";
		};
	};
};

54
tools/binman/test/171_fit_fdt_missing_prop.dts
Normal file

@ -0,0 +1,54 @@
// SPDX-License-Identifier: GPL-2.0+

/dts-v1/;

/ {
	#address-cells = <1>;
	#size-cells = <1>;

	binman {
		u-boot {
		};
		fit {
			description = "test-desc";
			#address-cells = <1>;

			images {
				kernel {
					description = "Vanilla Linux kernel";
					type = "kernel";
					arch = "ppc";
					os = "linux";
					compression = "gzip";
					load = <00000000>;
					entry = <00000000>;
					hash-1 {
						algo = "crc32";
					};
					hash-2 {
						algo = "sha1";
					};
					u-boot {
					};
				};
				@fdt-SEQ {
					description = "fdt-NAME.dtb";
					type = "flat_dt";
					compression = "none";
				};
			};

			configurations {
				default = "config-1";
				@config-SEQ {
					description = "conf-NAME.dtb";
					firmware = "uboot";
					loadables = "atf";
					fdt = "fdt-SEQ";
				};
			};
		};
		u-boot-nodtb {
		};
	};
};

55
tools/binman/test/172_fit_fdt.dts
Normal file

@ -0,0 +1,55 @@
// SPDX-License-Identifier: GPL-2.0+

/dts-v1/;

/ {
	#address-cells = <1>;
	#size-cells = <1>;

	binman {
		u-boot {
		};
		fit {
			description = "test-desc";
			#address-cells = <1>;
			fit,fdt-list = "of-list";

			images {
				kernel {
					description = "Vanilla Linux kernel";
					type = "kernel";
					arch = "ppc";
					os = "linux";
					compression = "gzip";
					load = <00000000>;
					entry = <00000000>;
					hash-1 {
						algo = "crc32";
					};
					hash-2 {
						algo = "sha1";
					};
					u-boot {
					};
				};
				@fdt-SEQ {
					description = "fdt-NAME.dtb";
					type = "flat_dt";
					compression = "none";
				};
			};

			configurations {
				default = "@config-DEFAULT-SEQ";
				@config-SEQ {
					description = "conf-NAME.dtb";
					firmware = "uboot";
					loadables = "atf";
					fdt = "fdt-SEQ";
				};
			};
		};
		u-boot-nodtb {
		};
	};
};

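As the ftest.py tests earlier in this diff exercise, binman expands each
'@fdt-SEQ' generator node once per device tree named in the 'of-list' entry
argument, substituting SEQ with a 1-based sequence number and NAME with the
device tree name; '@config-DEFAULT-SEQ' picks the configuration matching the
'default-dt' entry argument. A rough Python illustration of the substitution
(a sketch, not binman's actual code):

# Expand an '@fdt-SEQ' template against of-list = "test-fdt1 test-fdt2".
of_list = 'test-fdt1 test-fdt2'.split()
template = {
    'description': 'fdt-NAME.dtb',
    'type': 'flat_dt',
    'compression': 'none',
}

for seq, name in enumerate(of_list, 1):
    node = {prop: value.replace('NAME', name)
            for prop, value in template.items()}
    print('fdt-%d' % seq, node)
# fdt-1 {'description': 'fdt-test-fdt1.dtb', ...}
# fdt-2 {'description': 'fdt-test-fdt2.dtb', ...}
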
@ -7,6 +7,19 @@
# SPDX-License-Identifier: GPL-2.0+
#

HOSTARCH := $(shell uname -m | sed -e s/i.86/x86/ )
ifeq ($(findstring $(HOSTARCH),"x86" "x86_64"),)
ifeq ($(findstring $(MAKECMDGOALS),"help" "clean"),)
ifndef CROSS_COMPILE
$(error Binman tests need to compile to x86, but the CPU arch of your \
machine is $(HOSTARCH). Set CROSS_COMPILE to a suitable cross compiler)
endif
endif
endif

CC = $(CROSS_COMPILE)gcc
OBJCOPY = $(CROSS_COMPILE)objcopy

VPATH := $(SRC)
CFLAGS := -march=i386 -m32 -nostdlib -I $(SRC)../../../include \
	-Wl,--no-dynamic-linker

@ -32,7 +45,7 @@ bss_data: CFLAGS += $(SRC)bss_data.lds
bss_data: bss_data.c

u_boot_binman_syms.bin: u_boot_binman_syms
	objcopy -O binary $< -R .note.gnu.build-id $@
	$(OBJCOPY) -O binary $< -R .note.gnu.build-id $@

u_boot_binman_syms: CFLAGS += $(LDS_BINMAN)
u_boot_binman_syms: u_boot_binman_syms.c

@ -1541,41 +1541,73 @@ class Builder:
        """Prepare the working directory for a thread.

        This clones or fetches the repo into the thread's work directory.
        Optionally, it can create a linked working tree of the repo in the
        thread's work directory instead.

        Args:
            thread_num: Thread number (0, 1, ...)
            setup_git: True to set up a git repo clone
            setup_git:
               'clone' to set up a git clone
               'worktree' to set up a git worktree
        """
        thread_dir = self.GetThreadDir(thread_num)
        builderthread.Mkdir(thread_dir)
        git_dir = os.path.join(thread_dir, '.git')

        # Clone the repo if it doesn't already exist
        # TODO(sjg@chromium): Perhaps some git hackery to symlink instead, so
        # we have a private index but uses the origin repo's contents?
        # Create a worktree or a git repo clone for this thread if it
        # doesn't already exist
        if setup_git and self.git_dir:
            src_dir = os.path.abspath(self.git_dir)
            if os.path.exists(git_dir):
            if os.path.isdir(git_dir):
                # This is a clone of the src_dir repo, we can keep using
                # it but need to fetch from src_dir.
                Print('\rFetching repo for thread %d' % thread_num,
                      newline=False)
                gitutil.Fetch(git_dir, thread_dir)
                terminal.PrintClear()
            else:
            elif os.path.isfile(git_dir):
                # This is a worktree of the src_dir repo, we don't need to
                # create it again or update it in any way.
                pass
            elif os.path.exists(git_dir):
                # Don't know what could trigger this, but we probably
                # can't create a git worktree/clone here.
                raise ValueError('Git dir %s exists, but is not a file '
                                 'or a directory.' % git_dir)
            elif setup_git == 'worktree':
                Print('\rChecking out worktree for thread %d' % thread_num,
                      newline=False)
                gitutil.AddWorktree(src_dir, thread_dir)
                terminal.PrintClear()
            elif setup_git == 'clone' or setup_git == True:
                Print('\rCloning repo for thread %d' % thread_num,
                      newline=False)
                gitutil.Clone(src_dir, thread_dir)
                terminal.PrintClear()
            else:
                raise ValueError("Can't setup git repo with %s." % setup_git)

    def _PrepareWorkingSpace(self, max_threads, setup_git):
        """Prepare the working directory for use.

        Set up the git repo for each thread.
        Set up the git repo for each thread. Creates a linked working tree
        if git-worktree is available, or clones the repo if it isn't.

        Args:
            max_threads: Maximum number of threads we expect to need.
            setup_git: True to set up a git repo clone
            setup_git: True to set up a git worktree or a git clone
        """
        builderthread.Mkdir(self._working_dir)
        if setup_git and self.git_dir:
            src_dir = os.path.abspath(self.git_dir)
            if gitutil.CheckWorktreeIsAvailable(src_dir):
                setup_git = 'worktree'
                # If we previously added a worktree but the directory for it
                # got deleted, we need to prune its files from the repo so
                # that we can check out another in its place.
                gitutil.PruneWorktrees(src_dir)
            else:
                setup_git = 'clone'
        for thread in range(max_threads):
            self._PrepareThread(thread, setup_git)

@ -319,6 +319,8 @@ class TestFunctional(unittest.TestCase):
            return command.CommandResult(return_code=0)
        elif sub_cmd == 'checkout':
            return command.CommandResult(return_code=0)
        elif sub_cmd == 'worktree':
            return command.CommandResult(return_code=0)

        # Not handled, so abort
        print('git', git_args, sub_cmd, args)

@ -9,9 +9,6 @@ import tempfile
import time
import unittest

# Bring in the patman libraries
our_path = os.path.dirname(os.path.realpath(__file__))

from buildman import board
from buildman import bsettings
from buildman import builder

@ -68,22 +68,23 @@ def EnsureCompiled(fname, tmpdir=None, capture_stderr=False):

    search_paths = [os.path.join(os.getcwd(), 'include')]
    root, _ = os.path.splitext(fname)
    args = ['-E', '-P', '-x', 'assembler-with-cpp', '-D__ASSEMBLY__']
    cc, args = tools.GetTargetCompileTool('cc')
    args += ['-E', '-P', '-x', 'assembler-with-cpp', '-D__ASSEMBLY__']
    args += ['-Ulinux']
    for path in search_paths:
        args.extend(['-I', path])
    args += ['-o', dts_input, fname]
    command.Run('cc', *args)
    command.Run(cc, *args)

    # If we don't have a directory, put it in the tools tempdir
    search_list = []
    for path in search_paths:
        search_list.extend(['-i', path])
    args = ['-I', 'dts', '-o', dtb_output, '-O', 'dtb',
    dtc, args = tools.GetTargetCompileTool('dtc')
    args += ['-I', 'dts', '-o', dtb_output, '-O', 'dtb',
             '-W', 'no-unit_address_vs_reg']
    args.extend(search_list)
    args.append(dts_input)
    dtc = os.environ.get('DTC') or 'dtc'
    command.Run(dtc, *args, capture_stderr=capture_stderr)
    return dtb_output

@ -259,6 +259,48 @@ def Fetch(git_dir=None, work_tree=None):
    if result.return_code != 0:
        raise OSError('git fetch: %s' % result.stderr)

def CheckWorktreeIsAvailable(git_dir):
    """Check if git-worktree functionality is available

    Args:
        git_dir: The repository to test in

    Returns:
        True if git-worktree commands will work, False otherwise.
    """
    pipe = ['git', '--git-dir', git_dir, 'worktree', 'list']
    result = command.RunPipe([pipe], capture=True, capture_stderr=True,
                             raise_on_error=False)
    return result.return_code == 0

def AddWorktree(git_dir, output_dir, commit_hash=None):
    """Create and checkout a new git worktree for this build

    Args:
        git_dir: The repository to checkout the worktree from
        output_dir: Path for the new worktree
        commit_hash: Commit hash to checkout
    """
    # We need to pass --detach to avoid creating a new branch
    pipe = ['git', '--git-dir', git_dir, 'worktree', 'add', '.', '--detach']
    if commit_hash:
        pipe.append(commit_hash)
    result = command.RunPipe([pipe], capture=True, cwd=output_dir,
                             capture_stderr=True)
    if result.return_code != 0:
        raise OSError('git worktree add: %s' % result.stderr)

def PruneWorktrees(git_dir):
    """Remove administrative files for deleted worktrees

    Args:
        git_dir: The repository whose deleted worktrees should be pruned
    """
    pipe = ['git', '--git-dir', git_dir, 'worktree', 'prune']
    result = command.RunPipe([pipe], capture=True, capture_stderr=True)
    if result.return_code != 0:
        raise OSError('git worktree prune: %s' % result.stderr)

def CreatePatches(branch, start, count, ignore_binary, series):
    """Create a series of patches from the top of the current branch.

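A hedged usage sketch of the new worktree helpers, mirroring how buildman
decides between a worktree and a full clone. The paths here are hypothetical,
and the output directory must already exist since AddWorktree runs with
cwd=output_dir:

import os
from patman import gitutil

src = '/path/to/u-boot/.git'
work = '/tmp/thread0'
os.makedirs(work, exist_ok=True)

if gitutil.CheckWorktreeIsAvailable(src):
    gitutil.PruneWorktrees(src)     # drop metadata for deleted worktrees
    gitutil.AddWorktree(src, work)  # detached checkout, no new branch
else:
    gitutil.Clone(src, work)        # fall back to a full clone
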
@ -188,6 +188,120 @@ def PathHasFile(path_spec, fname):
            return True
    return False

def GetHostCompileTool(name):
    """Get the host-specific version for a compile tool

    This checks the environment variables that specify which version of
    the tool should be used (e.g. ${HOSTCC}).

    The following table lists the host-specific versions of the tools
    this function resolves to:

      Compile Tool  | Host version
      --------------+----------------
      as            | ${HOSTAS}
      ld            | ${HOSTLD}
      cc            | ${HOSTCC}
      cpp           | ${HOSTCPP}
      c++           | ${HOSTCXX}
      ar            | ${HOSTAR}
      nm            | ${HOSTNM}
      ldr           | ${HOSTLDR}
      strip         | ${HOSTSTRIP}
      objcopy       | ${HOSTOBJCOPY}
      objdump       | ${HOSTOBJDUMP}
      dtc           | ${HOSTDTC}

    Args:
        name: Command name to run

    Returns:
        host_name: Exact command name to run instead
        extra_args: List of extra arguments to pass
    """
    env = dict(os.environ)

    host_name = None
    extra_args = []
    if name in ('as', 'ld', 'cc', 'cpp', 'ar', 'nm', 'ldr', 'strip',
                'objcopy', 'objdump', 'dtc'):
        host_name, *extra_args = env.get('HOST' + name.upper(), '').split(' ')
    elif name == 'c++':
        host_name, *extra_args = env.get('HOSTCXX', '').split(' ')

    if host_name:
        return host_name, extra_args
    return name, []

def GetTargetCompileTool(name, cross_compile=None):
    """Get the target-specific version for a compile tool

    This first checks the environment variables that specify which
    version of the tool should be used (e.g. ${CC}). If those aren't
    specified, it checks the CROSS_COMPILE variable as a prefix for the
    tool with some substitutions (e.g. "${CROSS_COMPILE}gcc" for cc).

    The following table lists the target-specific versions of the tools
    this function resolves to:

      Compile Tool  | First choice  | Second choice
      --------------+---------------+----------------------------
      as            | ${AS}         | ${CROSS_COMPILE}as
      ld            | ${LD}         | ${CROSS_COMPILE}ld.bfd
                    |               |   or ${CROSS_COMPILE}ld
      cc            | ${CC}         | ${CROSS_COMPILE}gcc
      cpp           | ${CPP}        | ${CROSS_COMPILE}gcc -E
      c++           | ${CXX}        | ${CROSS_COMPILE}g++
      ar            | ${AR}         | ${CROSS_COMPILE}ar
      nm            | ${NM}         | ${CROSS_COMPILE}nm
      ldr           | ${LDR}        | ${CROSS_COMPILE}ldr
      strip         | ${STRIP}      | ${CROSS_COMPILE}strip
      objcopy       | ${OBJCOPY}    | ${CROSS_COMPILE}objcopy
      objdump       | ${OBJDUMP}    | ${CROSS_COMPILE}objdump
      dtc           | ${DTC}        | (no CROSS_COMPILE version)

    Args:
        name: Command name to run

    Returns:
        target_name: Exact command name to run instead
        extra_args: List of extra arguments to pass
    """
    env = dict(os.environ)

    target_name = None
    extra_args = []
    if name in ('as', 'ld', 'cc', 'cpp', 'ar', 'nm', 'ldr', 'strip',
                'objcopy', 'objdump', 'dtc'):
        target_name, *extra_args = env.get(name.upper(), '').split(' ')
    elif name == 'c++':
        target_name, *extra_args = env.get('CXX', '').split(' ')

    if target_name:
        return target_name, extra_args

    if cross_compile is None:
        cross_compile = env.get('CROSS_COMPILE', '')
    if not cross_compile:
        return name, []

    if name in ('as', 'ar', 'nm', 'ldr', 'strip', 'objcopy', 'objdump'):
        target_name = cross_compile + name
    elif name == 'ld':
        try:
            if Run(cross_compile + 'ld.bfd', '-v'):
                target_name = cross_compile + 'ld.bfd'
        except:
            target_name = cross_compile + 'ld'
    elif name == 'cc':
        target_name = cross_compile + 'gcc'
    elif name == 'cpp':
        target_name = cross_compile + 'gcc'
        extra_args = ['-E']
    elif name == 'c++':
        target_name = cross_compile + 'g++'
    else:
        target_name = name
    return target_name, extra_args

def Run(name, *args, **kwargs):
    """Run a tool with some arguments

@ -198,16 +312,27 @@ def Run(name, *args, **kwargs):
    Args:
        name: Command name to run
        args: Arguments to the tool
        for_host: True to resolve the command to the version for the host
        for_target: False to run the command as-is, without resolving it
            to the version for the compile target

    Returns:
        CommandResult object
    """
    try:
        binary = kwargs.get('binary')
        for_host = kwargs.get('for_host', False)
        for_target = kwargs.get('for_target', not for_host)
        env = None
        if tool_search_paths:
            env = dict(os.environ)
            env['PATH'] = ':'.join(tool_search_paths) + ':' + env['PATH']
        if for_target:
            name, extra_args = GetTargetCompileTool(name)
            args = tuple(extra_args) + args
        elif for_host:
            name, extra_args = GetHostCompileTool(name)
            args = tuple(extra_args) + args
        all_args = (name,) + args
        result = command.RunPipe([all_args], capture=True, capture_stderr=True,
                                 env=env, raise_on_error=False, binary=binary)

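A short sketch of how the new resolution behaves, assuming ${CC} and ${CPP}
are not set and CROSS_COMPILE=aarch64-linux-gnu- is exported (the resolved
names in the comments follow the table above):

from patman import tools

cc, extra = tools.GetTargetCompileTool('cc')
print(cc, extra)   # 'aarch64-linux-gnu-gcc', []
cpp, extra = tools.GetTargetCompileTool('cpp')
print(cpp, extra)  # 'aarch64-linux-gnu-gcc', ['-E']

# Run() now resolves the target tool by default; pass for_host=True to
# use ${HOSTCC} and friends instead.
tools.Run('cc', '--version')
tools.Run('cc', '--version', for_host=True)
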
@ -28,9 +28,6 @@ import os
import re
import sys

# Bring in the patman libraries
our_path = os.path.dirname(os.path.realpath(__file__))

from patman import command

def rm_kconfig_include(path):