Mirror of https://github.com/AsahiLinux/u-boot, synced 2024-11-11 07:34:31 +00:00

Merge tag 'dm-pull-2nov23' of https://source.denx.de/u-boot/custodians/u-boot-dm

Just various bugfixes, apart from the TI one

Commit 1e044a9bd6
22 changed files with 168 additions and 99 deletions
@@ -2,7 +2,7 @@
 # Copyright (c) 2011 The Chromium OS Authors.

 PLATFORM_CPPFLAGS += -D__SANDBOX__ -U_FORTIFY_SOURCE
-PLATFORM_CPPFLAGS += -fPIC
+PLATFORM_CPPFLAGS += -fPIC -ffunction-sections -fdata-sections
 PLATFORM_LIBS += -lrt
 SDL_CONFIG ?= sdl2-config
@@ -30,7 +30,7 @@ cmd_u-boot__ = $(CC) -o $@ -Wl,-T u-boot.lds $(u-boot-init) \
 	$(u-boot-main) \
 	$(u-boot-keep-syms-lto) \
 	-Wl,--no-whole-archive \
-	$(PLATFORM_LIBS) -Wl,-Map -Wl,u-boot.map
+	$(PLATFORM_LIBS) -Wl,-Map -Wl,u-boot.map -Wl,--gc-sections

 cmd_u-boot-spl = (cd $(obj) && $(CC) -o $(SPL_BIN) -Wl,-T u-boot-spl.lds \
 	$(KBUILD_LDFLAGS:%=-Wl,%) \
@@ -15,7 +15,7 @@ SECTIONS

 	_u_boot_sandbox_getopt : {
 		*(_u_boot_sandbox_getopt_start)
-		*(_u_boot_sandbox_getopt)
+		KEEP(*(_u_boot_sandbox_getopt))
 		*(_u_boot_sandbox_getopt_end)
 	}
@@ -118,7 +118,7 @@ static int do_cbfs_ls(struct cmd_tbl *cmdtp, int flag, int argc,
 		case CBFS_TYPE_CBFSHEADER:
 			type_name = "cbfs header";
 			break;
-		case CBFS_TYPE_STAGE:
+		case CBFS_TYPE_LEGACY_STAGE:
 			type_name = "stage";
 			break;
 		case CBFS_TYPE_PAYLOAD:
@@ -151,7 +151,7 @@ int cros_ec_spi_command(struct udevice *udev, uint8_t cmd, int cmd_version,

 	/* Response code is first byte of message */
 	if (p[0] != EC_RES_SUCCESS) {
-		printf("%s: Returned status %d\n", __func__, p[0]);
+		log_debug("Returned status %d\n", p[0]);
 		return -(int)(p[0]);
 	}
@@ -22,7 +22,7 @@ enum cbfs_result {
 enum cbfs_filetype {
 	CBFS_TYPE_BOOTBLOCK = 0x01,
 	CBFS_TYPE_CBFSHEADER = 0x02,
-	CBFS_TYPE_STAGE = 0x10,
+	CBFS_TYPE_LEGACY_STAGE = 0x10,
 	CBFS_TYPE_PAYLOAD = 0x20,
 	CBFS_TYPE_SELF = CBFS_TYPE_PAYLOAD,
@@ -155,6 +155,7 @@ authInPlace = INTEGER:2
                C, ST, L, O, OU, CN and emailAddress
             cert_type (int): Certification type
             bootcore (int): Booting core
+            bootcore_opts(int): Booting core option, lockstep (0) or split (2) mode
             load_addr (int): Load address of image
             sha (int): Hash function
@@ -225,7 +226,7 @@ emailAddress = {req_dist_name_dict['emailAddress']}
                      imagesize_sbl, hashval_sbl, load_addr_sysfw, imagesize_sysfw,
                      hashval_sysfw, load_addr_sysfw_data, imagesize_sysfw_data,
                      hashval_sysfw_data, sysfw_inner_cert_ext_boot_block,
-                     dm_data_ext_boot_block):
+                     dm_data_ext_boot_block, bootcore_opts):
         """Create a certificate

         Args:
@@ -241,6 +242,7 @@ emailAddress = {req_dist_name_dict['emailAddress']}
             bootcore (int): Booting core
             load_addr (int): Load address of image
             sha (int): Hash function
+            bootcore_opts (int): Booting core option, lockstep (0) or split (2) mode

         Returns:
             str: Tool output
@@ -285,7 +287,7 @@ sysfw_data=SEQUENCE:sysfw_data
 [sbl]
 compType = INTEGER:1
 bootCore = INTEGER:16
-compOpts = INTEGER:0
+compOpts = INTEGER:{bootcore_opts}
 destAddr = FORMAT:HEX,OCT:{load_addr:08x}
 compSize = INTEGER:{imagesize_sbl}
 shaType = OID:{sha_type}
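For illustration, a minimal sketch (hypothetical value, not part of this commit) of how the bootcore_opts argument feeds the certificate template::

    # Hypothetical value: 0 selects lockstep, 2 selects split mode
    bootcore_opts = 2
    print(f'compOpts = INTEGER:{bootcore_opts}')  # -> compOpts = INTEGER:2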
@@ -42,27 +42,24 @@ HEADER_VERSION2 = 0x31313132
 FILE_HEADER_FORMAT = b'>8sIIII'
 FILE_HEADER_LEN = 0x18
 FILE_MAGIC = b'LARCHIVE'
-FILENAME_ALIGN = 16  # Filename lengths are aligned to this
+ATTRIBUTE_ALIGN = 4  # All attribute sizes must be divisible by this

-# A stage header containing information about 'stage' files
-# Yes this is correct: this header is in litte-endian format
-STAGE_FORMAT = '<IQQII'
-STAGE_LEN = 0x1c
+# A stage-header attribute containing information about 'stage' files
+ATTR_STAGE_FORMAT = '>IIQII'
+ATTR_STAGE_LEN = 0x18

 # An attribute describring the compression used in a file
 ATTR_COMPRESSION_FORMAT = '>IIII'
 ATTR_COMPRESSION_LEN = 0x10

 # Attribute tags
+# Depending on how the header was initialised, it may be backed with 0x00 or
+# 0xff. Support both.
 FILE_ATTR_TAG_UNUSED = 0
 FILE_ATTR_TAG_UNUSED2 = 0xffffffff
 FILE_ATTR_TAG_COMPRESSION = 0x42435a4c
 FILE_ATTR_TAG_HASH = 0x68736148
 FILE_ATTR_TAG_POSITION = 0x42435350  # PSCB
 FILE_ATTR_TAG_ALIGNMENT = 0x42434c41  # ALCB
 FILE_ATTR_TAG_PADDING = 0x47444150  # PDNG
+FILE_ATTR_TAG_STAGEHEADER = 0x53746748  # StgH

 # This is 'the size of bootblock reserved in firmware image (cbfs.txt)'
 # Not much more info is available, but we set it to 4, due to this comment in
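As a hedged aside, a standalone sketch (not code from this commit) showing that the new big-endian stage attribute really is 0x18 bytes, matching ATTR_STAGE_LEN::

    import struct

    ATTR_STAGE_FORMAT = '>IIQII'            # tag, length, load, entry offset, memsize
    FILE_ATTR_TAG_STAGEHEADER = 0x53746748  # 'StgH'

    # 4 + 4 + 8 + 4 + 4 = 0x18 bytes, matching ATTR_STAGE_LEN above
    assert struct.calcsize(ATTR_STAGE_FORMAT) == 0x18

    # Example values (assumed): load address, entry offset and memory size
    attr = struct.pack(ATTR_STAGE_FORMAT, FILE_ATTR_TAG_STAGEHEADER, 0x18,
                       0xfef20000, 2, 0x100)
    print(attr.hex())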
@@ -100,7 +97,8 @@ ARCH_NAMES = {

 # File types. Only supported ones are included here
 TYPE_CBFSHEADER = 0x02     # Master header, HEADER_FORMAT
-TYPE_STAGE = 0x10          # Stage, holding an executable, see STAGE_FORMAT
+TYPE_LEGACY_STAGE = 0x10   # Stage, holding an executable
+TYPE_STAGE = 0x11          # New-type stage with ATTR_STAGE_FORMAT
 TYPE_RAW = 0x50            # Raw file, possibly compressed
 TYPE_EMPTY = 0xffffffff    # Empty data
@@ -190,7 +188,7 @@ def _pack_string(instr):
         String with required padding (at least one 0x00 byte) at the end
     """
     val = tools.to_bytes(instr)
-    pad_len = align_int(len(val) + 1, FILENAME_ALIGN)
+    pad_len = align_int(len(val) + 1, ATTRIBUTE_ALIGN)
     return val + tools.get_bytes(0, pad_len - len(val))
@@ -304,7 +302,7 @@ class CbfsFile(object):
             CbfsFile object containing the file information
         """
         cfile = CbfsFile('', TYPE_EMPTY, b'', None)
-        cfile.size = space_to_use - FILE_HEADER_LEN - FILENAME_ALIGN
+        cfile.size = space_to_use - FILE_HEADER_LEN - ATTRIBUTE_ALIGN
         cfile.erase_byte = erase_byte
         return cfile
@@ -331,9 +329,10 @@ class CbfsFile(object):
         name = _pack_string(self.name)
         hdr_len = len(name) + FILE_HEADER_LEN
         if self.ftype == TYPE_STAGE:
-            pass
+            hdr_len += ATTR_STAGE_LEN
         elif self.ftype == TYPE_RAW:
-            hdr_len += ATTR_COMPRESSION_LEN
+            if self.compress:
+                hdr_len += ATTR_COMPRESSION_LEN
         elif self.ftype == TYPE_EMPTY:
             pass
         else:
@@ -359,9 +358,9 @@ class CbfsFile(object):
         data = self.data
         if self.ftype == TYPE_STAGE:
             elf_data = elf.DecodeElf(data, self.base_address)
-            content = struct.pack(STAGE_FORMAT, self.compress,
-                                  elf_data.entry, elf_data.load,
-                                  len(elf_data.data), elf_data.memsize)
+            attr = struct.pack(ATTR_STAGE_FORMAT, FILE_ATTR_TAG_STAGEHEADER,
+                               ATTR_STAGE_LEN, elf_data.load,
+                               elf_data.entry - elf_data.load, elf_data.memsize)
             data = elf_data.data
         elif self.ftype == TYPE_RAW:
             orig_data = data
@@ -369,9 +368,11 @@ class CbfsFile(object):
                 data = self.comp_bintool.compress(orig_data)
                 self.memlen = len(orig_data)
             self.data_len = len(data)
-            attr = struct.pack(ATTR_COMPRESSION_FORMAT,
-                               FILE_ATTR_TAG_COMPRESSION, ATTR_COMPRESSION_LEN,
-                               self.compress, self.memlen)
+            if self.compress:
+                attr = struct.pack(ATTR_COMPRESSION_FORMAT,
+                                   FILE_ATTR_TAG_COMPRESSION,
+                                   ATTR_COMPRESSION_LEN, self.compress,
+                                   self.memlen)
         elif self.ftype == TYPE_EMPTY:
             data = tools.get_bytes(self.erase_byte, self.size)
         else:
@@ -391,6 +392,8 @@ class CbfsFile(object):
                 raise ValueError("Internal error: CBFS file '%s': Requested offset %#x but current output position is %#x" %
                                  (self.name, self.cbfs_offset, offset))
             pad = tools.get_bytes(pad_byte, pad_len)
+            if attr_pos:
+                attr_pos += pad_len
             hdr_len += pad_len

         # This is the offset of the start of the file's data,
@@ -405,9 +408,9 @@ class CbfsFile(object):
         if expected_len != actual_len:  # pragma: no cover
             # Test coverage of this is not available since this should never
             # happen. It probably indicates that get_header_len() is broken.
-            raise ValueError("Internal error: CBFS file '%s': Expected headers of %#x bytes, got %#d" %
+            raise ValueError("Internal error: CBFS file '%s': Expected headers of %#x bytes, got %#x" %
                              (self.name, expected_len, actual_len))
-        return hdr + name + attr + pad + content + data, hdr_len
+        return hdr + name + pad + attr + content + data, hdr_len


 class CbfsWriter(object):
@@ -453,6 +456,9 @@ class CbfsWriter(object):
         self._arch = arch
         self._bootblock_size = 0
         self._erase_byte = 0xff
+
+        # Small padding to align a file uses 0
+        self._small_pad_byte = 0
         self._align = ENTRY_ALIGN
         self._add_fileheader = False
         if self._arch == ARCHITECTURE_X86:
@@ -474,7 +480,7 @@ class CbfsWriter(object):
                              self._bootblock_size, self._align)
             self._hdr_at_start = True

-    def _skip_to(self, fd, offset):
+    def _skip_to(self, fd, offset, pad_byte):
         """Write out pad bytes until a given offset

         Args:
@@ -484,16 +490,16 @@ class CbfsWriter(object):
         if fd.tell() > offset:
             raise ValueError('No space for data before offset %#x (current offset %#x)' %
                              (offset, fd.tell()))
-        fd.write(tools.get_bytes(self._erase_byte, offset - fd.tell()))
+        fd.write(tools.get_bytes(pad_byte, offset - fd.tell()))

-    def _pad_to(self, fd, offset):
+    def _pad_to(self, fd, offset, pad_byte):
         """Write out pad bytes and/or an empty file until a given offset

         Args:
             fd: File objext to write to
             offset: Offset to write to
         """
-        self._align_to(fd, self._align)
+        self._align_to(fd, self._align, pad_byte)
         upto = fd.tell()
         if upto > offset:
             raise ValueError('No space for data before pad offset %#x (current offset %#x)' %
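A standalone sketch (simplified from the class above, using the new pad_byte parameter) of what _skip_to() does::

    import io

    def skip_to(fd, offset, pad_byte):
        """Write pad bytes from the current position up to offset"""
        if fd.tell() > offset:
            raise ValueError('No space for data before offset %#x' % offset)
        fd.write(bytes([pad_byte]) * (offset - fd.tell()))

    fd = io.BytesIO()
    fd.write(b'hdr')
    skip_to(fd, 8, 0xff)
    print(fd.getvalue())  # b'hdr\xff\xff\xff\xff\xff'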
@@ -502,9 +508,9 @@ class CbfsWriter(object):
         if todo:
             cbf = CbfsFile.empty(todo, self._erase_byte)
             fd.write(cbf.get_data_and_offset()[0])
-        self._skip_to(fd, offset)
+        self._skip_to(fd, offset, pad_byte)

-    def _align_to(self, fd, align):
+    def _align_to(self, fd, align, pad_byte):
         """Write out pad bytes until a given alignment is reached

         This only aligns if the resulting output would not reach the end of the
@@ -518,7 +524,7 @@ class CbfsWriter(object):
         """
         offset = align_int(fd.tell(), align)
         if offset < self._size:
-            self._skip_to(fd, offset)
+            self._skip_to(fd, offset, pad_byte)

     def add_file_stage(self, name, data, cbfs_offset=None):
         """Add a new stage file to the CBFS
@@ -568,7 +574,7 @@ class CbfsWriter(object):
             raise ValueError('No space for header at offset %#x (current offset %#x)' %
                              (self._header_offset, fd.tell()))
         if not add_fileheader:
-            self._pad_to(fd, self._header_offset)
+            self._pad_to(fd, self._header_offset, self._erase_byte)
         hdr = struct.pack(HEADER_FORMAT, HEADER_MAGIC, HEADER_VERSION2,
                           self._size, self._bootblock_size, self._align,
                           self._contents_offset, self._arch, 0xffffffff)
@@ -580,7 +586,7 @@ class CbfsWriter(object):
             fd.write(name)
             self._header_offset = fd.tell()
             fd.write(hdr)
-            self._align_to(fd, self._align)
+            self._align_to(fd, self._align, self._erase_byte)
         else:
             fd.write(hdr)
@@ -597,24 +603,26 @@ class CbfsWriter(object):
         # THe header can go at the start in some cases
         if self._hdr_at_start:
             self._write_header(fd, add_fileheader=self._add_fileheader)
-        self._skip_to(fd, self._contents_offset)
+        self._skip_to(fd, self._contents_offset, self._erase_byte)

         # Write out each file
         for cbf in self._files.values():
             # Place the file at its requested place, if any
             offset = cbf.calc_start_offset()
             if offset is not None:
-                self._pad_to(fd, align_int_down(offset, self._align))
+                self._pad_to(fd, align_int_down(offset, self._align),
+                             self._erase_byte)
             pos = fd.tell()
-            data, data_offset = cbf.get_data_and_offset(pos, self._erase_byte)
+            data, data_offset = cbf.get_data_and_offset(pos,
+                                                        self._small_pad_byte)
             fd.write(data)
-            self._align_to(fd, self._align)
+            self._align_to(fd, self._align, self._erase_byte)
             cbf.calced_cbfs_offset = pos + data_offset
         if not self._hdr_at_start:
             self._write_header(fd, add_fileheader=self._add_fileheader)

         # Pad to the end and write a pointer to the CBFS master header
-        self._pad_to(fd, self._base_address or self._size - 4)
+        self._pad_to(fd, self._base_address or self._size - 4, self._erase_byte)
         rel_offset = self._header_offset - self._size
         fd.write(struct.pack('<I', rel_offset & 0xffffffff))
@@ -734,26 +742,28 @@ class CbfsReader(object):
                 print('name', name)

             # If there are attribute headers present, read those
-            compress = self._read_attr(fd, file_pos, attr, offset)
-            if compress is None:
+            attrs = self._read_attr(fd, file_pos, attr, offset)
+            if attrs is None:
                 return False

             # Create the correct CbfsFile object depending on the type
             cfile = None
             cbfs_offset = file_pos + offset
             fd.seek(cbfs_offset, io.SEEK_SET)
+            if DEBUG:
+                print(f'ftype {ftype:x}')
             if ftype == TYPE_CBFSHEADER:
                 self._read_header(fd)
             elif ftype == TYPE_STAGE:
-                data = fd.read(STAGE_LEN)
                 cfile = CbfsFile.stage(self.stage_base_address, name, b'',
                                        cbfs_offset)
-                (cfile.compress, cfile.entry, cfile.load, cfile.data_len,
-                 cfile.memlen) = struct.unpack(STAGE_FORMAT, data)
-                cfile.data = fd.read(cfile.data_len)
+                cfile.load, entry_offset, cfile.memlen = attrs
+                cfile.entry = cfile.load + entry_offset
+                cfile.data = fd.read(cfile.memlen)
+                cfile.data_len = cfile.memlen
             elif ftype == TYPE_RAW:
                 data = fd.read(size)
-                cfile = CbfsFile.raw(name, data, cbfs_offset, compress)
+                cfile = CbfsFile.raw(name, data, cbfs_offset, attrs)
                 cfile.decompress()
                 if DEBUG:
                     print('data', data)
@@ -777,8 +787,8 @@ class CbfsReader(object):
         """Read attributes from the file

         CBFS files can have attributes which are things that cannot fit into the
-        header. The only attributes currently supported are compression and the
-        unused tag.
+        header. The only attributes currently supported are compression, stage
+        header and the unused tag

         Args:
             fd: File to read from
@@ -788,11 +798,16 @@ class CbfsReader(object):
                 attributes)

         Returns:
-            Compression to use for the file (COMPRESS_...)
+            Either:
+                Compression to use for the file (COMPRESS_...)
+                tuple containing stage info:
+                    load address
+                    entry offset
+                    memory size
         """
-        compress = COMPRESS_NONE
+        attrs = None
         if not attr:
-            return compress
+            return COMPRESS_NONE
         attr_size = offset - attr
         fd.seek(file_pos + attr, io.SEEK_SET)
         while attr_size:
@@ -807,12 +822,15 @@ class CbfsReader(object):
                 # We don't currently use this information
                 atag, alen, compress, _decomp_size = struct.unpack(
                     ATTR_COMPRESSION_FORMAT, data)
-            elif atag == FILE_ATTR_TAG_UNUSED2:
-                break
+                attrs = compress
+            elif atag == FILE_ATTR_TAG_STAGEHEADER:
+                atag, alen, load, entry_offset, memsize = struct.unpack(
+                    ATTR_STAGE_FORMAT, data)
+                attrs = (load, entry_offset, memsize)
+            else:
+                print('Unknown attribute tag %x' % atag)
             attr_size -= len(data)
-        return compress
+        return attrs

     def _read_header(self, fd):
         """Read the master header
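To see the tag dispatch in isolation, a hedged round-trip sketch (assumed values, standalone rather than the module's code)::

    import struct

    ATTR_STAGE_FORMAT = '>IIQII'
    FILE_ATTR_TAG_STAGEHEADER = 0x53746748  # 'StgH'

    data = struct.pack(ATTR_STAGE_FORMAT, FILE_ATTR_TAG_STAGEHEADER, 0x18,
                       0xfef20000, 2, 0x100)
    atag = struct.unpack('>I', data[:4])[0]
    if atag == FILE_ATTR_TAG_STAGEHEADER:
        _, _, load, entry_offset, memsize = struct.unpack(ATTR_STAGE_FORMAT, data)
        attrs = (load, entry_offset, memsize)  # entry = load + entry_offset
    print(attrs)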
@@ -843,7 +861,8 @@ class CbfsReader(object):
     def _read_string(cls, fd):
         """Read a string from a file

-        This reads a string and aligns the data to the next alignment boundary
+        This reads a string and aligns the data to the next alignment boundary.
+        The string must be nul-terminated

         Args:
             fd: File to read from
@@ -854,8 +873,8 @@ class CbfsReader(object):
         """
         val = b''
         while True:
-            data = fd.read(FILENAME_ALIGN)
-            if len(data) < FILENAME_ALIGN:
+            data = fd.read(ATTRIBUTE_ALIGN)
+            if len(data) < ATTRIBUTE_ALIGN:
                 return None
             pos = data.find(b'\0')
             if pos == -1:
@@ -96,7 +96,7 @@ class TestCbfs(unittest.TestCase):
         self.assertEqual(arch, cbfs.arch)
         return cbfs

-    def _check_uboot(self, cbfs, ftype=cbfs_util.TYPE_RAW, offset=0x38,
+    def _check_uboot(self, cbfs, ftype=cbfs_util.TYPE_RAW, offset=0x20,
                      data=U_BOOT_DATA, cbfs_offset=None):
         """Check that the U-Boot file is as expected
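Rough arithmetic (an inference from the constants above, not text from the commit) for why the default offset drops from 0x38 to 0x20: the filename padding now rounds up to 4 bytes rather than 16, and an uncompressed raw file no longer carries a compression attribute::

    FILE_HEADER_LEN = 0x18
    ATTR_COMPRESSION_LEN = 0x10

    def align_int(val, align):
        return (val + align - 1) // align * align

    name_len = len('u-boot') + 1  # nul-terminated
    old = FILE_HEADER_LEN + align_int(name_len, 16) + ATTR_COMPRESSION_LEN
    new = FILE_HEADER_LEN + align_int(name_len, 4)
    print(hex(old), hex(new))  # 0x38 0x20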
@@ -122,7 +122,7 @@ class TestCbfs(unittest.TestCase):
         self.assertEqual(len(data), cfile.memlen)
         return cfile

-    def _check_dtb(self, cbfs, offset=0x38, data=U_BOOT_DTB_DATA,
+    def _check_dtb(self, cbfs, offset=0x24, data=U_BOOT_DTB_DATA,
                    cbfs_offset=None):
         """Check that the U-Boot dtb file is as expected
@@ -391,7 +391,7 @@ class TestCbfs(unittest.TestCase):
             cbfs_util.DEBUG = True
             with test_util.capture_sys_output() as (stdout, _stderr):
                 cbfs_util.CbfsReader(data)
-            self.assertEqual('name u-boot\ndata %s\n' % U_BOOT_DATA,
+            self.assertEqual('name u-boot\nftype 50\ndata %s\n' % U_BOOT_DATA,
                              stdout.getvalue())
         finally:
             cbfs_util.DEBUG = False
@@ -437,8 +437,9 @@ class TestCbfs(unittest.TestCase):
             pos = fd.tell()

         # Create a new CBFS with only the first 4 bytes of the compression tag,
-        # then try to read the file
-        tag_pos = pos + cbfs_util.FILE_HEADER_LEN + cbfs_util.FILENAME_ALIGN
+        # then try to read the file. Note that the tag gets pushed out 4 bytes
+        tag_pos = (4 + pos + cbfs_util.FILE_HEADER_LEN +
+                   cbfs_util.ATTRIBUTE_ALIGN)
         newdata = data[:tag_pos + 4]
         with test_util.capture_sys_output() as (stdout, _stderr):
             with io.BytesIO(newdata) as fd:
@@ -474,7 +475,7 @@ class TestCbfs(unittest.TestCase):
         self._compare_expected_cbfs(data, cbfs_fname)

     def test_cbfs_stage(self):
-        """Tests handling of a Coreboot Filesystem (CBFS)"""
+        """Tests handling of a CBFS stage"""
         if not elf.ELF_TOOLS:
             self.skipTest('Python elftools not available')
         elf_fname = os.path.join(self._indir, 'cbfs-stage.elf')
@@ -489,7 +490,7 @@ class TestCbfs(unittest.TestCase):
         load = 0xfef20000
         entry = load + 2

-        cfile = self._check_uboot(cbfs, cbfs_util.TYPE_STAGE, offset=0x28,
+        cfile = self._check_uboot(cbfs, cbfs_util.TYPE_STAGE, offset=0x38,
                                   data=U_BOOT_DATA + U_BOOT_DTB_DATA)

         self.assertEqual(entry, cfile.entry)
@@ -520,7 +521,7 @@ class TestCbfs(unittest.TestCase):
         self.assertIn('u-boot', cbfs.files)
         cfile = cbfs.files['u-boot']
         self.assertEqual(cfile.name, 'u-boot')
-        self.assertEqual(cfile.offset, 56)
+        self.assertEqual(cfile.offset, 0x30)
         self.assertEqual(cfile.data, COMPRESS_DATA)
         self.assertEqual(cfile.ftype, cbfs_util.TYPE_RAW)
         self.assertEqual(cfile.compress, cbfs_util.COMPRESS_LZ4)
@@ -529,7 +530,7 @@ class TestCbfs(unittest.TestCase):
         self.assertIn('u-boot-dtb', cbfs.files)
         cfile = cbfs.files['u-boot-dtb']
         self.assertEqual(cfile.name, 'u-boot-dtb')
-        self.assertEqual(cfile.offset, 56)
+        self.assertEqual(cfile.offset, 0x34)
         self.assertEqual(cfile.data, COMPRESS_DATA)
         self.assertEqual(cfile.ftype, cbfs_util.TYPE_RAW)
         self.assertEqual(cfile.compress, cbfs_util.COMPRESS_LZMA)
@@ -598,8 +599,8 @@ class TestCbfs(unittest.TestCase):
         data = cbw.get_data()

         cbfs = cbfs_util.CbfsReader(data)
-        self.assertEqual(0x38, cbfs.files['u-boot'].cbfs_offset)
-        self.assertEqual(0x78, cbfs.files['u-boot-dtb'].cbfs_offset)
+        self.assertEqual(0x20, cbfs.files['u-boot'].cbfs_offset)
+        self.assertEqual(0x64, cbfs.files['u-boot-dtb'].cbfs_offset)


 if __name__ == '__main__':
@@ -858,6 +858,8 @@ def Binman(args):
             data = state.GetFdtForEtype('u-boot-dtb').GetContents()
             elf.UpdateFile(*elf_params, data)

+        bintool.Bintool.set_missing_list(None)
+
         # This can only be True if -M is provided, since otherwise binman
         # would have raised an error already
         if invalid:
@@ -1944,6 +1944,7 @@ Properties / Entry arguments:
     - core: core on which bootloader runs, valid cores are 'secure' and 'public'
     - content: phandle of SPL in case of legacy bootflow or phandles of component binaries
       in case of combined bootflow
+    - core-opts (optional): lockstep (0) or split (2) mode set to 0 by default

 The following properties are only for generating a combined bootflow binary:
     - sysfw-inner-cert: boolean if binary contains sysfw inner certificate
@@ -32,6 +32,7 @@ class Entry_ti_secure_rom(Entry_x509_cert):
     - core: core on which bootloader runs, valid cores are 'secure' and 'public'
     - content: phandle of SPL in case of legacy bootflow or phandles of component binaries
       in case of combined bootflow
+    - core-opts (optional): lockstep (0) or split (2) mode set to 0 by default

 The following properties are only for generating a combined bootflow binary:
     - sysfw-inner-cert: boolean if binary contains sysfw inner certificate
@@ -69,6 +70,7 @@ class Entry_ti_secure_rom(Entry_x509_cert):
         self.sw_rev = fdt_util.GetInt(self._node, 'sw-rev', 1)
         self.sha = fdt_util.GetInt(self._node, 'sha', 512)
         self.core = fdt_util.GetString(self._node, 'core', 'secure')
+        self.bootcore_opts = fdt_util.GetInt(self._node, 'core-opts')
         self.key_fname = self.GetEntryArgsOrProps([
             EntryArg('keyfile', str)], required=True)[0]
         if self.combined:
@@ -97,17 +99,19 @@ class Entry_ti_secure_rom(Entry_x509_cert):
             bytes content of the entry, which is the certificate binary for the
             provided data
         """
+        if self.bootcore_opts is None:
+            self.bootcore_opts = 0
+
         if self.core == 'secure':
             if self.countersign:
                 self.cert_type = 3
             else:
                 self.cert_type = 2
             self.bootcore = 0
-            self.bootcore_opts = 32
         else:
             self.cert_type = 1
             self.bootcore = 16
-            self.bootcore_opts = 0

         return super().GetCertificate(required=required, type='rom')

     def CombinedGetCertificate(self, required):
@@ -126,6 +130,9 @@ class Entry_ti_secure_rom(Entry_x509_cert):
         self.num_comps = 3
         self.sha_type = SHA_OIDS[self.sha]

+        if self.bootcore_opts is None:
+            self.bootcore_opts = 0
+
         # sbl
         self.content = fdt_util.GetPhandleList(self._node, 'content-sbl')
         input_data_sbl = self.GetContents(required)
@@ -136,7 +136,8 @@ class Entry_x509_cert(Entry_collection):
                 imagesize_sysfw_data=self.imagesize_sysfw_data,
                 hashval_sysfw_data=self.hashval_sysfw_data,
                 sysfw_inner_cert_ext_boot_block=self.sysfw_inner_cert_ext_boot_block,
-                dm_data_ext_boot_block=self.dm_data_ext_boot_block
+                dm_data_ext_boot_block=self.dm_data_ext_boot_block,
+                bootcore_opts=self.bootcore_opts
                 )
             if stdout is not None:
                 data = tools.read_file(output_fname)
@@ -2667,12 +2667,12 @@ class TestFunctional(unittest.TestCase):
             'cbfs:offset': 0,
             'cbfs:size': len(data),
             'cbfs:image-pos': 0,
-            'cbfs/u-boot:offset': 0x38,
+            'cbfs/u-boot:offset': 0x30,
             'cbfs/u-boot:uncomp-size': len(U_BOOT_DATA),
-            'cbfs/u-boot:image-pos': 0x38,
-            'cbfs/u-boot-dtb:offset': 0xb8,
+            'cbfs/u-boot:image-pos': 0x30,
+            'cbfs/u-boot-dtb:offset': 0xa4,
             'cbfs/u-boot-dtb:size': len(U_BOOT_DATA),
-            'cbfs/u-boot-dtb:image-pos': 0xb8,
+            'cbfs/u-boot-dtb:image-pos': 0xa4,
         }, props)

     def testCbfsBadType(self):
@@ -2854,7 +2854,7 @@ class TestFunctional(unittest.TestCase):
             ' u-boot 0 4 u-boot 0',
             ' section 100 %x section 100' % section_size,
             ' cbfs 100 400 cbfs 0',
-            ' u-boot 138 4 u-boot 38',
+            ' u-boot 120 4 u-boot 20',
             ' u-boot-dtb 180 105 u-boot-dtb 80 3c9',
             ' u-boot-dtb 500 %x u-boot-dtb 400 3c9' % fdt_size,
             ' fdtmap %x 3bd fdtmap %x' %
@@ -9,6 +9,7 @@
 	binman {
 		ti-secure-rom {
 			content = <&unsecure_binary>;
+			core-opts = <2>;
 		};

 		unsecure_binary: blob-ext {
 			filename = "ti_unsecure.bin";
@@ -35,6 +35,10 @@ from u_boot_pylib.terminal import tprint
 # which indicates that BREAK_ME has an empty default
 RE_NO_DEFAULT = re.compile(b'\((\w+)\) \[] \(NEW\)')

+# Symbol types which appear in the bloat feature (-B). Others are silently
+# dropped when reading in the 'nm' output
+NM_SYMBOL_TYPES = 'tTdDbBr'
+
 """
 Theory of Operation
@@ -693,7 +697,7 @@ class Builder:
                 parts = line.split()
                 if line and len(parts) == 3:
                     size, type, name = line.split()
-                    if type in 'tTdDbB':
+                    if type in NM_SYMBOL_TYPES:
                         # function names begin with '.' on 64-bit powerpc
                         if '.' in name[1:]:
                             name = 'static.' + name.split('.')[0]
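A toy illustration (sample data assumed) of the filter: only symbols whose nm type letter is in NM_SYMBOL_TYPES are counted, and 'r' (read-only data) is now included::

    NM_SYMBOL_TYPES = 'tTdDbBr'

    # Assumed sample of 'nm --size-sort' output: size, type, name
    nm_output = '''00000004 r version_string
    00000010 T main
    00000008 U external_ref'''

    for line in nm_output.splitlines():
        size, sym_type, name = line.split()
        if sym_type in NM_SYMBOL_TYPES:
            print(name, int(size, 16))  # external_ref ('U') is dropped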
@@ -103,6 +103,8 @@ send.add_argument('--no-signoff', action='store_false', dest='add_signoff',
                   default=True, help="Don't add Signed-off-by to patches")
 send.add_argument('--smtp-server', type=str,
                   help="Specify the SMTP server to 'git send-email'")
+send.add_argument('--keep-change-id', action='store_true',
+                  help='Preserve Change-Id tags in patches to send.')

 send.add_argument('patchfiles', nargs='*')
@@ -16,11 +16,14 @@ from patman import gitutil
 from patman import patchstream
 from u_boot_pylib import terminal


 def setup():
     """Do required setup before doing anything"""
     gitutil.setup()

-def prepare_patches(col, branch, count, start, end, ignore_binary, signoff):
+def prepare_patches(col, branch, count, start, end, ignore_binary, signoff,
+                    keep_change_id=False):
     """Figure out what patches to generate, then generate them

     The patch files are written to the current directory, e.g. 0001_xxx.patch
@@ -35,6 +38,7 @@ def prepare_patches(col, branch, count, start, end, ignore_binary, signoff,
         end (int): End patch to use (0=last one in series, 1=one before that,
             etc.)
         ignore_binary (bool): Don't generate patches for binary files
+        keep_change_id (bool): Preserve the Change-Id tag.

     Returns:
         Tuple:
@@ -59,11 +63,12 @@ def prepare_patches(col, branch, count, start, end, ignore_binary, signoff,
         branch, start, to_do, ignore_binary, series, signoff)

     # Fix up the patch files to our liking, and insert the cover letter
-    patchstream.fix_patches(series, patch_files)
+    patchstream.fix_patches(series, patch_files, keep_change_id)
     if cover_fname and series.get('cover'):
         patchstream.insert_cover_letter(cover_fname, series, to_do)
     return series, cover_fname, patch_files


 def check_patches(series, patch_files, run_checkpatch, verbose, use_tree):
     """Run some checks on a set of patches
@@ -166,7 +171,8 @@ def send(args):
     col = terminal.Color()
     series, cover_fname, patch_files = prepare_patches(
         col, args.branch, args.count, args.start, args.end,
-        args.ignore_binary, args.add_signoff)
+        args.ignore_binary, args.add_signoff,
+        keep_change_id=args.keep_change_id)
     ok = check_patches(series, patch_files, args.check_patch,
                        args.verbose, args.check_patch_use_tree)
|
@ -147,8 +147,9 @@ def get_upstream(git_dir, branch):
|
|||
if remote == '.':
|
||||
return merge, None
|
||||
elif remote and merge:
|
||||
leaf = merge.split('/')[-1]
|
||||
return '%s/%s' % (remote, leaf), None
|
||||
# Drop the initial refs/heads from merge
|
||||
leaf = merge.split('/', maxsplit=2)[2:]
|
||||
return '%s/%s' % (remote, '/'.join(leaf)), None
|
||||
else:
|
||||
raise ValueError("Cannot determine upstream branch for branch "
|
||||
"'%s' remote='%s', merge='%s'"
|
||||
|
|
|
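A standalone sketch of the fixed computation: the first two components (refs/heads) are dropped and any remaining slashes in the branch name are preserved::

    def upstream_name(remote, merge):
        # Drop the initial refs/heads from merge, keeping slashes in the leaf
        leaf = merge.split('/', maxsplit=2)[2:]
        return '%s/%s' % (remote, '/'.join(leaf))

    print(upstream_name('origin', 'refs/heads/next'))         # origin/next
    print(upstream_name('origin', 'refs/heads/wip/feature'))  # origin/wip/feature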
@@ -68,6 +68,7 @@ STATE_PATCH_SUBJECT = 1 # In patch subject (first line of log for a commit)
 STATE_PATCH_HEADER = 2  # In patch header (after the subject)
 STATE_DIFFS = 3         # In the diff part (past --- line)


 class PatchStream:
     """Class for detecting/injecting tags in a patch or series of patches
@@ -76,7 +77,7 @@ class PatchStream:
     unwanted tags or inject additional ones. These correspond to the two
     phases of processing.
     """
-    def __init__(self, series, is_log=False):
+    def __init__(self, series, is_log=False, keep_change_id=False):
         self.skip_blank = False          # True to skip a single blank line
         self.found_test = False          # Found a TEST= line
         self.lines_after_test = 0        # Number of lines found after TEST=
@@ -86,6 +87,7 @@ class PatchStream:
         self.section = []                # The current section...END section
         self.series = series             # Info about the patch series
         self.is_log = is_log             # True if indent like git log
+        self.keep_change_id = keep_change_id  # True to keep Change-Id tags
         self.in_change = None            # Name of the change list we are in
         self.change_version = 0          # Non-zero if we are in a change list
         self.change_lines = []           # Lines of the current change
@@ -452,6 +454,8 @@ class PatchStream:

         # Detect Change-Id tags
         elif change_id_match:
+            if self.keep_change_id:
+                out = [line]
             value = change_id_match.group(1)
             if self.is_log:
                 if self.commit.change_id:
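A toy sketch (not patman's actual stream processing) of the behaviour this enables: by default the Change-Id line is stripped from outgoing patches; with keep_change_id it is passed through::

    import re

    RE_CHANGE_ID = re.compile('^Change-Id: *(.*)')

    def filter_lines(lines, keep_change_id=False):
        out = []
        for line in lines:
            if RE_CHANGE_ID.match(line) and not keep_change_id:
                continue  # strip the tag, the default behaviour
            out.append(line)
        return out

    body = ['patman: add an option', '', 'Change-Id: I80fe1d0c0b7dd1']
    print(filter_lines(body))                       # tag removed
    print(filter_lines(body, keep_change_id=True))  # tag preserved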
@@ -763,7 +767,7 @@ def get_metadata_for_test(text):
     pst.finalise()
     return series

-def fix_patch(backup_dir, fname, series, cmt):
+def fix_patch(backup_dir, fname, series, cmt, keep_change_id=False):
     """Fix up a patch file, by adding/removing as required.

     We remove our tags from the patch file, insert changes lists, etc.
@@ -776,6 +780,7 @@ def fix_patch(backup_dir, fname, series, cmt, keep_change_id=False):
         fname (str): Filename to patch file to process
         series (Series): Series information about this patch set
         cmt (Commit): Commit object for this patch file
+        keep_change_id (bool): Keep the Change-Id tag.

     Return:
         list: A list of errors, each str, or [] if all ok.
@@ -783,7 +788,7 @@ def fix_patch(backup_dir, fname, series, cmt, keep_change_id=False):
     handle, tmpname = tempfile.mkstemp()
     outfd = os.fdopen(handle, 'w', encoding='utf-8')
     infd = open(fname, 'r', encoding='utf-8')
-    pst = PatchStream(series)
+    pst = PatchStream(series, keep_change_id=keep_change_id)
     pst.commit = cmt
     pst.process_stream(infd, outfd)
     infd.close()
@@ -795,7 +800,7 @@ def fix_patch(backup_dir, fname, series, cmt, keep_change_id=False):
     shutil.move(tmpname, fname)
     return cmt.warn

-def fix_patches(series, fnames):
+def fix_patches(series, fnames, keep_change_id=False):
     """Fix up a list of patches identified by filenames

     The patch files are processed in place, and overwritten.
@@ -803,6 +808,7 @@ def fix_patches(series, fnames, keep_change_id=False):
     Args:
         series (Series): The Series object
         fnames (:type: list of str): List of patch files to process
+        keep_change_id (bool): Keep the Change-Id tag.
     """
     # Current workflow creates patches, so we shouldn't need a backup
     backup_dir = None  #tempfile.mkdtemp('clean-patch')
@@ -811,7 +817,8 @@ def fix_patches(series, fnames, keep_change_id=False):
         cmt = series.commits[count]
         cmt.patch = fname
         cmt.count = count
-        result = fix_patch(backup_dir, fname, series, cmt)
+        result = fix_patch(backup_dir, fname, series, cmt,
+                           keep_change_id=keep_change_id)
         if result:
             print('%d warning%s for %s:' %
                   (len(result), 's' if len(result) > 1 else '', fname))
@@ -371,11 +371,12 @@ Series-process-log: sort, uniq
     Separate each tag with a comma.

 Change-Id:
-    This tag is stripped out but is used to generate the Message-Id
-    of the emails that will be sent. When you keep the Change-Id the
-    same you are asserting that this is a slightly different version
-    (but logically the same patch) as other patches that have been
-    sent out with the same Change-Id.
+    This tag is used to generate the Message-Id of the emails that
+    will be sent. When you keep the Change-Id the same you are
+    asserting that this is a slightly different version (but logically
+    the same patch) as other patches that have been sent out with the
+    same Change-Id. The Change-Id tag line is removed from outgoing
+    patches, unless the `keep_change_id` settings is set to `True`.

 Various other tags are silently removed, like these Chrome OS and
 Gerrit tags::
@@ -209,6 +209,22 @@ Signed-off-by: Simon Glass <sjg@chromium.org>

         rc = os.system('diff -u %s %s' % (inname, expname))
         self.assertEqual(rc, 0)
         os.remove(inname)
+
+        # Test whether the keep_change_id settings works.
+        inhandle, inname = tempfile.mkstemp()
+        infd = os.fdopen(inhandle, 'w', encoding='utf-8')
+        infd.write(data)
+        infd.close()
+
+        patchstream.fix_patch(None, inname, series.Series(), com,
+                              keep_change_id=True)
+
+        with open(inname, 'r') as f:
+            content = f.read()
+            self.assertIn(
+                'Change-Id: I80fe1d0c0b7dd10aa58ce5bb1d9290b6664d5413',
+                content)
+
+        os.remove(inname)
         os.remove(expname)
@@ -105,9 +105,7 @@ def run_pipe(pipe_list, infile=None, outfile=None,
                 last_pipe.communicate_filter(output_func))
     if result.stdout and oneline:
         result.output = result.stdout.rstrip(b'\r\n')
-        result.return_code = last_pipe.wait()
-    else:
-        result.return_code = os.waitpid(last_pipe.pid, 0)[1]
+    result.return_code = last_pipe.wait()
     if raise_on_error and result.return_code:
         raise Exception("Error running '%s'" % user_pipestr)
     return result.to_output(binary)
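The simplification rests on Popen.wait() reaping the child and returning its decoded exit status directly, where os.waitpid() returned a raw status word; a minimal check::

    import subprocess

    proc = subprocess.Popen(['true'])  # assumes a POSIX 'true' binary
    print(proc.wait())  # 0: wait() returns the exit code, already decoded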