Merge branch '2023-07-17-assorted-updates'

- Merge in some Kconfig dependency fixes, typo fixes, an erofs update,
  a shell portability fix, an env save fix, better MBR+GPT support, and
  some Android A/B enhancements.
Tom Rini 2023-07-18 09:55:32 -04:00
commit 890233ca55
31 changed files with 974 additions and 451 deletions


@ -2444,7 +2444,7 @@ quiet_cmd_genenv = GENENV $@
cmd_genenv = \
$(objtree)/tools/printinitialenv | \
sed -e '/^\s*$$/d' | \
sort --field-separator== -k1,1 --stable -o $@
sort --field-separator='=' -k1,1 --stable -o $@
u-boot-initial-env: $(env_h) FORCE
$(Q)$(MAKE) $(build)=tools $(objtree)/tools/printinitialenv


@ -85,11 +85,13 @@ static int ab_control_default(struct bootloader_control *abc)
*/
static int ab_control_create_from_disk(struct blk_desc *dev_desc,
const struct disk_partition *part_info,
struct bootloader_control **abc)
struct bootloader_control **abc,
ulong offset)
{
ulong abc_offset, abc_blocks, ret;
abc_offset = offsetof(struct bootloader_message_ab, slot_suffix);
abc_offset = offset +
offsetof(struct bootloader_message_ab, slot_suffix);
if (abc_offset % part_info->blksz) {
log_err("ANDROID: Boot control block not block aligned.\n");
return -EINVAL;
@ -135,11 +137,12 @@ static int ab_control_create_from_disk(struct blk_desc *dev_desc,
*/
static int ab_control_store(struct blk_desc *dev_desc,
const struct disk_partition *part_info,
struct bootloader_control *abc)
struct bootloader_control *abc, ulong offset)
{
ulong abc_offset, abc_blocks, ret;
abc_offset = offsetof(struct bootloader_message_ab, slot_suffix) /
abc_offset = offset +
offsetof(struct bootloader_message_ab, slot_suffix) /
part_info->blksz;
abc_blocks = DIV_ROUND_UP(sizeof(struct bootloader_control),
part_info->blksz);
@ -181,15 +184,19 @@ static int ab_compare_slots(const struct slot_metadata *a,
return 0;
}
int ab_select_slot(struct blk_desc *dev_desc, struct disk_partition *part_info)
int ab_select_slot(struct blk_desc *dev_desc, struct disk_partition *part_info,
bool dec_tries)
{
struct bootloader_control *abc = NULL;
u32 crc32_le;
int slot, i, ret;
bool store_needed = false;
char slot_suffix[4];
#if ANDROID_AB_BACKUP_OFFSET
struct bootloader_control *backup_abc = NULL;
#endif
ret = ab_control_create_from_disk(dev_desc, part_info, &abc);
ret = ab_control_create_from_disk(dev_desc, part_info, &abc, 0);
if (ret < 0) {
/*
* This condition represents an actual problem with the code or
@ -199,22 +206,53 @@ int ab_select_slot(struct blk_desc *dev_desc, struct disk_partition *part_info)
return ret;
}
#if ANDROID_AB_BACKUP_OFFSET
ret = ab_control_create_from_disk(dev_desc, part_info, &backup_abc,
ANDROID_AB_BACKUP_OFFSET);
if (ret < 0) {
free(abc);
return ret;
}
#endif
crc32_le = ab_control_compute_crc(abc);
if (abc->crc32_le != crc32_le) {
log_err("ANDROID: Invalid CRC-32 (expected %.8x, found %.8x),",
crc32_le, abc->crc32_le);
log_err("re-initializing A/B metadata.\n");
#if ANDROID_AB_BACKUP_OFFSET
crc32_le = ab_control_compute_crc(backup_abc);
if (backup_abc->crc32_le != crc32_le) {
log_err("ANDROID: Invalid backup CRC-32 ");
log_err("(expected %.8x, found %.8x),",
crc32_le, backup_abc->crc32_le);
#endif
ret = ab_control_default(abc);
if (ret < 0) {
free(abc);
return -ENODATA;
log_err("re-initializing A/B metadata.\n");
ret = ab_control_default(abc);
if (ret < 0) {
#if ANDROID_AB_BACKUP_OFFSET
free(backup_abc);
#endif
free(abc);
return -ENODATA;
}
#if ANDROID_AB_BACKUP_OFFSET
} else {
/*
* Backup is valid. Copy it to the primary
*/
memcpy(abc, backup_abc, sizeof(*abc));
}
#endif
store_needed = true;
}
if (abc->magic != BOOT_CTRL_MAGIC) {
log_err("ANDROID: Unknown A/B metadata: %.8x\n", abc->magic);
#if ANDROID_AB_BACKUP_OFFSET
free(backup_abc);
#endif
free(abc);
return -ENODATA;
}
@ -222,6 +260,9 @@ int ab_select_slot(struct blk_desc *dev_desc, struct disk_partition *part_info)
if (abc->version > BOOT_CTRL_VERSION) {
log_err("ANDROID: Unsupported A/B metadata version: %.8x\n",
abc->version);
#if ANDROID_AB_BACKUP_OFFSET
free(backup_abc);
#endif
free(abc);
return -ENODATA;
}
@ -272,8 +313,10 @@ int ab_select_slot(struct blk_desc *dev_desc, struct disk_partition *part_info)
log_err("ANDROID: Attempting slot %c, tries remaining %d\n",
BOOT_SLOT_NAME(slot),
abc->slot_info[slot].tries_remaining);
abc->slot_info[slot].tries_remaining--;
store_needed = true;
if (dec_tries) {
abc->slot_info[slot].tries_remaining--;
store_needed = true;
}
}
if (slot >= 0) {
@ -294,8 +337,21 @@ int ab_select_slot(struct blk_desc *dev_desc, struct disk_partition *part_info)
if (store_needed) {
abc->crc32_le = ab_control_compute_crc(abc);
ab_control_store(dev_desc, part_info, abc);
ab_control_store(dev_desc, part_info, abc, 0);
}
#if ANDROID_AB_BACKUP_OFFSET
/*
* If the backup doesn't match the primary, write the primary
* to the backup offset
*/
if (memcmp(backup_abc, abc, sizeof(*abc)) != 0) {
ab_control_store(dev_desc, part_info, abc,
ANDROID_AB_BACKUP_OFFSET);
}
free(backup_abc);
#endif
free(abc);
if (slot < 0)


@ -16,10 +16,19 @@ static int do_ab_select(struct cmd_tbl *cmdtp, int flag, int argc,
struct blk_desc *dev_desc;
struct disk_partition part_info;
char slot[2];
bool dec_tries = true;
if (argc != 4)
if (argc < 4)
return CMD_RET_USAGE;
for (int i = 4; i < argc; i++) {
if (strcmp(argv[i], "--no-dec") == 0) {
dec_tries = false;
} else {
return CMD_RET_USAGE;
}
}
/* Lookup the "misc" partition from argv[2] and argv[3] */
if (part_get_info_by_dev_and_name_or_num(argv[2], argv[3],
&dev_desc, &part_info,
@ -27,7 +36,8 @@ static int do_ab_select(struct cmd_tbl *cmdtp, int flag, int argc,
return CMD_RET_FAILURE;
}
ret = ab_select_slot(dev_desc, &part_info);
ret = ab_select_slot(dev_desc, &part_info, dec_tries);
if (ret < 0) {
printf("Android boot failed, error %d.\n", ret);
return CMD_RET_FAILURE;
@ -41,9 +51,9 @@ static int do_ab_select(struct cmd_tbl *cmdtp, int flag, int argc,
return CMD_RET_SUCCESS;
}
U_BOOT_CMD(ab_select, 4, 0, do_ab_select,
U_BOOT_CMD(ab_select, 5, 0, do_ab_select,
"Select the slot used to boot from and register the boot attempt.",
"<slot_var_name> <interface> <dev[:part|#part_name]>\n"
"<slot_var_name> <interface> <dev[:part|#part_name]> [--no-dec]\n"
" - Load the slot metadata from the partition 'part' on\n"
" device type 'interface' instance 'dev' and store the active\n"
" slot in the 'slot_var_name' variable. This also updates the\n"
@ -53,4 +63,6 @@ U_BOOT_CMD(ab_select, 4, 0, do_ab_select,
" - If 'part_name' is passed, preceded with a # instead of :, the\n"
" partition name whose label is 'part_name' will be looked up in\n"
" the partition table. This is commonly the \"misc\" partition.\n"
" - If '--no-dec' is set, the number of tries remaining will not be\n"
" decremented for the selected boot slot\n"
);
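
As a rough usage sketch (not part of the diff above), the reworked helper can
also be called directly from board or boot-flow code. ab_select_slot() and its
new dec_tries argument are taken from the hunks above; the header name and the
peek_active_slot() wrapper are assumptions for illustration. Passing
dec_tries = false is what the new --no-dec flag maps to.

#include <android_ab.h>	/* assumed location of the ab_select_slot() prototype */
#include <blk.h>
#include <part.h>

/*
 * Report the currently active slot without consuming a boot try.
 * dev_desc/misc_part are expected to come from a prior partition lookup,
 * e.g. part_get_info_by_dev_and_name_or_num() as in do_ab_select().
 */
static int peek_active_slot(struct blk_desc *dev_desc,
			    struct disk_partition *misc_part)
{
	/*
	 * dec_tries = false leaves tries_remaining untouched; invalid
	 * metadata may still be re-initialized and written back.
	 */
	int slot = ab_select_slot(dev_desc, misc_part, false);

	if (slot < 0)
		return slot;

	printf("Active slot: %c\n", 'a' + slot);	/* 0 -> 'a', 1 -> 'b' */
	return slot;
}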


@ -244,7 +244,7 @@ static int do_verify_mbr(struct blk_desc *dev, const char *str)
for (i = 0; i < count; i++) {
struct disk_partition p;
if (part_get_info(dev, i + 1, &p))
if (part_get_info_by_type(dev, i + 1, PART_TYPE_DOS, &p))
goto fail;
if ((partitions[i].size && p.size != partitions[i].size) ||


@ -936,6 +936,15 @@ config ANDROID_AB
allows a bootloader to try a new version of the system but roll back
to previous version if the new one didn't boot all the way.
config ANDROID_AB_BACKUP_OFFSET
hex "Offset of backup bootloader control"
depends on ANDROID_AB
default 0x0
help
If non-zero, a backup bootloader message starting at this offset in
the partition will be tried in the event that the primary one (starting
at offset 0) fails its checksum.
endmenu
menu "Blob list"


@ -51,6 +51,7 @@ CONFIG_CMD_GPT_RENAME=y
CONFIG_CMD_IDE=y
CONFIG_CMD_I2C=y
CONFIG_CMD_LOADM=y
CONFIG_CMD_MBR=y
CONFIG_CMD_OSD=y
CONFIG_CMD_PCI=y
CONFIG_CMD_READ=y


@ -76,6 +76,7 @@ CONFIG_CMD_IDE=y
CONFIG_CMD_I2C=y
CONFIG_CMD_LOADM=y
CONFIG_CMD_LSBLK=y
CONFIG_CMD_MBR=y
CONFIG_CMD_MMC=y
CONFIG_CMD_MUX=y
CONFIG_CMD_OSD=y
@ -132,6 +133,7 @@ CONFIG_CMD_MTDPARTS=y
CONFIG_CMD_STACKPROTECTOR_TEST=y
CONFIG_MAC_PARTITION=y
CONFIG_AMIGA_PARTITION=y
CONFIG_DOS_PARTITION=y
CONFIG_OF_CONTROL=y
CONFIG_OF_LIVE=y
CONFIG_ENV_IS_NOWHERE=y


@ -44,6 +44,7 @@ CONFIG_CMD_GPIO=y
CONFIG_CMD_GPT=y
CONFIG_CMD_IDE=y
CONFIG_CMD_I2C=y
CONFIG_CMD_MBR=y
CONFIG_CMD_MMC=y
CONFIG_CMD_OSD=y
CONFIG_CMD_PCI=y


@ -59,6 +59,7 @@ CONFIG_CMD_GPIO=y
CONFIG_CMD_GPT=y
CONFIG_CMD_IDE=y
CONFIG_CMD_I2C=y
CONFIG_CMD_MBR=y
CONFIG_CMD_OSD=y
CONFIG_CMD_PCI=y
CONFIG_CMD_REMOTEPROC=y


@ -60,6 +60,7 @@ CONFIG_CMD_GPIO=y
CONFIG_CMD_GPT=y
CONFIG_CMD_IDE=y
CONFIG_CMD_I2C=y
CONFIG_CMD_MBR=y
CONFIG_CMD_OSD=y
CONFIG_CMD_PCI=y
CONFIG_CMD_REMOTEPROC=y


@ -75,6 +75,7 @@ CONFIG_CMD_GPIO=y
CONFIG_CMD_GPT=y
CONFIG_CMD_IDE=y
CONFIG_CMD_I2C=y
CONFIG_CMD_MBR=y
CONFIG_CMD_OSD=y
CONFIG_CMD_PCI=y
CONFIG_CMD_REMOTEPROC=y


@ -51,7 +51,7 @@ config SPL_MAC_PARTITION
config DOS_PARTITION
bool "Enable MS Dos partition table"
default y if DISTRO_DEFAULTS
default y if BOOT_DEFAULTS
default y if x86 || CMD_FAT || USB_STORAGE
select PARTITIONS
help
@ -67,7 +67,7 @@ config SPL_DOS_PARTITION
config ISO_PARTITION
bool "Enable ISO partition table"
default y if DISTRO_DEFAULTS
default y if BOOT_DEFAULTS
default y if MIPS || ARCH_TEGRA
select PARTITIONS
@ -91,7 +91,7 @@ config SPL_AMIGA_PARTITION
config EFI_PARTITION
bool "Enable EFI GPT partition table"
default y if DISTRO_DEFAULTS
default y if BOOT_DEFAULTS
default y if ARCH_TEGRA
select PARTITIONS
select LIB_UUID
@ -139,7 +139,7 @@ config SPL_EFI_PARTITION
config PARTITION_UUIDS
bool "Enable support of UUID for partition"
depends on PARTITIONS
default y if DISTRO_DEFAULTS
default y if BOOT_DEFAULTS
default y if EFI_PARTITION
select LIB_UUID
help


@ -26,6 +26,22 @@
/* Check all partition types */
#define PART_TYPE_ALL -1
static struct part_driver *part_driver_get_type(int part_type)
{
struct part_driver *drv =
ll_entry_start(struct part_driver, part_driver);
const int n_ents = ll_entry_count(struct part_driver, part_driver);
struct part_driver *entry;
for (entry = drv; entry != drv + n_ents; entry++) {
if (part_type == entry->part_type)
return entry;
}
/* Not found */
return NULL;
}
static struct part_driver *part_driver_lookup_type(struct blk_desc *dev_desc)
{
struct part_driver *drv =
@ -44,10 +60,7 @@ static struct part_driver *part_driver_lookup_type(struct blk_desc *dev_desc)
}
}
} else {
for (entry = drv; entry != drv + n_ents; entry++) {
if (dev_desc->part_type == entry->part_type)
return entry;
}
return part_driver_get_type(dev_desc->part_type);
}
/* Not found */
@ -322,8 +335,8 @@ void part_print(struct blk_desc *dev_desc)
drv->print(dev_desc);
}
int part_get_info(struct blk_desc *dev_desc, int part,
struct disk_partition *info)
int part_get_info_by_type(struct blk_desc *dev_desc, int part, int part_type,
struct disk_partition *info)
{
struct part_driver *drv;
@ -336,7 +349,12 @@ int part_get_info(struct blk_desc *dev_desc, int part,
info->type_guid[0] = 0;
#endif
drv = part_driver_lookup_type(dev_desc);
if (part_type == PART_TYPE_UNKNOWN) {
drv = part_driver_lookup_type(dev_desc);
} else {
drv = part_driver_get_type(part_type);
}
if (!drv) {
debug("## Unknown partition table type %x\n",
dev_desc->part_type);
@ -356,6 +374,12 @@ int part_get_info(struct blk_desc *dev_desc, int part,
return -ENOENT;
}
int part_get_info(struct blk_desc *dev_desc, int part,
struct disk_partition *info)
{
return part_get_info_by_type(dev_desc, part, PART_TYPE_UNKNOWN, info);
}
int part_get_info_whole_disk(struct blk_desc *dev_desc,
struct disk_partition *info)
{
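
As a rough sketch (not part of the diff), the new part_get_info_by_type()
entry point lets a caller force a specific table driver instead of the
auto-detected one, which is how the do_verify_mbr() hunk reads MBR entries on
a disk whose descriptor may have been detected as GPT. The wrapper name below
is made up for illustration; part_get_info_by_type(), PART_TYPE_DOS and
PART_TYPE_UNKNOWN come from the hunks above.

#include <blk.h>
#include <part.h>

/*
 * Read MBR entry 'partnum' even when dev_desc->part_type was detected as
 * something else (e.g. a hybrid MBR+GPT disk).  Passing PART_TYPE_UNKNOWN
 * instead would keep the old auto-detected behaviour of part_get_info().
 */
static int read_mbr_entry(struct blk_desc *dev_desc, int partnum,
			  struct disk_partition *info)
{
	return part_get_info_by_type(dev_desc, partnum, PART_TYPE_DOS, info);
}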


@ -31,6 +31,12 @@ boot script. This command analyzes and processes A/B metadata stored on a
special partition (e.g. ``misc``) and determines which slot should be used for
booting up.
The A/B metadata partition can carry a backup bootloader_message block so that
one copy stays valid even if a write is interrupted. This is enabled in your
board configuration file::
CONFIG_ANDROID_AB_BACKUP_OFFSET=0x1000
Command usage
-------------


@ -35,6 +35,7 @@ config DFU_TIMEOUT
config DFU_MMC
bool "MMC back end for DFU"
depends on MMC
help
This option enables using DFU to read and write to MMC based storage.

env/common.c

@ -353,6 +353,7 @@ int env_check_redund(const char *buf1, int buf1_read_fail,
tmp_env2->crc;
if (!crc1_ok && !crc2_ok) {
gd->env_valid = ENV_INVALID;
return -ENOMSG; /* needed for env_load() */
} else if (crc1_ok && !crc2_ok) {
gd->env_valid = ENV_VALID;
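
As a rough sketch (not part of the diff), the new -ENOMSG return gives
redundant-environment drivers a way to tell env_load() that both copies were
read but neither had a valid CRC. The wrapper below is illustrative only, and
the second buffer/flag pair of env_check_redund() is assumed to mirror the
first pair shown in the hunk header.

#include <env_internal.h>	/* assumed to declare env_check_redund() */
#include <errno.h>

static int load_redundant_env(const char *buf1, int rd1_failed,
			      const char *buf2, int rd2_failed)
{
	int ret = env_check_redund(buf1, rd1_failed, buf2, rd2_failed);

	/*
	 * -ENOMSG: both copies had bad CRCs.  Propagating it (instead of 0)
	 * lets env_load() fall back to the default environment rather than
	 * importing garbage.
	 */
	if (ret)
		return ret;

	/* gd->env_valid now records which copy (ENV_VALID/ENV_REDUND) to use */
	return 0;
}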


@ -12,23 +12,23 @@ static int erofs_map_blocks_flatmode(struct erofs_inode *inode,
struct erofs_inode *vi = inode;
bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
nblocks = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
nblocks = BLK_ROUND_UP(inode->i_size);
lastblk = nblocks - tailendpacking;
/* there is no hole in flatmode */
map->m_flags = EROFS_MAP_MAPPED;
if (offset < blknr_to_addr(lastblk)) {
map->m_pa = blknr_to_addr(vi->u.i_blkaddr) + map->m_la;
map->m_plen = blknr_to_addr(lastblk) - offset;
if (offset < erofs_pos(lastblk)) {
map->m_pa = erofs_pos(vi->u.i_blkaddr) + map->m_la;
map->m_plen = erofs_pos(lastblk) - offset;
} else if (tailendpacking) {
/* 2 - inode inline B: inode, [xattrs], inline last blk... */
map->m_pa = iloc(vi->nid) + vi->inode_isize +
vi->xattr_isize + erofs_blkoff(map->m_la);
map->m_plen = inode->i_size - offset;
/* inline data should be located in one meta block */
if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
/* inline data should be located in the same meta block */
if (erofs_blkoff(map->m_pa) + map->m_plen > erofs_blksiz()) {
erofs_err("inline data cross block boundary @ nid %" PRIu64,
vi->nid);
DBG_BUGON(1);
@ -55,7 +55,7 @@ int erofs_map_blocks(struct erofs_inode *inode,
{
struct erofs_inode *vi = inode;
struct erofs_inode_chunk_index *idx;
u8 buf[EROFS_BLKSIZ];
u8 buf[EROFS_MAX_BLOCK_SIZE];
u64 chunknr;
unsigned int unit;
erofs_off_t pos;
@ -87,7 +87,7 @@ int erofs_map_blocks(struct erofs_inode *inode,
map->m_la = chunknr << vi->u.chunkbits;
map->m_plen = min_t(erofs_off_t, 1UL << vi->u.chunkbits,
roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));
roundup(inode->i_size - map->m_la, erofs_blksiz()));
/* handle block map */
if (!(vi->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
@ -96,7 +96,7 @@ int erofs_map_blocks(struct erofs_inode *inode,
if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
map->m_flags = 0;
} else {
map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
map->m_pa = erofs_pos(le32_to_cpu(*blkaddr));
map->m_flags = EROFS_MAP_MAPPED;
}
goto out;
@ -110,7 +110,7 @@ int erofs_map_blocks(struct erofs_inode *inode,
default:
map->m_deviceid = le16_to_cpu(idx->device_id) &
sbi.device_id_mask;
map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
map->m_pa = erofs_pos(le32_to_cpu(idx->blkaddr));
map->m_flags = EROFS_MAP_MAPPED;
break;
}
@ -119,23 +119,23 @@ out:
return err;
}
int erofs_map_dev(struct erofs_sb_info *sbi, struct erofs_map_dev *map)
int erofs_map_dev(struct erofs_map_dev *map)
{
struct erofs_device_info *dif;
int id;
if (map->m_deviceid) {
if (sbi->extra_devices < map->m_deviceid)
if (sbi.extra_devices < map->m_deviceid)
return -ENODEV;
} else if (sbi->extra_devices) {
for (id = 0; id < sbi->extra_devices; ++id) {
} else if (sbi.extra_devices) {
for (id = 0; id < sbi.extra_devices; ++id) {
erofs_off_t startoff, length;
dif = sbi->devs + id;
dif = sbi.devs + id;
if (!dif->mapped_blkaddr)
continue;
startoff = blknr_to_addr(dif->mapped_blkaddr);
length = blknr_to_addr(dif->blocks);
startoff = erofs_pos(dif->mapped_blkaddr);
length = erofs_pos(dif->blocks);
if (map->m_pa >= startoff &&
map->m_pa < startoff + length) {
@ -147,19 +147,38 @@ int erofs_map_dev(struct erofs_sb_info *sbi, struct erofs_map_dev *map)
return 0;
}
int erofs_read_one_data(struct erofs_map_blocks *map, char *buffer, u64 offset,
size_t len)
{
struct erofs_map_dev mdev;
int ret;
mdev = (struct erofs_map_dev) {
.m_deviceid = map->m_deviceid,
.m_pa = map->m_pa,
};
ret = erofs_map_dev(&mdev);
if (ret)
return ret;
ret = erofs_dev_read(mdev.m_deviceid, buffer, mdev.m_pa + offset, len);
if (ret < 0)
return -EIO;
return 0;
}
static int erofs_read_raw_data(struct erofs_inode *inode, char *buffer,
erofs_off_t size, erofs_off_t offset)
{
struct erofs_map_blocks map = {
.index = UINT_MAX,
};
struct erofs_map_dev mdev;
int ret;
erofs_off_t ptr = offset;
while (ptr < offset + size) {
char *const estart = buffer + ptr - offset;
erofs_off_t eend;
erofs_off_t eend, moff = 0;
map.m_la = ptr;
ret = erofs_map_blocks(inode, &map, 0);
@ -168,14 +187,6 @@ static int erofs_read_raw_data(struct erofs_inode *inode, char *buffer,
DBG_BUGON(map.m_plen != map.m_llen);
mdev = (struct erofs_map_dev) {
.m_deviceid = map.m_deviceid,
.m_pa = map.m_pa,
};
ret = erofs_map_dev(&sbi, &mdev);
if (ret)
return ret;
/* trim extent */
eend = min(offset + size, map.m_la + map.m_llen);
DBG_BUGON(ptr < map.m_la);
@ -193,19 +204,73 @@ static int erofs_read_raw_data(struct erofs_inode *inode, char *buffer,
}
if (ptr > map.m_la) {
mdev.m_pa += ptr - map.m_la;
moff = ptr - map.m_la;
map.m_la = ptr;
}
ret = erofs_dev_read(mdev.m_deviceid, estart, mdev.m_pa,
eend - map.m_la);
if (ret < 0)
return -EIO;
ret = erofs_read_one_data(&map, estart, moff, eend - map.m_la);
if (ret)
return ret;
ptr = eend;
}
return 0;
}
int z_erofs_read_one_data(struct erofs_inode *inode,
struct erofs_map_blocks *map, char *raw, char *buffer,
erofs_off_t skip, erofs_off_t length, bool trimmed)
{
struct erofs_map_dev mdev;
int ret = 0;
if (map->m_flags & EROFS_MAP_FRAGMENT) {
struct erofs_inode packed_inode = {
.nid = sbi.packed_nid,
};
ret = erofs_read_inode_from_disk(&packed_inode);
if (ret) {
erofs_err("failed to read packed inode from disk");
return ret;
}
return erofs_pread(&packed_inode, buffer, length - skip,
inode->fragmentoff + skip);
}
/* no device id here, thus it will always succeed */
mdev = (struct erofs_map_dev) {
.m_pa = map->m_pa,
};
ret = erofs_map_dev(&mdev);
if (ret) {
DBG_BUGON(1);
return ret;
}
ret = erofs_dev_read(mdev.m_deviceid, raw, mdev.m_pa, map->m_plen);
if (ret < 0)
return ret;
ret = z_erofs_decompress(&(struct z_erofs_decompress_req) {
.in = raw,
.out = buffer,
.decodedskip = skip,
.interlaced_offset =
map->m_algorithmformat == Z_EROFS_COMPRESSION_INTERLACED ?
erofs_blkoff(map->m_la) : 0,
.inputsize = map->m_plen,
.decodedlength = length,
.alg = map->m_algorithmformat,
.partial_decoding = trimmed ? true :
!(map->m_flags & EROFS_MAP_FULL_MAPPED) ||
(map->m_flags & EROFS_MAP_PARTIAL_REF),
});
if (ret < 0)
return ret;
return 0;
}
static int z_erofs_read_data(struct erofs_inode *inode, char *buffer,
erofs_off_t size, erofs_off_t offset)
{
@ -213,8 +278,7 @@ static int z_erofs_read_data(struct erofs_inode *inode, char *buffer,
struct erofs_map_blocks map = {
.index = UINT_MAX,
};
struct erofs_map_dev mdev;
bool partial;
bool trimmed;
unsigned int bufsize = 0;
char *raw = NULL;
int ret = 0;
@ -227,27 +291,17 @@ static int z_erofs_read_data(struct erofs_inode *inode, char *buffer,
if (ret)
break;
/* no device id here, thus it will always succeed */
mdev = (struct erofs_map_dev) {
.m_pa = map.m_pa,
};
ret = erofs_map_dev(&sbi, &mdev);
if (ret) {
DBG_BUGON(1);
break;
}
/*
* trim to the needed size if the returned extent is quite
* larger than requested, and set up partial flag as well.
*/
if (end < map.m_la + map.m_llen) {
length = end - map.m_la;
partial = true;
trimmed = true;
} else {
DBG_BUGON(end != map.m_la + map.m_llen);
length = map.m_llen;
partial = !(map.m_flags & EROFS_MAP_FULL_MAPPED);
trimmed = false;
}
if (map.m_la < offset) {
@ -272,19 +326,10 @@ static int z_erofs_read_data(struct erofs_inode *inode, char *buffer,
break;
}
}
ret = erofs_dev_read(mdev.m_deviceid, raw, mdev.m_pa, map.m_plen);
if (ret < 0)
break;
ret = z_erofs_decompress(&(struct z_erofs_decompress_req) {
.in = raw,
.out = buffer + end - offset,
.decodedskip = skip,
.inputsize = map.m_plen,
.decodedlength = length,
.alg = map.m_algorithmformat,
.partial_decoding = partial
});
ret = z_erofs_read_one_data(inode, &map, raw,
buffer + end - offset, skip, length,
trimmed);
if (ret < 0)
break;
}
@ -301,8 +346,8 @@ int erofs_pread(struct erofs_inode *inode, char *buf,
case EROFS_INODE_FLAT_INLINE:
case EROFS_INODE_CHUNK_BASED:
return erofs_read_raw_data(inode, buf, count, offset);
case EROFS_INODE_FLAT_COMPRESSION_LEGACY:
case EROFS_INODE_FLAT_COMPRESSION:
case EROFS_INODE_COMPRESSED_FULL:
case EROFS_INODE_COMPRESSED_COMPACT:
return z_erofs_read_data(inode, buf, count, offset);
default:
break;


@ -15,8 +15,8 @@ static int z_erofs_decompress_lz4(struct z_erofs_decompress_req *rq)
if (erofs_sb_has_lz4_0padding()) {
support_0padding = true;
while (!src[inputmargin & ~PAGE_MASK])
if (!(++inputmargin & ~PAGE_MASK))
while (!src[inputmargin & (erofs_blksiz() - 1)])
if (!(++inputmargin & (erofs_blksiz() - 1)))
break;
if (inputmargin >= rq->inputsize)
@ -40,6 +40,9 @@ static int z_erofs_decompress_lz4(struct z_erofs_decompress_req *rq)
rq->decodedlength);
if (ret != (int)rq->decodedlength) {
erofs_err("failed to %s decompress %d in[%u, %u] out[%u]",
rq->partial_decoding ? "partial" : "full",
ret, rq->inputsize, inputmargin, rq->decodedlength);
ret = -EIO;
goto out;
}
@ -58,13 +61,30 @@ out:
int z_erofs_decompress(struct z_erofs_decompress_req *rq)
{
if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED) {
if (rq->inputsize != EROFS_BLKSIZ)
if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
unsigned int count, rightpart, skip;
/* XXX: should support inputsize >= erofs_blksiz() later */
if (rq->inputsize > erofs_blksiz())
return -EFSCORRUPTED;
DBG_BUGON(rq->decodedlength > EROFS_BLKSIZ);
DBG_BUGON(rq->decodedlength < rq->decodedskip);
if (rq->decodedlength > erofs_blksiz())
return -EFSCORRUPTED;
if (rq->decodedlength < rq->decodedskip)
return -EFSCORRUPTED;
count = rq->decodedlength - rq->decodedskip;
skip = erofs_blkoff(rq->interlaced_offset + rq->decodedskip);
rightpart = min(erofs_blksiz() - skip, count);
memcpy(rq->out, rq->in + skip, rightpart);
memcpy(rq->out + rightpart, rq->in, count - rightpart);
return 0;
} else if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED) {
if (rq->decodedlength > rq->inputsize)
return -EFSCORRUPTED;
DBG_BUGON(rq->decodedlength < rq->decodedskip);
memcpy(rq->out, rq->in + rq->decodedskip,
rq->decodedlength - rq->decodedskip);
return 0;


@ -14,6 +14,9 @@ struct z_erofs_decompress_req {
unsigned int decodedskip;
unsigned int inputsize, decodedlength;
/* cut point of interlaced uncompressed data */
unsigned int interlaced_offset;
/* indicate the algorithm will be used for decompression */
unsigned int alg;
bool partial_decoding;


@ -3,7 +3,7 @@
* EROFS (Enhanced ROM File System) on-disk format definition
*
* Copyright (C) 2017-2018 HUAWEI, Inc.
* http://www.huawei.com/
* https://www.huawei.com/
* Copyright (C) 2021, Alibaba Cloud
*/
#ifndef __EROFS_FS_H
@ -18,33 +18,41 @@
#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2
#define EROFS_SUPER_OFFSET 1024
#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
#define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001
#define EROFS_FEATURE_COMPAT_MTIME 0x00000002
/*
* Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should
* be incompatible with this kernel version.
*/
#define EROFS_FEATURE_INCOMPAT_LZ4_0PADDING 0x00000001
#define EROFS_FEATURE_INCOMPAT_ZERO_PADDING 0x00000001
#define EROFS_FEATURE_INCOMPAT_COMPR_CFGS 0x00000002
#define EROFS_FEATURE_INCOMPAT_BIG_PCLUSTER 0x00000002
#define EROFS_FEATURE_INCOMPAT_CHUNKED_FILE 0x00000004
#define EROFS_FEATURE_INCOMPAT_DEVICE_TABLE 0x00000008
#define EROFS_FEATURE_INCOMPAT_COMPR_HEAD2 0x00000008
#define EROFS_FEATURE_INCOMPAT_ZTAILPACKING 0x00000010
#define EROFS_FEATURE_INCOMPAT_FRAGMENTS 0x00000020
#define EROFS_FEATURE_INCOMPAT_DEDUPE 0x00000020
#define EROFS_FEATURE_INCOMPAT_XATTR_PREFIXES 0x00000040
#define EROFS_ALL_FEATURE_INCOMPAT \
(EROFS_FEATURE_INCOMPAT_LZ4_0PADDING | \
(EROFS_FEATURE_INCOMPAT_ZERO_PADDING | \
EROFS_FEATURE_INCOMPAT_COMPR_CFGS | \
EROFS_FEATURE_INCOMPAT_BIG_PCLUSTER | \
EROFS_FEATURE_INCOMPAT_CHUNKED_FILE | \
EROFS_FEATURE_INCOMPAT_DEVICE_TABLE)
EROFS_FEATURE_INCOMPAT_DEVICE_TABLE | \
EROFS_FEATURE_INCOMPAT_COMPR_HEAD2 | \
EROFS_FEATURE_INCOMPAT_ZTAILPACKING | \
EROFS_FEATURE_INCOMPAT_FRAGMENTS | \
EROFS_FEATURE_INCOMPAT_DEDUPE | \
EROFS_FEATURE_INCOMPAT_XATTR_PREFIXES)
#define EROFS_SB_EXTSLOT_SIZE 16
struct erofs_deviceslot {
union {
u8 uuid[16]; /* used for device manager later */
u8 userdata[64]; /* digest(sha256), etc. */
} u;
__le32 blocks; /* total fs blocks of this device */
__le32 mapped_blkaddr; /* map starting at mapped_blkaddr */
u8 tag[64]; /* digest(sha256), etc. */
__le32 blocks; /* total fs blocks of this device */
__le32 mapped_blkaddr; /* map starting at mapped_blkaddr */
u8 reserved[56];
};
@ -55,14 +63,14 @@ struct erofs_super_block {
__le32 magic; /* file system magic number */
__le32 checksum; /* crc32c(super_block) */
__le32 feature_compat;
__u8 blkszbits; /* support block_size == PAGE_SIZE only */
__u8 blkszbits; /* filesystem block size in bit shift */
__u8 sb_extslots; /* superblock size = 128 + sb_extslots * 16 */
__le16 root_nid; /* nid of root directory */
__le64 inos; /* total valid ino # (== f_files - f_favail) */
__le64 build_time; /* inode v1 time derivation */
__le32 build_time_nsec; /* inode v1 time derivation in nano scale */
__le64 build_time; /* compact inode time derivation */
__le32 build_time_nsec; /* compact inode time derivation in ns scale */
__le32 blocks; /* used for statfs */
__le32 meta_blkaddr; /* start block address of metadata area */
__le32 xattr_blkaddr; /* start block address of shared xattr area */
@ -77,39 +85,38 @@ struct erofs_super_block {
} __packed u1;
__le16 extra_devices; /* # of devices besides the primary device */
__le16 devt_slotoff; /* startoff = devt_slotoff * devt_slotsize */
__u8 reserved2[38];
__u8 dirblkbits; /* directory block size in bit shift */
__u8 xattr_prefix_count; /* # of long xattr name prefixes */
__le32 xattr_prefix_start; /* start of long xattr prefixes */
__le64 packed_nid; /* nid of the special packed inode */
__u8 reserved2[24];
};
/*
* erofs inode datalayout (i_format in on-disk inode):
* 0 - inode plain without inline data A:
* inode, [xattrs], ... | ... | no-holed data
* 1 - inode VLE compression B (legacy):
* inode, [xattrs], extents ... | ...
* 2 - inode plain with inline data C:
* inode, [xattrs], last_inline_data, ... | ... | no-holed data
* 3 - inode compression D:
* inode, [xattrs], map_header, extents ... | ...
* 4 - inode chunk-based E:
* inode, [xattrs], chunk indexes ... | ...
* EROFS inode datalayout (i_format in on-disk inode):
* 0 - uncompressed flat inode without tail-packing inline data:
* 1 - compressed inode with non-compact indexes:
* 2 - uncompressed flat inode with tail-packing inline data:
* 3 - compressed inode with compact indexes:
* 4 - chunk-based inode with (optional) multi-device support:
* 5~7 - reserved
*/
enum {
EROFS_INODE_FLAT_PLAIN = 0,
EROFS_INODE_FLAT_COMPRESSION_LEGACY = 1,
EROFS_INODE_COMPRESSED_FULL = 1,
EROFS_INODE_FLAT_INLINE = 2,
EROFS_INODE_FLAT_COMPRESSION = 3,
EROFS_INODE_COMPRESSED_COMPACT = 3,
EROFS_INODE_CHUNK_BASED = 4,
EROFS_INODE_DATALAYOUT_MAX
};
static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
{
return datamode == EROFS_INODE_FLAT_COMPRESSION ||
datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY;
return datamode == EROFS_INODE_COMPRESSED_COMPACT ||
datamode == EROFS_INODE_COMPRESSED_FULL;
}
/* bit definitions of inode i_advise */
/* bit definitions of inode i_format */
#define EROFS_I_VERSION_BITS 1
#define EROFS_I_DATALAYOUT_BITS 3
@ -127,11 +134,30 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
#define EROFS_CHUNK_FORMAT_ALL \
(EROFS_CHUNK_FORMAT_BLKBITS_MASK | EROFS_CHUNK_FORMAT_INDEXES)
/* 32-byte on-disk inode */
#define EROFS_INODE_LAYOUT_COMPACT 0
/* 64-byte on-disk inode */
#define EROFS_INODE_LAYOUT_EXTENDED 1
struct erofs_inode_chunk_info {
__le16 format; /* chunk blkbits, etc. */
__le16 reserved;
};
union erofs_inode_i_u {
/* total compressed blocks for compressed inodes */
__le32 compressed_blocks;
/* block address for uncompressed flat inodes */
__le32 raw_blkaddr;
/* for device files, used to indicate old/new device # */
__le32 rdev;
/* for chunk-based files, it contains the summary info */
struct erofs_inode_chunk_info c;
};
/* 32-byte reduced form of an ondisk inode */
struct erofs_inode_compact {
__le16 i_format; /* inode format hints */
@ -142,28 +168,14 @@ struct erofs_inode_compact {
__le16 i_nlink;
__le32 i_size;
__le32 i_reserved;
union {
/* file total compressed blocks for data mapping 1 */
__le32 compressed_blocks;
__le32 raw_blkaddr;
union erofs_inode_i_u i_u;
/* for device files, used to indicate old/new device # */
__le32 rdev;
/* for chunk-based files, it contains the summary info */
struct erofs_inode_chunk_info c;
} i_u;
__le32 i_ino; /* only used for 32-bit stat compatibility */
__le32 i_ino; /* only used for 32-bit stat compatibility */
__le16 i_uid;
__le16 i_gid;
__le32 i_reserved2;
};
/* 32 bytes on-disk inode */
#define EROFS_INODE_LAYOUT_COMPACT 0
/* 64 bytes on-disk inode */
#define EROFS_INODE_LAYOUT_EXTENDED 1
/* 64-byte complete form of an ondisk inode */
struct erofs_inode_extended {
__le16 i_format; /* inode format hints */
@ -173,33 +185,17 @@ struct erofs_inode_extended {
__le16 i_mode;
__le16 i_reserved;
__le64 i_size;
union {
/* file total compressed blocks for data mapping 1 */
__le32 compressed_blocks;
__le32 raw_blkaddr;
/* for device files, used to indicate old/new device # */
__le32 rdev;
/* for chunk-based files, it contains the summary info */
struct erofs_inode_chunk_info c;
} i_u;
/* only used for 32-bit stat compatibility */
__le32 i_ino;
union erofs_inode_i_u i_u;
__le32 i_ino; /* only used for 32-bit stat compatibility */
__le32 i_uid;
__le32 i_gid;
__le64 i_ctime;
__le32 i_ctime_nsec;
__le64 i_mtime;
__le32 i_mtime_nsec;
__le32 i_nlink;
__u8 i_reserved2[16];
};
#define EROFS_MAX_SHARED_XATTRS (128)
/* h_shared_count between 129 ... 255 are special # */
#define EROFS_SHARED_XATTR_EXTENT (255)
/*
* inline xattrs (n == i_xattr_icount):
* erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes
@ -226,6 +222,13 @@ struct erofs_xattr_ibody_header {
#define EROFS_XATTR_INDEX_LUSTRE 5
#define EROFS_XATTR_INDEX_SECURITY 6
/*
* bit 7 of e_name_index is set when it refers to a long xattr name prefix,
* while the remaining lower bits represent the index of the prefix.
*/
#define EROFS_XATTR_LONG_PREFIX 0x80
#define EROFS_XATTR_LONG_PREFIX_MASK 0x7f
/* xattr entry (for both inline & shared xattrs) */
struct erofs_xattr_entry {
__u8 e_name_len; /* length of name */
@ -235,6 +238,12 @@ struct erofs_xattr_entry {
char e_name[0]; /* attribute name */
};
/* long xattr name prefix */
struct erofs_xattr_long_prefix {
__u8 base_index; /* short xattr name prefix index */
char infix[0]; /* infix apart from short prefix */
};
static inline unsigned int erofs_xattr_ibody_size(__le16 i_xattr_icount)
{
if (!i_xattr_icount)
@ -265,129 +274,6 @@ struct erofs_inode_chunk_index {
__le32 blkaddr; /* start block address of this inode chunk */
};
/* maximum supported size of a physical compression cluster */
#define Z_EROFS_PCLUSTER_MAX_SIZE (1024 * 1024)
/* available compression algorithm types (for h_algorithmtype) */
enum {
Z_EROFS_COMPRESSION_LZ4 = 0,
Z_EROFS_COMPRESSION_LZMA = 1,
Z_EROFS_COMPRESSION_MAX
};
#define Z_EROFS_ALL_COMPR_ALGS (1 << (Z_EROFS_COMPRESSION_MAX - 1))
/* 14 bytes (+ length field = 16 bytes) */
struct z_erofs_lz4_cfgs {
__le16 max_distance;
__le16 max_pclusterblks;
u8 reserved[10];
} __packed;
/* 14 bytes (+ length field = 16 bytes) */
struct z_erofs_lzma_cfgs {
__le32 dict_size;
__le16 format;
u8 reserved[8];
} __packed;
#define Z_EROFS_LZMA_MAX_DICT_SIZE (8 * Z_EROFS_PCLUSTER_MAX_SIZE)
/*
* bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
* e.g. for 4k logical cluster size, 4B if compacted 2B is off;
* (4B) + 2B + (4B) if compacted 2B is on.
* bit 1 : HEAD1 big pcluster (0 - off; 1 - on)
* bit 2 : HEAD2 big pcluster (0 - off; 1 - on)
*/
#define Z_EROFS_ADVISE_COMPACTED_2B 0x0001
#define Z_EROFS_ADVISE_BIG_PCLUSTER_1 0x0002
#define Z_EROFS_ADVISE_BIG_PCLUSTER_2 0x0004
struct z_erofs_map_header {
__le32 h_reserved1;
__le16 h_advise;
/*
* bit 0-3 : algorithm type of head 1 (logical cluster type 01);
* bit 4-7 : algorithm type of head 2 (logical cluster type 11).
*/
__u8 h_algorithmtype;
/*
* bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096;
* bit 3-7 : reserved.
*/
__u8 h_clusterbits;
};
#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8
/*
* Fixed-sized output compression ondisk Logical Extent cluster type:
* 0 - literal (uncompressed) cluster
* 1 - compressed cluster (for the head logical cluster)
* 2 - compressed cluster (for the other logical clusters)
*
* In detail,
* 0 - literal (uncompressed) cluster,
* di_advise = 0
* di_clusterofs = the literal data offset of the cluster
* di_blkaddr = the blkaddr of the literal cluster
*
* 1 - compressed cluster (for the head logical cluster)
* di_advise = 1
* di_clusterofs = the decompressed data offset of the cluster
* di_blkaddr = the blkaddr of the compressed cluster
*
* 2 - compressed cluster (for the other logical clusters)
* di_advise = 2
* di_clusterofs =
* the decompressed data offset in its own head cluster
* di_u.delta[0] = distance to its corresponding head cluster
* di_u.delta[1] = distance to its corresponding tail cluster
* (di_advise could be 0, 1 or 2)
*/
enum {
Z_EROFS_VLE_CLUSTER_TYPE_PLAIN = 0,
Z_EROFS_VLE_CLUSTER_TYPE_HEAD = 1,
Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD = 2,
Z_EROFS_VLE_CLUSTER_TYPE_RESERVED = 3,
Z_EROFS_VLE_CLUSTER_TYPE_MAX
};
#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2
#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0
/*
* D0_CBLKCNT will be marked _only_ at the 1st non-head lcluster to store the
* compressed block count of a compressed extent (in logical clusters, aka.
* block count of a pcluster).
*/
#define Z_EROFS_VLE_DI_D0_CBLKCNT (1 << 11)
struct z_erofs_vle_decompressed_index {
__le16 di_advise;
/* where to decompress in the head cluster */
__le16 di_clusterofs;
union {
/* for the head cluster */
__le32 blkaddr;
/*
* for the rest clusters
* eg. for 4k page-sized cluster, maximum 4K*64k = 256M)
* [0] - pointing to the head cluster
* [1] - pointing to the tail cluster
*/
__le16 delta[2];
} di_u;
};
#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \
(round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \
sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING)
#define Z_EROFS_VLE_EXTENT_ALIGN(size) round_up(size, \
sizeof(struct z_erofs_vle_decompressed_index))
/* dirent sorts in alphabet order, thus we can do binary search */
struct erofs_dirent {
__le64 nid; /* node number */
@ -411,9 +297,149 @@ enum {
#define EROFS_NAME_LEN 255
/* maximum supported size of a physical compression cluster */
#define Z_EROFS_PCLUSTER_MAX_SIZE (1024 * 1024)
/* available compression algorithm types (for h_algorithmtype) */
enum {
Z_EROFS_COMPRESSION_LZ4 = 0,
Z_EROFS_COMPRESSION_LZMA = 1,
Z_EROFS_COMPRESSION_MAX
};
#define Z_EROFS_ALL_COMPR_ALGS ((1 << Z_EROFS_COMPRESSION_MAX) - 1)
/* 14 bytes (+ length field = 16 bytes) */
struct z_erofs_lz4_cfgs {
__le16 max_distance;
__le16 max_pclusterblks;
u8 reserved[10];
} __packed;
/* 14 bytes (+ length field = 16 bytes) */
struct z_erofs_lzma_cfgs {
__le32 dict_size;
__le16 format;
u8 reserved[8];
} __packed;
#define Z_EROFS_LZMA_MAX_DICT_SIZE (8 * Z_EROFS_PCLUSTER_MAX_SIZE)
/*
* bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
* e.g. for 4k logical cluster size, 4B if compacted 2B is off;
* (4B) + 2B + (4B) if compacted 2B is on.
* bit 1 : HEAD1 big pcluster (0 - off; 1 - on)
* bit 2 : HEAD2 big pcluster (0 - off; 1 - on)
* bit 3 : tailpacking inline pcluster (0 - off; 1 - on)
* bit 4 : interlaced plain pcluster (0 - off; 1 - on)
* bit 5 : fragment pcluster (0 - off; 1 - on)
*/
#define Z_EROFS_ADVISE_COMPACTED_2B 0x0001
#define Z_EROFS_ADVISE_BIG_PCLUSTER_1 0x0002
#define Z_EROFS_ADVISE_BIG_PCLUSTER_2 0x0004
#define Z_EROFS_ADVISE_INLINE_PCLUSTER 0x0008
#define Z_EROFS_ADVISE_INTERLACED_PCLUSTER 0x0010
#define Z_EROFS_ADVISE_FRAGMENT_PCLUSTER 0x0020
#define Z_EROFS_FRAGMENT_INODE_BIT 7
struct z_erofs_map_header {
union {
/* fragment data offset in the packed inode */
__le32 h_fragmentoff;
struct {
__le16 h_reserved1;
/* indicates the encoded size of tailpacking data */
__le16 h_idata_size;
};
};
__le16 h_advise;
/*
* bit 0-3 : algorithm type of head 1 (logical cluster type 01);
* bit 4-7 : algorithm type of head 2 (logical cluster type 11).
*/
__u8 h_algorithmtype;
/*
* bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096;
* bit 3-6 : reserved;
* bit 7 : move the whole file into packed inode or not.
*/
__u8 h_clusterbits;
};
/*
* On-disk logical cluster type:
* 0 - literal (uncompressed) lcluster
* 1,3 - compressed lcluster (for HEAD lclusters)
* 2 - compressed lcluster (for NONHEAD lclusters)
*
* In detail,
* 0 - literal (uncompressed) lcluster,
* di_advise = 0
* di_clusterofs = the literal data offset of the lcluster
* di_blkaddr = the blkaddr of the literal pcluster
*
* 1,3 - compressed lcluster (for HEAD lclusters)
* di_advise = 1 or 3
* di_clusterofs = the decompressed data offset of the lcluster
* di_blkaddr = the blkaddr of the compressed pcluster
*
* 2 - compressed lcluster (for NONHEAD lclusters)
* di_advise = 2
* di_clusterofs =
* the decompressed data offset in its own HEAD lcluster
* di_u.delta[0] = distance to this HEAD lcluster
* di_u.delta[1] = distance to the next HEAD lcluster
*/
enum {
Z_EROFS_LCLUSTER_TYPE_PLAIN = 0,
Z_EROFS_LCLUSTER_TYPE_HEAD1 = 1,
Z_EROFS_LCLUSTER_TYPE_NONHEAD = 2,
Z_EROFS_LCLUSTER_TYPE_HEAD2 = 3,
Z_EROFS_LCLUSTER_TYPE_MAX
};
#define Z_EROFS_LI_LCLUSTER_TYPE_BITS 2
#define Z_EROFS_LI_LCLUSTER_TYPE_BIT 0
/* (noncompact only, HEAD) This pcluster refers to partial decompressed data */
#define Z_EROFS_LI_PARTIAL_REF (1 << 15)
/*
* D0_CBLKCNT will be marked _only_ at the 1st non-head lcluster to store the
* compressed block count of a compressed extent (in logical clusters, aka.
* block count of a pcluster).
*/
#define Z_EROFS_LI_D0_CBLKCNT (1 << 11)
struct z_erofs_lcluster_index {
__le16 di_advise;
/* where to decompress in the head lcluster */
__le16 di_clusterofs;
union {
/* for the HEAD lclusters */
__le32 blkaddr;
/*
* for the NONHEAD lclusters
* [0] - distance to its HEAD lcluster
* [1] - distance to the next HEAD lcluster
*/
__le16 delta[2];
} di_u;
};
#define Z_EROFS_FULL_INDEX_ALIGN(end) \
(round_up(end, 8) + sizeof(struct z_erofs_map_header) + 8)
/* check the EROFS on-disk layout strictly at compile time */
static inline void erofs_check_ondisk_layout_definitions(void)
{
const __le64 fmh __maybe_unused =
*(__le64 *)&(struct z_erofs_map_header) {
.h_clusterbits = 1 << Z_EROFS_FRAGMENT_INODE_BIT
};
BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
BUILD_BUG_ON(sizeof(struct erofs_inode_compact) != 32);
BUILD_BUG_ON(sizeof(struct erofs_inode_extended) != 64);
@ -422,15 +448,18 @@ static inline void erofs_check_ondisk_layout_definitions(void)
BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_info) != 4);
BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_index) != 8);
BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8);
BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
BUILD_BUG_ON(sizeof(struct z_erofs_lcluster_index) != 8);
BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
/* keep in sync between 2 index structures for better extendibility */
BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_index) !=
sizeof(struct z_erofs_vle_decompressed_index));
sizeof(struct z_erofs_lcluster_index));
BUILD_BUG_ON(sizeof(struct erofs_deviceslot) != 128);
BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
BUILD_BUG_ON(BIT(Z_EROFS_LI_LCLUSTER_TYPE_BITS) <
Z_EROFS_LCLUSTER_TYPE_MAX - 1);
/* exclude old compiler versions like gcc 7.5.0 */
BUILD_BUG_ON(__builtin_constant_p(fmh) ?
fmh != cpu_to_le64(1ULL << 63) : 0);
}
#endif


@ -25,8 +25,8 @@ int erofs_dev_read(int device_id, void *buf, u64 offset, size_t len)
int erofs_blk_read(void *buf, erofs_blk_t start, u32 nblocks)
{
return erofs_dev_read(0, buf, blknr_to_addr(start),
blknr_to_addr(nblocks));
return erofs_dev_read(0, buf, erofs_pos(start),
erofs_pos(nblocks));
}
int erofs_probe(struct blk_desc *fs_dev_desc,
@ -52,7 +52,7 @@ struct erofs_dir_stream {
struct fs_dirent dirent;
struct erofs_inode inode;
char dblk[EROFS_BLKSIZ];
char dblk[EROFS_MAX_BLOCK_SIZE];
unsigned int maxsize, de_end;
erofs_off_t pos;
};
@ -125,7 +125,7 @@ int erofs_readdir(struct fs_dir_stream *fs_dirs, struct fs_dirent **dentp)
return 1;
if (!dirs->maxsize) {
dirs->maxsize = min_t(unsigned int, EROFS_BLKSIZ,
dirs->maxsize = min_t(unsigned int, EROFS_MAX_BLOCK_SIZE,
dirs->inode.i_size - pos);
err = erofs_pread(&dirs->inode, dirs->dblk,
@ -136,7 +136,7 @@ int erofs_readdir(struct fs_dir_stream *fs_dirs, struct fs_dirent **dentp)
de = (struct erofs_dirent *)dirs->dblk;
dirs->de_end = le16_to_cpu(de->nameoff);
if (dirs->de_end < sizeof(struct erofs_dirent) ||
dirs->de_end >= EROFS_BLKSIZ) {
dirs->de_end >= EROFS_MAX_BLOCK_SIZE) {
erofs_err("invalid de[0].nameoff %u @ nid %llu",
dirs->de_end, de->nid | 0ULL);
return -EFSCORRUPTED;
@ -183,7 +183,7 @@ int erofs_readdir(struct fs_dir_stream *fs_dirs, struct fs_dirent **dentp)
pos += sizeof(*de);
if (erofs_blkoff(pos) >= dirs->de_end) {
pos = blknr_to_addr(erofs_blknr(pos) + 1);
pos = erofs_pos(erofs_blknr(pos) + 1);
dirs->maxsize = 0;
}
dirs->pos = pos;


@ -2,6 +2,7 @@
#ifndef __EROFS_INTERNAL_H
#define __EROFS_INTERNAL_H
#include "linux/compat.h"
#define __packed __attribute__((__packed__))
#include <linux/stat.h>
@ -30,8 +31,9 @@
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define LOG_BLOCK_SIZE (12)
#define EROFS_BLKSIZ (1U << LOG_BLOCK_SIZE)
#ifndef EROFS_MAX_BLOCK_SIZE
#define EROFS_MAX_BLOCK_SIZE PAGE_SIZE
#endif
#define EROFS_ISLOTBITS 5
#define EROFS_SLOTSIZE (1U << EROFS_ISLOTBITS)
@ -44,11 +46,15 @@ typedef u32 erofs_blk_t;
#define NULL_ADDR ((unsigned int)-1)
#define NULL_ADDR_UL ((unsigned long)-1)
#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)
/* global sbi */
extern struct erofs_sb_info sbi;
#define BLK_ROUND_UP(addr) DIV_ROUND_UP(addr, EROFS_BLKSIZ)
#define erofs_blksiz() (1u << sbi.blkszbits)
#define erofs_blknr(addr) ((addr) >> sbi.blkszbits)
#define erofs_blkoff(addr) ((addr) & (erofs_blksiz() - 1))
#define erofs_pos(nr) ((erofs_off_t)(nr) << sbi.blkszbits)
#define BLK_ROUND_UP(addr) DIV_ROUND_UP(addr, 1u << sbi.blkszbits)
struct erofs_buffer_head;
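
As a small illustration (not part of the diff), the macros above now derive
everything from the runtime sbi.blkszbits instead of the fixed EROFS_BLKSIZ.
The helper below is made up, but erofs_blknr(), erofs_blkoff(), erofs_pos()
and erofs_blksiz() are the macros defined in this hunk, and erofs_dbg() is
used elsewhere in this series.

/*
 * Split a byte offset into (block, in-block offset) using the runtime
 * block size; assumes sbi.blkszbits was set by erofs_read_superblock().
 */
static void erofs_show_offset(erofs_off_t addr)
{
	erofs_blk_t blk = erofs_blknr(addr);	/* addr >> sbi.blkszbits */
	unsigned int off = erofs_blkoff(addr);	/* addr & (blksiz - 1)   */

	erofs_dbg("offset %llu = block %u + %u (blksiz %u), back to %llu",
		  addr | 0ULL, blk, off, erofs_blksiz(),
		  erofs_pos(blk) | 0ULL);
}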
@ -57,6 +63,8 @@ struct erofs_device_info {
u32 mapped_blkaddr;
};
#define EROFS_PACKED_NID_UNALLOCATED -1
struct erofs_sb_info {
struct erofs_device_info *devs;
@ -72,6 +80,7 @@ struct erofs_sb_info {
u32 build_time_nsec;
unsigned char islotbits;
unsigned char blkszbits;
/* what we really care is nid, rather than ino.. */
erofs_nid_t root_nid;
@ -79,23 +88,29 @@ struct erofs_sb_info {
u64 inos;
u8 uuid[16];
char volume_name[16];
u16 available_compr_algs;
u16 lz4_max_distance;
u32 checksum;
u16 extra_devices;
union {
u16 devt_slotoff; /* used for mkfs */
u16 device_id_mask; /* used for others */
};
erofs_nid_t packed_nid;
u32 xattr_prefix_start;
u8 xattr_prefix_count;
};
/* global sbi */
extern struct erofs_sb_info sbi;
/* make sure that any user of the erofs headers has at least 64bit off_t type */
extern int erofs_assert_largefile[sizeof(off_t) - 8];
static inline erofs_off_t iloc(erofs_nid_t nid)
{
return blknr_to_addr(sbi.meta_blkaddr) + (nid << sbi.islotbits);
return erofs_pos(sbi.meta_blkaddr) + (nid << sbi.islotbits);
}
#define EROFS_FEATURE_FUNCS(name, compat, feature) \
@ -112,11 +127,15 @@ static inline void erofs_sb_clear_##name(void) \
sbi.feature_##compat &= ~EROFS_FEATURE_##feature; \
}
EROFS_FEATURE_FUNCS(lz4_0padding, incompat, INCOMPAT_LZ4_0PADDING)
EROFS_FEATURE_FUNCS(lz4_0padding, incompat, INCOMPAT_ZERO_PADDING)
EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS)
EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER)
EROFS_FEATURE_FUNCS(chunked_file, incompat, INCOMPAT_CHUNKED_FILE)
EROFS_FEATURE_FUNCS(device_table, incompat, INCOMPAT_DEVICE_TABLE)
EROFS_FEATURE_FUNCS(ztailpacking, incompat, INCOMPAT_ZTAILPACKING)
EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS)
EROFS_FEATURE_FUNCS(dedupe, incompat, INCOMPAT_DEDUPE)
EROFS_FEATURE_FUNCS(xattr_prefixes, incompat, INCOMPAT_XATTR_PREFIXES)
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
#define EROFS_I_EA_INITED (1 << 0)
@ -130,6 +149,8 @@ struct erofs_inode {
unsigned int flags;
/* (mkfs.erofs) device ID containing source file */
u32 dev;
/* (mkfs.erofs) queued sub-directories blocking dump */
u32 subdirs_queued;
};
unsigned int i_count;
struct erofs_inode *i_parent;
@ -140,8 +161,8 @@ struct erofs_inode {
u64 i_ino[2];
u32 i_uid;
u32 i_gid;
u64 i_ctime;
u32 i_ctime_nsec;
u64 i_mtime;
u32 i_mtime_nsec;
u32 i_nlink;
union {
@ -154,20 +175,31 @@ struct erofs_inode {
};
} u;
char *i_srcpath;
unsigned char datalayout;
unsigned char inode_isize;
/* inline tail-end packing size */
unsigned short idata_size;
bool compressed_idata;
bool lazy_tailblock;
unsigned int xattr_isize;
unsigned int extent_isize;
unsigned int xattr_shared_count;
unsigned int *xattr_shared_xattrs;
erofs_nid_t nid;
struct erofs_buffer_head *bh;
struct erofs_buffer_head *bh_inline, *bh_data;
void *idata;
/* (ztailpacking) in order to recover uncompressed EOF data */
void *eof_tailraw;
unsigned int eof_tailrawsize;
union {
void *compressmeta;
void *chunkindexes;
@ -176,8 +208,14 @@ struct erofs_inode {
uint8_t z_algorithmtype[2];
uint8_t z_logical_clusterbits;
uint8_t z_physical_clusterblks;
uint64_t z_tailextent_headlcn;
unsigned int z_idataoff;
#define z_idata_size idata_size
};
};
uint64_t capabilities;
erofs_off_t fragmentoff;
unsigned int fragment_size;
};
static inline bool is_inode_layout_compression(struct erofs_inode *inode)
@ -216,6 +254,14 @@ struct erofs_dentry {
};
};
static inline bool is_dot_dotdot_len(const char *name, unsigned int len)
{
if (len >= 1 && name[0] != '.')
return false;
return len == 1 || (len == 2 && name[1] == '.');
}
static inline bool is_dot_dotdot(const char *name)
{
if (name[0] != '.')
@ -229,6 +275,8 @@ enum {
BH_Mapped,
BH_Encoded,
BH_FullMapped,
BH_Fragment,
BH_Partialref,
};
/* Has a disk mapping */
@ -239,9 +287,13 @@ enum {
#define EROFS_MAP_ENCODED (1 << BH_Encoded)
/* The length of extent is full */
#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped)
/* Located in the special packed inode */
#define EROFS_MAP_FRAGMENT (1 << BH_Fragment)
/* The extent refers to partial decompressed data */
#define EROFS_MAP_PARTIAL_REF (1 << BH_Partialref)
struct erofs_map_blocks {
char mpage[EROFS_BLKSIZ];
char mpage[EROFS_MAX_BLOCK_SIZE];
erofs_off_t m_pa, m_la;
u64 m_plen, m_llen;
@ -257,9 +309,12 @@ struct erofs_map_blocks {
* approach instead if possible since it's more metadata lightweight.)
*/
#define EROFS_GET_BLOCKS_FIEMAP 0x0002
/* Used to map tail extent for tailpacking inline or fragment pcluster */
#define EROFS_GET_BLOCKS_FINDTAIL 0x0008
enum {
Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
Z_EROFS_COMPRESSION_INTERLACED,
Z_EROFS_COMPRESSION_RUNTIME_MAX
};
@ -274,6 +329,7 @@ int erofs_dev_read(int device_id, void *buf, u64 offset, size_t len);
/* super.c */
int erofs_read_superblock(void);
void erofs_put_super(void);
/* namei.c */
int erofs_read_inode_from_disk(struct erofs_inode *vi);
@ -283,9 +339,40 @@ int erofs_read_inode_from_disk(struct erofs_inode *vi);
/* data.c */
int erofs_pread(struct erofs_inode *inode, char *buf,
erofs_off_t count, erofs_off_t offset);
int erofs_map_blocks(struct erofs_inode *inode,
struct erofs_map_blocks *map, int flags);
int erofs_map_dev(struct erofs_sb_info *sbi, struct erofs_map_dev *map);
int erofs_map_blocks(struct erofs_inode *inode, struct erofs_map_blocks *map,
int flags);
int erofs_map_dev(struct erofs_map_dev *map);
int erofs_read_one_data(struct erofs_map_blocks *map, char *buffer, u64 offset,
size_t len);
int z_erofs_read_one_data(struct erofs_inode *inode,
struct erofs_map_blocks *map, char *raw, char *buffer,
erofs_off_t skip, erofs_off_t length, bool trimmed);
static inline int erofs_get_occupied_size(const struct erofs_inode *inode,
erofs_off_t *size)
{
*size = 0;
switch (inode->datalayout) {
case EROFS_INODE_FLAT_INLINE:
case EROFS_INODE_FLAT_PLAIN:
case EROFS_INODE_CHUNK_BASED:
*size = inode->i_size;
break;
case EROFS_INODE_COMPRESSED_FULL:
case EROFS_INODE_COMPRESSED_COMPACT:
*size = inode->u.i_blocks * erofs_blksiz();
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
/* data.c */
int erofs_getxattr(struct erofs_inode *vi, const char *name, char *buffer,
size_t buffer_size);
int erofs_listxattr(struct erofs_inode *vi, char *buffer, size_t buffer_size);
/* zmap.c */
int z_erofs_fill_inode(struct erofs_inode *vi);
int z_erofs_map_blocks_iter(struct erofs_inode *vi,


@ -1,6 +1,15 @@
// SPDX-License-Identifier: GPL-2.0+
#include "internal.h"
#define makedev(major, minor) ((dev_t)((((major) & 0xfff) << 8) | ((minor) & 0xff)))
static dev_t erofs_new_decode_dev(u32 dev)
{
const unsigned int major = (dev & 0xfff00) >> 8;
const unsigned int minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);
return makedev(major, minor);
}
int erofs_read_inode_from_disk(struct erofs_inode *vi)
{
int ret, ifmt;
@ -26,7 +35,8 @@ int erofs_read_inode_from_disk(struct erofs_inode *vi)
case EROFS_INODE_LAYOUT_EXTENDED:
vi->inode_isize = sizeof(struct erofs_inode_extended);
ret = erofs_dev_read(0, buf + sizeof(*dic), inode_loc + sizeof(*dic),
ret = erofs_dev_read(0, buf + sizeof(*dic),
inode_loc + sizeof(*dic),
sizeof(*die) - sizeof(*dic));
if (ret < 0)
return -EIO;
@ -43,7 +53,8 @@ int erofs_read_inode_from_disk(struct erofs_inode *vi)
break;
case S_IFCHR:
case S_IFBLK:
vi->u.i_rdev = 0;
vi->u.i_rdev =
erofs_new_decode_dev(le32_to_cpu(die->i_u.rdev));
break;
case S_IFIFO:
case S_IFSOCK:
@ -57,8 +68,8 @@ int erofs_read_inode_from_disk(struct erofs_inode *vi)
vi->i_gid = le32_to_cpu(die->i_gid);
vi->i_nlink = le32_to_cpu(die->i_nlink);
vi->i_ctime = le64_to_cpu(die->i_ctime);
vi->i_ctime_nsec = le64_to_cpu(die->i_ctime_nsec);
vi->i_mtime = le64_to_cpu(die->i_mtime);
vi->i_mtime_nsec = le64_to_cpu(die->i_mtime_nsec);
vi->i_size = le64_to_cpu(die->i_size);
if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
/* fill chunked inode summary info */
@ -77,7 +88,8 @@ int erofs_read_inode_from_disk(struct erofs_inode *vi)
break;
case S_IFCHR:
case S_IFBLK:
vi->u.i_rdev = 0;
vi->u.i_rdev =
erofs_new_decode_dev(le32_to_cpu(dic->i_u.rdev));
break;
case S_IFIFO:
case S_IFSOCK:
@ -91,8 +103,8 @@ int erofs_read_inode_from_disk(struct erofs_inode *vi)
vi->i_gid = le16_to_cpu(dic->i_gid);
vi->i_nlink = le16_to_cpu(dic->i_nlink);
vi->i_ctime = sbi.build_time;
vi->i_ctime_nsec = sbi.build_time_nsec;
vi->i_mtime = sbi.build_time;
vi->i_mtime_nsec = sbi.build_time_nsec;
vi->i_size = le32_to_cpu(dic->i_size);
if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
@ -111,10 +123,13 @@ int erofs_read_inode_from_disk(struct erofs_inode *vi)
vi->u.chunkformat, vi->nid | 0ULL);
return -EOPNOTSUPP;
}
vi->u.chunkbits = LOG_BLOCK_SIZE +
vi->u.chunkbits = sbi.blkszbits +
(vi->u.chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
} else if (erofs_inode_is_data_compressed(vi->datalayout))
z_erofs_fill_inode(vi);
} else if (erofs_inode_is_data_compressed(vi->datalayout)) {
if (erofs_blksiz() != EROFS_MAX_BLOCK_SIZE)
return -EOPNOTSUPP;
return z_erofs_fill_inode(vi);
}
return 0;
bogusimode:
erofs_err("bogus i_mode (%o) @ nid %llu", vi->i_mode, vi->nid | 0ULL);
@ -163,12 +178,11 @@ struct nameidata {
unsigned int ftype;
};
int erofs_namei(struct nameidata *nd,
const char *name, unsigned int len)
int erofs_namei(struct nameidata *nd, const char *name, unsigned int len)
{
erofs_nid_t nid = nd->nid;
int ret;
char buf[EROFS_BLKSIZ];
char buf[EROFS_MAX_BLOCK_SIZE];
struct erofs_inode vi = { .nid = nid };
erofs_off_t offset;
@ -179,7 +193,7 @@ int erofs_namei(struct nameidata *nd,
offset = 0;
while (offset < vi.i_size) {
erofs_off_t maxsize = min_t(erofs_off_t,
vi.i_size - offset, EROFS_BLKSIZ);
vi.i_size - offset, erofs_blksiz());
struct erofs_dirent *de = (void *)buf;
unsigned int nameoff;
@ -189,7 +203,7 @@ int erofs_namei(struct nameidata *nd,
nameoff = le16_to_cpu(de->nameoff);
if (nameoff < sizeof(struct erofs_dirent) ||
nameoff >= PAGE_SIZE) {
nameoff >= erofs_blksiz()) {
erofs_err("invalid de[0].nameoff %u @ nid %llu",
nameoff, nid | 0ULL);
return -EFSCORRUPTED;


@ -9,7 +9,7 @@ static bool check_layout_compatibility(struct erofs_sb_info *sbi,
sbi->feature_incompat = feature;
/* check if current kernel meets all mandatory requirements */
if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
if (feature & ~EROFS_ALL_FEATURE_INCOMPAT) {
erofs_err("unidentified incompatible feature %x, please upgrade kernel version",
feature & ~EROFS_ALL_FEATURE_INCOMPAT);
return false;
@ -40,14 +40,18 @@ static int erofs_init_devices(struct erofs_sb_info *sbi,
sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
sbi->devs = calloc(ondisk_extradevs, sizeof(*sbi->devs));
if (!sbi->devs)
return -ENOMEM;
pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
for (i = 0; i < ondisk_extradevs; ++i) {
struct erofs_deviceslot dis;
int ret;
ret = erofs_dev_read(0, &dis, pos, sizeof(dis));
if (ret < 0)
if (ret < 0) {
free(sbi->devs);
return ret;
}
sbi->devs[i].mapped_blkaddr = dis.mapped_blkaddr;
sbi->total_blocks += dis.blocks;
@ -58,42 +62,41 @@ static int erofs_init_devices(struct erofs_sb_info *sbi,
int erofs_read_superblock(void)
{
char data[EROFS_BLKSIZ];
u8 data[EROFS_MAX_BLOCK_SIZE];
struct erofs_super_block *dsb;
unsigned int blkszbits;
int ret;
ret = erofs_blk_read(data, 0, 1);
ret = erofs_blk_read(data, 0, erofs_blknr(sizeof(data)));
if (ret < 0) {
erofs_dbg("cannot read erofs superblock: %d", ret);
erofs_err("cannot read erofs superblock: %d", ret);
return -EIO;
}
dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
ret = -EINVAL;
if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
erofs_dbg("cannot find valid erofs superblock");
erofs_err("cannot find valid erofs superblock");
return ret;
}
sbi.feature_compat = le32_to_cpu(dsb->feature_compat);
blkszbits = dsb->blkszbits;
/* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
if (blkszbits != LOG_BLOCK_SIZE) {
erofs_err("blksize %u isn't supported on this platform",
1 << blkszbits);
sbi.blkszbits = dsb->blkszbits;
if (sbi.blkszbits < 9 ||
sbi.blkszbits > ilog2(EROFS_MAX_BLOCK_SIZE)) {
erofs_err("blksize %llu isn't supported on this platform",
erofs_blksiz() | 0ULL);
return ret;
} else if (!check_layout_compatibility(&sbi, dsb)) {
return ret;
}
if (!check_layout_compatibility(&sbi, dsb))
return ret;
sbi.primarydevice_blocks = le32_to_cpu(dsb->blocks);
sbi.meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
sbi.xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
sbi.islotbits = EROFS_ISLOTBITS;
sbi.root_nid = le16_to_cpu(dsb->root_nid);
sbi.packed_nid = le64_to_cpu(dsb->packed_nid);
sbi.inos = le64_to_cpu(dsb->inos);
sbi.checksum = le32_to_cpu(dsb->checksum);


@ -1,14 +1,19 @@
// SPDX-License-Identifier: GPL-2.0+
#include "internal.h"
static int z_erofs_do_map_blocks(struct erofs_inode *vi,
struct erofs_map_blocks *map,
int flags);
int z_erofs_fill_inode(struct erofs_inode *vi)
{
if (!erofs_sb_has_big_pcluster() &&
vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
!erofs_sb_has_ztailpacking() && !erofs_sb_has_fragments() &&
vi->datalayout == EROFS_INODE_COMPRESSED_FULL) {
vi->z_advise = 0;
vi->z_algorithmtype[0] = 0;
vi->z_algorithmtype[1] = 0;
vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
vi->z_logical_clusterbits = sbi.blkszbits;
vi->flags |= EROFS_I_Z_INITED;
}
@ -25,15 +30,23 @@ static int z_erofs_fill_inode_lazy(struct erofs_inode *vi)
if (vi->flags & EROFS_I_Z_INITED)
return 0;
DBG_BUGON(!erofs_sb_has_big_pcluster() &&
vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
pos = round_up(iloc(vi->nid) + vi->inode_isize + vi->xattr_isize, 8);
ret = erofs_dev_read(0, buf, pos, sizeof(buf));
if (ret < 0)
return -EIO;
h = (struct z_erofs_map_header *)buf;
/*
* if the highest bit of the 8-byte map header is set, the whole file
* is stored in the packed inode. The remaining bits keep z_fragmentoff.
*/
if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
vi->fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
vi->z_tailextent_headlcn = 0;
goto out;
}
vi->z_advise = le16_to_cpu(h->h_advise);
vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
@ -44,14 +57,41 @@ static int z_erofs_fill_inode_lazy(struct erofs_inode *vi)
return -EOPNOTSUPP;
}
vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION &&
vi->z_logical_clusterbits = sbi.blkszbits + (h->h_clusterbits & 7);
if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
erofs_err("big pcluster head1/2 of compact indexes should be consistent for nid %llu",
vi->nid * 1ULL);
return -EFSCORRUPTED;
}
if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
struct erofs_map_blocks map = { .index = UINT_MAX };
vi->idata_size = le16_to_cpu(h->h_idata_size);
ret = z_erofs_do_map_blocks(vi, &map,
EROFS_GET_BLOCKS_FINDTAIL);
if (!map.m_plen ||
erofs_blkoff(map.m_pa) + map.m_plen > erofs_blksiz()) {
erofs_err("invalid tail-packing pclustersize %llu",
map.m_plen | 0ULL);
return -EFSCORRUPTED;
}
if (ret < 0)
return ret;
}
if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
!(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
struct erofs_map_blocks map = { .index = UINT_MAX };
vi->fragmentoff = le32_to_cpu(h->h_fragmentoff);
ret = z_erofs_do_map_blocks(vi, &map,
EROFS_GET_BLOCKS_FINDTAIL);
if (ret < 0)
return ret;
}
out:
vi->flags |= EROFS_I_Z_INITED;
return 0;
}
@ -66,7 +106,9 @@ struct z_erofs_maprecorder {
u8 type, headtype;
u16 clusterofs;
u16 delta[2];
erofs_blk_t pblk, compressedlcs;
erofs_blk_t pblk, compressedblks;
erofs_off_t nextpackoff;
bool partialref;
};
static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
@ -93,11 +135,10 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
{
struct erofs_inode *const vi = m->inode;
const erofs_off_t ibase = iloc(vi->nid);
const erofs_off_t pos =
Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
vi->xattr_isize) +
lcn * sizeof(struct z_erofs_vle_decompressed_index);
struct z_erofs_vle_decompressed_index *di;
const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(ibase +
vi->inode_isize + vi->xattr_isize) +
lcn * sizeof(struct z_erofs_lcluster_index);
struct z_erofs_lcluster_index *di;
unsigned int advise, type;
int err;
@ -105,29 +146,32 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
if (err)
return err;
m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
m->lcn = lcn;
di = m->kaddr + erofs_blkoff(pos);
advise = le16_to_cpu(di->di_advise);
type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) &
((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1);
switch (type) {
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
m->clusterofs = 1 << vi->z_logical_clusterbits;
m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) {
if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) {
if (!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
m->compressedlcs = m->delta[0] &
~Z_EROFS_VLE_DI_D0_CBLKCNT;
m->compressedblks = m->delta[0] &
~Z_EROFS_LI_D0_CBLKCNT;
m->delta[0] = 1;
}
m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
break;
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
case Z_EROFS_LCLUSTER_TYPE_PLAIN:
case Z_EROFS_LCLUSTER_TYPE_HEAD1:
if (advise & Z_EROFS_LI_PARTIAL_REF)
m->partialref = true;
m->clusterofs = le16_to_cpu(di->di_clusterofs);
m->pblk = le32_to_cpu(di->di_u.blkaddr);
break;
@ -164,25 +208,25 @@ static int get_compacted_la_distance(unsigned int lclusterbits,
lo = decode_compactedbits(lclusterbits, lomask,
in, encodebits * i, &type);
if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
return d1;
++d1;
} while (++i < vcnt);
/* vcnt - 1 (Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) item */
if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
/* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */
if (!(lo & Z_EROFS_LI_D0_CBLKCNT))
d1 += lo - 1;
return d1;
}
static int unpack_compacted_index(struct z_erofs_maprecorder *m,
unsigned int amortizedshift,
unsigned int eofs, bool lookahead)
erofs_off_t pos, bool lookahead)
{
struct erofs_inode *const vi = m->inode;
const unsigned int lclusterbits = vi->z_logical_clusterbits;
const unsigned int lomask = (1 << lclusterbits) - 1;
unsigned int vcnt, base, lo, encodebits, nblk;
unsigned int vcnt, base, lo, encodebits, nblk, eofs;
int i;
u8 *in, type;
bool big_pcluster;
@ -194,8 +238,12 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
else
return -EOPNOTSUPP;
/* it isn't equal to round_up(..) */
m->nextpackoff = round_down(pos, vcnt << amortizedshift) +
(vcnt << amortizedshift);
big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
eofs = erofs_blkoff(pos);
base = round_down(eofs, vcnt << amortizedshift);
in = m->kaddr + base;
@ -204,20 +252,19 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
lo = decode_compactedbits(lclusterbits, lomask,
in, encodebits * i, &type);
m->type = type;
if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
m->clusterofs = 1 << lclusterbits;
/* figure out lookahead_distance: delta[1] if needed */
if (lookahead)
m->delta[1] = get_compacted_la_distance(lclusterbits,
encodebits,
vcnt, in, i);
if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
encodebits, vcnt, in, i);
if (lo & Z_EROFS_LI_D0_CBLKCNT) {
if (!big_pcluster) {
DBG_BUGON(1);
return -EFSCORRUPTED;
}
m->compressedlcs = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT;
m->delta[0] = 1;
return 0;
} else if (i + 1 != (int)vcnt) {
@ -231,9 +278,9 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
*/
lo = decode_compactedbits(lclusterbits, lomask,
in, encodebits * (i - 1), &type);
if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
lo = 0;
else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT)
else if (lo & Z_EROFS_LI_D0_CBLKCNT)
lo = 1;
m->delta[0] = lo + 1;
return 0;
@ -247,7 +294,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
--i;
lo = decode_compactedbits(lclusterbits, lomask,
in, encodebits * i, &type);
if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
i -= lo;
if (i >= 0)
@ -259,13 +306,13 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
--i;
lo = decode_compactedbits(lclusterbits, lomask,
in, encodebits * i, &type);
if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) {
if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
if (lo & Z_EROFS_LI_D0_CBLKCNT) {
--i;
nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
continue;
}
if (lo == 1) {
if (lo <= 1) {
DBG_BUGON(1);
/* --i; ++nblk; continue; */
return -EFSCORRUPTED;
@ -289,7 +336,7 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
const erofs_off_t ebase = round_up(iloc(vi->nid) + vi->inode_isize +
vi->xattr_isize, 8) +
sizeof(struct z_erofs_map_header);
const unsigned int totalidx = DIV_ROUND_UP(vi->i_size, EROFS_BLKSIZ);
const unsigned int totalidx = BLK_ROUND_UP(vi->i_size);
unsigned int compacted_4b_initial, compacted_2b;
unsigned int amortizedshift;
erofs_off_t pos;
@ -307,7 +354,8 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
if (compacted_4b_initial == 32 / 4)
compacted_4b_initial = 0;
if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
compacted_4b_initial < totalidx)
compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
else
compacted_2b = 0;
@ -332,8 +380,7 @@ out:
err = z_erofs_reload_indexes(m, erofs_blknr(pos));
if (err)
return err;
return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos),
lookahead);
return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
@ -341,10 +388,10 @@ static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
{
const unsigned int datamode = m->inode->datalayout;
if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
if (datamode == EROFS_INODE_COMPRESSED_FULL)
return legacy_load_cluster_from_disk(m, lcn);
if (datamode == EROFS_INODE_FLAT_COMPRESSION)
if (datamode == EROFS_INODE_COMPRESSED_COMPACT)
return compacted_load_cluster_from_disk(m, lcn, lookahead);
return -EINVAL;
@ -373,7 +420,7 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
return err;
switch (m->type) {
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
if (!m->delta[0]) {
erofs_err("invalid lookback distance 0 @ nid %llu",
(unsigned long long)vi->nid);
@ -381,8 +428,8 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
return -EFSCORRUPTED;
}
return z_erofs_extent_lookback(m, m->delta[0]);
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
case Z_EROFS_LCLUSTER_TYPE_PLAIN:
case Z_EROFS_LCLUSTER_TYPE_HEAD1:
m->headtype = m->type;
map->m_la = (lcn << lclusterbits) | m->clusterofs;
break;
@ -404,16 +451,17 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
unsigned long lcn;
int err;
DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN &&
m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD);
if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN &&
m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1);
if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
!(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) {
map->m_plen = 1 << lclusterbits;
return 0;
}
lcn = m->lcn + 1;
if (m->compressedlcs)
if (m->compressedblks)
goto out;
err = z_erofs_load_cluster_from_disk(m, lcn, false);
@ -422,28 +470,28 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
/*
* If the 1st NONHEAD lcluster has already been handled initially w/o
* valid compressedlcs, which means at least it mustn't be CBLKCNT, or
* valid compressedblks, which means at least it mustn't be CBLKCNT, or
* an internal implementation error is detected.
*
* The following code can also handle it properly anyway, but let's
* BUG_ON in the debugging mode only for developers to notice that.
*/
DBG_BUGON(lcn == initial_lcn &&
m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD);
m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);
switch (m->type) {
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
case Z_EROFS_LCLUSTER_TYPE_PLAIN:
case Z_EROFS_LCLUSTER_TYPE_HEAD1:
/*
* if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
* rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
*/
m->compressedlcs = 1;
m->compressedblks = 1 << (lclusterbits - sbi.blkszbits);
break;
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
if (m->delta[0] != 1)
goto err_bonus_cblkcnt;
if (m->compressedlcs)
if (m->compressedblks)
break;
/* fallthrough */
default:
@ -453,7 +501,7 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
return -EFSCORRUPTED;
}
out:
map->m_plen = m->compressedlcs << lclusterbits;
map->m_plen = m->compressedblks << sbi.blkszbits;
return 0;
err_bonus_cblkcnt:
erofs_err("bogus CBLKCNT @ lcn %lu of nid %llu",
@ -481,11 +529,11 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
if (err)
return err;
if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
DBG_BUGON(!m->delta[1] &&
m->clusterofs != 1 << lclusterbits);
} else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD) {
} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1) {
/* go on until the next HEAD lcluster */
if (lcn != headlcn)
break;
@ -504,10 +552,12 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
return 0;
}
int z_erofs_map_blocks_iter(struct erofs_inode *vi,
struct erofs_map_blocks *map,
int flags)
static int z_erofs_do_map_blocks(struct erofs_inode *vi,
struct erofs_map_blocks *map,
int flags)
{
bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
struct z_erofs_maprecorder m = {
.inode = vi,
.map = map,
@ -518,20 +568,8 @@ int z_erofs_map_blocks_iter(struct erofs_inode *vi,
unsigned long initial_lcn;
unsigned long long ofs, end;
/* when trying to read beyond EOF, leave it unmapped */
if (map->m_la >= vi->i_size) {
map->m_llen = map->m_la + 1 - vi->i_size;
map->m_la = vi->i_size;
map->m_flags = 0;
goto out;
}
err = z_erofs_fill_inode_lazy(vi);
if (err)
goto out;
lclusterbits = vi->z_logical_clusterbits;
ofs = map->m_la;
ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? vi->i_size - 1 : map->m_la;
initial_lcn = ofs >> lclusterbits;
endoff = ofs & ((1 << lclusterbits) - 1);
@ -539,11 +577,14 @@ int z_erofs_map_blocks_iter(struct erofs_inode *vi,
if (err)
goto out;
if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
vi->z_idataoff = m.nextpackoff;
map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
end = (m.lcn + 1ULL) << lclusterbits;
switch (m.type) {
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
case Z_EROFS_LCLUSTER_TYPE_PLAIN:
case Z_EROFS_LCLUSTER_TYPE_HEAD1:
if (endoff >= m.clusterofs) {
m.headtype = m.type;
map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
@ -560,7 +601,7 @@ int z_erofs_map_blocks_iter(struct erofs_inode *vi,
map->m_flags |= EROFS_MAP_FULL_MAPPED;
m.delta[0] = 1;
/* fallthrough */
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
/* get the corresponding first chunk */
err = z_erofs_extent_lookback(&m, m.delta[0]);
if (err)
@ -572,18 +613,43 @@ int z_erofs_map_blocks_iter(struct erofs_inode *vi,
err = -EOPNOTSUPP;
goto out;
}
if (m.partialref)
map->m_flags |= EROFS_MAP_PARTIAL_REF;
map->m_llen = end - map->m_la;
map->m_pa = blknr_to_addr(m.pblk);
if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
vi->z_tailextent_headlcn = m.lcn;
/* for non-compact indexes, fragmentoff is 64 bits */
if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
vi->fragmentoff |= (u64)m.pblk << 32;
}
if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
map->m_flags |= EROFS_MAP_META;
map->m_pa = vi->z_idataoff;
map->m_plen = vi->z_idata_size;
} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
map->m_flags |= EROFS_MAP_FRAGMENT;
} else {
map->m_pa = erofs_pos(m.pblk);
err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
if (err)
goto out;
}
err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
if (err)
goto out;
if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN)
map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
else
if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
if (map->m_llen > map->m_plen) {
DBG_BUGON(1);
err = -EFSCORRUPTED;
goto out;
}
if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER)
map->m_algorithmformat =
Z_EROFS_COMPRESSION_INTERLACED;
else
map->m_algorithmformat =
Z_EROFS_COMPRESSION_SHIFTED;
} else {
map->m_algorithmformat = vi->z_algorithmtype[0];
}
if (flags & EROFS_GET_BLOCKS_FIEMAP) {
err = z_erofs_get_extent_decompressedlen(&m);
@ -595,7 +661,38 @@ out:
erofs_dbg("m_la %" PRIu64 " m_pa %" PRIu64 " m_llen %" PRIu64 " m_plen %" PRIu64 " m_flags 0%o",
map->m_la, map->m_pa,
map->m_llen, map->m_plen, map->m_flags);
return err;
}
int z_erofs_map_blocks_iter(struct erofs_inode *vi,
struct erofs_map_blocks *map,
int flags)
{
int err = 0;
/* when trying to read beyond EOF, leave it unmapped */
if (map->m_la >= vi->i_size) {
map->m_llen = map->m_la + 1 - vi->i_size;
map->m_la = vi->i_size;
map->m_flags = 0;
goto out;
}
err = z_erofs_fill_inode_lazy(vi);
if (err)
goto out;
if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
!vi->z_tailextent_headlcn) {
map->m_la = 0;
map->m_llen = vi->i_size;
map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_FULL_MAPPED |
EROFS_MAP_FRAGMENT;
goto out;
}
err = z_erofs_do_map_blocks(vi, map, flags);
out:
DBG_BUGON(err < 0 && err != -ENOMEM);
return err;
}

View file

@ -30,6 +30,7 @@ struct disk_partition;
* @param[in] part_info Place to store the partition information
* Return: The slot number (>= 0) on success, or a negative on error
*/
int ab_select_slot(struct blk_desc *dev_desc, struct disk_partition *part_info);
int ab_select_slot(struct blk_desc *dev_desc, struct disk_partition *part_info,
bool dec_tries);
#endif /* __ANDROID_AB_H */
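The extra dec_tries argument lets callers read the slot selection without consuming one of the remaining boot tries. A hedged usage sketch, assuming the standard U-Boot blk/part headers; the wrapper name is illustrative and not part of this series:

#include <android_ab.h>
#include <blk.h>
#include <part.h>

/* Illustrative helper: peek at the active slot without decrementing tries. */
static int peek_ab_slot(struct blk_desc *dev_desc,
			struct disk_partition *misc_part, char suffix[3])
{
	int slot = ab_select_slot(dev_desc, misc_part, false);

	if (slot < 0)		/* e.g. -ENODATA when the metadata is unusable */
		return slot;

	suffix[0] = '_';
	suffix[1] = 'a' + slot;
	suffix[2] = '\0';
	return slot;
}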

View file

@ -69,7 +69,7 @@ const char *const *const bootstd_get_bootdev_order(struct udevice *dev,
/**
* bootstd_get_prefixes() - Get the filename-prefixes list
*
* This reads the prefixes, e.g. {"/", "/bpot", NULL}
* This reads the prefixes, e.g. {"/", "/boot", NULL}
*
* The list is alloced by the bootstd driver so should not be freed. That is the
* reason for all the const stuff in the function signature

View file

@ -105,7 +105,24 @@ struct blk_desc *blk_get_dev(const char *ifname, int dev);
struct blk_desc *mg_disk_get_dev(int dev);
/* disk/part.c */
/**
* part_get_info_by_type() - Get partitions from a block device using a specific
* partition driver
*
* Each interface allocates its own devices and typically struct blk_desc is
* contained within the interface's data structure. There is no global
* numbering for block devices, so the interface name must be provided.
*
* @dev_desc: Block device descriptor
* @part: Partition number to read
* @part_type: Partition driver to use, or PART_TYPE_UNKNOWN to automatically
* choose a driver
* @info: Returned partition information
*
* Return: 0 on success, negative errno on failure
*/
int part_get_info_by_type(struct blk_desc *dev_desc, int part, int part_type,
struct disk_partition *info);
int part_get_info(struct blk_desc *dev_desc, int part,
struct disk_partition *info);
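A hedged caller sketch for the new part_get_info_by_type() declared above: it forces the MBR (DOS) parser first, which matters on the hybrid MBR+GPT layout exercised by the new test below, then falls back to autodetection. PART_TYPE_DOS and PART_TYPE_UNKNOWN are the existing part.h constants; the helper name is illustrative:

#include <blk.h>
#include <part.h>

/* Illustrative: prefer the MBR view of a hybrid MBR+GPT disk. */
static int get_mbr_part_or_any(struct blk_desc *dev_desc, int partnum,
			       struct disk_partition *info)
{
	int ret = part_get_info_by_type(dev_desc, partnum, PART_TYPE_DOS, info);

	if (ret < 0)		/* no usable MBR entry: let the core pick a driver */
		ret = part_get_info_by_type(dev_desc, partnum,
					    PART_TYPE_UNKNOWN, info);
	return ret;
}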
/**

View file

@ -17,10 +17,12 @@ static int do_test(struct unit_test_state *uts, int expected,
struct blk_desc *mmc_dev_desc;
struct disk_partition part_info;
ut_asserteq(expected,
part_get_info_by_dev_and_name_or_num("mmc", part_str,
&mmc_dev_desc,
&part_info, whole));
int ret = part_get_info_by_dev_and_name_or_num("mmc", part_str,
&mmc_dev_desc,
&part_info, whole);
ut_assertf(expected == ret, "test(%d, \"%s\", %d) == %d", expected,
part_str, whole, ret);
return 0;
}
@ -76,15 +78,15 @@ static int dm_test_part(struct unit_test_state *uts)
test(-EINVAL, "#test1", true);
test(1, "2", false);
test(1, "2", true);
test(-ENOENT, "1:0", false);
test(0, "1:0", true);
test(1, "1:1", false);
test(2, "1:2", false);
test(1, "1.0", false);
test(0, "1.0:0", true);
test(1, "1.0:1", false);
test(2, "1.0:2", false);
test(-EINVAL, "1#bogus", false);
test(-ENOENT, "2:0", false);
test(0, "2:0", true);
test(1, "2:1", false);
test(2, "2:2", false);
test(1, "2.0", false);
test(0, "2.0:0", true);
test(1, "2.0:1", false);
test(2, "2.0:2", false);
test(-EINVAL, "2#bogus", false);
test(1, "2#test1", false);
test(2, "2#test2", false);
ret = 0;
@ -106,3 +108,90 @@ static int dm_test_part_bootable(struct unit_test_state *uts)
return 0;
}
DM_TEST(dm_test_part_bootable, UT_TESTF_SCAN_FDT);
static int do_get_info_test(struct unit_test_state *uts,
struct blk_desc *dev_desc, int part, int part_type,
struct disk_partition const *reference)
{
struct disk_partition p;
int ret;
memset(&p, 0, sizeof(p));
ret = part_get_info_by_type(dev_desc, part, part_type, &p);
printf("part_get_info_by_type(%d, 0x%x) = %d\n", part, part_type, ret);
if (ut_assertok(ret)) {
return 0;
}
ut_asserteq(reference->start, p.start);
ut_asserteq(reference->size, p.size);
ut_asserteq(reference->sys_ind, p.sys_ind);
return 0;
}
static int dm_test_part_get_info_by_type(struct unit_test_state *uts)
{
char str_disk_guid[UUID_STR_LEN + 1];
struct blk_desc *mmc_dev_desc;
struct disk_partition gpt_parts[] = {
{
.start = 48, /* GPT data takes up the first 34 blocks or so */
.size = 1,
.name = "test1",
.sys_ind = 0,
},
{
.start = 49,
.size = 1,
.name = "test2",
.sys_ind = 0,
},
};
struct disk_partition mbr_parts[] = {
{
.start = 1,
.size = 33,
.name = "gpt",
.sys_ind = EFI_PMBR_OSTYPE_EFI_GPT,
},
{
.start = 48,
.size = 1,
.name = "test1",
.sys_ind = 0x83,
},
};
ut_asserteq(2, blk_get_device_by_str("mmc", "2", &mmc_dev_desc));
if (CONFIG_IS_ENABLED(RANDOM_UUID)) {
gen_rand_uuid_str(gpt_parts[0].uuid, UUID_STR_FORMAT_STD);
gen_rand_uuid_str(gpt_parts[1].uuid, UUID_STR_FORMAT_STD);
gen_rand_uuid_str(str_disk_guid, UUID_STR_FORMAT_STD);
}
ut_assertok(gpt_restore(mmc_dev_desc, str_disk_guid, gpt_parts,
ARRAY_SIZE(gpt_parts)));
ut_assertok(write_mbr_partitions(mmc_dev_desc, mbr_parts,
ARRAY_SIZE(mbr_parts), 0));
#define get_info_test(_part, _part_type, _reference) \
ut_assertok(do_get_info_test(uts, mmc_dev_desc, _part, _part_type, \
_reference))
for (int i = 0; i < ARRAY_SIZE(gpt_parts); i++) {
get_info_test(i + 1, PART_TYPE_UNKNOWN, &gpt_parts[i]);
}
for (int i = 0; i < ARRAY_SIZE(mbr_parts); i++) {
get_info_test(i + 1, PART_TYPE_DOS, &mbr_parts[i]);
}
for (int i = 0; i < ARRAY_SIZE(gpt_parts); i++) {
get_info_test(i + 1, PART_TYPE_EFI, &gpt_parts[i]);
}
return 0;
}
DM_TEST(dm_test_part_get_info_by_type, UT_TESTF_SCAN_PDATA | UT_TESTF_SCAN_FDT);

View file

@ -32,4 +32,5 @@ def cat_data(u_boot_config):
pytest.skip('Setup failed')
finally:
shutil.rmtree(mnt_point)
os.remove(image_path)
if os.path.exists(image_path):
os.remove(image_path)

View file

@ -32,4 +32,5 @@ def xxd_data(u_boot_config):
pytest.skip('Setup failed')
finally:
shutil.rmtree(mnt_point)
os.remove(image_path)
if os.path.exists(image_path):
os.remove(image_path)