Use Docker Buildx for multi-arch builds

The bitwarden_rs code is still cross-compiled exactly as before, but Docker
Buildx is used to rewrite the resulting Docker images with correct platform
metadata (reflecting the target platform instead of the build platform).
Buildx also now handles building and pushing the multi-arch manifest lists.
Jeremy Lin, 2021-01-09 02:33:36 -08:00
commit 5633b6ac94 (parent 175f2aeace)
11 changed files with 183 additions and 114 deletions
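A quick way to check the fix once rebuilt images are published (repo/tag here are illustrative): the inspected architecture should now reflect the target platform rather than the build platform.

    $ docker image inspect bitwardenrs/server:latest-armv7 --format '{{.Os}}/{{.Architecture}}'
    linux/arm

Before this change, the same command reported linux/amd64 for the cross-built ARM images.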

docker/Dockerfile.buildx (new file)

@@ -0,0 +1,33 @@
# The cross-built images have the build arch (`amd64`) embedded in the image
# manifest, rather than the target arch. For example:
#
# $ docker inspect bitwardenrs/server:latest-armv7 | jq -r '.[]|.Architecture'
# amd64
#
# Recent versions of Docker have started printing a warning when the image's
# claimed arch doesn't match the host arch. For example:
#
# WARNING: The requested image's platform (linux/amd64) does not match the
# detected host platform (linux/arm/v7) and no specific platform was requested
#
# The image still works fine, but the spurious warning creates confusion.
#
# Docker doesn't seem to provide a way to directly set the arch of an image
# at build time. To resolve the build vs. target arch discrepancy, we use
# Docker Buildx to build a new set of images with the correct target arch.
#
# Docker Buildx uses this Dockerfile to build an image for each requested
# platform. Since the Dockerfile basically consists of a single `FROM`
# instruction, we're effectively telling Buildx to build a platform-specific
# image by simply copying the existing cross-built image and setting the
# correct target arch as a side effect.
#
# References:
#
# - https://docs.docker.com/buildx/working-with-buildx/#build-multi-platform-images
# - https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
# - https://docs.docker.com/engine/reference/builder/#understand-how-arg-and-from-interact
#
ARG LOCAL_REPO
ARG DOCKER_TAG
FROM ${LOCAL_REPO}:${DOCKER_TAG}-${TARGETARCH}${TARGETVARIANT}
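As a sketch of how this `FROM` line resolves (the repo and tag values are illustrative): for a `--platform linux/arm/v7` build, Buildx sets `TARGETARCH=arm` and `TARGETVARIANT=v7`, so

    docker buildx build --platform linux/arm/v7 \
        --build-arg LOCAL_REPO=localhost:5000/bitwardenrs/server \
        --build-arg DOCKER_TAG=testing ...

effectively builds `FROM localhost:5000/bitwardenrs/server:testing-armv7`. For `linux/amd64` and `linux/arm64`, `TARGETVARIANT` is empty, yielding the `-amd64` and `-arm64` tags.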

docker/Dockerfile.j2

@@ -7,24 +7,24 @@
 {% set build_stage_base_image = "clux/muslrust:nightly-2020-11-22" %}
 {% set runtime_stage_base_image = "alpine:3.12" %}
 {% set package_arch_target = "x86_64-unknown-linux-musl" %}
-{% elif "arm32v7" in target_file %}
+{% elif "armv7" in target_file %}
 {% set build_stage_base_image = "messense/rust-musl-cross:armv7-musleabihf" %}
 {% set runtime_stage_base_image = "balenalib/armv7hf-alpine:3.12" %}
 {% set package_arch_target = "armv7-unknown-linux-musleabihf" %}
 {% endif %}
 {% elif "amd64" in target_file %}
 {% set runtime_stage_base_image = "debian:buster-slim" %}
-{% elif "arm64v8" in target_file %}
+{% elif "arm64" in target_file %}
 {% set runtime_stage_base_image = "balenalib/aarch64-debian:buster" %}
 {% set package_arch_name = "arm64" %}
 {% set package_arch_target = "aarch64-unknown-linux-gnu" %}
 {% set package_cross_compiler = "aarch64-linux-gnu" %}
-{% elif "arm32v6" in target_file %}
+{% elif "armv6" in target_file %}
 {% set runtime_stage_base_image = "balenalib/rpi-debian:buster" %}
 {% set package_arch_name = "armel" %}
 {% set package_arch_target = "arm-unknown-linux-gnueabi" %}
 {% set package_cross_compiler = "arm-linux-gnueabi" %}
-{% elif "arm32v7" in target_file %}
+{% elif "armv7" in target_file %}
 {% set runtime_stage_base_image = "balenalib/armv7hf-debian:buster" %}
 {% set package_arch_name = "armhf" %}
 {% set package_arch_target = "armv7-unknown-linux-gnueabihf" %}
@@ -178,7 +178,7 @@ RUN touch src/main.rs
 # your actual source files being built
 RUN cargo build --features ${DB} --release{{ package_arch_target_param }}
 {% if "alpine" in target_file %}
-{% if "arm32v7" in target_file %}
+{% if "armv7" in target_file %}
 RUN musl-strip target/{{ package_arch_target }}/release/bitwarden_rs
 {% endif %}
 {% endif %}
@@ -225,7 +225,7 @@ RUN apt-get update && apt-get install -y \
     libpq5 \
     && rm -rf /var/lib/apt/lists/*
 {% endif %}
-{% if "alpine" in target_file and "arm32v7" in target_file %}
+{% if "alpine" in target_file and "armv7" in target_file %}
 RUN apk add --no-cache -X http://dl-cdn.alpinelinux.org/alpine/edge/community catatonit
 {% endif %}
@@ -256,7 +256,7 @@ HEALTHCHECK --interval=60s --timeout=10s CMD ["/healthcheck.sh"]
 # Configures the startup!
 WORKDIR /
-{% if "alpine" in target_file and "arm32v7" in target_file %}
+{% if "alpine" in target_file and "armv7" in target_file %}
 CMD ["catatonit", "/start.sh"]
 {% else %}
 CMD ["/start.sh"]

hooks/README.md

@@ -10,7 +10,7 @@ Docker Hub hooks provide these predefined [environment variables](https://docs.d
 * `DOCKER_TAG`: the Docker repository tag being built.
 * `IMAGE_NAME`: the name and tag of the Docker repository being built. (This variable is a combination of `DOCKER_REPO:DOCKER_TAG`.)

-The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/database/OS combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.
+The current multi-arch image build relies on the original bitwarden_rs Dockerfiles, which use cross-compilation for architectures other than `amd64`, and don't yet support all arch/distro combinations. However, cross-compilation is much faster than QEMU-based builds (e.g., using `docker buildx`). This situation may need to be revisited at some point.

 ## References
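Outside Docker Hub, the hooks can be exercised by exporting the same variables manually; a minimal sketch (values illustrative, and assuming Docker credentials and Buildx are available locally):

    export DOCKER_REPO=index.docker.io/bitwardenrs/server
    export DOCKER_TAG=testing
    ./hooks/build && ./hooks/push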

hooks/arches.sh

@@ -1,19 +1,16 @@
-# The default Debian-based images support these arches for all database connections
-#
-# Other images (Alpine-based) currently
-# support only a subset of these.
+# The default Debian-based images support these arches for all database backends.
 arches=(
     amd64
-    arm32v6
-    arm32v7
-    arm64v8
+    armv6
+    armv7
+    arm64
 )

 if [[ "${DOCKER_TAG}" == *alpine ]]; then
-    # The Alpine build currently only works for amd64.
-    os_suffix=.alpine
+    # The Alpine image build currently only works for certain arches.
+    distro_suffix=.alpine
     arches=(
         amd64
-        arm32v7
+        armv7
     )
 fi
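A quick sketch of the two cases when this file is sourced (tag values illustrative):

    $ export DOCKER_TAG=1.18.0; source ./hooks/arches.sh; echo "${arches[@]}"
    amd64 armv6 armv7 arm64
    $ export DOCKER_TAG=1.18.0-alpine; source ./hooks/arches.sh; echo "${arches[@]}" "${distro_suffix}"
    amd64 armv7 .alpine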

hooks/build

@@ -9,6 +9,6 @@ set -ex
 for arch in "${arches[@]}"; do
     docker build \
         -t "${DOCKER_REPO}:${DOCKER_TAG}-${arch}" \
-        -f docker/${arch}/Dockerfile${os_suffix} \
+        -f docker/${arch}/Dockerfile${distro_suffix} \
         .
 done
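For example, with `DOCKER_REPO=bitwardenrs/server` and `DOCKER_TAG=testing-alpine` (illustrative values), one iteration of this loop expands to:

    docker build \
        -t bitwardenrs/server:testing-alpine-armv7 \
        -f docker/armv7/Dockerfile.alpine \
        .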

hooks/pre_build (new executable file)

@@ -0,0 +1,18 @@
#!/bin/bash

set -ex

# Print some environment info in case it's useful for troubleshooting.
id
pwd
df -h
env
docker info
docker version

# Install build dependencies.
deps=(
    jq
)
apt-get update
apt-get install -y "${deps[@]}"
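jq is the only extra package; hooks/push (below) needs it to pick the armv6 digest out of a pushed manifest list, along the lines of (repo/tag illustrative):

    docker manifest inspect bitwardenrs/server:testing |
        jq -r '.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'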

hooks/push

@@ -1,117 +1,138 @@
 #!/bin/bash

-echo ">>> Pushing images..."
-
-export DOCKER_CLI_EXPERIMENTAL=enabled
-
-declare -A annotations=(
-    [amd64]="--os linux --arch amd64"
-    [arm32v6]="--os linux --arch arm --variant v6"
-    [arm32v7]="--os linux --arch arm --variant v7"
-    [arm64v8]="--os linux --arch arm64 --variant v8"
-)
-
 source ./hooks/arches.sh

+export DOCKER_CLI_EXPERIMENTAL=enabled
+
+# Join a list of args with a single char.
+# Ref: https://stackoverflow.com/a/17841619
+join() { local IFS="$1"; shift; echo "$*"; }
+
 set -ex

-declare -A images
+echo ">>> Starting local Docker registry..."
+
+# Docker Buildx's `docker-container` driver is needed for multi-platform
+# builds, but it can't access existing images on the Docker host (like the
+# cross-compiled ones we just built). Those images first need to be pushed to
+# a registry -- Docker Hub could be used, but since it's not trivial to clean
+# up those intermediate images on Docker Hub, it's easier to just run a local
+# Docker registry, which gets cleaned up automatically once the build job ends.
+#
+# https://docs.docker.com/registry/deploying/
+# https://hub.docker.com/_/registry
+#
+# Use host networking so the buildx container can access the registry via
+# localhost.
+#
+docker run -d --name registry --network host registry:2  # defaults to port 5000
+
+# Docker Hub sets a `DOCKER_REPO` env var with the format `index.docker.io/user/repo`.
+# Strip the registry portion to construct a local repo path for use in `Dockerfile.buildx`.
+LOCAL_REGISTRY="localhost:5000"
+REPO="${DOCKER_REPO#*/}"
+LOCAL_REPO="${LOCAL_REGISTRY}/${REPO}"
+
+echo ">>> Pushing images to local registry..."
+
 for arch in ${arches[@]}; do
-    images[$arch]="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
+    docker_image="${DOCKER_REPO}:${DOCKER_TAG}-${arch}"
+    local_image="${LOCAL_REPO}:${DOCKER_TAG}-${arch}"
+    docker tag "${docker_image}" "${local_image}"
+    docker push "${local_image}"
 done

-# Push the images that were just built; manifest list creation fails if the
-# images (manifests) referenced don't already exist in the Docker registry.
-for image in "${images[@]}"; do
-    docker push "${image}"
-done
-
-manifest_lists=("${DOCKER_REPO}:${DOCKER_TAG}")
-
-# If the Docker tag starts with a version number, assume the latest release is
-# being pushed. Add an extra manifest (`latest` or `alpine`, as appropriate)
+echo ">>> Setting up Docker Buildx..."
+
+# Same as earlier, use host networking so the buildx container can access the
+# registry via localhost.
+#
+# Ref: https://github.com/docker/buildx/issues/94#issuecomment-534367714
+#
+docker buildx create --name builder --use --driver-opt network=host
+
+echo ">>> Running Docker Buildx..."
+
+tags=("${DOCKER_REPO}:${DOCKER_TAG}")
+
+# If the Docker tag starts with a version number, assume the latest release
+# is being pushed. Add an extra tag (`latest` or `alpine`, as appropriate)
 # to make it easier for users to track the latest release.
 if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
     if [[ "${DOCKER_TAG}" == *alpine ]]; then
-        manifest_lists+=(${DOCKER_REPO}:alpine)
+        tags+=(${DOCKER_REPO}:alpine)
     else
-        manifest_lists+=(${DOCKER_REPO}:latest)
-
-        # Add an extra `latest-arm32v6` tag; Docker can't seem to properly
-        # auto-select that image on Armv6 platforms like Raspberry Pi 1 and Zero
-        # (https://github.com/moby/moby/issues/41017).
-        #
-        # Add this tag only for the SQLite image, as the MySQL and PostgreSQL
-        # builds don't currently work on non-amd64 arches.
-        #
-        # TODO: Also add an `alpine-arm32v6` tag if multi-arch support for
-        # Alpine-based bitwarden_rs images is implemented before this Docker
-        # issue is fixed.
-        if [[ ${DOCKER_REPO} == *server ]]; then
-            docker tag "${DOCKER_REPO}:${DOCKER_TAG}-arm32v6" "${DOCKER_REPO}:latest-arm32v6"
-            docker push "${DOCKER_REPO}:latest-arm32v6"
-        fi
+        tags+=(${DOCKER_REPO}:latest)
     fi
 fi

-for manifest_list in "${manifest_lists[@]}"; do
-    # Create the (multi-arch) manifest list of arch-specific images.
-    docker manifest create ${manifest_list} ${images[@]}
-
-    # Make sure each image manifest is annotated with the correct arch info.
-    # Docker does not auto-detect the arch of each cross-compiled image, so
-    # everything would appear as `linux/amd64` otherwise.
-    for arch in "${arches[@]}"; do
-        docker manifest annotate ${annotations[$arch]} ${manifest_list} ${images[$arch]}
-    done
-
-    # Push the manifest list.
-    docker manifest push --purge ${manifest_list}
+tag_args=()
+for tag in "${tags[@]}"; do
+    tag_args+=(--tag "${tag}")
 done

-# Avoid logging credentials and tokens.
-set +ex
-
-# Delete the arch-specific tags, if credentials for doing so are available.
-# Note that `DOCKER_PASSWORD` must be the actual user password. Passing a JWT
-# obtained using a personal access token results in a 403 error with
-# {"detail": "access to the resource is forbidden with personal access token"}
-if [[ -z "${DOCKER_USERNAME}" || -z "${DOCKER_PASSWORD}" ]]; then
-    exit 0
-fi
-
-# Given a JSON input on stdin, extract the string value associated with the
-# specified key. This avoids an extra dependency on a tool like `jq`.
-extract() {
-    local key="$1"
-    # Extract "<key>":"<val>" (assumes key/val won't contain double quotes).
-    # The colon may have whitespace on either side.
-    grep -o "\"${key}\"[[:space:]]*:[[:space:]]*\"[^\"]\+\"" |
-    # Extract just <val> by deleting the last '"', and then greedily deleting
-    # everything up to '"'.
-    sed -e 's/"$//' -e 's/.*"//'
-}
-
-echo ">>> Getting API token..."
-jwt=$(curl -sS -X POST \
-    -H "Content-Type: application/json" \
-    -d "{\"username\":\"${DOCKER_USERNAME}\",\"password\": \"${DOCKER_PASSWORD}\"}" \
-    "https://hub.docker.com/v2/users/login" |
-    extract 'token')
-
-# Strip the registry portion from `index.docker.io/user/repo`.
-repo="${DOCKER_REPO#*/}"
-
+# Docker Buildx takes a list of target platforms (OS/arch/variant), so map
+# the arch list to a platform list (assuming the OS is always `linux`).
+declare -A arch_to_platform=(
+    [amd64]="linux/amd64"
+    [armv6]="linux/arm/v6"
+    [armv7]="linux/arm/v7"
+    [arm64]="linux/arm64"
+)
+platforms=()
 for arch in ${arches[@]}; do
-    # Don't delete the `arm32v6` tag; Docker can't seem to properly
-    # auto-select that image on Armv6 platforms like Raspberry Pi 1 and Zero
-    # (https://github.com/moby/moby/issues/41017).
-    if [[ ${arch} == 'arm32v6' ]]; then
-        continue
-    fi
-    tag="${DOCKER_TAG}-${arch}"
-    echo ">>> Deleting '${repo}:${tag}'..."
-    curl -sS -X DELETE \
-        -H "Authorization: Bearer ${jwt}" \
-        "https://hub.docker.com/v2/repositories/${repo}/tags/${tag}/"
+    platforms+=("${arch_to_platform[$arch]}")
 done
+platforms="$(join "," "${platforms[@]}")"
+
+# Run the build, pushing the resulting images and multi-arch manifest list to
+# Docker Hub. The Dockerfile is read from stdin to avoid sending any build
+# context, which isn't needed here since the actual cross-compiled images
+# have already been built.
+docker buildx build \
+    --network host \
+    --build-arg LOCAL_REPO="${LOCAL_REPO}" \
+    --build-arg DOCKER_TAG="${DOCKER_TAG}" \
+    --platform "${platforms}" \
+    "${tag_args[@]}" \
+    --push \
+    - < ./docker/Dockerfile.buildx
+
+# Add an extra arch-specific tag for `arm32v6`; Docker can't seem to properly
+# auto-select that image on ARMv6 platforms like Raspberry Pi 1 and Zero
+# (https://github.com/moby/moby/issues/41017).
+#
+# Note that we use `arm32v6` instead of `armv6` to be consistent with the
+# existing bitwarden_rs tags, which adhere to the naming conventions of the
+# Docker per-architecture repos (e.g., https://hub.docker.com/u/arm32v6).
+# Unfortunately, these per-arch repo names aren't always consistent with the
+# corresponding platform (OS/arch/variant) IDs, particularly in the case of
+# 32-bit ARM arches (e.g., `linux/arm/v6` is used, not `linux/arm32/v6`).
+#
+# TODO: It looks like this issue should be fixed starting in Docker 20.10.0,
+# so this step can be removed once fixed versions are in wider distribution.
+#
+# Tags:
+#
+#   testing        => testing-arm32v6
+#   testing-alpine => <ignored>
+#   x.y.z          => x.y.z-arm32v6, latest-arm32v6
+#   x.y.z-alpine   => <ignored>
+#
+if [[ "${DOCKER_TAG}" != *alpine ]]; then
+    image="${DOCKER_REPO}":"${DOCKER_TAG}"
+
+    # Fetch the multi-arch manifest list and find the digest of the armv6 image.
+    filter='.manifests|.[]|select(.platform.architecture=="arm" and .platform.variant=="v6")|.digest'
+    digest="$(docker manifest inspect "${image}" | jq -r "${filter}")"
+
+    # Pull the armv6 image by digest, retag it, and repush it.
+    docker pull "${DOCKER_REPO}"@"${digest}"
+    docker tag "${DOCKER_REPO}"@"${digest}" "${image}"-arm32v6
+    docker push "${image}"-arm32v6
+
+    if [[ "${DOCKER_TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+ ]]; then
+        docker tag "${image}"-arm32v6 "${DOCKER_REPO}:latest"-arm32v6
+        docker push "${DOCKER_REPO}:latest"-arm32v6
+    fi
+fi
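As a final sanity check (a sketch; repo/tag illustrative), the pushed manifest list should now advertise the correct platform for every entry, rather than linux/amd64 across the board:

    $ docker manifest inspect bitwardenrs/server:testing |
          jq -r '.manifests[].platform | [.os, .architecture, (.variant // empty)] | join("/")'
    linux/amd64
    linux/arm/v6
    linux/arm/v7
    linux/arm64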