export MSYS_NO_PATHCONV=1

script=`cd $(dirname $0) && pwd`/`basename $0`
image=$1

docker_dir="`dirname $script`"
ci_dir="`dirname $docker_dir`"
src_dir="`dirname $ci_dir`"
root_dir="`dirname $src_dir`"

objdir=$root_dir/obj
dist=$objdir/build/dist

source "$ci_dir/shared.sh"
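
# The image to run is named by the directory under src/ci/docker that holds
# its Dockerfile (e.g. `x86_64-gnu`). If the image exists it is built, reusing
# a previously uploaded copy from S3 when running on CI; images under
# docker/disabled/ can only be built locally; anything else is an error.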
if [ -f "$docker_dir/$image/Dockerfile" ]; then
  if [ "$CI" != "" ]; then
    hash_key=/tmp/.docker-hash-key.txt
    rm -f "$hash_key"
    echo $image >> $hash_key
    cat "$docker_dir/$image/Dockerfile" >> $hash_key

    # Look for all source files involved in the COPY commands
    copied_files=/tmp/.docker-copied-files.txt
    rm -f "$copied_files"
    for i in $(sed -n -e 's/^COPY \(.*\) .*$/\1/p' "$docker_dir/$image/Dockerfile"); do
      find "$docker_dir/$i" -type f >> $copied_files
    done

    # Sort the file names and cat the content into the hash key
    sort $copied_files | xargs cat >> $hash_key

    docker --version >> $hash_key
    cksum=$(sha512sum $hash_key | \
      awk '{print $1}')
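
    # This checksum keys the prebuilt-image cache: if neither the Dockerfile,
    # the files it COPYs, nor the docker version changed, the same key (and
    # therefore the same cached image) is reused.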

    s3url="s3://$SCCACHE_BUCKET/docker/$cksum"
    url="https://$SCCACHE_BUCKET.s3.amazonaws.com/docker/$cksum"
    upload="aws s3 cp - $s3url"

    echo "Attempting to download $url"
    rm -f /tmp/rustci_docker_cache
    # Tolerate a download failure: a cache miss simply means the image is
    # rebuilt (and re-uploaded) below.
    set +e
    retry curl -y 30 -Y 10 --connect-timeout 30 -f -L -C - -o /tmp/rustci_docker_cache "$url"
    loaded_images=$(docker load -i /tmp/rustci_docker_cache | sed 's/.* sha/sha/')
    set -e
    echo -e "Downloaded containers:\n$loaded_images"
  fi
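
  # Build (or rebuild) the image. On Windows hosts, where cygpath is
  # available, the paths handed to docker have to be converted to Windows
  # form first.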
  dockerfile="$docker_dir/$image/Dockerfile"
  if [ -x /usr/bin/cygpath ]; then
    context="`cygpath -w $docker_dir`"
    dockerfile="`cygpath -w $dockerfile`"
  else
    context="$docker_dir"
  fi
  retry docker build --rm -t rust-ci -f "$dockerfile" "$context"
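
  # `$upload` is only set on CI. Skip the upload when the image we just built
  # is identical to one of the containers downloaded from the cache above.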
  if [ "$upload" != "" ]; then
    digest=$(docker inspect rust-ci --format '{{.Id}}')
    echo "Built container $digest"
    if ! grep -q "$digest" <(echo "$loaded_images"); then
      echo "Uploading finished image to $url"
      docker history -q rust-ci | \
        grep -v missing | xargs docker save | gzip | $upload
    else
      echo "Looks like docker image is the same as before, not uploading"
    fi
    # Record the container image for reuse, e.g. by rustup.rs builds
    info="$dist/image-$image.txt"
    mkdir -p "$dist"
    echo "$url" > "$info"
    echo "$digest" >> "$info"
  fi
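
# Disabled images live under docker/disabled/ and can only be built and run
# locally, never on CI.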
elif [ -f "$docker_dir/disabled/$image/Dockerfile" ]; then
  if [ -n "$CI" ]; then
    echo Cannot run disabled images on CI!
    exit 1
  fi
  # The --transform rewrites paths so the build context of disabled
  # Dockerfiles matches that of the enabled ones.
  tar --transform 's#^./disabled/#./#' -C $docker_dir -c . | docker \
    build --rm -t rust-ci \
    -f "$image/Dockerfile" \
    -
else
  echo Invalid image: $image
  exit 1
fi
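
# Prepare the directories that get mounted into the container and collect the
# volume/environment arguments for `docker run` below.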
mkdir -p $HOME/.cargo
mkdir -p $objdir/cores
mkdir -p /tmp/toolstate

args=
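
# Forward the S3 sccache configuration into the container when a bucket is
# configured; otherwise fall back to a local cache mounted at /sccache.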
if [ "$SCCACHE_BUCKET" != "" ]; then
  args="$args --env SCCACHE_BUCKET"
  args="$args --env SCCACHE_REGION"
  args="$args --env AWS_ACCESS_KEY_ID"
  args="$args --env AWS_SECRET_ACCESS_KEY"
else
  mkdir -p $HOME/.cache/sccache
  args="$args --env SCCACHE_DIR=/sccache --volume $HOME/.cache/sccache:/sccache"
fi

# Run containers as privileged, as it gives them access to some more syscalls
# such as ptrace and whatnot. During the upgrade to LLVM 5.0 it was discovered
# that the leak sanitizer apparently needs these syscalls nowadays, so we need
# `--privileged` for at least the `x86_64-gnu` builder; this just goes ahead
# and sets it for all builders.
args="$args --privileged"

# Things get a little weird if this script is already running in a docker
# container. If we're already in a docker container then we assume it's set up
# to do docker-in-docker where we have access to a working `docker` command.
#
# If this is the case (we check via the presence of `/.dockerenv`) then we
# can't actually use the `--volume` argument. Typically we use `--volume` to
# efficiently share the build and source directory between this script and the
# container we're about to spawn. If we're inside docker already, though, the
# `--volume` argument maps the *host's* folder into the container we're about
# to spawn, when in fact we want the folder in this container itself. To work
# around this we use a recipe cribbed from
# https://circleci.com/docs/2.0/building-docker-images/#mounting-folders to
# create a temporary container with a volume. We then copy the entire source
# directory into this container, and use that copy in the container we're
# about to spawn. Finally, after the build finishes, we re-extract the object
# directory.
#
# Note that none of this is necessary if we're *not* in a docker-in-docker
# scenario. If this script is run on a bare-metal host then we simply mount a
# bunch of data directories as volumes to share as much data as possible. Note
# that we also use `LOCAL_USER_ID` (recognized in `src/ci/run.sh`) to ensure
# that files are all read/written as the same user as the bare-metal user.
if [ -f /.dockerenv ]; then
  docker create -v /checkout --name checkout alpine:3.4 /bin/true
  docker cp . checkout:/checkout
  args="$args --volumes-from checkout"
else
  args="$args --volume $root_dir:/checkout:ro"
  args="$args --volume $objdir:/checkout/obj"
  args="$args --volume $HOME/.cargo:/cargo"
  args="$args --volume $HOME/rustsrc:$HOME/rustsrc"
  args="$args --volume /tmp/toolstate:/tmp/toolstate"
  args="$args --env LOCAL_USER_ID=`id -u`"
fi
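
# Finally, run the CI entrypoint inside the rust-ci image we just prepared,
# forwarding the mounts and environment variables collected in $args.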
docker \
  run \
  --workdir /checkout/obj \
  --env SRC=/checkout \
  $args \
  --env CARGO_HOME=/cargo \
  --env BUILD_SOURCEBRANCHNAME \
  --env TOOLSTATE_REPO_ACCESS_TOKEN \
  --env TOOLSTATE_REPO \
  --env TOOLSTATE_PUBLISH \
  --env CI_JOB_NAME="${CI_JOB_NAME-$IMAGE}" \
  rust-ci \
  /checkout/src/ci/run.sh
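
# When running docker-in-docker, the object directory lives inside the
# temporary `checkout` container, so copy it back out once the build is done.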
if [ -f /.dockerenv ]; then
  rm -rf $objdir
  docker cp checkout:/checkout/obj $objdir
fi