From 430783a7bd15e99d27c2cdcd2f9addac872f6d9e Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Wed, 5 Feb 2025 10:07:42 -0500 Subject: [PATCH 1/5] overlay: sync the upper directory's timestamp to the lower's, too In addition to setting the (usually recently-created) upper directory's ownership and permissions to match those of the lower (the location of which wasn't known when the upper was created), set the timestamps to match, too. Signed-off-by: Nalin Dahyabhai --- pkg/overlay/overlay_linux.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/overlay/overlay_linux.go b/pkg/overlay/overlay_linux.go index 8b84d463584..b0dd608331b 100644 --- a/pkg/overlay/overlay_linux.go +++ b/pkg/overlay/overlay_linux.go @@ -63,6 +63,13 @@ func MountWithOptions(contentDir, source, dest string, opts *Options) (mount spe if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil { return mount, err } + times := []syscall.Timeval{ + syscall.NsecToTimeval(syscall.TimespecToNsec(stat.Atim)), + syscall.NsecToTimeval(syscall.TimespecToNsec(stat.Mtim)), + } + if err := syscall.Utimes(upperDir, times); err != nil { + return mount, err + } } overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir) } From 18108e7089a3b802f43ad3e689a726661b41a60d Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Wed, 5 Feb 2025 10:48:30 -0500 Subject: [PATCH 2/5] overlay: chown()ing the upper dir: ignore EINVAL on overflow IDs When chown()ing the upper directory to match the lower directory, if the ownership of the lower directory is the overflow UID:GID, ignore EINVAL. 
Signed-off-by: Nalin Dahyabhai --- pkg/overlay/overlay_linux.go | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/pkg/overlay/overlay_linux.go b/pkg/overlay/overlay_linux.go index b0dd608331b..dae0de9ab18 100644 --- a/pkg/overlay/overlay_linux.go +++ b/pkg/overlay/overlay_linux.go @@ -1,9 +1,11 @@ package overlay import ( + "errors" "fmt" "os" "path/filepath" + "strconv" "strings" "syscall" @@ -61,7 +63,22 @@ func MountWithOptions(contentDir, source, dest string, opts *Options) (mount spe } if stat, ok := st.Sys().(*syscall.Stat_t); ok { if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil { - return mount, err + if !errors.Is(err, syscall.EINVAL) { + return mount, err + } + overflowed := false + overflowUIDText, uerr := os.ReadFile("/proc/sys/kernel/overflowuid") + overflowGIDText, gerr := os.ReadFile("/proc/sys/kernel/overflowgid") + if uerr == nil && gerr == nil { + overflowUID, uerr := strconv.Atoi(strings.TrimSpace(string(overflowUIDText))) + overflowGID, gerr := strconv.Atoi(strings.TrimSpace(string(overflowGIDText))) + if uerr == nil && gerr == nil && int(stat.Uid) == overflowUID && int(stat.Gid) == overflowGID { + overflowed = true + } + } + if !overflowed { + return mount, err + } } times := []syscall.Timeval{ syscall.NsecToTimeval(syscall.TimespecToNsec(stat.Atim)), From da0e8a8c08283ca9d7a9ee4c6b93f209d84af5f7 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Tue, 4 Feb 2025 17:02:41 -0500 Subject: [PATCH 3/5] imagebuildah: use a longer-lived overlay over the build context Mount a read-write overlay directory over the build context directory to restore the ability to use it as a covert cache of sorts during the lifetime of the build, but in a way that still ensures that we don't modify the real build context directory. 
N.B.: builds where FROM in one stage referenced a relative path which had been written to a bind-mounted default build context directory by an earlier stage broke when we started making those bind mounts into overlays to prevent/discard modifications to that directory, and while this extends the lifetime of that overlay so that it's consistent throughout the build, those relative path names are still going to point to the wrong location. Since we need to determine SELinux labeling before mounting the overlay, go ahead and calculate the labels to use before creating the first builder, and remove the logic that had whichever stage thought it was the first one set them in its parent object for use by other stages, in what was probably a racey way. Signed-off-by: Nalin Dahyabhai --- imagebuildah/build.go | 15 ++++-- imagebuildah/build_linux.go | 86 ++++++++++++++++++++++++++++++++++ imagebuildah/build_notlinux.go | 19 ++++++++ imagebuildah/executor.go | 6 ++- imagebuildah/stage_executor.go | 14 ++---- internal/types.go | 1 + internal/volumes/volumes.go | 12 ++++- pkg/cli/build.go | 5 +- pkg/parse/parse.go | 22 ++++++++- run.go | 4 +- tests/bud.bats | 2 +- 11 files changed, 167 insertions(+), 19 deletions(-) create mode 100644 imagebuildah/build_linux.go create mode 100644 imagebuildah/build_notlinux.go diff --git a/imagebuildah/build.go b/imagebuildah/build.go index 607a99db9a2..f6321f56847 100644 --- a/imagebuildah/build.go +++ b/imagebuildah/build.go @@ -209,6 +209,15 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B options.AdditionalBuildContexts = make(map[string]*define.AdditionalBuildContext) } + contextDirectory, processLabel, mountLabel, usingContextOverlay, cleanupOverlay, err := platformSetupContextDirectoryOverlay(store, &options) + if err != nil { + return "", nil, fmt.Errorf("mounting an overlay over build context directory: %w", err) + } + defer cleanupOverlay() + if contextDirectory != "" { + options.ContextDirectory = 
contextDirectory + } + if len(options.Platforms) == 0 { options.Platforms = append(options.Platforms, struct{ OS, Arch, Variant string }{ OS: options.SystemContext.OSChoice, @@ -283,7 +292,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B platformOptions.ReportWriter = reporter platformOptions.Err = stderr } - thisID, thisRef, err := buildDockerfilesOnce(ctx, store, loggerPerPlatform, logPrefix, platformOptions, paths, files) + thisID, thisRef, err := buildDockerfilesOnce(ctx, store, loggerPerPlatform, logPrefix, platformOptions, paths, files, processLabel, mountLabel, usingContextOverlay) if err != nil { if errorContext := strings.TrimSpace(logPrefix); errorContext != "" { return fmt.Errorf("%s: %w", errorContext, err) @@ -394,7 +403,7 @@ func BuildDockerfiles(ctx context.Context, store storage.Store, options define.B return id, ref, nil } -func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logrus.Logger, logPrefix string, options define.BuildOptions, containerFiles []string, dockerfilecontents [][]byte) (string, reference.Canonical, error) { +func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logrus.Logger, logPrefix string, options define.BuildOptions, containerFiles []string, dockerfilecontents [][]byte, processLabel, mountLabel string, usingContextOverlay bool) (string, reference.Canonical, error) { mainNode, err := imagebuilder.ParseDockerfile(bytes.NewReader(dockerfilecontents[0])) if err != nil { return "", nil, fmt.Errorf("parsing main Dockerfile: %s: %w", containerFiles[0], err) @@ -445,7 +454,7 @@ func buildDockerfilesOnce(ctx context.Context, store storage.Store, logger *logr mainNode.Children = append(mainNode.Children, additionalNode.Children...) 
} - exec, err := newExecutor(logger, logPrefix, store, options, mainNode, containerFiles) + exec, err := newExecutor(logger, logPrefix, store, options, mainNode, containerFiles, processLabel, mountLabel, usingContextOverlay) if err != nil { return "", nil, fmt.Errorf("creating build executor: %w", err) } diff --git a/imagebuildah/build_linux.go b/imagebuildah/build_linux.go new file mode 100644 index 00000000000..3eb8ca172d6 --- /dev/null +++ b/imagebuildah/build_linux.go @@ -0,0 +1,86 @@ +package imagebuildah + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "slices" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/internal/tmpdir" + "github.com/containers/buildah/pkg/overlay" + "github.com/containers/storage" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// platformSetupContextDirectoryOverlay() may set up an overlay _over_ the +// build context directory, and sorts out labeling. Returns either the new +// location which should be used as the base build context or the old location; +// the process label and mount label for the build; a boolean value that +// indicates whether we did, in fact, mount an overlay; a cleanup function +// which should be called when the location is no longer needed (on success); +// and a non-nil fatal error if any of that failed. 
+func platformSetupContextDirectoryOverlay(store storage.Store, options *define.BuildOptions) (string, string, string, bool, func(), error) { + var succeeded bool + var tmpDir, contentDir string + cleanup := func() { + if contentDir != "" { + if err := overlay.CleanupContent(tmpDir); err != nil { + logrus.Debugf("cleaning up overlay scaffolding for build context directory: %v", err) + } + } + if tmpDir != "" { + if err := os.Remove(tmpDir); err != nil && !errors.Is(err, fs.ErrNotExist) { + logrus.Debugf("removing should-be-empty temporary directory %q: %v", tmpDir, err) + } + } + } + defer func() { + if !succeeded { + cleanup() + } + }() + // double-check that the context directory location is an absolute path + contextDirectoryAbsolute, err := filepath.Abs(options.ContextDirectory) + if err != nil { + return "", "", "", false, nil, fmt.Errorf("determining absolute path of %q: %w", options.ContextDirectory, err) + } + var st unix.Stat_t + if err := unix.Stat(contextDirectoryAbsolute, &st); err != nil { + return "", "", "", false, nil, fmt.Errorf("checking ownership of %q: %w", options.ContextDirectory, err) + } + // figure out the labeling situation + processLabel, mountLabel, err := label.InitLabels(options.CommonBuildOpts.LabelOpts) + if err != nil { + return "", "", "", false, nil, err + } + // create a temporary directory + tmpDir, err = os.MkdirTemp(tmpdir.GetTempDir(), "buildah-context-") + if err != nil { + return "", "", "", false, nil, fmt.Errorf("creating temporary directory: %w", err) + } + // create the scaffolding for an overlay mount under it + contentDir, err = overlay.TempDir(tmpDir, 0, 0) + if err != nil { + return "", "", "", false, nil, fmt.Errorf("creating overlay scaffolding for build context directory: %w", err) + } + // mount an overlay that uses it as a lower + overlayOptions := overlay.Options{ + GraphOpts: slices.Clone(store.GraphOptions()), + ForceMount: true, + MountLabel: mountLabel, + } + targetDir := filepath.Join(contentDir, 
"target") + contextDirMountSpec, err := overlay.MountWithOptions(contentDir, contextDirectoryAbsolute, targetDir, &overlayOptions) + if err != nil { + return "", "", "", false, nil, fmt.Errorf("creating overlay scaffolding for build context directory: %w", err) + } + // going forward, pretend that the merged directory is the actual context directory + logrus.Debugf("mounted an overlay at %q over %q", contextDirMountSpec.Source, contextDirectoryAbsolute) + succeeded = true + return contextDirMountSpec.Source, processLabel, mountLabel, true, cleanup, nil +} diff --git a/imagebuildah/build_notlinux.go b/imagebuildah/build_notlinux.go new file mode 100644 index 00000000000..a5169215e28 --- /dev/null +++ b/imagebuildah/build_notlinux.go @@ -0,0 +1,19 @@ +//go:build !linux + +package imagebuildah + +import ( + "github.com/containers/buildah/define" + "github.com/containers/storage" +) + +// platformSetupContextDirectoryOverlay() may set up an overlay _over_ the +// build context directory, and sorts out labeling. Returns either the new +// location which should be used as the base build context or the old location; +// the process label and mount label for the build; a boolean value that +// indicates whether we did, in fact, mount an overlay; a cleanup function +// which should be called when the location is no longer needed (on success); +// and a non-nil fatal error if any of that failed. 
+func platformSetupContextDirectoryOverlay(store storage.Store, options *define.BuildOptions) (string, string, string, bool, func(), error) { + return options.ContextDirectory, "", "", false, func() {}, nil +} diff --git a/imagebuildah/executor.go b/imagebuildah/executor.go index 1c7f7fd56ae..1cc619d3064 100644 --- a/imagebuildah/executor.go +++ b/imagebuildah/executor.go @@ -69,6 +69,7 @@ type Executor struct { stages map[string]*StageExecutor store storage.Store contextDir string + contextDirWritesAreDiscarded bool pullPolicy define.PullPolicy registry string ignoreUnrecognizedInstructions bool @@ -173,7 +174,7 @@ type imageTypeAndHistoryAndDiffIDs struct { } // newExecutor creates a new instance of the imagebuilder.Executor interface. -func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node, containerFiles []string) (*Executor, error) { +func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, options define.BuildOptions, mainNode *parser.Node, containerFiles []string, processLabel, mountLabel string, contextWritesDiscarded bool) (*Executor, error) { defaultContainerConfig, err := config.Default() if err != nil { return nil, fmt.Errorf("failed to get container config: %w", err) @@ -238,6 +239,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o stages: make(map[string]*StageExecutor), store: store, contextDir: options.ContextDirectory, + contextDirWritesAreDiscarded: contextWritesDiscarded, excludes: excludes, groupAdd: options.GroupAdd, ignoreFile: options.IgnoreFile, @@ -273,6 +275,8 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o squash: options.Squash, labels: slices.Clone(options.Labels), layerLabels: slices.Clone(options.LayerLabels), + processLabel: processLabel, + mountLabel: mountLabel, annotations: slices.Clone(options.Annotations), layers: options.Layers, noHostname: 
options.CommonBuildOpts.NoHostname, diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go index 0f34029179f..2d7368ad2cc 100644 --- a/imagebuildah/stage_executor.go +++ b/imagebuildah/stage_executor.go @@ -609,6 +609,10 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co // items in the passed-in mounts list which include a "from=" value. func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]internal.StageMountDetails, error) { stageMountPoints := make(map[string]internal.StageMountDetails) + stageMountPoints[""] = internal.StageMountDetails{ + MountPoint: s.executor.contextDir, + IsWritesDiscardedOverlay: s.executor.contextDirWritesAreDiscarded, + } for _, flag := range mountList { if strings.Contains(flag, "from") { tokens := strings.Split(flag, ",") @@ -1014,16 +1018,6 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo return nil, fmt.Errorf("creating build container: %w", err) } - // If executor's ProcessLabel and MountLabel is empty means this is the first stage - // Make sure we share first stage's ProcessLabel and MountLabel with all other subsequent stages - // Doing this will ensure and one stage in same build can mount another stage even if `selinux` - // is enabled. 
- - if s.executor.mountLabel == "" && s.executor.processLabel == "" { - s.executor.mountLabel = builder.MountLabel - s.executor.processLabel = builder.ProcessLabel - } - if initializeIBConfig { volumes := map[string]struct{}{} for _, v := range builder.Volumes() { diff --git a/internal/types.go b/internal/types.go index 5b14285cde1..1a66183b683 100644 --- a/internal/types.go +++ b/internal/types.go @@ -17,4 +17,5 @@ type StageMountDetails struct { IsImage bool // true if the mountpoint is an image's rootfs IsAdditionalBuildContext bool // true if the mountpoint is an additional build context MountPoint string // mountpoint of the stage or image's root directory or path of the additional build context + IsWritesDiscardedOverlay bool // this is an overlay that discards writes } diff --git a/internal/volumes/volumes.go b/internal/volumes/volumes.go index 941d6f9539c..90a4ce092d5 100644 --- a/internal/volumes/volumes.go +++ b/internal/volumes/volumes.go @@ -141,6 +141,7 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st setDest := "" bindNonRecursive := false fromWhere := "" + skipOverlay := false for _, val := range args { argName, argValue, hasArgValue := strings.Cut(val, "=") @@ -248,6 +249,7 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st if additionalMountPoints != nil { if val, ok := additionalMountPoints[fromWhere]; ok { mountPoint = val.MountPoint + skipOverlay = val.IsWritesDiscardedOverlay } } // if mountPoint of image was not found in additionalMap @@ -273,6 +275,14 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st }() } contextDir = mountPoint + } else { + // special case an additional mount point for "" as shorthand for "preferred location of the default build context" + if additionalMountPoints != nil { + if val, ok := additionalMountPoints[""]; ok { + contextDir = val.MountPoint + skipOverlay = val.IsWritesDiscardedOverlay + } + } } // buildkit parity: 
default bind option must be `rbind` @@ -328,7 +338,7 @@ func GetBindMount(sys *types.SystemContext, args []string, contextDir string, st } overlayDir := "" - if mountedImage != "" || mountIsReadWrite(newMount) { + if !skipOverlay && (mountedImage != "" || mountIsReadWrite(newMount)) { if newMount, overlayDir, err = convertToOverlay(newMount, store, mountLabel, tmpDir, 0, 0); err != nil { return newMount, "", "", "", err } diff --git a/pkg/cli/build.go b/pkg/cli/build.go index 5437f1d3669..c04ee918c6f 100644 --- a/pkg/cli/build.go +++ b/pkg/cli/build.go @@ -113,7 +113,10 @@ func GenBuildOptions(c *cobra.Command, inputArgs []string, iopts BuildOptions) ( if c.Flag("build-context").Changed { for _, contextString := range iopts.BuildContext { av := strings.SplitN(contextString, "=", 2) - if len(av) > 1 { + // the key should be non-empty: we use "" as internal + // shorthand for the default build context when there's + // an overlay mounted over it + if len(av) > 1 && av[0] != "" { parseAdditionalBuildContext, err := parse.GetAdditionalBuildContext(av[1]) if err != nil { return options, nil, nil, fmt.Errorf("while parsing additional build context: %w", err) diff --git a/pkg/parse/parse.go b/pkg/parse/parse.go index 15b520cfa11..cc00906cc6b 100644 --- a/pkg/parse/parse.go +++ b/pkg/parse/parse.go @@ -57,7 +57,12 @@ const ( BuildahCacheDir = "buildah-cache" ) -var errInvalidSecretSyntax = errors.New("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV][,type=file|env]") +var ( + errInvalidSecretSyntax = errors.New("incorrect secret flag format: should be --secret id=foo,src=bar[,env=ENV][,type=file|env]") + errInvalidBuildContextPathname = errors.New(`invalid build context path ""`) + errInvalidBuildContextImage = errors.New(`invalid build context image name ""`) + errInvalidBuildContextURL = errors.New(`invalid build context image URL ""`) +) // RepoNamesToNamedReferences parse the raw string to Named reference func 
RepoNamesToNamedReferences(destList []string) ([]reference.Named, error) { @@ -208,19 +213,34 @@ func CommonBuildOptionsFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name // GetAdditionalBuildContext consumes raw string and returns parsed AdditionalBuildContext func GetAdditionalBuildContext(value string) (define.AdditionalBuildContext, error) { + if value == "" { + return define.AdditionalBuildContext{}, errInvalidBuildContextPathname + } ret := define.AdditionalBuildContext{IsURL: false, IsImage: false, Value: value} if strings.HasPrefix(value, "docker-image://") { ret.IsImage = true ret.Value = strings.TrimPrefix(value, "docker-image://") + if ret.Value == "" { + return define.AdditionalBuildContext{}, errInvalidBuildContextImage + } } else if strings.HasPrefix(value, "container-image://") { ret.IsImage = true ret.Value = strings.TrimPrefix(value, "container-image://") + if ret.Value == "" { + return define.AdditionalBuildContext{}, errInvalidBuildContextImage + } } else if strings.HasPrefix(value, "docker://") { ret.IsImage = true ret.Value = strings.TrimPrefix(value, "docker://") + if ret.Value == "" { + return define.AdditionalBuildContext{}, errInvalidBuildContextImage + } } else if strings.HasPrefix(value, "http://") || strings.HasPrefix(value, "https://") { ret.IsImage = false ret.IsURL = true + if strings.TrimPrefix(ret.Value, "http://") == "" || strings.TrimPrefix(ret.Value, "https://") == "" { + return define.AdditionalBuildContext{}, errInvalidBuildContextURL + } } else { path, err := filepath.Abs(value) if err != nil { diff --git a/run.go b/run.go index 3988bf511ec..681eb6ad9b9 100644 --- a/run.go +++ b/run.go @@ -157,7 +157,9 @@ type RunOptions struct { SSHSources map[string]*sshagent.Source `json:"-"` // RunMounts are unparsed mounts to be added for this run RunMounts []string - // Map of stages and container mountpoint if any from stage executor + // Map of already-mounted stages, images, and container mountpoints + // which entries in 
`RunMounts` might be referring to. If a value for + // the "" key is present, it points to the context directory. StageMountPoints map[string]internal.StageMountDetails // IDs of mounted images to be unmounted before returning // Deprecated: before 1.39, these images would not be consistently diff --git a/tests/bud.bats b/tests/bud.bats index b93de284e5b..8da6eee6899 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -674,7 +674,7 @@ symlink(subdir)" _prefetch busybox run_buildah 125 build -t testbud3 $WITH_POLICY_JSON $BUDFILES/dockerignore3 expect_output --substring 'building.*"COPY test1.txt /upload/test1.txt".*no such file or directory' - expect_output --substring $(realpath "$BUDFILES/dockerignore3/.dockerignore") + expect_output --substring 'filtered out using /[^ ]*/.dockerignore' } @test "bud with .dockerignore #4" { From 0cf64aceb1241b68e6eaf732eb166e8f811b8cf9 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Tue, 4 Feb 2025 18:21:21 -0500 Subject: [PATCH 4/5] imagebuildah: try to rein in use of transport names in image specs Try to limit which image transports we accept in stages, and scope the ones that use path names to the context directory. At some point anything that isn't an image ID or pullable spec should start being rejected. 
Signed-off-by: Nalin Dahyabhai --- imagebuildah/imagebuildah_test.go | 29 +++ imagebuildah/stage_executor.go | 308 +++++++++++++++++++++- imagebuildah/stage_executor_test.go | 385 ++++++++++++++++++++++++++++ tests/bud.bats | 49 ++++ 4 files changed, 770 insertions(+), 1 deletion(-) create mode 100644 imagebuildah/imagebuildah_test.go diff --git a/imagebuildah/imagebuildah_test.go b/imagebuildah/imagebuildah_test.go new file mode 100644 index 00000000000..ae9ac5a0e5b --- /dev/null +++ b/imagebuildah/imagebuildah_test.go @@ -0,0 +1,29 @@ +package imagebuildah + +import ( + "flag" + "os" + "testing" + + "github.com/sirupsen/logrus" +) + +func TestMain(m *testing.M) { + var logLevel string + debug := false + if InitReexec() { + return + } + flag.BoolVar(&debug, "debug", false, "turn on debug logging") + flag.StringVar(&logLevel, "log-level", "error", "log level") + flag.Parse() + level, err := logrus.ParseLevel(logLevel) + if err != nil { + logrus.Fatalf("error parsing log level %q: %v", logLevel, err) + } + if debug && level < logrus.DebugLevel { + level = logrus.DebugLevel + } + logrus.SetLevel(level) + os.Exit(m.Run()) +} diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go index 2d7368ad2cc..4edc12c2d16 100644 --- a/imagebuildah/stage_executor.go +++ b/imagebuildah/stage_executor.go @@ -1,6 +1,7 @@ package imagebuildah import ( + "archive/tar" "context" "crypto/sha256" "errors" @@ -26,13 +27,21 @@ import ( "github.com/containers/buildah/util" config "github.com/containers/common/pkg/config" cp "github.com/containers/image/v5/copy" + directoryTransport "github.com/containers/image/v5/directory" + dockerTransport "github.com/containers/image/v5/docker" imagedocker "github.com/containers/image/v5/docker" + dockerArchiveTransport "github.com/containers/image/v5/docker/archive" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + ociArchiveTransport "github.com/containers/image/v5/oci/archive" + 
ociLayoutTransport "github.com/containers/image/v5/oci/layout" + openshiftTransport "github.com/containers/image/v5/openshift" + "github.com/containers/image/v5/pkg/compression" is "github.com/containers/image/v5/storage" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/unshare" docker "github.com/fsouza/go-dockerclient" @@ -926,6 +935,290 @@ func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error { return errors.New(err) } +// sanitizeImageReference limits which image transports we'll accept. For +// those it accepts which refer to filesystem objects, where relative path +// names are evaluated relative to "contextDir", it will create a copy of the +// original image, under "tmpdir", which contains no symbolic links. +func (s *StageExecutor) sanitizeFrom(from, tmpdir string) (newFrom string, err error) { + transportName, restOfImageName, maybeHasTransportName := strings.Cut(from, ":") + if !maybeHasTransportName || transports.Get(transportName) == nil { + if _, err = reference.ParseNormalizedNamed(from); err == nil { + // this is a normal-looking image-in-a-registry-or-named-in-storage name + return from, nil + } + if img, err := s.executor.store.Image(from); img != nil && err == nil { + // this is an image ID + return from, nil + } + return "", fmt.Errorf("parsing image name %q: %w", from, err) + } + // TODO: drop this part and just return an error... someday + return sanitizeImageName(transportName, restOfImageName, s.executor.contextDir, tmpdir) +} + +// sanitizeImageReference limits which image transports we'll accept. For +// those it accepts which refer to filesystem objects, where relative path +// names are evaluated relative to "contextDir", it will create a copy of the +// original image, under "tmpdir", which contains no symbolic links. 
+func sanitizeImageName(transportName, restOfImageName, contextDir, tmpdir string) (newFrom string, err error) { + succeeded := false + isEmbeddedArchive := false + newImageDirectory := "" + var tw *tar.Writer + var archiveSource string + var imageArchive io.ReadCloser + switch transportName { + case dockerTransport.Transport.Name(), "docker-daemon", openshiftTransport.Transport.Name(): // ok, these are all remote + return transportName + ":" + restOfImageName, nil + case dockerArchiveTransport.Transport.Name(), ociArchiveTransport.Transport.Name(): // these are, basically, tarballs + // these take the form path[:stuff] + transportRef := restOfImageName + parts := strings.Split(transportRef, ":") + archiveSource = parts[0] + refLeftover := parts[1:] + // create a temporary file to use as our new archive + var f *os.File + if f, err = os.CreateTemp(tmpdir, "buildah-archive-"); err != nil { + return "", fmt.Errorf("creating temporary copy of base image: %w", err) + } + defer func() { + name := f.Name() + if err := f.Close(); err != nil { + logrus.Warn(err) + } + if !succeeded { + if err := os.Remove(name); err != nil { + logrus.Warn(err) + } + } + }() + tw = tar.NewWriter(f) + defer func() { + if err := tw.Close(); err != nil { + logrus.Warn(err) + } + }() + // archive the archive + isEmbeddedArchive = true + for { + // try to make sure the archiver doesn't get thrown by relative prefixes + if strings.HasPrefix(archiveSource, "/") && archiveSource != "/" { + archiveSource = strings.TrimPrefix(archiveSource, "/") + continue + } else if strings.HasPrefix(archiveSource, "./") && archiveSource != "./" { + archiveSource = strings.TrimPrefix(archiveSource, "./") + continue + } + break + } + tarOptions := &archive.TarOptions{ + IncludeFiles: []string{path.Clean(archiveSource)}, + ExcludePatterns: []string{"**"}, + } + imageArchive, err = chrootarchive.Tar(contextDir, tarOptions, contextDir) + // generate the new reference using the file + newFrom = transportName + ":" + 
strings.Join(append([]string{f.Name()}, refLeftover...), ":") + case ociLayoutTransport.Transport.Name(): // this is a directory tree + // this takes the form path[:stuff] + transportRef := restOfImageName + parts := strings.Split(transportRef, ":") + archiveSource = parts[0] + refLeftover := parts[1:] + // create a new directory to use as our new layout directory + newImageDirectory, err = os.MkdirTemp(tmpdir, "buildah-layout-") + if err != nil { + return "", fmt.Errorf("creating temporary copy of base image: %w", err) + } + defer func() { + if !succeeded { + if err := os.RemoveAll(newImageDirectory); err != nil { + logrus.Warn(err) + } + } + }() + // archive the directory + tarOptions := &archive.TarOptions{} + imageArchive, err = chrootarchive.Tar(filepath.Join(contextDir, archiveSource), tarOptions, contextDir) + // generate the new reference using the directory + newFrom = transportName + ":" + strings.Join(append([]string{newImageDirectory}, refLeftover...), ":") + case directoryTransport.Transport.Name(): // this is also a directory tree + // this takes the form of just a path + transportRef := restOfImageName + // create a new directory to use as our new image directory + newImageDirectory, err = os.MkdirTemp(tmpdir, "buildah-dir-") + if err != nil { + return "", fmt.Errorf("creating temporary copy of base image: %w", err) + } + defer func() { + if !succeeded { + if err := os.RemoveAll(newImageDirectory); err != nil { + logrus.Warn(err) + } + } + }() + // archive the directory + archiveSource = transportRef + tarOptions := &archive.TarOptions{} + imageArchive, err = chrootarchive.Tar(filepath.Join(contextDir, archiveSource), tarOptions, contextDir) + // generate the new reference using the directory + newFrom = transportName + ":" + newImageDirectory + default: + return "", fmt.Errorf("unexpected container image transport %q", transportName) + } + if err != nil { + return "", fmt.Errorf("error archiving source at %q under %q", archiveSource, contextDir) + } 
+ defer func() { + if err := imageArchive.Close(); err != nil { + logrus.Warn(err) + } + }() + decompressed, _, err := compression.AutoDecompress(imageArchive) + if err != nil { + return "", fmt.Errorf("error decompressing-if-necessary archive %q", archiveSource) + } + defer func() { + if err := decompressed.Close(); err != nil { + logrus.Warn(err) + } + }() + tr := tar.NewReader(decompressed) + seenEntries := make(map[string]struct{}) + hdr, err := tr.Next() + for hdr != nil { + // if the archive we're parsing is expected to have an archive as one of its contents, + // assume it's the first (and hopefully, only) item + if isEmbeddedArchive { + decompressed, _, decompressErr := compression.AutoDecompress(tr) + if decompressErr != nil { + return "", fmt.Errorf("error decompressing-if-necessary archive %q: %w", archiveSource, decompressErr) + } + defer func() { + if err := decompressed.Close(); err != nil { + logrus.Warn(err) + } + }() + tr = tar.NewReader(decompressed) + hdr, err = tr.Next() + isEmbeddedArchive = false + continue + } + if tw == nil { + var createErr error + // write directly under newImageDirectory + hdr.Name = path.Clean("/" + hdr.Name) + newName := filepath.Join(newImageDirectory, filepath.FromSlash(hdr.Name)) + switch hdr.Typeflag { + case tar.TypeDir: + createErr = os.Mkdir(newName, 0o700) + case tar.TypeReg: + createErr = func() error { + var f *os.File + f, err := os.OpenFile(newName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o700) + if err != nil { + return fmt.Errorf("copying content for %q in base image: %w", hdr.Name, err) + } + n, err := io.Copy(f, tr) + if err != nil { + f.Close() + return fmt.Errorf("copying content for %q in base image: %w", hdr.Name, err) + } + if n != hdr.Size { + f.Close() + return fmt.Errorf("short write writing %q in base image: %d != %d", hdr.Name, n, hdr.Size) + } + return f.Close() + }() + case tar.TypeLink: + linkName := path.Clean("/" + hdr.Linkname) + oldName := filepath.Join(newImageDirectory, 
filepath.FromSlash(linkName)) + createErr = os.Link(oldName, newName) + case tar.TypeSymlink: // convert it to a hard link + var oldName string + if !path.IsAbs(hdr.Linkname) { + linkName := path.Join("/"+path.Dir(hdr.Name), hdr.Linkname) + oldName = filepath.Join(newImageDirectory, filepath.FromSlash(linkName)) + } else { + oldName = filepath.Join(newImageDirectory, filepath.FromSlash(hdr.Linkname)) + } + createErr = os.Link(oldName, newName) + default: + return "", fmt.Errorf("extracting archive of base image: disallowed entry type %c", hdr.Typeflag) + } + if createErr != nil { + return "", fmt.Errorf("creating %q: %w", newName, createErr) + } + } else { + // write to the archive file + hdr.Name = path.Clean("/" + hdr.Name) + if hdr.Name != "/" { + hdr.Name = strings.TrimPrefix(hdr.Name, "/") + } + seenEntries[hdr.Name] = struct{}{} + switch hdr.Typeflag { + case tar.TypeDir, tar.TypeReg, tar.TypeLink: + if err := tw.WriteHeader(hdr); err != nil { + return "", fmt.Errorf("rewriting archive of base image: %w", err) + } + case tar.TypeSymlink: + // resolve the target of the symlink + linkname := hdr.Linkname + if !path.IsAbs(linkname) { + linkname = path.Join("/"+path.Dir(hdr.Name), linkname) + } + linkname = path.Clean(linkname) + if linkname != "/" { + linkname = strings.TrimPrefix(linkname, "/") + } + if _, validTarget := seenEntries[linkname]; !validTarget { + return "", fmt.Errorf("invalid symbolic link from %q to %q (%q) in archive", hdr.Name, hdr.Linkname, linkname) + } + rel, err := filepath.Rel(filepath.FromSlash(path.Dir("/"+hdr.Name)), filepath.FromSlash("/"+linkname)) + if err != nil { + return "", fmt.Errorf("computing relative path from %q to %q in archive", hdr.Name, linkname) + } + rel = filepath.ToSlash(rel) + if transportName == ociArchiveTransport.Transport.Name() { + // rewrite as a hard link for oci-archive, which gets + // extracted into a temporary directory to be read, but + // not for docker-archive, which is read directly from + // the 
unextracted archive file, in a way which doesn't + // understand hard links + hdr.Typeflag = tar.TypeLink + hdr.Linkname = linkname + } else { + // ensure it's a relative symlink inside of the tree + // for docker-archive + hdr.Linkname = rel + } + if err := tw.WriteHeader(hdr); err != nil { + return "", fmt.Errorf("rewriting archive of base image: %w", err) + } + default: + return "", fmt.Errorf("rewriting archive of base image: disallowed entry type %c", hdr.Typeflag) + } + if hdr.Typeflag == tar.TypeReg { + n, err := io.Copy(tw, tr) + if err != nil { + return "", fmt.Errorf("copying content for %q in base image: %w", hdr.Name, err) + } + if n != hdr.Size { + return "", fmt.Errorf("short write writing %q in base image: %d != %d", hdr.Name, n, hdr.Size) + } + } + } + hdr, err = tr.Next() + } + if isEmbeddedArchive { + logrus.Warnf("expected to have archived a copy of %q, missed it", archiveSource) + } + if err != nil && !errors.Is(err, io.EOF) { + return "", fmt.Errorf("reading archive of base image: %w", err) + } + succeeded = true + return newFrom, nil +} + // prepare creates a working container based on the specified image, or if one // isn't specified, the first argument passed to the first FROM instruction we // can find in the stage's parsed tree. 
@@ -942,6 +1235,19 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo } from = base } + sanitizedDir, err := os.MkdirTemp(tmpdir.GetTempDir(), "buildah-context-") + if err != nil { + return nil, fmt.Errorf("creating temporary directory: %w", err) + } + defer func() { + if err := os.RemoveAll(sanitizedDir); err != nil { + logrus.Warn(err) + } + }() + sanitizedFrom, err := s.sanitizeFrom(from, tmpdir.GetTempDir()) + if err != nil { + return nil, fmt.Errorf("invalid base image specification %q: %w", from, err) + } displayFrom := from if ib.Platform != "" { displayFrom = "--platform=" + ib.Platform + " " + displayFrom @@ -980,7 +1286,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo builderOptions := buildah.BuilderOptions{ Args: ib.Args, - FromImage: from, + FromImage: sanitizedFrom, GroupAdd: s.executor.groupAdd, PullPolicy: pullPolicy, ContainerSuffix: s.executor.containerSuffix, diff --git a/imagebuildah/stage_executor_test.go b/imagebuildah/stage_executor_test.go index 57cf7da706a..4fa58cae8d7 100644 --- a/imagebuildah/stage_executor_test.go +++ b/imagebuildah/stage_executor_test.go @@ -1,10 +1,27 @@ package imagebuildah import ( + "archive/tar" + "bytes" + "context" "encoding/json" + "errors" + "io" + "io/fs" + "os" + "path" + "path/filepath" "strconv" + "strings" "testing" + imageCopy "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -99,3 +116,371 @@ func TestHistoryEntriesEqual(t *testing.T) { }) } } + +func TestSanitizeImageName(t *testing.T) { + const badLinkTarget = 
"../../../../../../../../../../../etc/passwd" + + // prepare to copy from the layout to other types of destinations + ctx := context.Background() + // sys := &types.SystemContext{} + policy, err := signature.NewPolicyFromBytes([]byte(`{"default":[{"type":"insecureAcceptAnything"}]}`)) + require.NoErrorf(t, err, "creating a policy") + policyContext, err := signature.NewPolicyContext(policy) + require.NoErrorf(t, err, "creating a policy context") + + scanDir := func(dir string) (bool, error) { + // look for anything that isn't a plain directory or file + foundSuspiciousStuff := false + err := filepath.WalkDir(dir, func(_ string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.Type().IsRegular() && !d.Type().IsDir() { + foundSuspiciousStuff = true + } + return nil + }) + return foundSuspiciousStuff, err + } + scanArchive := func(file string) (bool, error) { + // look for anything that isn't a plain directory, file, or hard link + foundSuspiciousStuff := false + f, err := os.Open(file) + if err != nil { + return foundSuspiciousStuff, err + } + dc, _, err := compression.AutoDecompress(f) + if err != nil { + return foundSuspiciousStuff, err + } + tr := tar.NewReader(dc) + hdr, err := tr.Next() + for hdr != nil { + if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeDir && hdr.Typeflag != tar.TypeLink { + foundSuspiciousStuff = true + break + } + if hdr.Typeflag == tar.TypeLink { + if strings.TrimPrefix(path.Clean("/"+hdr.Linkname), "/") != hdr.Linkname { + foundSuspiciousStuff = true + break + } + } + hdr, err = tr.Next() + } + if err := dc.Close(); err != nil { + if err2 := f.Close(); err2 != nil { + err = errors.Join(err, err2) + } + return foundSuspiciousStuff, err + } + if err := f.Close(); err != nil { + return foundSuspiciousStuff, err + } + if err != nil && !errors.Is(err, io.EOF) { + return foundSuspiciousStuff, err + } + return foundSuspiciousStuff, nil + } + generateLayout := func(t *testing.T, layoutDir string) (uncompressed 
digest.Digest, compressed digest.Digest) { + t.Helper() + var layer bytes.Buffer + layerDigester := digest.Canonical.Digester() + wc, err := compression.CompressStream(io.MultiWriter(&layer, layerDigester.Hash()), compression.Gzip, nil) + require.NoErrorf(t, err, "compressing empty layer") + diffID := digest.Canonical.Digester() + tw := tar.NewWriter(io.MultiWriter(wc, diffID.Hash())) + require.NoErrorf(t, tw.Close(), "flushing empty layer") + require.NoErrorf(t, wc.Close(), "flushing compressor") + diffDigest := diffID.Digest() + layerBytes := layer.Bytes() + layerDigest := layerDigester.Digest() + ociConfig := v1.Image{ + RootFS: v1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{diffDigest}, + }, + } + configBytes, err := json.Marshal(&ociConfig) + require.NoError(t, err, "encoding oci config") + configDigest := digest.Canonical.FromBytes(configBytes) + ociManifest := v1.Manifest{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + MediaType: v1.MediaTypeImageManifest, + Config: v1.Descriptor{ + MediaType: v1.MediaTypeImageConfig, + Digest: configDigest, + Size: int64(len(configBytes)), + }, + Layers: []v1.Descriptor{{ + MediaType: v1.MediaTypeImageLayerGzip, + Digest: layerDigest, + Size: int64(len(layerBytes)), + }}, + } + manifestBytes, err := json.Marshal(&ociManifest) + require.NoError(t, err, "encoding oci manifest") + manifestDigest := digest.Canonical.FromBytes(manifestBytes) + index := v1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + MediaType: v1.MediaTypeImageIndex, + Manifests: []v1.Descriptor{{ + MediaType: v1.MediaTypeImageManifest, + Digest: manifestDigest, + Size: int64(len(manifestBytes)), + Annotations: map[string]string{ + v1.AnnotationRefName: "latest", + }, + }}, + } + indexBytes, err := json.Marshal(&index) + require.NoError(t, err, "encoding oci index") + layoutBytes, err := json.Marshal(&v1.ImageLayout{Version: v1.ImageLayoutVersion}) + require.NoError(t, err, "encoding oci layout") + blobsDir := 
filepath.Join(layoutDir, v1.ImageBlobsDir) + require.NoError(t, os.MkdirAll(blobsDir, 0o700), "creating blobs subdirectory") + blobsDir = filepath.Join(blobsDir, digest.Canonical.String()) + require.NoErrorf(t, os.MkdirAll(blobsDir, 0o700), "creating %q/%q subdirectory", v1.ImageBlobsDir, digest.Canonical.String()) + require.NoError(t, os.WriteFile(filepath.Join(blobsDir, layerDigest.Encoded()), layerBytes, 0o600), "writing layer") + sus, err := scanArchive(filepath.Join(blobsDir, layerDigest.Encoded())) + require.NoError(t, err, "unexpected error scanning empty layer") + require.False(t, sus, "empty layer should pass scan") + require.NoError(t, os.WriteFile(filepath.Join(blobsDir, configDigest.Encoded()), configBytes, 0o600), "writing config") + require.NoError(t, os.WriteFile(filepath.Join(blobsDir, manifestDigest.Encoded()), manifestBytes, 0o600), "writing manifest") + require.NoError(t, os.WriteFile(filepath.Join(layoutDir, v1.ImageIndexFile), indexBytes, 0o600), "writing index") + require.NoError(t, os.WriteFile(filepath.Join(layoutDir, v1.ImageLayoutFile), layoutBytes, 0o600), "writing layout") + sus, err = scanDir(layoutDir) + require.NoError(t, err, "scanning layout directory") + require.False(t, sus, "check on layout directory") + return diffDigest, layerDigest + } + mutateDirectory := func(t *testing.T, parentdir, input, replace string) string { + t.Helper() + tmpdir, err := os.MkdirTemp(parentdir, "directory") + require.NoError(t, err, "creating mutated directory") + found := false + err = filepath.Walk(input, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := filepath.Rel(input, path) + if err != nil { + return err + } + if rel == replace { + found = true + return os.Symlink(badLinkTarget, filepath.Join(tmpdir, rel)) + } + if info.IsDir() { + if rel == "." 
{ + return nil + } + return os.Mkdir(filepath.Join(tmpdir, rel), info.Mode()) + } + switch info.Mode() & os.ModeType { + case os.ModeSymlink: + target, err := os.Readlink(path) + require.NoErrorf(t, err, "reading link target from %q", path) + return os.Symlink(target, filepath.Join(tmpdir, rel)) + case os.ModeCharDevice | os.ModeDevice: + t.Fatalf("unexpected character device %q", path) + case os.ModeDevice: + t.Fatalf("unexpected block device %q", path) + case os.ModeNamedPipe: + t.Fatalf("unexpected named pipe %q", path) + case os.ModeSocket: + t.Fatalf("unexpected socket %q", path) + case os.ModeIrregular: + t.Fatalf("unexpected irregularity %q", path) + case os.ModeDir: + t.Fatalf("unexpected directory %q after we should have created it", path) + default: + inf, err := os.Open(path) + require.NoErrorf(t, err, "opening %q to read it", path) + outpath := filepath.Join(tmpdir, rel) + outf, err := os.OpenFile(outpath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode()) + require.NoErrorf(t, err, "opening %q to write it", outpath) + n, err := io.Copy(outf, inf) + require.NoErrorf(t, err, "copying contents of %q", path) + require.Equalf(t, info.Size(), n, "unexpected write length to %q (%d != %d)", outpath, n, info.Size()) + require.NoErrorf(t, outf.Close(), "closing new file %q", outpath) + require.NoErrorf(t, inf.Close(), "closing old file %q", path) + } + return nil + }) + require.NoError(t, err, "mutating directory") + require.Truef(t, found, "did not replace %q", replace) + return tmpdir + } + mutateArchive := func(t *testing.T, parentdir, input, replace string) string { + t.Helper() + f, err := os.Open(input) + require.NoError(t, err, "opening archive for mutating") + tmpfile, err := os.CreateTemp(parentdir, "archive") + require.NoError(t, err, "creating mutated archive") + tr := tar.NewReader(f) + tw := tar.NewWriter(tmpfile) + found := false + hdr, err := tr.Next() + for hdr != nil { + if hdr.Name == replace { + err := tw.WriteHeader(&tar.Header{ + Name: 
hdr.Name, + Typeflag: tar.TypeSymlink, + Linkname: badLinkTarget, + }) + require.NoErrorf(t, err, "copying contents of %q", hdr.Name) + found = true + } else { + err := tw.WriteHeader(hdr) + require.NoErrorf(t, err, "copying contents of %q", hdr.Name) + _, err = io.Copy(tw, tr) + require.NoErrorf(t, err, "copying contents of %q", hdr.Name) + } + if err != nil { + break + } + hdr, err = tr.Next() + } + if err != nil && !errors.Is(err, io.EOF) { + require.NoError(t, err, "when finished reading archive for mutating") + } + require.NoError(t, f.Close(), "closing archive for mutating") + require.NoError(t, tw.Close(), "finishing up writing mutated archive") + tmpfileName := tmpfile.Name() + require.NoError(t, tmpfile.Close(), "closing mutated archive") + require.Truef(t, found, "did not replace %q", replace) + return tmpfileName + } + requireImageReadable := func(t *testing.T, imageName string) { + t.Helper() + tmpdir := t.TempDir() + ref, err := alltransports.ParseImageName(imageName) + require.NoErrorf(t, err, "parsing image reference %q", imageName) + dir, err := alltransports.ParseImageName("dir:" + tmpdir) + require.NoErrorf(t, err, "parsing image reference %q", "dir:"+tmpdir) + _, err = imageCopy.Image(ctx, policyContext, dir, ref, nil) + require.NoErrorf(t, err, "copying image %q, which should have been successful", transports.ImageName(ref)) + } + + contextDir := t.TempDir() + subdirsDir := filepath.Join(contextDir, "subdirs") + require.NoError(t, os.Mkdir(subdirsDir, 0o700), "creating subdirectory directory") + archiveDir := filepath.Join(contextDir, "archives") + require.NoError(t, os.Mkdir(archiveDir, 0o700), "creating archives directory") + + // create a normal layout somewhere under contextDir + goodLayout, err := os.MkdirTemp(subdirsDir, "goodlayout") + require.NoErrorf(t, err, "creating a known-good OCI layout") + diffDigest, blobDigest := generateLayout(t, goodLayout) + goodLayoutRef, err := alltransports.ParseImageName("oci:" + goodLayout) + 
require.NoErrorf(t, err, "parsing image reference to known-good OCI layout") + sus, err := scanDir(goodLayout) + require.NoError(t, err, "scanning known-good OCI layout") + assert.False(t, sus, "check on known-good OCI layout") + + // copy to a directory + goodDir, err := os.MkdirTemp(subdirsDir, "gooddir") + require.NoErrorf(t, err, "creating a temporary directory to store a good directory") + goodDirRef, err := alltransports.ParseImageName("dir:" + goodDir) + require.NoErrorf(t, err, "parsing image reference to good directory") + _, err = imageCopy.Image(ctx, policyContext, goodDirRef, goodLayoutRef, nil) + require.NoError(t, err, "copying an acceptable OCI layout to a directory") + + // scan the directory + sus, err = scanDir(goodDir) + require.NoError(t, err, "scanning good directory") + assert.False(t, sus, "check on good directory") + + // copy to a docker-archive + goodDockerArchiveFile, err := os.CreateTemp(archiveDir, "gooddockerarchive") + require.NoErrorf(t, err, "creating a temporary file to store a good docker archive") + goodDockerArchive := goodDockerArchiveFile.Name() + goodDockerArchiveRef, err := alltransports.ParseImageName("docker-archive:" + goodDockerArchive) + require.NoErrorf(t, err, "parsing image reference to good docker archive") + require.NoError(t, err, goodDockerArchiveFile.Close()) + require.NoError(t, err, os.Remove(goodDockerArchive)) + _, err = imageCopy.Image(ctx, policyContext, goodDockerArchiveRef, goodLayoutRef, nil) + require.NoError(t, err, "copying an acceptable OCI layout to a docker archive") + + // scan the docker-archive + sus, err = scanArchive(goodDockerArchive) + require.NoError(t, err, "scanning good docker archive") + assert.True(t, sus, "check on good docker archive") // there are symlinks in there + + // copy to an oci-archive + goodOCIArchiveFile, err := os.CreateTemp(archiveDir, "goodociarchive") + require.NoErrorf(t, err, "creating a temporary file to store a good oci archive") + goodOCIArchive := 
goodOCIArchiveFile.Name() + goodOCIArchiveRef, err := alltransports.ParseImageName("oci-archive:" + goodOCIArchive) + require.NoErrorf(t, err, "parsing image reference to good oci archive") + require.NoError(t, err, goodOCIArchiveFile.Close()) + require.NoError(t, err, os.Remove(goodOCIArchive)) + _, err = imageCopy.Image(ctx, policyContext, goodOCIArchiveRef, goodLayoutRef, nil) + require.NoError(t, err, "copying an acceptable OCI layout to an OCI archive") + + // scan the oci-archive + sus, err = scanArchive(goodOCIArchive) + require.NoError(t, err, "scanning good OCI archive") + assert.False(t, sus, "check on good OCI archive") + + // make sure the original versions can all be read without error + requireImageReadable(t, transports.ImageName(goodLayoutRef)) + requireImageReadable(t, transports.ImageName(goodDirRef)) + requireImageReadable(t, transports.ImageName(goodOCIArchiveRef)) + requireImageReadable(t, transports.ImageName(goodDockerArchiveRef)) + + // sanitize them all + goodLayoutRel, err := filepath.Rel(contextDir, goodLayout) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", goodLayout) + newGoodLayout, err := sanitizeImageName("oci", goodLayoutRel, contextDir, t.TempDir()) + require.NoError(t, err, "sanitizing good OCI layout") + goodDirRel, err := filepath.Rel(contextDir, goodDir) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", goodDir) + newGoodDir, err := sanitizeImageName("dir", goodDirRel, contextDir, t.TempDir()) + require.NoError(t, err, "sanitizing good directory") + goodOCIArchiveRel, err := filepath.Rel(contextDir, goodOCIArchive) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", goodOCIArchive) + newGoodOCIArchive, err := sanitizeImageName("oci-archive", goodOCIArchiveRel, contextDir, t.TempDir()) + require.NoError(t, err, "sanitizing good OCI archive") + goodDockerArchiveRel, err := filepath.Rel(contextDir, goodDockerArchive) + require.NoErrorf(t, err, 
"converting absolute path %q to a relative one", goodDockerArchive) + newGoodDockerArchive, err := sanitizeImageName("docker-archive", goodDockerArchiveRel, contextDir, t.TempDir()) + require.NoError(t, err, "sanitizing good docker archive") + + // make sure the sanitized versions can all be read without error + requireImageReadable(t, newGoodLayout) + requireImageReadable(t, newGoodDir) + requireImageReadable(t, newGoodOCIArchive) + requireImageReadable(t, newGoodDockerArchive) + + // mutate them all + badLayout := mutateDirectory(t, contextDir, goodLayout, filepath.Join(v1.ImageBlobsDir, blobDigest.Algorithm().String(), blobDigest.Encoded())) + badLayoutRel, err := filepath.Rel(contextDir, badLayout) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", badLayout) + _, err = sanitizeImageName("oci", badLayoutRel, contextDir, t.TempDir()) + require.ErrorIs(t, err, os.ErrNotExist, "sanitizing bad OCI layout") + + badDir := mutateDirectory(t, contextDir, goodDir, blobDigest.Encoded()) + badDirRel, err := filepath.Rel(contextDir, badDir) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", badDir) + _, err = sanitizeImageName("dir", badDirRel, contextDir, t.TempDir()) + require.ErrorIs(t, err, os.ErrNotExist, "sanitizing bad directory") + + badOCIArchive := mutateArchive(t, contextDir, goodOCIArchive, filepath.Join(v1.ImageBlobsDir, blobDigest.Algorithm().String(), blobDigest.Encoded())) + + badOCIArchiveRel, err := filepath.Rel(contextDir, badOCIArchive) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", badOCIArchive) + _, err = sanitizeImageName("oci-archive", badOCIArchiveRel, contextDir, t.TempDir()) + require.ErrorContains(t, err, "invalid symbolic link", "sanitizing bad oci archive") + + badDockerArchive := mutateArchive(t, contextDir, goodDockerArchive, diffDigest.Encoded()+".tar") + badDockerArchiveRel, err := filepath.Rel(contextDir, badDockerArchive) + require.NoErrorf(t, err, 
"converting absolute path %q to a relative one", badDockerArchive) + _, err = sanitizeImageName("docker-archive", badDockerArchiveRel, contextDir, t.TempDir()) + require.ErrorContains(t, err, "invalid symbolic link", "sanitizing bad docker archive") +} diff --git a/tests/bud.bats b/tests/bud.bats index 8da6eee6899..9a8790f72e1 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -7341,3 +7341,52 @@ EOF find ${TEST_SCRATCH_DIR}/buildcontext -ls expect_output "" "build should not be able to write to build context" } + +@test "build-oci-archive-switch" { + base=busybox + _prefetch $base + run_buildah from -q $base + run_buildah inspect --format '{{.FromImageID}}' "$output" + imageID="$output" + mkdir -p ${TEST_SCRATCH_DIR}/buildcontext + copy containers-storage:$imageID oci-archive:${TEST_SCRATCH_DIR}/buildcontext/source-oci-archive.tar + copy containers-storage:$imageID oci:${TEST_SCRATCH_DIR}/buildcontext/source-oci + copy containers-storage:$imageID docker-archive:${TEST_SCRATCH_DIR}/buildcontext/source-docker-archive.tar + copy containers-storage:$imageID dir:${TEST_SCRATCH_DIR}/buildcontext/source-dir + pushd ${TEST_SCRATCH_DIR} + cat > ${TEST_SCRATCH_DIR}/buildcontext/Dockerfile << EOF + FROM $base + RUN --mount=type=bind,rw,target=/bc mkdir /bc/oci-archive /bc/oci /bc/docker-archive /bc/dir + RUN --mount=type=bind,rw,target=/bc cp /bc/source-oci-archive.tar /bc/oci-archive/archive.tar + RUN --mount=type=bind,rw,target=/bc cp -a /bc/source-oci/* /bc/oci/ + RUN --mount=type=bind,rw,target=/bc cp /bc/source-docker-archive.tar /bc/docker-archive/archive.tar + RUN --mount=type=bind,rw,target=/bc cp -a /bc/source-dir/* /bc/dir/ + FROM oci-archive:oci-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 : + FROM oci-archive:/oci-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 : + FROM oci-archive:./oci-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 
--mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 : + FROM oci:oci/ + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 : + FROM oci:/oci/ + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 : + FROM oci:./oci + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 : + FROM oci:oci + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 : + FROM docker-archive:docker-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 : + FROM docker-archive:/docker-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 --mount=type=bind,from=8,target=/var/tmp/8 : + FROM 
docker-archive:./docker-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 --mount=type=bind,from=8,target=/var/tmp/8 --mount=type=bind,from=9,target=/var/tmp/9 : + FROM dir:dir + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 --mount=type=bind,from=8,target=/var/tmp/8 --mount=type=bind,from=9,target=/var/tmp/9 --mount=type=bind,from=10,target=/var/tmp/10 : + FROM dir:/dir + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 --mount=type=bind,from=8,target=/var/tmp/8 --mount=type=bind,from=9,target=/var/tmp/9 --mount=type=bind,from=10,target=/var/tmp/10 --mount=type=bind,from=11,target=/var/tmp/11 : + FROM dir:./dir/ + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 --mount=type=bind,from=8,target=/var/tmp/8 --mount=type=bind,from=9,target=/var/tmp/9 
--mount=type=bind,from=10,target=/var/tmp/10 --mount=type=bind,from=11,target=/var/tmp/11 --mount=type=bind,from=12,target=/var/tmp/12 : +EOF + run_buildah build ${TEST_SCRATCH_DIR}/buildcontext +} From c7c4667687b2422c7b86c23c68e0b4810457bb85 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Tue, 18 Feb 2025 17:40:26 -0500 Subject: [PATCH 5/5] imagebuildah.StageExecutor.runStageMountPoints(): correct an error Add a missing "not" to an error message. Signed-off-by: Nalin Dahyabhai --- imagebuildah/stage_executor.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go index 4edc12c2d16..8faa61b6a52 100644 --- a/imagebuildah/stage_executor.go +++ b/imagebuildah/stage_executor.go @@ -546,7 +546,7 @@ func (s *StageExecutor) performCopy(excludes []string, copies ...imagebuilder.Co } contextDir = mountPoint } - // Original behaviour of buildah still stays true for COPY irrespective of additional context. + // This isn't --from the build context directory, so we don't want to force everything to 0:0 preserveOwnership = true copyExcludes = excludes } else { @@ -651,7 +651,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte if additionalBuildContext.IsImage { mountPoint, err := s.getImageRootfs(s.ctx, additionalBuildContext.Value) if err != nil { - return nil, fmt.Errorf("%s from=%s: image found with that name", flag, from) + return nil, fmt.Errorf("%s from=%s: image not found with that name", flag, from) } // The `from` in stageMountPoints should point // to `mountPoint` replaced from additional