diff --git a/imagebuildah/imagebuildah_test.go b/imagebuildah/imagebuildah_test.go new file mode 100644 index 0000000000..ae9ac5a0e5 --- /dev/null +++ b/imagebuildah/imagebuildah_test.go @@ -0,0 +1,29 @@ +package imagebuildah + +import ( + "flag" + "os" + "testing" + + "github.com/sirupsen/logrus" +) + +func TestMain(m *testing.M) { + var logLevel string + debug := false + if InitReexec() { + return + } + flag.BoolVar(&debug, "debug", false, "turn on debug logging") + flag.StringVar(&logLevel, "log-level", "error", "log level") + flag.Parse() + level, err := logrus.ParseLevel(logLevel) + if err != nil { + logrus.Fatalf("error parsing log level %q: %v", logLevel, err) + } + if debug && level < logrus.DebugLevel { + level = logrus.DebugLevel + } + logrus.SetLevel(level) + os.Exit(m.Run()) +} diff --git a/imagebuildah/stage_executor.go b/imagebuildah/stage_executor.go index 195475d2fd..425f4fbe61 100644 --- a/imagebuildah/stage_executor.go +++ b/imagebuildah/stage_executor.go @@ -1,6 +1,7 @@ package imagebuildah import ( + "archive/tar" "context" "crypto/sha256" "errors" @@ -26,13 +27,21 @@ import ( "github.com/containers/buildah/util" config "github.com/containers/common/pkg/config" cp "github.com/containers/image/v5/copy" + directoryTransport "github.com/containers/image/v5/directory" + dockerTransport "github.com/containers/image/v5/docker" imagedocker "github.com/containers/image/v5/docker" + dockerArchiveTransport "github.com/containers/image/v5/docker/archive" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/manifest" + ociArchiveTransport "github.com/containers/image/v5/oci/archive" + ociLayoutTransport "github.com/containers/image/v5/oci/layout" + openshiftTransport "github.com/containers/image/v5/openshift" + "github.com/containers/image/v5/pkg/compression" is "github.com/containers/image/v5/storage" "github.com/containers/image/v5/transports" "github.com/containers/image/v5/types" "github.com/containers/storage" + 
"github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/chrootarchive" "github.com/containers/storage/pkg/unshare" docker "github.com/fsouza/go-dockerclient" @@ -920,6 +929,290 @@ func (s *StageExecutor) UnrecognizedInstruction(step *imagebuilder.Step) error { return errors.New(err) } +// sanitizeImageReference limits which image transports we'll accept. For +// those it accepts which refer to filesystem objects, where relative path +// names are evaluated relative to "contextDir", it will create a copy of the +// original image, under "tmpdir", which contains no symbolic links. +func (s *StageExecutor) sanitizeFrom(from, tmpdir string) (newFrom string, err error) { + transportName, restOfImageName, maybeHasTransportName := strings.Cut(from, ":") + if !maybeHasTransportName || transports.Get(transportName) == nil { + if _, err = reference.ParseNormalizedNamed(from); err == nil { + // this is a normal-looking image-in-a-registry-or-named-in-storage name + return from, nil + } + if img, err := s.executor.store.Image(from); img != nil && err == nil { + // this is an image ID + return from, nil + } + return "", fmt.Errorf("parsing image name %q: %w", from, err) + } + // TODO: drop this part and just return an error... someday + return sanitizeImageName(transportName, restOfImageName, s.executor.contextDir, tmpdir) +} + +// sanitizeImageReference limits which image transports we'll accept. For +// those it accepts which refer to filesystem objects, where relative path +// names are evaluated relative to "contextDir", it will create a copy of the +// original image, under "tmpdir", which contains no symbolic links. 
+func sanitizeImageName(transportName, restOfImageName, contextDir, tmpdir string) (newFrom string, err error) { + succeeded := false + isEmbeddedArchive := false + newImageDirectory := "" + var tw *tar.Writer + var archiveSource string + var imageArchive io.ReadCloser + switch transportName { + case dockerTransport.Transport.Name(), "docker-daemon", openshiftTransport.Transport.Name(): // ok, these are all remote + return transportName + ":" + restOfImageName, nil + case dockerArchiveTransport.Transport.Name(), ociArchiveTransport.Transport.Name(): // these are, basically, tarballs + // these take the form path[:stuff] + transportRef := restOfImageName + parts := strings.Split(transportRef, ":") + archiveSource = parts[0] + refLeftover := parts[1:] + // create a temporary file to use as our new archive + var f *os.File + if f, err = os.CreateTemp(tmpdir, "buildah-archive-"); err != nil { + return "", fmt.Errorf("creating temporary copy of base image: %w", err) + } + defer func() { + name := f.Name() + if err := f.Close(); err != nil { + logrus.Warn(err) + } + if !succeeded { + if err := os.Remove(name); err != nil { + logrus.Warn(err) + } + } + }() + tw = tar.NewWriter(f) + defer func() { + if err := tw.Close(); err != nil { + logrus.Warn(err) + } + }() + // archive the archive + isEmbeddedArchive = true + for { + // try to make sure the archiver doesn't get thrown by relative prefixes + if strings.HasPrefix(archiveSource, "/") && archiveSource != "/" { + archiveSource = strings.TrimPrefix(archiveSource, "/") + continue + } else if strings.HasPrefix(archiveSource, "./") && archiveSource != "./" { + archiveSource = strings.TrimPrefix(archiveSource, "./") + continue + } + break + } + tarOptions := &archive.TarOptions{ + IncludeFiles: []string{path.Clean(archiveSource)}, + ExcludePatterns: []string{"**"}, + } + imageArchive, err = chrootarchive.Tar(contextDir, tarOptions, contextDir) + // generate the new reference using the file + newFrom = transportName + ":" + 
strings.Join(append([]string{f.Name()}, refLeftover...), ":") + case ociLayoutTransport.Transport.Name(): // this is a directory tree + // this takes the form path[:stuff] + transportRef := restOfImageName + parts := strings.Split(transportRef, ":") + archiveSource = parts[0] + refLeftover := parts[1:] + // create a new directory to use as our new layout directory + newImageDirectory, err = os.MkdirTemp(tmpdir, "buildah-layout-") + if err != nil { + return "", fmt.Errorf("creating temporary copy of base image: %w", err) + } + defer func() { + if !succeeded { + if err := os.RemoveAll(newImageDirectory); err != nil { + logrus.Warn(err) + } + } + }() + // archive the directory + tarOptions := &archive.TarOptions{} + imageArchive, err = chrootarchive.Tar(filepath.Join(contextDir, archiveSource), tarOptions, contextDir) + // generate the new reference using the directory + newFrom = transportName + ":" + strings.Join(append([]string{newImageDirectory}, refLeftover...), ":") + case directoryTransport.Transport.Name(): // this is also a directory tree + // this takes the form of just a path + transportRef := restOfImageName + // create a new directory to use as our new image directory + newImageDirectory, err = os.MkdirTemp(tmpdir, "buildah-dir-") + if err != nil { + return "", fmt.Errorf("creating temporary copy of base image: %w", err) + } + defer func() { + if !succeeded { + if err := os.RemoveAll(newImageDirectory); err != nil { + logrus.Warn(err) + } + } + }() + // archive the directory + archiveSource = transportRef + tarOptions := &archive.TarOptions{} + imageArchive, err = chrootarchive.Tar(filepath.Join(contextDir, archiveSource), tarOptions, contextDir) + // generate the new reference using the directory + newFrom = transportName + ":" + newImageDirectory + default: + return "", fmt.Errorf("unexpected container image transport %q", transportName) + } + if err != nil { + return "", fmt.Errorf("error archiving source at %q under %q", archiveSource, contextDir) + } 
+ defer func() { + if err := imageArchive.Close(); err != nil { + logrus.Warn(err) + } + }() + decompressed, _, err := compression.AutoDecompress(imageArchive) + if err != nil { + return "", fmt.Errorf("error decompressing-if-necessary archive %q", archiveSource) + } + defer func() { + if err := decompressed.Close(); err != nil { + logrus.Warn(err) + } + }() + tr := tar.NewReader(decompressed) + seenEntries := make(map[string]struct{}) + hdr, err := tr.Next() + for hdr != nil { + // if the archive we're parsing is expected to have an archive as one of its contents, + // assume it's the first (and hopefully, only) item + if isEmbeddedArchive { + decompressed, _, decompressErr := compression.AutoDecompress(tr) + if decompressErr != nil { + return "", fmt.Errorf("error decompressing-if-necessary archive %q: %w", archiveSource, decompressErr) + } + defer func() { + if err := decompressed.Close(); err != nil { + logrus.Warn(err) + } + }() + tr = tar.NewReader(decompressed) + hdr, err = tr.Next() + isEmbeddedArchive = false + continue + } + if tw == nil { + var createErr error + // write directly under newImageDirectory + hdr.Name = path.Clean("/" + hdr.Name) + newName := filepath.Join(newImageDirectory, filepath.ToSlash(hdr.Name)) + switch hdr.Typeflag { + case tar.TypeDir: + createErr = os.Mkdir(newName, 0o700) + case tar.TypeReg: + createErr = func() error { + var f *os.File + f, err := os.OpenFile(newName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o700) + if err != nil { + return fmt.Errorf("copying content for %q in base image: %w", hdr.Name, err) + } + n, err := io.Copy(f, tr) + if err != nil { + f.Close() + return fmt.Errorf("copying content for %q in base image: %w", hdr.Name, err) + } + if n != hdr.Size { + f.Close() + return fmt.Errorf("short write writing %q in base image: %d != %d", hdr.Name, n, hdr.Size) + } + return f.Close() + }() + case tar.TypeLink: + linkName := path.Clean("/" + hdr.Linkname) + oldName := filepath.Join(newImageDirectory, 
filepath.ToSlash(linkName)) + createErr = os.Link(oldName, newName) + case tar.TypeSymlink: // convert it to a hard link + var oldName string + if !path.IsAbs(hdr.Linkname) { + linkName := path.Join("/"+path.Dir(hdr.Name), hdr.Linkname) + oldName = filepath.Join(newImageDirectory, filepath.ToSlash(linkName)) + } else { + oldName = filepath.Join(newImageDirectory, filepath.FromSlash(hdr.Linkname)) + } + createErr = os.Link(oldName, newName) + default: + return "", fmt.Errorf("extracting archive of base image: disallowed entry type %c", hdr.Typeflag) + } + if createErr != nil { + return "", fmt.Errorf("creating %q: %w", newName, createErr) + } + } else { + // write to the archive file + hdr.Name = path.Clean("/" + hdr.Name) + if hdr.Name != "/" { + hdr.Name = strings.TrimPrefix(hdr.Name, "/") + } + seenEntries[hdr.Name] = struct{}{} + switch hdr.Typeflag { + case tar.TypeDir, tar.TypeReg, tar.TypeLink: + if err := tw.WriteHeader(hdr); err != nil { + return "", fmt.Errorf("rewriting archive of base image: %w", err) + } + case tar.TypeSymlink: + // resolve the target of the symlink + linkname := hdr.Linkname + if !path.IsAbs(linkname) { + linkname = path.Join("/"+path.Dir(hdr.Name), linkname) + } + linkname = path.Clean(linkname) + if linkname != "/" { + linkname = strings.TrimPrefix(linkname, "/") + } + if _, validTarget := seenEntries[linkname]; !validTarget { + return "", fmt.Errorf("invalid symbolic link from %q to %q (%q) in archive", hdr.Name, hdr.Linkname, linkname) + } + rel, err := filepath.Rel(filepath.FromSlash(path.Dir("/"+hdr.Name)), filepath.FromSlash("/"+linkname)) + if err != nil { + return "", fmt.Errorf("computing relative path from %q to %q in archive", hdr.Name, linkname) + } + rel = filepath.ToSlash(rel) + if transportName == ociArchiveTransport.Transport.Name() { + // rewrite as a hard link for oci-archive, which gets + // extracted into a temporary directory to be read, but + // not for docker-archive, which is read directly from + // the 
unextracted archive file, in a way which doesn't + // understand hard links + hdr.Typeflag = tar.TypeLink + hdr.Linkname = linkname + } else { + // ensure it's a relative symlink inside of the tree + // for docker-archive + hdr.Linkname = rel + } + if err := tw.WriteHeader(hdr); err != nil { + return "", fmt.Errorf("rewriting archive of base image: %w", err) + } + default: + return "", fmt.Errorf("rewriting archive of base image: disallowed entry type %c", hdr.Typeflag) + } + if hdr.Typeflag == tar.TypeReg { + n, err := io.Copy(tw, tr) + if err != nil { + return "", fmt.Errorf("copying content for %q in base image: %w", hdr.Name, err) + } + if n != hdr.Size { + return "", fmt.Errorf("short write writing %q in base image: %d != %d", hdr.Name, n, hdr.Size) + } + } + } + hdr, err = tr.Next() + } + if isEmbeddedArchive { + logrus.Warnf("expected to have archived a copy of %q, missed it", archiveSource) + } + if err != nil && !errors.Is(err, io.EOF) { + return "", fmt.Errorf("reading archive of base image: %w", err) + } + succeeded = true + return newFrom, nil +} + // prepare creates a working container based on the specified image, or if one // isn't specified, the first argument passed to the first FROM instruction we // can find in the stage's parsed tree. 
@@ -936,6 +1229,19 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo } from = base } + sanitizedDir, err := os.MkdirTemp(tmpdir.GetTempDir(), "buildah-context-") + if err != nil { + return nil, fmt.Errorf("creating temporary directory: %w", err) + } + defer func() { + if err := os.RemoveAll(sanitizedDir); err != nil { + logrus.Warn(err) + } + }() + sanitizedFrom, err := s.sanitizeFrom(from, sanitizedDir) + if err != nil { + return nil, fmt.Errorf("invalid base image specification %q: %w", from, err) + } displayFrom := from if ib.Platform != "" { displayFrom = "--platform=" + ib.Platform + " " + displayFrom } @@ -974,7 +1280,7 @@ func (s *StageExecutor) prepare(ctx context.Context, from string, initializeIBCo builderOptions := buildah.BuilderOptions{ Args: ib.Args, - FromImage: from, + FromImage: sanitizedFrom, GroupAdd: s.executor.groupAdd, PullPolicy: pullPolicy, ContainerSuffix: s.executor.containerSuffix, diff --git a/imagebuildah/stage_executor_test.go b/imagebuildah/stage_executor_test.go index 57cf7da706..4fa58cae8d 100644 --- a/imagebuildah/stage_executor_test.go +++ b/imagebuildah/stage_executor_test.go @@ -1,10 +1,27 @@ package imagebuildah import ( + "archive/tar" + "bytes" + "context" "encoding/json" + "errors" + "io" + "io/fs" + "os" + "path" + "path/filepath" "strconv" + "strings" "testing" + imageCopy "github.com/containers/image/v5/copy" + "github.com/containers/image/v5/pkg/compression" + "github.com/containers/image/v5/signature" + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/transports/alltransports" + digest "github.com/opencontainers/go-digest" + imgspec "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -99,3 +116,371 @@ func TestHistoryEntriesEqual(t *testing.T) { }) } } + +func TestSanitizeImageName(t *testing.T) { + const badLinkTarget = 
"../../../../../../../../../../../etc/passwd" + + // prepare to copy from the layout to other types of destinations + ctx := context.Background() + // sys := &types.SystemContext{} + policy, err := signature.NewPolicyFromBytes([]byte(`{"default":[{"type":"insecureAcceptAnything"}]}`)) + require.NoErrorf(t, err, "creating a policy") + policyContext, err := signature.NewPolicyContext(policy) + require.NoErrorf(t, err, "creating a policy context") + + scanDir := func(dir string) (bool, error) { + // look for anything that isn't a plain directory or file + foundSuspiciousStuff := false + err := filepath.WalkDir(dir, func(_ string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.Type().IsRegular() && !d.Type().IsDir() { + foundSuspiciousStuff = true + } + return nil + }) + return foundSuspiciousStuff, err + } + scanArchive := func(file string) (bool, error) { + // look for anything that isn't a plain directory, file, or hard link + foundSuspiciousStuff := false + f, err := os.Open(file) + if err != nil { + return foundSuspiciousStuff, err + } + dc, _, err := compression.AutoDecompress(f) + if err != nil { + return foundSuspiciousStuff, err + } + tr := tar.NewReader(dc) + hdr, err := tr.Next() + for hdr != nil { + if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeDir && hdr.Typeflag != tar.TypeLink { + foundSuspiciousStuff = true + break + } + if hdr.Typeflag == tar.TypeLink { + if strings.TrimPrefix(path.Clean("/"+hdr.Linkname), "/") != hdr.Linkname { + foundSuspiciousStuff = true + break + } + } + hdr, err = tr.Next() + } + if err := dc.Close(); err != nil { + if err2 := f.Close(); err2 != nil { + err = errors.Join(err, err2) + } + return foundSuspiciousStuff, err + } + if err := f.Close(); err != nil { + return foundSuspiciousStuff, err + } + if err != nil && !errors.Is(err, io.EOF) { + return foundSuspiciousStuff, err + } + return foundSuspiciousStuff, nil + } + generateLayout := func(t *testing.T, layoutDir string) (uncompressed 
digest.Digest, compressed digest.Digest) { + t.Helper() + var layer bytes.Buffer + layerDigester := digest.Canonical.Digester() + wc, err := compression.CompressStream(io.MultiWriter(&layer, layerDigester.Hash()), compression.Gzip, nil) + require.NoErrorf(t, err, "compressing empty layer") + diffID := digest.Canonical.Digester() + tw := tar.NewWriter(io.MultiWriter(wc, diffID.Hash())) + require.NoErrorf(t, tw.Close(), "flushing empty layer") + require.NoErrorf(t, wc.Close(), "flushing compressor") + diffDigest := diffID.Digest() + layerBytes := layer.Bytes() + layerDigest := layerDigester.Digest() + ociConfig := v1.Image{ + RootFS: v1.RootFS{ + Type: "layers", + DiffIDs: []digest.Digest{diffDigest}, + }, + } + configBytes, err := json.Marshal(&ociConfig) + require.NoError(t, err, "encoding oci config") + configDigest := digest.Canonical.FromBytes(configBytes) + ociManifest := v1.Manifest{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + MediaType: v1.MediaTypeImageManifest, + Config: v1.Descriptor{ + MediaType: v1.MediaTypeImageConfig, + Digest: configDigest, + Size: int64(len(configBytes)), + }, + Layers: []v1.Descriptor{{ + MediaType: v1.MediaTypeImageLayerGzip, + Digest: layerDigest, + Size: int64(len(layerBytes)), + }}, + } + manifestBytes, err := json.Marshal(&ociManifest) + require.NoError(t, err, "encoding oci manifest") + manifestDigest := digest.Canonical.FromBytes(manifestBytes) + index := v1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + MediaType: v1.MediaTypeImageIndex, + Manifests: []v1.Descriptor{{ + MediaType: v1.MediaTypeImageManifest, + Digest: manifestDigest, + Size: int64(len(manifestBytes)), + Annotations: map[string]string{ + v1.AnnotationRefName: "latest", + }, + }}, + } + indexBytes, err := json.Marshal(&index) + require.NoError(t, err, "encoding oci index") + layoutBytes, err := json.Marshal(&v1.ImageLayout{Version: v1.ImageLayoutVersion}) + require.NoError(t, err, "encoding oci layout") + blobsDir := 
filepath.Join(layoutDir, v1.ImageBlobsDir) + require.NoError(t, os.MkdirAll(blobsDir, 0o700), "creating blobs subdirectory") + blobsDir = filepath.Join(blobsDir, digest.Canonical.String()) + require.NoErrorf(t, os.MkdirAll(blobsDir, 0o700), "creating %q/%q subdirectory", v1.ImageBlobsDir, digest.Canonical.String()) + require.NoError(t, os.WriteFile(filepath.Join(blobsDir, layerDigest.Encoded()), layerBytes, 0o600), "writing layer") + sus, err := scanArchive(filepath.Join(blobsDir, layerDigest.Encoded())) + require.NoError(t, err, "unexpected error scanning empty layer") + require.False(t, sus, "empty layer should pass scan") + require.NoError(t, os.WriteFile(filepath.Join(blobsDir, configDigest.Encoded()), configBytes, 0o600), "writing config") + require.NoError(t, os.WriteFile(filepath.Join(blobsDir, manifestDigest.Encoded()), manifestBytes, 0o600), "writing manifest") + require.NoError(t, os.WriteFile(filepath.Join(layoutDir, v1.ImageIndexFile), indexBytes, 0o600), "writing index") + require.NoError(t, os.WriteFile(filepath.Join(layoutDir, v1.ImageLayoutFile), layoutBytes, 0o600), "writing layout") + sus, err = scanDir(layoutDir) + require.NoError(t, err, "scanning layout directory") + require.False(t, sus, "check on layout directory") + return diffDigest, layerDigest + } + mutateDirectory := func(t *testing.T, parentdir, input, replace string) string { + t.Helper() + tmpdir, err := os.MkdirTemp(parentdir, "directory") + require.NoError(t, err, "creating mutated directory") + found := false + err = filepath.Walk(input, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := filepath.Rel(input, path) + if err != nil { + return err + } + if rel == replace { + found = true + return os.Symlink(badLinkTarget, filepath.Join(tmpdir, rel)) + } + if info.IsDir() { + if rel == "." 
{ + return nil + } + return os.Mkdir(filepath.Join(tmpdir, rel), info.Mode()) + } + switch info.Mode() & os.ModeType { + case os.ModeSymlink: + target, err := os.Readlink(path) + require.NoErrorf(t, err, "reading link target from %q", path) + return os.Symlink(target, filepath.Join(tmpdir, rel)) + case os.ModeCharDevice | os.ModeDevice: + t.Fatalf("unexpected character device %q", path) + case os.ModeDevice: + t.Fatalf("unexpected block device %q", path) + case os.ModeNamedPipe: + t.Fatalf("unexpected named pipe %q", path) + case os.ModeSocket: + t.Fatalf("unexpected socket %q", path) + case os.ModeIrregular: + t.Fatalf("unexpected irregularity %q", path) + case os.ModeDir: + t.Fatalf("unexpected directory %q after we should have created it", path) + default: + inf, err := os.Open(path) + require.NoErrorf(t, err, "opening %q to read it", path) + outpath := filepath.Join(tmpdir, rel) + outf, err := os.OpenFile(outpath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode()) + require.NoErrorf(t, err, "opening %q to write it", outpath) + n, err := io.Copy(outf, inf) + require.NoErrorf(t, err, "copying contents of %q", path) + require.Equalf(t, info.Size(), n, "unexpected write length to %q (%d != %d)", outpath, n, info.Size()) + require.NoErrorf(t, outf.Close(), "closing new file %q", outpath) + require.NoErrorf(t, inf.Close(), "closing old file %q", path) + } + return nil + }) + require.NoError(t, err, "mutating directory") + require.Truef(t, found, "did not replace %q", replace) + return tmpdir + } + mutateArchive := func(t *testing.T, parentdir, input, replace string) string { + t.Helper() + f, err := os.Open(input) + require.NoError(t, err, "opening archive for mutating") + tmpfile, err := os.CreateTemp(parentdir, "archive") + require.NoError(t, err, "creating mutated archive") + tr := tar.NewReader(f) + tw := tar.NewWriter(tmpfile) + found := false + hdr, err := tr.Next() + for hdr != nil { + if hdr.Name == replace { + err := tw.WriteHeader(&tar.Header{ + Name: 
hdr.Name, + Typeflag: tar.TypeSymlink, + Linkname: badLinkTarget, + }) + require.NoErrorf(t, err, "copying contents of %q", hdr.Name) + found = true + } else { + err := tw.WriteHeader(hdr) + require.NoErrorf(t, err, "copying contents of %q", hdr.Name) + _, err = io.Copy(tw, tr) + require.NoErrorf(t, err, "copying contents of %q", hdr.Name) + } + if err != nil { + break + } + hdr, err = tr.Next() + } + if err != nil && !errors.Is(err, io.EOF) { + require.NoError(t, err, "when finished reading archive for mutating") + } + require.NoError(t, f.Close(), "closing archive for mutating") + require.NoError(t, tw.Close(), "finishing up writing mutated archive") + tmpfileName := tmpfile.Name() + require.NoError(t, tmpfile.Close(), "closing mutated archive") + require.Truef(t, found, "did not replace %q", replace) + return tmpfileName + } + requireImageReadable := func(t *testing.T, imageName string) { + t.Helper() + tmpdir := t.TempDir() + ref, err := alltransports.ParseImageName(imageName) + require.NoErrorf(t, err, "parsing image reference %q", imageName) + dir, err := alltransports.ParseImageName("dir:" + tmpdir) + require.NoErrorf(t, err, "parsing image reference %q", "dir:"+tmpdir) + _, err = imageCopy.Image(ctx, policyContext, dir, ref, nil) + require.NoErrorf(t, err, "copying image %q, which should have been successful", transports.ImageName(ref)) + } + + contextDir := t.TempDir() + subdirsDir := filepath.Join(contextDir, "subdirs") + require.NoError(t, os.Mkdir(subdirsDir, 0o700), "creating subdirectory directory") + archiveDir := filepath.Join(contextDir, "archives") + require.NoError(t, os.Mkdir(archiveDir, 0o700), "creating archives directory") + + // create a normal layout somewhere under contextDir + goodLayout, err := os.MkdirTemp(subdirsDir, "goodlayout") + require.NoErrorf(t, err, "creating a known-good OCI layout") + diffDigest, blobDigest := generateLayout(t, goodLayout) + goodLayoutRef, err := alltransports.ParseImageName("oci:" + goodLayout) + 
require.NoErrorf(t, err, "parsing image reference to known-good OCI layout") + sus, err := scanDir(goodLayout) + require.NoError(t, err, "scanning known-good OCI layout") + assert.False(t, sus, "check on known-good OCI layout") + + // copy to a directory + goodDir, err := os.MkdirTemp(subdirsDir, "gooddir") + require.NoErrorf(t, err, "creating a temporary directory to store a good directory") + goodDirRef, err := alltransports.ParseImageName("dir:" + goodDir) + require.NoErrorf(t, err, "parsing image reference to good directory") + _, err = imageCopy.Image(ctx, policyContext, goodDirRef, goodLayoutRef, nil) + require.NoError(t, err, "copying an acceptable OCI layout to a directory") + + // scan the directory + sus, err = scanDir(goodDir) + require.NoError(t, err, "scanning good directory") + assert.False(t, sus, "check on good directory") + + // copy to a docker-archive + goodDockerArchiveFile, err := os.CreateTemp(archiveDir, "gooddockerarchive") + require.NoErrorf(t, err, "creating a temporary file to store a good docker archive") + goodDockerArchive := goodDockerArchiveFile.Name() + goodDockerArchiveRef, err := alltransports.ParseImageName("docker-archive:" + goodDockerArchive) + require.NoErrorf(t, err, "parsing image reference to good docker archive") + require.NoError(t, goodDockerArchiveFile.Close(), "closing placeholder for good docker archive") + require.NoError(t, os.Remove(goodDockerArchive), "removing placeholder for good docker archive") + _, err = imageCopy.Image(ctx, policyContext, goodDockerArchiveRef, goodLayoutRef, nil) + require.NoError(t, err, "copying an acceptable OCI layout to a docker archive") + + // scan the docker-archive + sus, err = scanArchive(goodDockerArchive) + require.NoError(t, err, "scanning good docker archive") + assert.True(t, sus, "check on good docker archive") // there are symlinks in there + + // copy to an oci-archive + goodOCIArchiveFile, err := os.CreateTemp(archiveDir, "goodociarchive") + require.NoErrorf(t, err, "creating a temporary file to store a good oci archive") + goodOCIArchive := 
goodOCIArchiveFile.Name() + goodOCIArchiveRef, err := alltransports.ParseImageName("oci-archive:" + goodOCIArchive) + require.NoErrorf(t, err, "parsing image reference to good oci archive") + require.NoError(t, goodOCIArchiveFile.Close(), "closing placeholder for good oci archive") + require.NoError(t, os.Remove(goodOCIArchive), "removing placeholder for good oci archive") + _, err = imageCopy.Image(ctx, policyContext, goodOCIArchiveRef, goodLayoutRef, nil) + require.NoError(t, err, "copying an acceptable OCI layout to an OCI archive") + + // scan the oci-archive + sus, err = scanArchive(goodOCIArchive) + require.NoError(t, err, "scanning good OCI archive") + assert.False(t, sus, "check on good OCI archive") + + // make sure the original versions can all be read without error + requireImageReadable(t, transports.ImageName(goodLayoutRef)) + requireImageReadable(t, transports.ImageName(goodDirRef)) + requireImageReadable(t, transports.ImageName(goodOCIArchiveRef)) + requireImageReadable(t, transports.ImageName(goodDockerArchiveRef)) + + // sanitize them all + goodLayoutRel, err := filepath.Rel(contextDir, goodLayout) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", goodLayout) + newGoodLayout, err := sanitizeImageName("oci", goodLayoutRel, contextDir, t.TempDir()) + require.NoError(t, err, "sanitizing good OCI layout") + goodDirRel, err := filepath.Rel(contextDir, goodDir) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", goodDir) + newGoodDir, err := sanitizeImageName("dir", goodDirRel, contextDir, t.TempDir()) + require.NoError(t, err, "sanitizing good directory") + goodOCIArchiveRel, err := filepath.Rel(contextDir, goodOCIArchive) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", goodOCIArchive) + newGoodOCIArchive, err := sanitizeImageName("oci-archive", goodOCIArchiveRel, contextDir, t.TempDir()) + require.NoError(t, err, "sanitizing good OCI archive") + goodDockerArchiveRel, err := filepath.Rel(contextDir, goodDockerArchive) + require.NoErrorf(t, err, 
"converting absolute path %q to a relative one", goodDockerArchive) + newGoodDockerArchive, err := sanitizeImageName("docker-archive", goodDockerArchiveRel, contextDir, t.TempDir()) + require.NoError(t, err, "sanitizing good docker archive") + + // make sure the sanitized versions can all be read without error + requireImageReadable(t, newGoodLayout) + requireImageReadable(t, newGoodDir) + requireImageReadable(t, newGoodOCIArchive) + requireImageReadable(t, newGoodDockerArchive) + + // mutate them all + badLayout := mutateDirectory(t, contextDir, goodLayout, filepath.Join(v1.ImageBlobsDir, blobDigest.Algorithm().String(), blobDigest.Encoded())) + badLayoutRel, err := filepath.Rel(contextDir, badLayout) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", badLayout) + _, err = sanitizeImageName("oci", badLayoutRel, contextDir, t.TempDir()) + require.ErrorIs(t, err, os.ErrNotExist, "sanitizing bad OCI layout") + + badDir := mutateDirectory(t, contextDir, goodDir, blobDigest.Encoded()) + badDirRel, err := filepath.Rel(contextDir, badDir) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", badDir) + _, err = sanitizeImageName("dir", badDirRel, contextDir, t.TempDir()) + require.ErrorIs(t, err, os.ErrNotExist, "sanitizing bad directory") + + badOCIArchive := mutateArchive(t, contextDir, goodOCIArchive, filepath.Join(v1.ImageBlobsDir, blobDigest.Algorithm().String(), blobDigest.Encoded())) + + badOCIArchiveRel, err := filepath.Rel(contextDir, badOCIArchive) + require.NoErrorf(t, err, "converting absolute path %q to a relative one", badOCIArchive) + _, err = sanitizeImageName("oci-archive", badOCIArchiveRel, contextDir, t.TempDir()) + require.ErrorContains(t, err, "invalid symbolic link", "sanitizing bad oci archive") + + badDockerArchive := mutateArchive(t, contextDir, goodDockerArchive, diffDigest.Encoded()+".tar") + badDockerArchiveRel, err := filepath.Rel(contextDir, badDockerArchive) + require.NoErrorf(t, err, 
"converting absolute path %q to a relative one", badDockerArchive) + _, err = sanitizeImageName("docker-archive", badDockerArchiveRel, contextDir, t.TempDir()) + require.ErrorContains(t, err, "invalid symbolic link", "sanitizing bad docker archive") +} diff --git a/tests/bud.bats b/tests/bud.bats index 3def9b0261..433b894a35 100644 --- a/tests/bud.bats +++ b/tests/bud.bats @@ -7340,3 +7340,52 @@ EOF find ${TEST_SCRATCH_DIR}/buildcontext -ls expect_output "" "build should not be able to write to build context" } + +@test "build-oci-archive-switch" { + base=busybox + _prefetch $base + run_buildah from -q $base + run_buildah inspect --format '{{.FromImageID}}' "$output" + imageID="$output" + mkdir -p ${TEST_SCRATCH_DIR}/buildcontext + copy containers-storage:$imageID oci-archive:${TEST_SCRATCH_DIR}/buildcontext/source-oci-archive.tar + copy containers-storage:$imageID oci:${TEST_SCRATCH_DIR}/buildcontext/source-oci + copy containers-storage:$imageID docker-archive:${TEST_SCRATCH_DIR}/buildcontext/source-docker-archive.tar + copy containers-storage:$imageID dir:${TEST_SCRATCH_DIR}/buildcontext/source-dir + pushd ${TEST_SCRATCH_DIR} + cat > ${TEST_SCRATCH_DIR}/buildcontext/Dockerfile << EOF + FROM $base + RUN --mount=type=bind,rw,target=/bc mkdir /bc/oci-archive /bc/oci /bc/docker-archive /bc/dir + RUN --mount=type=bind,rw,target=/bc cp /bc/source-oci-archive.tar /bc/oci-archive/archive.tar + RUN --mount=type=bind,rw,target=/bc cp -a /bc/source-oci/* /bc/oci/ + RUN --mount=type=bind,rw,target=/bc cp /bc/source-docker-archive.tar /bc/docker-archive/archive.tar + RUN --mount=type=bind,rw,target=/bc cp -a /bc/source-dir/* /bc/dir/ + FROM oci-archive:oci-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 : + FROM oci-archive:/oci-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 : + FROM oci-archive:./oci-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 
--mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 : + FROM oci:oci/ + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 : + FROM oci:/oci/ + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 : + FROM oci:./oci + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 : + FROM oci:oci + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 : + FROM docker-archive:docker-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 : + FROM docker-archive:/docker-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 --mount=type=bind,from=8,target=/var/tmp/8 : + FROM 
docker-archive:./docker-archive/archive.tar + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 --mount=type=bind,from=8,target=/var/tmp/8 --mount=type=bind,from=9,target=/var/tmp/9 : + FROM dir:dir + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 --mount=type=bind,from=8,target=/var/tmp/8 --mount=type=bind,from=9,target=/var/tmp/9 --mount=type=bind,from=10,target=/var/tmp/10 : + FROM dir:/dir + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 --mount=type=bind,from=8,target=/var/tmp/8 --mount=type=bind,from=9,target=/var/tmp/9 --mount=type=bind,from=10,target=/var/tmp/10 --mount=type=bind,from=11,target=/var/tmp/11 : + FROM dir:./dir/ + RUN --mount=type=bind,from=0,target=/var/tmp/0 --mount=type=bind,from=1,target=/var/tmp/1 --mount=type=bind,from=2,target=/var/tmp/2 --mount=type=bind,from=3,target=/var/tmp/3 --mount=type=bind,from=4,target=/var/tmp/4 --mount=type=bind,from=5,target=/var/tmp/5 --mount=type=bind,from=6,target=/var/tmp/6 --mount=type=bind,from=7,target=/var/tmp/7 --mount=type=bind,from=8,target=/var/tmp/8 --mount=type=bind,from=9,target=/var/tmp/9 
--mount=type=bind,from=10,target=/var/tmp/10 --mount=type=bind,from=11,target=/var/tmp/11 --mount=type=bind,from=12,target=/var/tmp/12 : +EOF + run_buildah build ${TEST_SCRATCH_DIR}/buildcontext +}