6 changes: 5 additions & 1 deletion cmd/crc/cmd/bundle/bundle.go
@@ -9,11 +9,15 @@ func GetBundleCmd(config *config.Config) *cobra.Command {
bundleCmd := &cobra.Command{
Use: "bundle SUBCOMMAND [flags]",
Short: "Manage CRC bundles",
Long: "Manage CRC bundles",
Long: "Manage CRC bundles, including downloading, listing, and cleaning up cached bundles.",
Run: func(cmd *cobra.Command, _ []string) {
_ = cmd.Help()
},
}
bundleCmd.AddCommand(getGenerateCmd(config))
bundleCmd.AddCommand(getDownloadCmd(config))
bundleCmd.AddCommand(getListCmd(config))
bundleCmd.AddCommand(getClearCmd())
bundleCmd.AddCommand(getPruneCmd())
return bundleCmd
}
57 changes: 57 additions & 0 deletions cmd/crc/cmd/bundle/clear.go
@@ -0,0 +1,57 @@
package bundle

import (
"os"
"path/filepath"
"strings"

"github.com/crc-org/crc/v2/pkg/crc/constants"
"github.com/crc-org/crc/v2/pkg/crc/logging"
"github.com/spf13/cobra"
)

func getClearCmd() *cobra.Command {
return &cobra.Command{
Use: "clear",
Short: "Clear cached CRC bundles",
Long: "Delete all downloaded CRC bundles from the cache directory.",
RunE: func(cmd *cobra.Command, args []string) error {
return runClear()
},
}
}

func runClear() error {
cacheDir := constants.MachineCacheDir
if _, err := os.Stat(cacheDir); os.IsNotExist(err) {
logging.Infof("Cache directory %s does not exist", cacheDir)
return nil
}

files, err := os.ReadDir(cacheDir)
if err != nil {
return err
}

cleared := false
var lastErr error
for _, file := range files {
if strings.HasSuffix(file.Name(), ".crcbundle") {
filePath := filepath.Join(cacheDir, file.Name())
logging.Infof("Deleting %s", filePath)
if err := os.RemoveAll(filePath); err != nil {
logging.Errorf("Failed to remove %s: %v", filePath, err)
lastErr = err
} else {
cleared = true
}
}
}

if !cleared && lastErr == nil {
logging.Infof("No bundles found in %s", cacheDir)
} else if cleared {
logging.Infof("Cleared cached bundles in %s", cacheDir)
}
return lastErr
}
156 changes: 156 additions & 0 deletions cmd/crc/cmd/bundle/download.go
@@ -0,0 +1,156 @@
package bundle

import (
"context"
"encoding/hex"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"

crcConfig "github.com/crc-org/crc/v2/pkg/crc/config"
"github.com/crc-org/crc/v2/pkg/crc/constants"
"github.com/crc-org/crc/v2/pkg/crc/gpg"
"github.com/crc-org/crc/v2/pkg/crc/logging"
"github.com/crc-org/crc/v2/pkg/crc/machine/bundle"
crcPreset "github.com/crc-org/crc/v2/pkg/crc/preset"
"github.com/crc-org/crc/v2/pkg/download"
"github.com/spf13/cobra"
)

func getDownloadCmd(config *crcConfig.Config) *cobra.Command {
downloadCmd := &cobra.Command{
Use: "download [version] [architecture]",
Short: "Download a specific CRC bundle",
Long: "Download a specific CRC bundle from the mirrors. If no version or architecture is specified, the bundle for the current CRC version will be downloaded.",
RunE: func(cmd *cobra.Command, args []string) error {
force, _ := cmd.Flags().GetBool("force")
presetStr, _ := cmd.Flags().GetString("preset")

var preset crcPreset.Preset
if presetStr != "" {
var err error
preset, err = crcPreset.ParsePresetE(presetStr)
if err != nil {
return err
}
} else {
preset = crcConfig.GetPreset(config)
}

return runDownload(args, preset, force)
},
}
downloadCmd.Flags().BoolP("force", "f", false, "Overwrite existing bundle if present")
downloadCmd.Flags().StringP("preset", "p", "", "Target preset (openshift, okd, microshift)")

return downloadCmd
}

func runDownload(args []string, preset crcPreset.Preset, force bool) error {
// Disk space check (simple check for ~10GB free)
// This is a basic check, more robust checking would require syscall/windows specific implementations
// We skip this for now to avoid adding heavy OS-specific deps, assuming user manages disk space or download fails naturally.

// If no args, use default bundle path
if len(args) == 0 {
defaultBundlePath := constants.GetDefaultBundlePath(preset)
if !force {
if _, err := os.Stat(defaultBundlePath); err == nil {
logging.Infof("Bundle %s already exists. Use --force to overwrite.", defaultBundlePath)
return nil
}
}

Comment on lines +52 to +66
⚠️ Potential issue | 🟡 Minor

Reject extra positional args to avoid silent ignores.
Right now, additional args beyond [version] [architecture] are ignored. Consider failing fast with a clear error.

✅ Proposed fix
 func runDownload(args []string, preset crcPreset.Preset, force bool) error {
+	if len(args) > 2 {
+		return fmt.Errorf("too many arguments: expected at most 2 (version, architecture), got %d", len(args))
+	}
 	// Disk space check (simple check for ~10GB free)
 	// This is a basic check, more robust checking would require syscall/windows specific implementations
 	// We skip this for now to avoid adding heavy OS-specific deps, assuming user manages disk space or download fails naturally.
🤖 Prompt for AI Agents
In `@cmd/crc/cmd/bundle/download.go` around lines 52 - 66, The runDownload
function currently ignores extra positional args; add a guard at the start of
runDownload that checks args length and returns an error when len(args) > 2
(allowed: [version] [architecture]) so callers fail fast with a clear message;
update the error return to use fmt.Errorf or the package's preferred
error/logging pattern and reference runDownload and args in the message to make
it obvious which call site and parameters were invalid.
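
As an alternative to (or a backstop for) the explicit length check, cobra can enforce arity declaratively through the command's Args field; a minimal sketch, assuming a hypothetical withArgLimit helper applied to the downloadCmd constructed above:

	// Sketch: have cobra reject surplus positional arguments before RunE runs,
	// so runDownload never sees more than [version] [architecture].
	func withArgLimit(cmd *cobra.Command) *cobra.Command {
		cmd.Args = cobra.MaximumNArgs(2)
		return cmd
	}

getDownloadCmd could then return withArgLimit(downloadCmd), keeping any in-function guard only as a defensive check.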

logging.Debugf("Source: %s", constants.GetDefaultBundleDownloadURL(preset))
logging.Debugf("Destination: %s", defaultBundlePath)
// For default bundle, we use the existing logic which handles verification internally
_, err := bundle.Download(context.Background(), preset, defaultBundlePath, false)
return err
}

// If args provided, we are constructing a URL
version := args[0]

// Check if version is partial (Major.Minor) and resolve it if necessary
resolvedVersion, err := resolveOpenShiftVersion(preset, version)
if err != nil {
logging.Warnf("Could not resolve version %s: %v. Trying with original version string.", version, err)
} else if resolvedVersion != version {
logging.Debugf("Resolved version %s to %s", version, resolvedVersion)
version = resolvedVersion
}
Comment on lines +77 to +84
⚠️ Potential issue | 🟠 Major

Fail fast when partial-version resolution fails.
For a major.minor input, continuing with an unresolved version will usually 404 or fail signature validation, which is less actionable than a clear resolution error.

✅ Proposed fix
 	resolvedVersion, err := resolveOpenShiftVersion(preset, version)
 	if err != nil {
-		logging.Warnf("Could not resolve version %s: %v. Trying with original version string.", version, err)
+		return fmt.Errorf("failed to resolve version %s: %w", version, err)
 	} else if resolvedVersion != version {
 		logging.Debugf("Resolved version %s to %s", version, resolvedVersion)
 		version = resolvedVersion
 	}
🤖 Prompt for AI Agents
In `@cmd/crc/cmd/bundle/download.go` around lines 77 - 84, The code currently
swallows errors from resolveOpenShiftVersion and continues with the original
partial version; change this to fail fast: when resolveOpenShiftVersion(preset,
version) returns an error, propagate or return that error instead of logging a
warning and continuing. Update the logic around resolveOpenShiftVersion,
logging.Warnf, and the subsequent version assignment so that resolve errors
cause an early return (or a wrapped error) so callers know resolution failed
rather than proceeding with the unresolved partial version.


architecture := runtime.GOARCH
if len(args) > 1 {
architecture = args[1]
}

bundleName := constants.BundleName(preset, version, architecture)
bundlePath := filepath.Join(constants.MachineCacheDir, bundleName)

if !force {
if _, err := os.Stat(bundlePath); err == nil {
logging.Infof("Bundle %s already exists. Use --force to overwrite.", bundleName)
return nil
}
}

// Base URL for the directory containing the bundle and signature
baseVersionURL := fmt.Sprintf("%s/%s/%s/", constants.DefaultMirrorURL, preset.String(), version)
bundleURL := fmt.Sprintf("%s%s", baseVersionURL, bundleName)
sigURL := fmt.Sprintf("%s%s", baseVersionURL, "sha256sum.txt.sig")

logging.Infof("Downloading bundle: %s", bundleName)
logging.Debugf("Source: %s", bundleURL)
logging.Debugf("Destination: %s", constants.MachineCacheDir)

// Implement verification logic
logging.Infof("Verifying signature for %s...", version)
sha256sum, err := getVerifiedHashForCustomVersion(sigURL, bundleName)
if err != nil {
// Fallback: try without .sig if .sig not found, maybe just sha256sum.txt?
// For now, fail if signature verification fails as requested for "Safeguards"
return fmt.Errorf("signature verification failed: %w", err)
}

sha256bytes, err := hex.DecodeString(sha256sum)
if err != nil {
return fmt.Errorf("failed to decode sha256sum: %w", err)
}

_, err = download.Download(context.Background(), bundleURL, bundlePath, 0664, sha256bytes)
return err
}

func getVerifiedHashForCustomVersion(sigURL string, bundleName string) (string, error) {
// Reuse existing verification logic from bundle package via a helper here
// We essentially replicate getVerifiedHash but with our custom URL

res, err := download.InMemory(sigURL)
if err != nil {
return "", fmt.Errorf("failed to fetch signature file: %w", err)
}
defer res.Close()

signedHashes, err := io.ReadAll(res)
if err != nil {
return "", fmt.Errorf("failed to read signature file: %w", err)
}

verifiedHashes, err := gpg.GetVerifiedClearsignedMsgV3(constants.RedHatReleaseKey, string(signedHashes))
if err != nil {
return "", fmt.Errorf("invalid signature: %w", err)
}

lines := strings.Split(verifiedHashes, "\n")
for _, line := range lines {
if strings.HasSuffix(line, bundleName) {
sha256sum := strings.TrimSuffix(line, " "+bundleName)
return sha256sum, nil
}
}
return "", fmt.Errorf("hash for %s not found in signature file", bundleName)
}
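
The comment at the top of runDownload defers a free-space check to avoid OS-specific dependencies. For reference, a minimal Unix-only sketch of such a guard, assuming golang.org/x/sys/unix is an acceptable dependency and using a hypothetical checkFreeSpace helper (a Windows build would need its own implementation behind a build tag):

	import (
		"fmt"

		"golang.org/x/sys/unix"
	)

	// checkFreeSpace returns an error when the filesystem containing dir
	// has fewer than required bytes available.
	func checkFreeSpace(dir string, required uint64) error {
		var st unix.Statfs_t
		if err := unix.Statfs(dir, &st); err != nil {
			return fmt.Errorf("failed to stat filesystem for %s: %w", dir, err)
		}
		if free := st.Bavail * uint64(st.Bsize); free < required {
			return fmt.Errorf("not enough free space in %s: %d bytes available, %d required", dir, free, required)
		}
		return nil
	}

runDownload could call checkFreeSpace(constants.MachineCacheDir, 10<<30) before starting a download and surface the error up front instead of failing partway through.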
69 changes: 69 additions & 0 deletions cmd/crc/cmd/bundle/list.go
@@ -0,0 +1,69 @@
package bundle

import (
"fmt"
"runtime"

"github.com/Masterminds/semver/v3"
crcConfig "github.com/crc-org/crc/v2/pkg/crc/config"
"github.com/crc-org/crc/v2/pkg/crc/logging"
"github.com/spf13/cobra"
)

func getListCmd(config *crcConfig.Config) *cobra.Command {
return &cobra.Command{
Use: "list [version]",
Short: "List available CRC bundles",
Long: "List available CRC bundles from the mirrors. Optionally filter by major.minor version (e.g. 4.19).",
RunE: func(cmd *cobra.Command, args []string) error {
return runList(args, config)
},
}
}

func runList(args []string, config *crcConfig.Config) error {
if len(args) > 1 {
return fmt.Errorf("too many arguments: expected at most 1 version filter, got %d", len(args))
}

preset := crcConfig.GetPreset(config)
versions, err := fetchAvailableVersions(preset)
if err != nil {
return err
}

if len(versions) == 0 {
logging.Infof("No bundles found for preset %s", preset)
return nil
}

var filter *semver.Version
if len(args) > 0 {
v, err := semver.NewVersion(args[0] + ".0") // Treat 4.19 as 4.19.0 for partial matching
if err == nil {
filter = v
} else {
// Try parsing as full version just in case
v, err = semver.NewVersion(args[0])
if err == nil {
filter = v
}
}
}

logging.Infof("Available bundles for %s:", preset)
for _, v := range versions {
if filter != nil {
if v.Major() != filter.Major() || v.Minor() != filter.Minor() {
continue
}
}

cachedStr := ""
if isBundleCached(preset, v.String(), runtime.GOARCH) {
cachedStr = " (cached)"
}
fmt.Printf("%s%s\n", v.String(), cachedStr)
}
return nil
}
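
The partial-version filter above appends ".0" so that an input like "4.19" parses as 4.19.0 and is then compared field by field. An equivalent option, using the same Masterminds/semver/v3 dependency, is a wildcard constraint; a minimal sketch with a hypothetical matchesFilter helper:

	// matchesFilter reports whether v falls within the major.minor series
	// given by filter, e.g. filter "4.19" matches 4.19.0, 4.19.3, and so on.
	func matchesFilter(v *semver.Version, filter string) (bool, error) {
		c, err := semver.NewConstraint(filter + ".x") // "4.19" -> "4.19.x"
		if err != nil {
			return false, err
		}
		return c.Check(v), nil
	}

This keeps the filtering in one place and avoids the two-step parse fallback in runList.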
71 changes: 71 additions & 0 deletions cmd/crc/cmd/bundle/prune.go
@@ -0,0 +1,71 @@
package bundle

import (
"os"
"path/filepath"
"sort"
"strings"

"github.com/crc-org/crc/v2/pkg/crc/constants"
"github.com/crc-org/crc/v2/pkg/crc/logging"
"github.com/spf13/cobra"
)

func getPruneCmd() *cobra.Command {
return &cobra.Command{
Use: "prune",
Short: "Prune old CRC bundles",
Long: "Keep only the most recent bundles and delete older ones to save space.",
RunE: func(cmd *cobra.Command, args []string) error {
// Default keep 2 most recent
return runPrune(2)
},
}
}

func runPrune(keep int) error {
cacheDir := constants.MachineCacheDir
if _, err := os.Stat(cacheDir); os.IsNotExist(err) {
logging.Infof("Cache directory %s does not exist", cacheDir)
return nil
}

files, err := os.ReadDir(cacheDir)
if err != nil {
return err
}

var bundleFiles []os.DirEntry
for _, file := range files {
if strings.HasSuffix(file.Name(), ".crcbundle") {
bundleFiles = append(bundleFiles, file)
}
}

if len(bundleFiles) <= keep {
logging.Infof("Nothing to prune (found %d bundles, keeping %d)", len(bundleFiles), keep)
return nil
}

// Sort by modification time, newest first
sort.Slice(bundleFiles, func(i, j int) bool {
infoI, errI := bundleFiles[i].Info()
infoJ, errJ := bundleFiles[j].Info()
if errI != nil || errJ != nil {
// If we can't get info, treat as oldest (sort to end for pruning)
return errJ != nil && errI == nil
}
return infoI.ModTime().After(infoJ.ModTime())
})

for i := keep; i < len(bundleFiles); i++ {
file := bundleFiles[i]
filePath := filepath.Join(cacheDir, file.Name())
logging.Infof("Pruning old bundle: %s", file.Name())
if err := os.RemoveAll(filePath); err != nil {
logging.Errorf("Failed to remove %s: %v", filePath, err)
}
}
Comment on lines +45 to +68
⚠️ Potential issue | 🟠 Major

Prune logic keeps only N total bundles, not N per major.minor.
The current mtime-based pruning deletes everything beyond the newest keep bundles globally, which conflicts with the requirement to keep the latest two patch versions per major.minor (and can unintentionally delete other arch/preset bundles). Consider grouping by major.minor (and arch) and pruning within each group based on version.

🔧 Proposed fix (group by version/arch and prune within each group)
@@
-	if len(bundleFiles) <= keep {
+	if len(bundleFiles) <= keep {
 		logging.Infof("Nothing to prune (found %d bundles, keeping %d)", len(bundleFiles), keep)
 		return nil
 	}
 
-	// Sort by modification time, newest first
-	sort.Slice(bundleFiles, func(i, j int) bool {
-		infoI, errI := bundleFiles[i].Info()
-		infoJ, errJ := bundleFiles[j].Info()
-		if errI != nil || errJ != nil {
-			// If we can't get info, treat as oldest (sort to end for pruning)
-			return errJ != nil && errI == nil
-		}
-		return infoI.ModTime().After(infoJ.ModTime())
-	})
-
-	for i := keep; i < len(bundleFiles); i++ {
-		file := bundleFiles[i]
-		filePath := filepath.Join(cacheDir, file.Name())
-		logging.Infof("Pruning old bundle: %s", file.Name())
-		if err := os.RemoveAll(filePath); err != nil {
-			logging.Errorf("Failed to remove %s: %v", filePath, err)
-		}
-	}
+	type bundleInfo struct {
+		entry os.DirEntry
+		major int
+		minor int
+		patch int
+		arch  string
+	}
+
+	groups := map[string][]bundleInfo{}
+	for _, file := range bundleFiles {
+		info, ok := parseBundleInfo(file.Name())
+		if !ok {
+			logging.Warnf("Skipping bundle with unrecognized version format: %s", file.Name())
+			continue
+		}
+		info.entry = file
+		key := fmt.Sprintf("%d.%d/%s", info.major, info.minor, info.arch)
+		groups[key] = append(groups[key], info)
+	}
+
+	for _, group := range groups {
+		sort.Slice(group, func(i, j int) bool {
+			return group[i].patch > group[j].patch
+		})
+		for i := keep; i < len(group); i++ {
+			file := group[i].entry
+			filePath := filepath.Join(cacheDir, file.Name())
+			logging.Infof("Pruning old bundle: %s", file.Name())
+			if err := os.RemoveAll(filePath); err != nil {
+				logging.Errorf("Failed to remove %s: %v", filePath, err)
+			}
+		}
+	}
 
 	return nil
 }
+
+func parseBundleInfo(name string) (bundleInfo, bool) {
+	base := strings.TrimSuffix(name, ".crcbundle")
+	parts := strings.Split(base, "_")
+	if len(parts) < 3 {
+		return bundleInfo{}, false
+	}
+	versionStr := parts[len(parts)-2]
+	arch := parts[len(parts)-1]
+	verParts := strings.Split(versionStr, ".")
+	if len(verParts) != 3 {
+		return bundleInfo{}, false
+	}
+	major, err1 := strconv.Atoi(verParts[0])
+	minor, err2 := strconv.Atoi(verParts[1])
+	patch, err3 := strconv.Atoi(verParts[2])
+	if err1 != nil || err2 != nil || err3 != nil {
+		return bundleInfo{}, false
+	}
+	return bundleInfo{major: major, minor: minor, patch: patch, arch: arch}, true
+}
🤖 Prompt for AI Agents
In `@cmd/crc/cmd/bundle/prune.go` around lines 45 - 68, The prune logic currently
sorts bundleFiles globally and removes everything beyond the newest keep, but
you must instead group bundles by major.minor (and arch/preset if applicable)
and prune within each group; update the code around bundleFiles, sort.Slice and
the removal loop to (1) parse each file.Name() (or bundle metadata via
file.Info()) to extract semver major.minor and arch, (2) build a
map[groupKey][]os.FileInfo (or []fs.File) where groupKey = major.minor + ":" +
arch, (3) for each group sort by semantic version (or ModTime if version not
parseable) to get newest-first, (4) keep the first keep entries per group and
call os.RemoveAll(filepath.Join(cacheDir, file.Name())) only for the rest, and
ensure errors from Info()/version parsing are handled (treat unparsable entries
as oldest so they get pruned last).


return nil
}