merge: sync upstream mole/main

This commit is contained in:
zhukang
2026-03-11 16:57:00 +08:00
47 changed files with 1230 additions and 310 deletions

1
.github/CODEOWNERS vendored Normal file
View File

@@ -0,0 +1 @@
* @tw93

View File

@@ -10,6 +10,8 @@ assignees: ''
A clear and concise description of what the bug is. We suggest using English for better global understanding.
If you believe the issue may involve unsafe deletion, path validation bypass, privilege boundary bypass, or release/install integrity issues, do not file a public bug report. Report it privately using the contact details in `SECURITY.md`.
## Steps to reproduce
1. Run command: `mo ...`

View File

@@ -1,5 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: Private Security Report
url: mailto:hitw93@gmail.com?subject=Mole%20security%20report
about: Report a suspected vulnerability privately instead of opening a public issue
- name: Telegram Community
url: https://t.me/+GclQS9ZnxyI2ODQ1
about: Join our Telegram group for questions and discussions

View File

@@ -4,8 +4,18 @@ updates:
directory: "/"
schedule:
interval: "weekly"
labels:
- "dependencies"
reviewers:
- "tw93"
open-pull-requests-limit: 10
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"
labels:
- "dependencies"
reviewers:
- "tw93"
open-pull-requests-limit: 10

18
.github/pull_request_template.md vendored Normal file
View File

@@ -0,0 +1,18 @@
## Summary
- Describe the change.
## Safety Review
- Does this change affect cleanup, uninstall, optimize, installer, remove, analyze delete, update, or install behavior?
- Does this change affect path validation, protected directories, symlink handling, sudo boundaries, or release/install integrity?
- If yes, describe the new boundary or risk change clearly.
## Tests
- List the automated tests you ran.
- List any manual checks for high-risk paths or destructive flows.
## Safety-related changes
- None.

View File

@@ -38,7 +38,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v5
with:
go-version: '1.24.6'
go-version-file: go.mod
- name: Install goimports
run: go install golang.org/x/tools/cmd/goimports@latest
@@ -91,7 +91,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v5
with:
go-version: '1.24.6'
go-version-file: go.mod
- name: Run check script
run: ./scripts/check.sh --no-format

52
.github/workflows/codeql.yml vendored Normal file
View File

@@ -0,0 +1,52 @@
# CodeQL static analysis workflow: scans the Go sources and the GitHub
# Actions workflow definitions for security issues on pushes/PRs to
# main/dev, plus a weekly scheduled sweep.
name: CodeQL

on:
  push:
    branches: [main, dev]
  pull_request:
    branches: [main, dev]
  schedule:
    # Weekly scan: Mondays at 03:17 UTC (off-peak, non-:00 minute).
    - cron: '17 3 * * 1'

permissions:
  contents: read
  # Required to upload SARIF results to the repository Security tab.
  security-events: write

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    runs-on: ubuntu-latest
    strategy:
      # Let the other language finish even if one analysis fails.
      fail-fast: false
      matrix:
        include:
          # Go uses a manual build so CodeQL traces real compile steps.
          - language: go
            build-mode: manual
          # Workflow files are analyzed statically; no build required.
          - language: actions
            build-mode: none
    steps:
      - name: Checkout
        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4
      - name: Set up Go
        if: matrix.language == 'go'
        uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v5
        with:
          # Keep the toolchain in sync with the version declared in go.mod.
          go-version-file: go.mod
      - name: Initialize CodeQL
        # NOTE(review): floating @v4 tag while sibling actions are SHA-pinned —
        # consider pinning to a commit SHA for consistency and supply-chain safety.
        uses: github/codeql-action/init@v4
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # security-extended adds lower-severity/precision security queries.
          queries: security-extended
      - name: Build for CodeQL
        if: matrix.build-mode == 'manual'
        run: make build
      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v4
        with:
          category: "/language:${{ matrix.language }}"

View File

@@ -6,7 +6,7 @@ on:
- 'V*'
permissions:
contents: write
contents: read
jobs:
build:
@@ -28,7 +28,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v5
with:
go-version: "1.24.6"
go-version-file: go.mod
- name: Build Binaries
run: |
@@ -58,6 +58,10 @@ jobs:
name: Publish Release
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
attestations: write
id-token: write
steps:
- name: Download all artifacts
uses: actions/download-artifact@70fc10c6e5e1ce46ad2ea6f2b72d43f7d47b13c3 # v8.0.0
@@ -69,12 +73,33 @@ jobs:
- name: Display structure of downloaded files
run: ls -R bin/
- name: Generate release checksums
run: |
cd bin
mapfile -t release_files < <(find . -maxdepth 1 -type f -printf '%P\n' | sort)
if [[ ${#release_files[@]} -eq 0 ]]; then
echo "No release assets found"
exit 1
fi
sha256sum "${release_files[@]}" > SHA256SUMS
cat SHA256SUMS
- name: Generate artifact attestation
uses: actions/attest-build-provenance@v4
with:
subject-path: |
bin/analyze-darwin-*
bin/status-darwin-*
bin/binaries-darwin-*.tar.gz
bin/SHA256SUMS
- name: Create Release
uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2
if: startsWith(github.ref, 'refs/tags/')
with:
name: ${{ github.ref_name }}
files: bin/*
generate_release_notes: true
generate_release_notes: false
draft: false
prerelease: false

View File

@@ -19,7 +19,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v5
with:
go-version: "1.24.6"
go-version-file: go.mod
- name: Run test script
env:
@@ -52,6 +52,9 @@ jobs:
steps:
- uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v4
- name: Install tools
run: brew install bats-core
- name: Check for unsafe rm usage
run: |
echo "Checking for unsafe rm patterns..."
@@ -86,3 +89,10 @@ jobs:
exit 1
fi
echo "✓ No secrets found"
- name: Run high-risk path regression tests
env:
BATS_FORMATTER: tap
LANG: en_US.UTF-8
LC_ALL: en_US.UTF-8
run: bats tests/core_safe_functions.bats tests/purge.bats tests/installer.bats

1
.gitignore vendored
View File

@@ -50,6 +50,7 @@ GEMINI.md
ANTIGRAVITY.md
WARP.md
AGENTS.md
journal/
.cursorrules
# Go build artifacts (development)

View File

@@ -2,7 +2,8 @@
# Application Data Cleanup Module
set -euo pipefail
readonly ORPHAN_AGE_THRESHOLD=${ORPHAN_AGE_THRESHOLD:-${MOLE_ORPHAN_AGE_DAYS:-60}}
readonly ORPHAN_AGE_THRESHOLD=${ORPHAN_AGE_THRESHOLD:-${MOLE_ORPHAN_AGE_DAYS:-30}}
readonly CLAUDE_VM_ORPHAN_AGE_THRESHOLD=${MOLE_CLAUDE_VM_ORPHAN_AGE_DAYS:-7}
# Args: $1=target_dir, $2=label
clean_ds_store_tree() {
local target="$1"
@@ -59,7 +60,7 @@ clean_ds_store_tree() {
note_activity
fi
}
# Orphaned app data (60+ days inactive). Env: ORPHAN_AGE_THRESHOLD, DRY_RUN
# Orphaned app data (30+ days inactive). Env: ORPHAN_AGE_THRESHOLD, DRY_RUN
# Usage: scan_installed_apps "output_file"
scan_installed_apps() {
local installed_bundles="$1"
@@ -201,13 +202,13 @@ is_bundle_orphaned() {
;;
esac
# 5. Fast path: 60-day modification check (stat call, fast)
# 5. Fast path: 30-day modification check (stat call, fast)
if [[ -e "$directory_path" ]]; then
local last_modified_epoch=$(get_file_mtime "$directory_path")
local current_epoch
current_epoch=$(get_epoch_seconds)
local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400))
if [[ $days_since_modified -lt ${ORPHAN_AGE_THRESHOLD:-60} ]]; then
if [[ $days_since_modified -lt ${ORPHAN_AGE_THRESHOLD:-30} ]]; then
return 1
fi
fi
@@ -261,6 +262,17 @@ is_claude_vm_bundle_orphaned() {
return 1
fi
if [[ -e "$vm_bundle_path" ]]; then
local last_modified_epoch
last_modified_epoch=$(get_file_mtime "$vm_bundle_path")
local current_epoch
current_epoch=$(get_epoch_seconds)
local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400))
if [[ $days_since_modified -lt ${CLAUDE_VM_ORPHAN_AGE_THRESHOLD:-7} ]]; then
return 1
fi
fi
if [[ -z "$ORPHAN_MDFIND_CACHE_FILE" ]]; then
ORPHAN_MDFIND_CACHE_FILE=$(mktemp "${TMPDIR:-/tmp}/mole_mdfind_cache.XXXXXX")
register_temp_file "$ORPHAN_MDFIND_CACHE_FILE"

View File

@@ -212,7 +212,9 @@ clean_project_caches() {
[[ -d "$cache_dir/cache" ]] && safe_clean "$cache_dir/cache"/* "Next.js build cache" || true
;;
"__pycache__")
[[ -d "$cache_dir" ]] && safe_clean "$cache_dir"/* "Python bytecode cache" || true
# Remove the cache directory itself so we avoid expanding every
# .pyc file into a separate safe_clean target.
[[ -d "$cache_dir" ]] && safe_clean "$cache_dir" "Python bytecode cache" || true
;;
".dart_tool")
if [[ -d "$cache_dir" ]]; then

View File

@@ -198,13 +198,18 @@ clean_dev_docker() {
fi
stop_section_spinner
if [[ "$docker_running" == "true" ]]; then
clean_tool_cache "Docker build cache" docker builder prune -af
# Remove unused images, stopped containers, unused networks, and
# anonymous volumes in one pass. This maps better to the large
# reclaimable "docker system df" buckets users typically see.
clean_tool_cache "Docker unused data" docker system prune -af --volumes
else
echo -e " ${GRAY}${ICON_WARNING}${NC} Docker unused data · skipped (daemon not running)"
note_activity
debug_log "Docker daemon not running, skipping Docker cache cleanup"
fi
else
note_activity
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Docker build cache · would clean"
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Docker unused data · would clean"
fi
fi
safe_clean ~/.docker/buildx/cache/* "Docker BuildX cache"
@@ -359,6 +364,7 @@ clean_xcode_device_support() {
version_dirs+=("$entry")
done < <(command find "$ds_dir" -mindepth 1 -maxdepth 1 -print0 2> /dev/null)
if [[ ${#version_dirs[@]} -gt 0 ]]; then
# Sort by modification time (most recent first)
local -a sorted_dirs=()
while IFS= read -r line; do
@@ -403,6 +409,7 @@ clean_xcode_device_support() {
fi
fi
fi
fi
# Clean caches/logs inside kept versions
safe_clean "$ds_dir"/*/Symbols/System/Library/Caches/* "$display_name symbol cache"

View File

@@ -1310,6 +1310,14 @@ clean_project_artifacts() {
if [[ -t 1 ]]; then
stop_inline_spinner
fi
# Exit early if no artifacts were found to avoid unbound variable errors
# when expanding empty arrays with set -u active.
if [[ ${#menu_options[@]} -eq 0 ]]; then
echo ""
echo -e "${GRAY}No artifacts found to purge${NC}"
printf '\n'
return 0
fi
# Set global vars for selector
export PURGE_CATEGORY_SIZES=$(
IFS=,

View File

@@ -752,6 +752,23 @@ clean_virtualization_tools() {
# Estimate item size for Application Support cleanup.
# Files use stat; directories use du with timeout to avoid long blocking scans.
# Count entries under a directory, stopping early once a cap is reached.
# This avoids fully traversing huge trees (node_modules, .git) when the
# caller only needs to know whether the count exceeds a threshold.
# Args: $1=directory, $2=max find depth (default 1), $3=cap (default 101)
# Output: the (possibly capped) entry count as a plain integer on stdout.
app_support_entry_count_capped() {
    local target_dir="$1"
    local depth_limit="${2:-1}"
    local entry_cap="${3:-101}"
    local tally=0
    local _ignored
    while IFS= read -r -d '' _ignored; do
        tally=$((tally + 1))
        [[ "$tally" -ge "$entry_cap" ]] && break
    done < <(command find "$target_dir" -mindepth 1 -maxdepth "$depth_limit" -print0 2> /dev/null)
    # Defensive: guarantee numeric output for arithmetic use by callers.
    [[ "$tally" =~ ^[0-9]+$ ]] || tally=0
    printf '%s\n' "$tally"
}
app_support_item_size_bytes() {
local item="$1"
local timeout_seconds="${2:-0.4}"
@@ -768,7 +785,7 @@ app_support_item_size_bytes() {
# Fast path: if directory has too many items, skip detailed size calculation
# to avoid hanging on deep directories (e.g., node_modules, .git)
local item_count
item_count=$(command find "$item" -maxdepth 2 -print0 2> /dev/null | tr -d '\0' | wc -c)
item_count=$(app_support_entry_count_capped "$item" 2 10001)
if [[ "$item_count" -gt 10000 ]]; then
# Return 1 to signal "too many items, size unknown"
return 1
@@ -859,7 +876,7 @@ clean_application_support_logs() {
if [[ -d "$candidate" ]]; then
# Quick count check - skip if too many items to avoid hanging
local quick_count
quick_count=$(command find "$candidate" -mindepth 1 -maxdepth 1 -printf '1\n' 2> /dev/null | wc -l | tr -d ' ')
quick_count=$(app_support_entry_count_capped "$candidate" 1 101)
if [[ "$quick_count" -gt 100 ]]; then
# Too many items - use bulk removal instead of item-by-item
local app_label="$app_name"
@@ -935,7 +952,7 @@ clean_application_support_logs() {
if [[ -d "$candidate" ]]; then
# Quick count check - skip if too many items
local quick_count
quick_count=$(command find "$candidate" -mindepth 1 -maxdepth 1 -printf '1\n' 2> /dev/null | wc -l | tr -d ' ')
quick_count=$(app_support_entry_count_capped "$candidate" 1 101)
if [[ "$quick_count" -gt 100 ]]; then
local container_label="$container"
if [[ ${#container_label} -gt 24 ]]; then

View File

@@ -68,7 +68,7 @@ get_lsregister_path() {
# Global Configuration Constants
# ============================================================================
readonly MOLE_TEMP_FILE_AGE_DAYS=7 # Temp file retention (days)
readonly MOLE_ORPHAN_AGE_DAYS=60 # Orphaned data retention (days)
readonly MOLE_ORPHAN_AGE_DAYS=30 # Orphaned data retention (days)
readonly MOLE_MAX_PARALLEL_JOBS=15 # Parallel job limit
readonly MOLE_MAIL_DOWNLOADS_MIN_KB=5120 # Mail attachment size threshold
readonly MOLE_MAIL_AGE_DAYS=30 # Mail attachment retention (days)
@@ -191,11 +191,17 @@ is_sip_enabled() {
# Detect CPU architecture
# Returns: "Apple Silicon" or "Intel"
detect_architecture() {
if [[ "$(uname -m)" == "arm64" ]]; then
echo "Apple Silicon"
else
echo "Intel"
if [[ -n "${MOLE_ARCH_CACHE:-}" ]]; then
echo "$MOLE_ARCH_CACHE"
return 0
fi
if [[ "$(uname -m)" == "arm64" ]]; then
export MOLE_ARCH_CACHE="Apple Silicon"
else
export MOLE_ARCH_CACHE="Intel"
fi
echo "$MOLE_ARCH_CACHE"
}
# Get free disk space on root volume
@@ -212,6 +218,11 @@ get_free_space() {
# Get Darwin kernel major version (e.g., 24 for 24.2.0)
# Returns 999 on failure to adopt conservative behavior (assume modern system)
get_darwin_major() {
if [[ -n "${MOLE_DARWIN_MAJOR_CACHE:-}" ]]; then
echo "$MOLE_DARWIN_MAJOR_CACHE"
return 0
fi
local kernel
kernel=$(uname -r 2> /dev/null || true)
local major="${kernel%%.*}"
@@ -219,6 +230,7 @@ get_darwin_major() {
# Return high number to skip potentially dangerous operations on unknown systems
major=999
fi
export MOLE_DARWIN_MAJOR_CACHE="$major"
echo "$major"
}
@@ -233,8 +245,10 @@ is_darwin_ge() {
# Get optimal parallel jobs for operation type (scan|io|compute|default)
get_optimal_parallel_jobs() {
local operation_type="${1:-default}"
local cpu_cores
cpu_cores=$(sysctl -n hw.ncpu 2> /dev/null || echo 4)
if [[ -z "${MOLE_CPU_CORES_CACHE:-}" ]]; then
export MOLE_CPU_CORES_CACHE=$(sysctl -n hw.ncpu 2> /dev/null || echo 4)
fi
local cpu_cores="$MOLE_CPU_CORES_CACHE"
case "$operation_type" in
scan | io)
echo $((cpu_cores * 2))
@@ -318,7 +332,7 @@ get_user_home() {
fi
if [[ -z "$home" ]]; then
home=$(eval echo "~$user" 2> /dev/null || true)
home=$(id -P "$user" 2> /dev/null | cut -d: -f9 || true)
fi
if [[ "$home" == "~"* ]]; then
@@ -586,7 +600,7 @@ mktemp_file() {
# Cleanup all tracked temp files and directories
cleanup_temp_files() {
stop_inline_spinner 2> /dev/null || true
stop_inline_spinner || true
local file
if [[ ${#MOLE_TEMP_FILES[@]} -gt 0 ]]; then
for file in "${MOLE_TEMP_FILES[@]}"; do
@@ -641,7 +655,7 @@ note_activity() {
# Usage: start_section_spinner "message"
start_section_spinner() {
local message="${1:-Scanning...}"
stop_inline_spinner 2> /dev/null || true
stop_inline_spinner || true
if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" " start_inline_spinner "$message"
fi
@@ -651,7 +665,7 @@ start_section_spinner() {
# Usage: stop_section_spinner
stop_section_spinner() {
# Always try to stop spinner (function handles empty PID gracefully)
stop_inline_spinner 2> /dev/null || true
stop_inline_spinner || true
# Always clear line to handle edge cases where spinner output remains
# (e.g., spinner was stopped elsewhere but line not cleared)
if [[ -t 1 ]]; then
@@ -732,18 +746,30 @@ update_progress_if_needed() {
# Usage: is_ansi_supported
# Returns: 0 if supported, 1 if not
is_ansi_supported() {
if [[ -n "${MOLE_ANSI_SUPPORTED_CACHE:-}" ]]; then
return "$MOLE_ANSI_SUPPORTED_CACHE"
fi
# Check if running in interactive terminal
[[ -t 1 ]] || return 1
if ! [[ -t 1 ]]; then
export MOLE_ANSI_SUPPORTED_CACHE=1
return 1
fi
# Check TERM variable
[[ -n "${TERM:-}" ]] || return 1
if [[ -z "${TERM:-}" ]]; then
export MOLE_ANSI_SUPPORTED_CACHE=1
return 1
fi
# Check for known ANSI-compatible terminals
case "$TERM" in
xterm* | vt100 | vt220 | screen* | tmux* | ansi | linux | rxvt* | konsole*)
export MOLE_ANSI_SUPPORTED_CACHE=0
return 0
;;
dumb | unknown)
export MOLE_ANSI_SUPPORTED_CACHE=1
return 1
;;
*)
@@ -751,8 +777,12 @@ is_ansi_supported() {
if command -v tput > /dev/null 2>&1; then
# Test if terminal supports colors (good proxy for ANSI support)
local colors=$(tput colors 2> /dev/null || echo "0")
[[ "$colors" -ge 8 ]] && return 0
if [[ "$colors" -ge 8 ]]; then
export MOLE_ANSI_SUPPORTED_CACHE=0
return 0
fi
fi
export MOLE_ANSI_SUPPORTED_CACHE=1
return 1
;;
esac

View File

@@ -92,7 +92,10 @@ validate_path_for_deletion() {
# Validate resolved target against protected paths
if [[ -n "$resolved_target" ]]; then
case "$resolved_target" in
/System/* | /usr/bin/* | /usr/lib/* | /bin/* | /sbin/* | /private/etc/*)
/ | /System | /System/* | /bin | /bin/* | /sbin | /sbin/* | \
/usr | /usr/bin | /usr/bin/* | /usr/lib | /usr/lib/* | \
/etc | /etc/* | /private/etc | /private/etc/* | \
/Library/Extensions | /Library/Extensions/*)
log_error "Symlink points to protected system path: $path -> $resolved_target"
return 1
;;

View File

@@ -42,9 +42,9 @@ if [[ -z "${MO_TIMEOUT_INITIALIZED:-}" ]]; then
fi
done
if [[ -z "$MO_TIMEOUT_BIN" ]] && command -v perl > /dev/null 2>&1; then
if command -v perl > /dev/null 2>&1; then
MO_TIMEOUT_PERL_BIN="$(command -v perl)"
if [[ "${MO_DEBUG:-0}" == "1" ]]; then
if [[ -z "$MO_TIMEOUT_BIN" ]] && [[ "${MO_DEBUG:-0}" == "1" ]]; then
echo "[TIMEOUT] Using perl fallback: $MO_TIMEOUT_PERL_BIN" >&2
fi
fi

76
SECURITY.md Normal file
View File

@@ -0,0 +1,76 @@
# Security Policy
Mole is a local system maintenance tool. It includes high-risk operations such as cleanup, uninstall, optimization, and artifact removal. We treat safety boundaries, deletion logic, and release integrity as security-sensitive areas.
## Reporting a Vulnerability
Please report suspected security issues privately.
- Email: `hitw93@gmail.com`
- Subject line: `Mole security report`
Do not open a public GitHub issue for an unpatched vulnerability.
If private vulnerability reporting through GitHub Security Advisories is enabled for the repository, you may use that channel instead of email.
Include as much of the following as possible:
- Mole version and install method
- macOS version
- Exact command or workflow involved
- Reproduction steps or proof of concept
- Whether the issue involves deletion boundaries, symlinks, sudo, path validation, or release/install integrity
## Response Expectations
- We aim to acknowledge new reports within 7 calendar days.
- We aim to provide a status update within 30 days if a fix or mitigation is not yet available.
- We will coordinate disclosure after a fix, mitigation, or clear user guidance is ready.
Response times are best-effort for a maintainer-led open source project, but security reports are prioritized over normal bug reports.
## Supported Versions
Security fixes are only guaranteed for:
- The latest published release
- The current `main` branch
Older releases may not receive security fixes. Users running high-risk commands should stay current.
## What We Consider a Security Issue
Examples of security-relevant issues include:
- Path validation bypasses
- Deletion outside intended cleanup boundaries
- Unsafe handling of symlinks or path traversal
- Unexpected privilege escalation or unsafe sudo behavior
- Sensitive data removal that bypasses documented protections
- Release, installation, update, or checksum integrity issues
- Vulnerabilities in logic that can cause unintended destructive behavior
## What Usually Does Not Qualify
The following are usually normal bugs, feature requests, or documentation issues rather than security issues:
- Cleanup misses that leave recoverable junk behind
- False negatives where Mole refuses to clean something
- Cosmetic UI problems
- Requests for broader or more aggressive cleanup behavior
- Compatibility issues without a plausible security impact
If you are unsure whether something is security-relevant, report it privately first.
## Security-Focused Areas in Mole
The project pays particular attention to:
- Destructive command boundaries
- Path validation and protected-directory rules
- Sudo and privilege boundaries
- Symlink and path traversal handling
- Sensitive data exclusions
- Packaging, release artifacts, checksums, and update/install flows
For the current technical design and known limitations, see [SECURITY_AUDIT.md](SECURITY_AUDIT.md).

View File

@@ -1,158 +1,287 @@
# Mole Security Reference
# Mole Security Audit
Version 1.28.0 | 2026-02-27
This document describes the security-relevant behavior of the current `main` branch. It is intended as a public description of Mole's safety boundaries, destructive-operation controls, release integrity signals, and known limitations.
## Path Validation
## Executive Summary
Every deletion goes through `lib/core/file_ops.sh`. The `validate_path_for_deletion()` function rejects empty paths, paths with `/../` in them, and anything containing control characters like newlines or null bytes.
Mole is a local system maintenance tool. Its main risk surface is not remote code execution; it is unintended local damage caused by cleanup, uninstall, optimize, purge, installer cleanup, or other destructive operations.
Direct `find ... -delete` is not used for security-sensitive cleanup paths. Deletions go through validated safe wrappers like `safe_sudo_find_delete()`, `safe_sudo_remove()`, and `safe_remove()`.
The project is designed around safety-first defaults:
**Blocked paths**, even with sudo:
- destructive paths are validated before deletion
- critical system roots and sensitive user-data categories are protected
- sudo use is bounded and additional restrictions apply when elevated deletion is required
- symlink handling is conservative
- preview, confirmation, timeout, and operation logging are used to make destructive behavior more visible and auditable
Mole prioritizes bounded cleanup over aggressive cleanup. When uncertainty exists, the tool should refuse, skip, or require stronger confirmation instead of widening deletion scope.
The project continues to strengthen:
- release integrity and public security signals
- targeted regression coverage for high-risk paths
- clearer documentation for privilege boundaries and known limitations
## Threat Surface
The highest-risk areas in Mole are:
- direct file and directory deletion
- recursive cleanup across common user and system cache locations
- uninstall flows that combine app removal with remnant cleanup
- project artifact purge for large dependency/build directories
- elevated cleanup paths that require sudo
- release, install, and update trust signals for distributed artifacts
`mo analyze` is intentionally lower-risk than cleanup flows:
- it does not require sudo
- it respects normal user permissions and SIP
- delete actions require explicit confirmation
- deletion routes through Finder Trash behavior rather than direct permanent removal
## Destructive Operation Boundaries
All destructive shell file operations are routed through guarded helpers in `lib/core/file_ops.sh`.
Core controls include:
- `validate_path_for_deletion()` rejects empty paths
- relative paths are rejected
- path traversal segments such as `..` as a path component are rejected
- paths containing control characters are rejected
- raw `find ... -delete` is avoided for security-sensitive cleanup logic
- removal flows use guarded helpers such as `safe_remove()`, `safe_sudo_remove()`, `safe_find_delete()`, and `safe_sudo_find_delete()`
Blocked paths remain protected even with sudo. Examples include:
```text
/ # root
/System # macOS system
/bin, /sbin, /usr # binaries
/etc, /var # config
/Library/Extensions # kexts
/private # system private
/
/System
/bin
/sbin
/usr
/etc
/var
/private
/Library/Extensions
```
Some system caches are OK to delete:
Some subpaths under otherwise protected roots are explicitly allowlisted for bounded cleanup where the project intentionally supports cache/log maintenance. Examples include:
- `/System/Library/Caches/com.apple.coresymbolicationd/data`
- `/private/tmp`, `/private/var/tmp`, `/private/var/log`, `/private/var/folders`
- `/private/var/db/diagnostics`, `/private/var/db/DiagnosticPipeline`, `/private/var/db/powerlog`, `/private/var/db/reportmemoryexception`
- `/private/tmp`
- `/private/var/tmp`
- `/private/var/log`
- `/private/var/folders`
- `/private/var/db/diagnostics`
- `/private/var/db/DiagnosticPipeline`
- `/private/var/db/powerlog`
- `/private/var/db/reportmemoryexception`
See `lib/core/file_ops.sh:60-78`.
This design keeps cleanup scoped to known-safe maintenance targets instead of broad root-level deletion patterns.
When running with sudo, `safe_sudo_recursive_delete()` also checks for symlinks. Refuses to follow symlinks pointing to system files.
## Path Protection Reference
## Cleanup Rules
### Protected Prefixes (Never Deleted)
**Orphan detection** at `lib/clean/apps.sh:orphan_detection()`:
App data is only considered orphaned if the app itself is gone from all three locations: `/Applications`, `~/Applications`, `/System/Applications`. On top of that, the data must be untouched for at least 60 days. Adobe, Microsoft, and Google stuff is whitelisted regardless.
**Uninstall matching** at `lib/clean/apps.sh:uninstall_app()`:
App names need at least 3 characters. Otherwise "Go" would match "Google" and that's bad. Fuzzy matching is off. Receipt scans only look under `/Applications` and `/Library/Application Support`, not in shared places like `/Library/Frameworks`.
**Dev tools:**
Cache dirs like `~/.cargo/registry/cache` or `~/.gradle/caches` get cleaned. But `~/.cargo/bin`, `~/.mix/archives`, `~/.rustup` toolchains, `~/.stack/programs` stay untouched.
**Application Support and Caches:**
- Cache entries are evaluated and removed safely on an item-by-item basis using `safe_remove()` (e.g., `process_container_cache`, `clean_application_support_logs`).
- Group Containers strictly filter against whitelists before deletion.
- Targets safe, age-gated resources natively (e.g., CrashReporter > 30 days, cached Steam/Simulator/Adobe/Teams log rot).
- Explicitly protects high-risk locations: `/private/var/folders/*` sweeping, iOS Backups (`MobileSync`), browser history/cookies, and destructive container/image pruning.
**LaunchAgent removal:**
Only removed when uninstalling the app that owns them. All `com.apple.*` items are skipped. Services get stopped via `launchctl` first. Generic names like Music, Notes, Photos are excluded from the search.
`stop_launch_services()` checks bundle_id is valid reverse-DNS before using it in find patterns, stopping glob injection. `find_app_files()` skips LaunchAgents named after common words like Music or Notes.
`unregister_app_bundle` explicitly drops uninstalled applications from LaunchServices via `lsregister -u`. `refresh_launch_services_after_uninstall` triggers asynchronous database compacting and rebuilds to ensure complete removal of stale app references without blocking workflows.
See `lib/core/app_protection.sh:find_app_files()`.
## Protected Categories
System stuff stays untouched: Control Center, System Settings, TCC, Spotlight, `/Library/Updates`.
VPN and proxy tools are skipped: Shadowsocks, V2Ray, Tailscale, Clash.
AI tools are protected: Cursor, Claude, ChatGPT, Ollama, LM Studio.
`~/Library/Messages/Attachments` and `~/Library/Metadata/CoreSpotlight` are kept out of automatic cleanup to avoid user-data or indexing risk.
Time Machine backups running? Won't clean. Status unclear? Also won't clean.
`com.apple.*` LaunchAgents/Daemons are never touched.
See `lib/core/app_protection.sh:is_critical_system_component()`.
## Analyzer
`mo analyze` runs differently:
- Standard user permissions, no sudo
- Respects SIP
- Two keys to delete: press ⌫ first, then Enter. Hard to delete by accident.
- Files go to Trash via Finder API, not rm
Code at `cmd/analyze/*.go`.
## Timeouts
Network volume checks timeout after 5s (NFS/SMB/AFP can hang forever). mdfind searches get 10s. SQLite vacuum gets 20s, skipped if Mail/Safari/Messages is open. dyld cache rebuild gets 180s, skipped if done in the last 24h.
`brew_uninstall_cask()` treats exit code 124 as timeout failure, returns immediately.
`app_support_item_size_bytes` calculation leverages direct `stat -f%z` checks and uses `du` only for directories, combined with strict timeout protections to avoid process hangs.
Font cache rebuilding (`opt_font_cache_rebuild`) safely aborts if explicit browser processes (Safari, Chrome, Firefox, Arc, etc.) are detected, preventing GPU cache corruption and rendering bugs.
See `lib/core/timeout.sh:run_with_timeout()`.
## User Config
Put paths in `~/.config/mole/whitelist`, one per line:
```bash
# exact matches only
/Users/me/important-cache
~/Library/Application Support/MyApp
```text
/
/System
/bin
/sbin
/usr
/etc
/var
/private
/Library/Extensions
```
These paths are protected from all operations.
### Whitelist Exceptions (Allowlisted for Cleanup)
Run `mo clean --dry-run` or `mo optimize --dry-run` to preview what would happen without actually doing it.
Some subpaths under protected roots are explicitly allowlisted:
## Testing
- `/private/tmp`
- `/private/var/tmp`
- `/private/var/log`
- `/private/var/folders`
- `/private/var/db/diagnostics`
- `/private/var/db/DiagnosticPipeline`
- `/private/var/db/powerlog`
- `/private/var/db/reportmemoryexception`
Security-sensitive cleanup paths are covered by BATS regression tests, including:
### Protected Categories
In addition to path blocking, these categories are protected:
- Keychains, password managers, credentials
- VPN/proxy tools (Shadowsocks, V2Ray, Clash, Tailscale)
- AI tools (Cursor, Claude, ChatGPT, Ollama)
- Browser history and cookies
- Time Machine data (during active backup)
- `com.apple.*` LaunchAgents/LaunchDaemons
- iCloud-synced `Mobile Documents`
## Implementation Details
All deletion routes through `lib/core/file_ops.sh`:
- `validate_path_for_deletion()` - Empty, relative, traversal checks
- `should_protect_path()` - Prefix and pattern matching
- `safe_remove()`, `safe_find_delete()`, `safe_sudo_remove()` - Guarded operations
See [`journal/2026-03-11-safe-remove-design.md`](journal/2026-03-11-safe-remove-design.md) for design rationale.
## Protected Directories and Categories
Mole has explicit protected-path and protected-category logic in addition to root-path blocking.
Protected or conservatively handled categories include:
- system components such as Control Center, System Settings, TCC, Spotlight, Finder, and Dock-related state
- keychains, password-manager data, tokens, credentials, and similar sensitive material
- VPN and proxy tools such as Shadowsocks, V2Ray, Clash, and Tailscale
- AI tools in generic protected-data logic, including Cursor, Claude, ChatGPT, and Ollama
- `~/Library/Messages/Attachments`
- browser history and cookies
- Time Machine data while backup state is active or ambiguous
- `com.apple.*` LaunchAgents and LaunchDaemons
- iCloud-synced `Mobile Documents` data
Project purge also uses conservative heuristics:
- purge targets must be inside configured project boundaries
- direct-child artifact cleanup is only allowed in single-project mode
- recently modified artifacts are treated as recent for 7 days
- nested artifacts are filtered to avoid parent-child over-deletion
- protected vendor/build-output heuristics block ambiguous directories
Developer cleanup also preserves high-value state. Examples intentionally left alone include:
- `~/.cargo/bin`
- `~/.rustup`
- `~/.mix/archives`
- `~/.stack/programs`
## Symlink and Path Traversal Handling
Symlink behavior is intentionally conservative.
- path validation checks symlink targets before deletion
- symlinks pointing at protected system targets are rejected
- `safe_sudo_remove()` refuses to sudo-delete symlinks
- `safe_find_delete()` and `safe_sudo_find_delete()` refuse to scan symlinked base directories
- installer discovery avoids treating symlinked installer files as deletion candidates
- analyzer scanning skips following symlinks to unexpected targets
Path traversal handling is also explicit:
- non-absolute paths are rejected for destructive helpers
- `..` is rejected when it appears as a path component
- legitimate names containing `..` inside a single path element remain allowed to avoid false positives for real application data
## Privilege Escalation and Sudo Boundaries
Mole uses sudo for a subset of system-maintenance paths, but elevated behavior is still bounded by validation and protected-path rules.
Key properties:
- sudo access is explicitly requested instead of assumed
- non-interactive preview remains conservative when sudo is unavailable
- protected roots remain blocked even when sudo is available
- sudo deletion uses the same path validation gate as non-sudo deletion
- sudo cleanup skips or reports denied operations instead of widening scope
- authentication, SIP/MDM, and read-only filesystem failures are classified separately in file-operation results
When sudo is denied or unavailable, Mole prefers skipping privileged cleanup to forcing execution through unsafe fallback behavior.
## Sensitive Data Exclusions
Mole is not intended to aggressively delete high-value user data.
Examples of conservative handling include:
- sensitive app families are excluded from generic orphan cleanup
- orphaned app data waits for inactivity windows before cleanup
- Claude VM orphan cleanup uses a separate stricter rule
- uninstall file lists are decoded and revalidated before removal
- reverse-DNS bundle ID validation is required before LaunchAgent and LaunchDaemon pattern matching
Installed-app detection is broader than a single `/Applications` scan and includes:
- `/Applications`
- `/System/Applications`
- `~/Applications`
- Homebrew Caskroom locations
- Setapp application paths
This reduces the risk of incorrectly classifying active software as orphaned data.
## Dry-Run, Confirmation, and Audit Logging
Mole exposes multiple safety controls before and during destructive actions:
- `--dry-run` previews are available for major destructive commands
- interactive high-risk flows require explicit confirmation before deletion
- purge marks recent projects conservatively and leaves them unselected by default
- analyzer delete uses Finder Trash rather than direct permanent removal
- operation logs are written to `~/.config/mole/operations.log` unless disabled with `MO_NO_OPLOG=1`
- timeouts bound external commands so stalled discovery or uninstall operations do not silently hang the entire flow
Relevant timeout behavior includes:
- orphan and Spotlight checks: 2s
- LaunchServices rebuild during uninstall: bounded 10s and 15s steps
- Homebrew uninstall cask flow: 300s by default, extended for large apps when needed
- project scans and sizing operations: bounded to avoid whole-home stalls
## Release Integrity and Continuous Security Signals
Mole treats release trust as part of its security posture, not just a packaging detail.
Repository-level signals include:
- weekly Dependabot updates for Go modules and GitHub Actions
- CI checks for unsafe `rm -rf` usage patterns and core protection behavior
- targeted tests for path validation, purge boundaries, symlink behavior, dry-run flows, and destructive helpers
- CodeQL scanning for Go and GitHub Actions workflows
- curated changelog-driven release notes with a dedicated `Safety-related changes` section
- published SHA-256 checksums for release assets
- GitHub artifact attestations for release assets
These controls do not eliminate all supply-chain risk, but they make release changes easier to review and verify.
## Testing Coverage
There is no single `tests/security.bats` file. Instead, security-relevant behavior is covered by focused suites, including:
- `tests/core_safe_functions.bats`
- `tests/clean_core.bats`
- `tests/clean_user_core.bats`
- `tests/clean_dev_caches.bats`
- `tests/clean_system_maintenance.bats`
- `tests/clean_apps.bats`
- `tests/purge.bats`
- `tests/installer.bats`
- `tests/optimize.bats`
**System Memory Reports** computation uses bulk `find -exec stat` to avoid bash loop child-process limits on corrupted systems.
`bin/clean.sh` dry-run export temp files rely on tracked temp lifecycle (`create_temp_file()` + trap cleanup) to avoid orphan temp artifacts.
Background spinner logic interacts directly with `/dev/tty` and guarantees robust termination signals handling via trap mechanisms.
Key coverage areas include:
Latest local verification for this release branch:
- path validation rejects empty, relative, traversal, and system paths
- symlinked directories are rejected for destructive scans
- purge protects shallow or ambiguous paths and filters nested artifacts
- dry-run flows preview actions without applying them
- confirmation flows exist for high-risk interactive operations
- `bats tests/clean_core.bats` passed (12/12)
- `bats tests/clean_user_core.bats` passed (13/13)
- `bats tests/clean_dev_caches.bats` passed (8/8)
- `bats tests/clean_system_maintenance.bats` passed (40/40)
- `bats tests/purge.bats tests/core_safe_functions.bats` passed (67/67)
## Known Limitations and Future Work
Run tests:
- Cleanup is destructive. Most cleanup and uninstall flows do not provide undo.
- `mo analyze` delete is safer because it uses Trash, but other cleanup flows are permanent once confirmed.
- Generic orphan data waits 30 days before cleanup; this is conservative but heuristic.
- Claude VM orphan cleanup waits 7 days before cleanup; this is also heuristic.
- Time Machine safety windows are hour-based and intentionally conservative.
- Localized app names may still be missed in some heuristic paths, though bundle IDs are preferred where available.
- Users who want immediate removal of app data should use explicit uninstall flows rather than waiting for orphan cleanup.
- Release signing and provenance signals are improving, but downstream package-manager trust also depends on external distribution infrastructure.
- Planned follow-up work includes stronger destructive-command threat modeling, more regression coverage for high-risk paths, and continued hardening of release integrity and disclosure workflow.
```bash
bats tests/ # all
bats tests/core_safe_functions.bats tests/purge.bats # safety-focused suites
```
CI runs shellcheck and go vet on every push.
## Dependencies
System binaries we use are all SIP protected: `plutil` (plist validation), `tmutil` (Time Machine), `dscacheutil` (cache rebuild), `diskutil` (volume info).
Go deps: bubbletea v0.23+, lipgloss v0.6+, gopsutil v3.22+, xxhash v2.2+. All MIT/BSD licensed. Versions are pinned, no CVEs. Binaries built via GitHub Actions.
## Limitations
System cache cleanup needs sudo; the first time you run it you'll get a password prompt. Orphan files wait 30 days before cleanup — use `mo uninstall` to delete app data manually if you're in a hurry. There is no undo: gone is gone, so use dry-run first. Detection only recognizes English names; localized app names might be missed, but it falls back to bundle ID.
Won't touch: documents, media files, password managers, keychains, configs under `/etc`, browser history/cookies, git repos.
For reporting procedures and supported versions, see [SECURITY.md](SECURITY.md).

View File

@@ -91,6 +91,31 @@ func TestScanPathConcurrentBasic(t *testing.T) {
}
}
// TestPerformScanForJSONCountsTopLevelFiles verifies that the JSON scan
// counts files at every depth under the root (one top-level file plus one
// nested file must yield TotalFiles == 2).
func TestPerformScanForJSONCountsTopLevelFiles(t *testing.T) {
	root := t.TempDir()

	if err := os.WriteFile(filepath.Join(root, "root.txt"), []byte("root-data"), 0o644); err != nil {
		t.Fatalf("write root file: %v", err)
	}

	nestedDir := filepath.Join(root, "nested")
	if err := os.MkdirAll(nestedDir, 0o755); err != nil {
		t.Fatalf("create nested dir: %v", err)
	}
	if err := os.WriteFile(filepath.Join(nestedDir, "nested.txt"), []byte("nested-data"), 0o644); err != nil {
		t.Fatalf("write nested file: %v", err)
	}

	if got := performScanForJSON(root).TotalFiles; got != 2 {
		t.Fatalf("expected 2 files in JSON output, got %d", got)
	}
}
func TestDeletePathWithProgress(t *testing.T) {
skipIfFinderUnavailable(t)

View File

@@ -58,6 +58,8 @@ func performScanForJSON(path string) jsonOutput {
info, err := item.Info()
if err == nil {
size = info.Size()
atomic.AddInt64(&filesScanned, 1)
atomic.AddInt64(&bytesScanned, size)
}
}
@@ -74,6 +76,6 @@ func performScanForJSON(path string) jsonOutput {
Path: path,
Entries: entries,
TotalSize: totalSize,
TotalFiles: filesScanned,
TotalFiles: atomic.LoadInt64(&filesScanned),
}
}

View File

@@ -22,6 +22,23 @@ var skipDiskMounts = map[string]bool{
"/dev": true,
}
var skipDiskFSTypes = map[string]bool{
"afpfs": true,
"autofs": true,
"cifs": true,
"devfs": true,
"fuse": true,
"fuseblk": true,
"fusefs": true,
"macfuse": true,
"nfs": true,
"osxfuse": true,
"procfs": true,
"smbfs": true,
"tmpfs": true,
"webdav": true,
}
func collectDisks() ([]DiskStatus, error) {
partitions, err := disk.Partitions(false)
if err != nil {
@@ -34,17 +51,7 @@ func collectDisks() ([]DiskStatus, error) {
seenVolume = make(map[string]bool)
)
for _, part := range partitions {
if strings.HasPrefix(part.Device, "/dev/loop") {
continue
}
if skipDiskMounts[part.Mountpoint] {
continue
}
if strings.HasPrefix(part.Mountpoint, "/System/Volumes/") {
continue
}
// Skip /private mounts.
if strings.HasPrefix(part.Mountpoint, "/private/") {
if shouldSkipDiskPartition(part) {
continue
}
baseDevice := baseDeviceName(part.Device)
@@ -97,6 +104,34 @@ func collectDisks() ([]DiskStatus, error) {
return disks, nil
}
// shouldSkipDiskPartition reports whether a partition should be excluded
// from the disk status list: loop devices, known pseudo mountpoints,
// system/private volumes, network or FUSE filesystems, and (on macOS)
// devices that are not backed by /dev.
func shouldSkipDiskPartition(part disk.PartitionStat) bool {
	switch {
	case strings.HasPrefix(part.Device, "/dev/loop"):
		return true
	case skipDiskMounts[part.Mountpoint]:
		return true
	case strings.HasPrefix(part.Mountpoint, "/System/Volumes/"),
		strings.HasPrefix(part.Mountpoint, "/private/"):
		return true
	}
	fsType := strings.ToLower(part.Fstype)
	if skipDiskFSTypes[fsType] || strings.Contains(fsType, "fuse") {
		return true
	}
	// On macOS, local disks should come from /dev. This filters sshfs/macFUSE-style
	// mounts that can mirror the root volume and show up as duplicate internal disks.
	return runtime.GOOS == "darwin" && part.Device != "" && !strings.HasPrefix(part.Device, "/dev/")
}
var (
// External disk cache.
lastDiskCacheAt time.Time

View File

@@ -0,0 +1,60 @@
package main
import (
"testing"
"github.com/shirou/gopsutil/v4/disk"
)
// TestShouldSkipDiskPartition covers the partition filter: local /dev-backed
// APFS volumes are kept, while FUSE mirror mounts, SMB shares, and
// /System/Volumes mounts are skipped.
func TestShouldSkipDiskPartition(t *testing.T) {
	type testCase struct {
		name string
		part disk.PartitionStat
		want bool
	}
	cases := []testCase{
		{
			name: "keep local apfs root volume",
			part: disk.PartitionStat{Device: "/dev/disk3s1s1", Mountpoint: "/", Fstype: "apfs"},
			want: false,
		},
		{
			name: "skip macfuse mirror mount",
			part: disk.PartitionStat{Device: "kaku-local:/", Mountpoint: "/Users/tw93/Library/Caches/dev.kaku/sshfs/kaku-local", Fstype: "macfuse"},
			want: true,
		},
		{
			name: "skip smb share",
			part: disk.PartitionStat{Device: "//server/share", Mountpoint: "/Volumes/share", Fstype: "smbfs"},
			want: true,
		},
		{
			name: "skip system volume",
			part: disk.PartitionStat{Device: "/dev/disk3s5", Mountpoint: "/System/Volumes/Data", Fstype: "apfs"},
			want: true,
		},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if got := shouldSkipDiskPartition(tc.part); got != tc.want {
				t.Fatalf("shouldSkipDiskPartition(%+v) = %v, want %v", tc.part, got, tc.want)
			}
		})
	}
}

View File

@@ -365,6 +365,8 @@ func renderDiskCard(disks []DiskStatus, io DiskIOStatus) cardData {
addGroup("EXTR", external)
if len(lines) == 0 {
lines = append(lines, subtleStyle.Render("No disks detected"))
} else if len(disks) == 1 {
lines = append(lines, formatDiskMetaLine(disks[0]))
}
}
readBar := ioBar(io.ReadRate)
@@ -398,8 +400,19 @@ func formatDiskLine(label string, d DiskStatus) string {
}
bar := progressBar(d.UsedPercent)
used := humanBytesShort(d.Used)
total := humanBytesShort(d.Total)
return fmt.Sprintf("%-6s %s %5.1f%%, %s/%s", label, bar, d.UsedPercent, used, total)
free := uint64(0)
if d.Total > d.Used {
free = d.Total - d.Used
}
return fmt.Sprintf("%-6s %s %s used, %s free", label, bar, used, humanBytesShort(free))
}
// formatDiskMetaLine renders the single-disk summary line: total capacity,
// joined with the upper-cased filesystem type when one is known.
func formatDiskMetaLine(d DiskStatus) string {
	meta := humanBytesShort(d.Total)
	if d.Fstype != "" {
		meta = meta + " · " + strings.ToUpper(d.Fstype)
	}
	return fmt.Sprintf("Total %s", meta)
}
func ioBar(rate float64) string {

View File

@@ -752,26 +752,49 @@ func TestFormatDiskLine(t *testing.T) {
name string
label string
disk DiskStatus
wantUsed string
wantFree string
wantNoSubstr string
}{
{
name: "empty label defaults to DISK",
label: "",
disk: DiskStatus{UsedPercent: 50.5, Used: 100 << 30, Total: 200 << 30},
wantUsed: "100G used",
wantFree: "100G free",
wantNoSubstr: "%",
},
{
name: "internal disk",
label: "INTR",
disk: DiskStatus{UsedPercent: 67.2, Used: 336 << 30, Total: 500 << 30},
wantUsed: "336G used",
wantFree: "164G free",
wantNoSubstr: "%",
},
{
name: "external disk",
label: "EXTR1",
disk: DiskStatus{UsedPercent: 85.0, Used: 850 << 30, Total: 1000 << 30},
wantUsed: "850G used",
wantFree: "150G free",
wantNoSubstr: "%",
},
{
name: "low usage",
label: "INTR",
disk: DiskStatus{UsedPercent: 15.3, Used: 15 << 30, Total: 100 << 30},
wantUsed: "15G used",
wantFree: "85G free",
wantNoSubstr: "%",
},
{
name: "used exceeds total clamps free to zero",
label: "INTR",
disk: DiskStatus{UsedPercent: 110.0, Used: 110 << 30, Total: 100 << 30},
wantUsed: "110G used",
wantFree: "0 free",
wantNoSubstr: "%",
},
}
@@ -789,10 +812,54 @@ func TestFormatDiskLine(t *testing.T) {
if !contains(got, expectedLabel) {
t.Errorf("formatDiskLine(%q, ...) = %q, should contain label %q", tt.label, got, expectedLabel)
}
if !contains(got, tt.wantUsed) {
t.Errorf("formatDiskLine(%q, ...) = %q, should contain used value %q", tt.label, got, tt.wantUsed)
}
if !contains(got, tt.wantFree) {
t.Errorf("formatDiskLine(%q, ...) = %q, should contain free value %q", tt.label, got, tt.wantFree)
}
if tt.wantNoSubstr != "" && contains(got, tt.wantNoSubstr) {
t.Errorf("formatDiskLine(%q, ...) = %q, should not contain %q", tt.label, got, tt.wantNoSubstr)
}
})
}
}
func TestRenderDiskCardAddsMetaLineForSingleDisk(t *testing.T) {
card := renderDiskCard([]DiskStatus{{
UsedPercent: 28.4,
Used: 263 << 30,
Total: 926 << 30,
Fstype: "apfs",
}}, DiskIOStatus{ReadRate: 0, WriteRate: 0.1})
if len(card.lines) != 4 {
t.Fatalf("renderDiskCard() single disk expected 4 lines, got %d", len(card.lines))
}
meta := stripANSI(card.lines[1])
if meta != "Total 926G · APFS" {
t.Fatalf("renderDiskCard() single disk meta line = %q, want %q", meta, "Total 926G · APFS")
}
}
func TestRenderDiskCardDoesNotAddMetaLineForMultipleDisks(t *testing.T) {
card := renderDiskCard([]DiskStatus{
{UsedPercent: 28.4, Used: 263 << 30, Total: 926 << 30, Fstype: "apfs"},
{UsedPercent: 50.0, Used: 500 << 30, Total: 1000 << 30, Fstype: "apfs"},
}, DiskIOStatus{})
if len(card.lines) != 4 {
t.Fatalf("renderDiskCard() multiple disks expected 4 lines, got %d", len(card.lines))
}
for _, line := range card.lines {
if stripANSI(line) == "Total 926G · APFS" || stripANSI(line) == "Total 1000G · APFS" {
t.Fatalf("renderDiskCard() multiple disks should not add meta line, got %q", line)
}
}
}
func TestGetScoreStyle(t *testing.T) {
tests := []struct {
name string

6
go.mod
View File

@@ -1,15 +1,13 @@
module github.com/tw93/mole
go 1.24.2
toolchain go1.24.6
go 1.25.0
require (
github.com/cespare/xxhash/v2 v2.3.0
github.com/charmbracelet/bubbletea v1.3.10
github.com/charmbracelet/lipgloss v1.1.0
github.com/shirou/gopsutil/v4 v4.26.2
golang.org/x/sync v0.19.0
golang.org/x/sync v0.20.0
)
require (

4
go.sum
View File

@@ -67,8 +67,8 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@@ -2,7 +2,8 @@
# Application Data Cleanup Module
set -euo pipefail
readonly ORPHAN_AGE_THRESHOLD=${ORPHAN_AGE_THRESHOLD:-${MOLE_ORPHAN_AGE_DAYS:-60}}
readonly ORPHAN_AGE_THRESHOLD=${ORPHAN_AGE_THRESHOLD:-${MOLE_ORPHAN_AGE_DAYS:-30}}
readonly CLAUDE_VM_ORPHAN_AGE_THRESHOLD=${MOLE_CLAUDE_VM_ORPHAN_AGE_DAYS:-7}
# Args: $1=target_dir, $2=label
clean_ds_store_tree() {
local target="$1"
@@ -59,7 +60,7 @@ clean_ds_store_tree() {
note_activity
fi
}
# Orphaned app data (60+ days inactive). Env: ORPHAN_AGE_THRESHOLD, DRY_RUN
# Orphaned app data (30+ days inactive). Env: ORPHAN_AGE_THRESHOLD, DRY_RUN
# Usage: scan_installed_apps "output_file"
scan_installed_apps() {
local installed_bundles="$1"
@@ -201,13 +202,13 @@ is_bundle_orphaned() {
;;
esac
# 5. Fast path: 60-day modification check (stat call, fast)
# 5. Fast path: 30-day modification check (stat call, fast)
if [[ -e "$directory_path" ]]; then
local last_modified_epoch=$(get_file_mtime "$directory_path")
local current_epoch
current_epoch=$(get_epoch_seconds)
local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400))
if [[ $days_since_modified -lt ${ORPHAN_AGE_THRESHOLD:-60} ]]; then
if [[ $days_since_modified -lt ${ORPHAN_AGE_THRESHOLD:-30} ]]; then
return 1
fi
fi
@@ -261,6 +262,17 @@ is_claude_vm_bundle_orphaned() {
return 1
fi
if [[ -e "$vm_bundle_path" ]]; then
local last_modified_epoch
last_modified_epoch=$(get_file_mtime "$vm_bundle_path")
local current_epoch
current_epoch=$(get_epoch_seconds)
local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400))
if [[ $days_since_modified -lt ${CLAUDE_VM_ORPHAN_AGE_THRESHOLD:-7} ]]; then
return 1
fi
fi
if [[ -z "$ORPHAN_MDFIND_CACHE_FILE" ]]; then
ORPHAN_MDFIND_CACHE_FILE=$(mktemp "${TMPDIR:-/tmp}/mole_mdfind_cache.XXXXXX")
register_temp_file "$ORPHAN_MDFIND_CACHE_FILE"

View File

@@ -212,7 +212,9 @@ clean_project_caches() {
[[ -d "$cache_dir/cache" ]] && safe_clean "$cache_dir/cache"/* "Next.js build cache" || true
;;
"__pycache__")
[[ -d "$cache_dir" ]] && safe_clean "$cache_dir"/* "Python bytecode cache" || true
# Remove the cache directory itself so we avoid expanding every
# .pyc file into a separate safe_clean target.
[[ -d "$cache_dir" ]] && safe_clean "$cache_dir" "Python bytecode cache" || true
;;
".dart_tool")
if [[ -d "$cache_dir" ]]; then

View File

@@ -198,13 +198,18 @@ clean_dev_docker() {
fi
stop_section_spinner
if [[ "$docker_running" == "true" ]]; then
clean_tool_cache "Docker build cache" docker builder prune -af
# Remove unused images, stopped containers, unused networks, and
# anonymous volumes in one pass. This maps better to the large
# reclaimable "docker system df" buckets users typically see.
clean_tool_cache "Docker unused data" docker system prune -af --volumes
else
echo -e " ${GRAY}${ICON_WARNING}${NC} Docker unused data · skipped (daemon not running)"
note_activity
debug_log "Docker daemon not running, skipping Docker cache cleanup"
fi
else
note_activity
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Docker build cache · would clean"
echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Docker unused data · would clean"
fi
fi
safe_clean ~/.docker/buildx/cache/* "Docker BuildX cache"
@@ -359,6 +364,7 @@ clean_xcode_device_support() {
version_dirs+=("$entry")
done < <(command find "$ds_dir" -mindepth 1 -maxdepth 1 -print0 2> /dev/null)
if [[ ${#version_dirs[@]} -gt 0 ]]; then
# Sort by modification time (most recent first)
local -a sorted_dirs=()
while IFS= read -r line; do
@@ -403,6 +409,7 @@ clean_xcode_device_support() {
fi
fi
fi
fi
# Clean caches/logs inside kept versions
safe_clean "$ds_dir"/*/Symbols/System/Library/Caches/* "$display_name symbol cache"

View File

@@ -1310,6 +1310,14 @@ clean_project_artifacts() {
if [[ -t 1 ]]; then
stop_inline_spinner
fi
# Exit early if no artifacts were found to avoid unbound variable errors
# when expanding empty arrays with set -u active.
if [[ ${#menu_options[@]} -eq 0 ]]; then
echo ""
echo -e "${GRAY}No artifacts found to purge${NC}"
printf '\n'
return 0
fi
# Set global vars for selector
export PURGE_CATEGORY_SIZES=$(
IFS=,

View File

@@ -752,6 +752,23 @@ clean_virtualization_tools() {
# Estimate item size for Application Support cleanup.
# Files use stat; directories use du with timeout to avoid long blocking scans.
app_support_entry_count_capped() {
local dir="$1"
local maxdepth="${2:-1}"
local cap="${3:-101}"
local count=0
while IFS= read -r -d '' _entry; do
count=$((count + 1))
if ((count >= cap)); then
break
fi
done < <(command find "$dir" -mindepth 1 -maxdepth "$maxdepth" -print0 2> /dev/null)
[[ "$count" =~ ^[0-9]+$ ]] || count=0
printf '%s\n' "$count"
}
app_support_item_size_bytes() {
local item="$1"
local timeout_seconds="${2:-0.4}"
@@ -768,7 +785,7 @@ app_support_item_size_bytes() {
# Fast path: if directory has too many items, skip detailed size calculation
# to avoid hanging on deep directories (e.g., node_modules, .git)
local item_count
item_count=$(command find "$item" -maxdepth 2 -print0 2> /dev/null | tr -d '\0' | wc -c)
item_count=$(app_support_entry_count_capped "$item" 2 10001)
if [[ "$item_count" -gt 10000 ]]; then
# Return 1 to signal "too many items, size unknown"
return 1
@@ -859,7 +876,7 @@ clean_application_support_logs() {
if [[ -d "$candidate" ]]; then
# Quick count check - skip if too many items to avoid hanging
local quick_count
quick_count=$(command find "$candidate" -mindepth 1 -maxdepth 1 -printf '1\n' 2> /dev/null | wc -l | tr -d ' ')
quick_count=$(app_support_entry_count_capped "$candidate" 1 101)
if [[ "$quick_count" -gt 100 ]]; then
# Too many items - use bulk removal instead of item-by-item
local app_label="$app_name"
@@ -935,7 +952,7 @@ clean_application_support_logs() {
if [[ -d "$candidate" ]]; then
# Quick count check - skip if too many items
local quick_count
quick_count=$(command find "$candidate" -mindepth 1 -maxdepth 1 -printf '1\n' 2> /dev/null | wc -l | tr -d ' ')
quick_count=$(app_support_entry_count_capped "$candidate" 1 101)
if [[ "$quick_count" -gt 100 ]]; then
local container_label="$container"
if [[ ${#container_label} -gt 24 ]]; then

View File

@@ -68,7 +68,7 @@ get_lsregister_path() {
# Global Configuration Constants
# ============================================================================
readonly MOLE_TEMP_FILE_AGE_DAYS=7 # Temp file retention (days)
readonly MOLE_ORPHAN_AGE_DAYS=60 # Orphaned data retention (days)
readonly MOLE_ORPHAN_AGE_DAYS=30 # Orphaned data retention (days)
readonly MOLE_MAX_PARALLEL_JOBS=15 # Parallel job limit
readonly MOLE_MAIL_DOWNLOADS_MIN_KB=5120 # Mail attachment size threshold
readonly MOLE_MAIL_AGE_DAYS=30 # Mail attachment retention (days)
@@ -191,11 +191,17 @@ is_sip_enabled() {
# Detect CPU architecture
# Returns: "Apple Silicon" or "Intel"
detect_architecture() {
if [[ "$(uname -m)" == "arm64" ]]; then
echo "Apple Silicon"
else
echo "Intel"
if [[ -n "${MOLE_ARCH_CACHE:-}" ]]; then
echo "$MOLE_ARCH_CACHE"
return 0
fi
if [[ "$(uname -m)" == "arm64" ]]; then
export MOLE_ARCH_CACHE="Apple Silicon"
else
export MOLE_ARCH_CACHE="Intel"
fi
echo "$MOLE_ARCH_CACHE"
}
# Get free disk space on root volume
@@ -212,6 +218,11 @@ get_free_space() {
# Get Darwin kernel major version (e.g., 24 for 24.2.0)
# Returns 999 on failure to adopt conservative behavior (assume modern system)
get_darwin_major() {
if [[ -n "${MOLE_DARWIN_MAJOR_CACHE:-}" ]]; then
echo "$MOLE_DARWIN_MAJOR_CACHE"
return 0
fi
local kernel
kernel=$(uname -r 2> /dev/null || true)
local major="${kernel%%.*}"
@@ -219,6 +230,7 @@ get_darwin_major() {
# Return high number to skip potentially dangerous operations on unknown systems
major=999
fi
export MOLE_DARWIN_MAJOR_CACHE="$major"
echo "$major"
}
@@ -233,8 +245,10 @@ is_darwin_ge() {
# Get optimal parallel jobs for operation type (scan|io|compute|default)
get_optimal_parallel_jobs() {
local operation_type="${1:-default}"
local cpu_cores
cpu_cores=$(sysctl -n hw.ncpu 2> /dev/null || echo 4)
if [[ -z "${MOLE_CPU_CORES_CACHE:-}" ]]; then
export MOLE_CPU_CORES_CACHE=$(sysctl -n hw.ncpu 2> /dev/null || echo 4)
fi
local cpu_cores="$MOLE_CPU_CORES_CACHE"
case "$operation_type" in
scan | io)
echo $((cpu_cores * 2))
@@ -318,7 +332,7 @@ get_user_home() {
fi
if [[ -z "$home" ]]; then
home=$(eval echo "~$user" 2> /dev/null || true)
home=$(id -P "$user" 2> /dev/null | cut -d: -f9 || true)
fi
if [[ "$home" == "~"* ]]; then
@@ -586,7 +600,7 @@ mktemp_file() {
# Cleanup all tracked temp files and directories
cleanup_temp_files() {
stop_inline_spinner 2> /dev/null || true
stop_inline_spinner || true
local file
if [[ ${#MOLE_TEMP_FILES[@]} -gt 0 ]]; then
for file in "${MOLE_TEMP_FILES[@]}"; do
@@ -641,7 +655,7 @@ note_activity() {
# Usage: start_section_spinner "message"
start_section_spinner() {
local message="${1:-Scanning...}"
stop_inline_spinner 2> /dev/null || true
stop_inline_spinner || true
if [[ -t 1 ]]; then
MOLE_SPINNER_PREFIX=" " start_inline_spinner "$message"
fi
@@ -651,7 +665,7 @@ start_section_spinner() {
# Usage: stop_section_spinner
stop_section_spinner() {
# Always try to stop spinner (function handles empty PID gracefully)
stop_inline_spinner 2> /dev/null || true
stop_inline_spinner || true
# Always clear line to handle edge cases where spinner output remains
# (e.g., spinner was stopped elsewhere but line not cleared)
if [[ -t 1 ]]; then
@@ -732,18 +746,30 @@ update_progress_if_needed() {
# Usage: is_ansi_supported
# Returns: 0 if supported, 1 if not
is_ansi_supported() {
if [[ -n "${MOLE_ANSI_SUPPORTED_CACHE:-}" ]]; then
return "$MOLE_ANSI_SUPPORTED_CACHE"
fi
# Check if running in interactive terminal
[[ -t 1 ]] || return 1
if ! [[ -t 1 ]]; then
export MOLE_ANSI_SUPPORTED_CACHE=1
return 1
fi
# Check TERM variable
[[ -n "${TERM:-}" ]] || return 1
if [[ -z "${TERM:-}" ]]; then
export MOLE_ANSI_SUPPORTED_CACHE=1
return 1
fi
# Check for known ANSI-compatible terminals
case "$TERM" in
xterm* | vt100 | vt220 | screen* | tmux* | ansi | linux | rxvt* | konsole*)
export MOLE_ANSI_SUPPORTED_CACHE=0
return 0
;;
dumb | unknown)
export MOLE_ANSI_SUPPORTED_CACHE=1
return 1
;;
*)
@@ -751,8 +777,12 @@ is_ansi_supported() {
if command -v tput > /dev/null 2>&1; then
# Test if terminal supports colors (good proxy for ANSI support)
local colors=$(tput colors 2> /dev/null || echo "0")
[[ "$colors" -ge 8 ]] && return 0
if [[ "$colors" -ge 8 ]]; then
export MOLE_ANSI_SUPPORTED_CACHE=0
return 0
fi
fi
export MOLE_ANSI_SUPPORTED_CACHE=1
return 1
;;
esac

View File

@@ -92,7 +92,10 @@ validate_path_for_deletion() {
# Validate resolved target against protected paths
if [[ -n "$resolved_target" ]]; then
case "$resolved_target" in
/System/* | /usr/bin/* | /usr/lib/* | /bin/* | /sbin/* | /private/etc/*)
/ | /System | /System/* | /bin | /bin/* | /sbin | /sbin/* | \
/usr | /usr/bin | /usr/bin/* | /usr/lib | /usr/lib/* | \
/etc | /etc/* | /private/etc | /private/etc/* | \
/Library/Extensions | /Library/Extensions/*)
log_error "Symlink points to protected system path: $path -> $resolved_target"
return 1
;;

View File

@@ -42,9 +42,9 @@ if [[ -z "${MO_TIMEOUT_INITIALIZED:-}" ]]; then
fi
done
if [[ -z "$MO_TIMEOUT_BIN" ]] && command -v perl > /dev/null 2>&1; then
if command -v perl > /dev/null 2>&1; then
MO_TIMEOUT_PERL_BIN="$(command -v perl)"
if [[ "${MO_DEBUG:-0}" == "1" ]]; then
if [[ -z "$MO_TIMEOUT_BIN" ]] && [[ "${MO_DEBUG:-0}" == "1" ]]; then
echo "[TIMEOUT] Using perl fallback: $MO_TIMEOUT_PERL_BIN" >&2
fi
fi

2
mole
View File

@@ -13,7 +13,7 @@ source "$SCRIPT_DIR/lib/core/commands.sh"
trap cleanup_temp_files EXIT INT TERM
# Version and update helpers
VERSION="1.29.0"
VERSION="1.30.0"
MOLE_TAGLINE="Deep clean and optimize your Mac."
is_touchid_configured() {

View File

@@ -60,7 +60,7 @@ EOF
}
@test "is_bundle_orphaned returns true for old uninstalled bundle" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" ORPHAN_AGE_THRESHOLD=60 bash --noprofile --norc <<'EOF'
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" ORPHAN_AGE_THRESHOLD=30 bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
@@ -116,12 +116,12 @@ safe_clean() {
# Create required Library structure for permission check
mkdir -p "$HOME/Library/Caches"
# Create test structure with spaces in path (old modification time: 61 days ago)
# Create test structure with spaces in path (old modification time: 31 days ago)
mkdir -p "$HOME/Library/Saved Application State/com.test.orphan.savedState"
# Create a file with some content so directory size > 0
echo "test data" > "$HOME/Library/Saved Application State/com.test.orphan.savedState/data.plist"
# Set modification time to 61 days ago (older than 60-day threshold)
touch -t "$(date -v-61d +%Y%m%d%H%M.%S 2>/dev/null || date -d '61 days ago' +%Y%m%d%H%M.%S)" "$HOME/Library/Saved Application State/com.test.orphan.savedState" 2>/dev/null || true
# Set modification time to 31 days ago (older than 30-day threshold)
touch -t "$(date -v-31d +%Y%m%d%H%M.%S 2>/dev/null || date -d '31 days ago' +%Y%m%d%H%M.%S)" "$HOME/Library/Saved Application State/com.test.orphan.savedState" 2>/dev/null || true
# Disable spinner for test
start_section_spinner() { :; }
@@ -165,15 +165,15 @@ run_with_timeout() { shift; "$@"; }
# Create required Library structure for permission check
mkdir -p "$HOME/Library/Caches"
# Create test files (old modification time: 61 days ago)
# Create test files (old modification time: 31 days ago)
mkdir -p "$HOME/Library/Caches/com.test.orphan1"
mkdir -p "$HOME/Library/Caches/com.test.orphan2"
# Create files with content so size > 0
echo "data1" > "$HOME/Library/Caches/com.test.orphan1/data"
echo "data2" > "$HOME/Library/Caches/com.test.orphan2/data"
# Set modification time to 61 days ago
touch -t "$(date -v-61d +%Y%m%d%H%M.%S 2>/dev/null || date -d '61 days ago' +%Y%m%d%H%M.%S)" "$HOME/Library/Caches/com.test.orphan1" 2>/dev/null || true
touch -t "$(date -v-61d +%Y%m%d%H%M.%S 2>/dev/null || date -d '61 days ago' +%Y%m%d%H%M.%S)" "$HOME/Library/Caches/com.test.orphan2" 2>/dev/null || true
# Set modification time to 31 days ago
touch -t "$(date -v-31d +%Y%m%d%H%M.%S 2>/dev/null || date -d '31 days ago' +%Y%m%d%H%M.%S)" "$HOME/Library/Caches/com.test.orphan1" 2>/dev/null || true
touch -t "$(date -v-31d +%Y%m%d%H%M.%S 2>/dev/null || date -d '31 days ago' +%Y%m%d%H%M.%S)" "$HOME/Library/Caches/com.test.orphan2" 2>/dev/null || true
# Mock safe_clean to fail on first item, succeed on second
safe_clean() {
@@ -229,6 +229,8 @@ pgrep() {
}
run_with_timeout() { shift; "$@"; }
get_file_mtime() { echo 0; }
get_path_size_kb() { echo 4; }
safe_clean() {
echo "$2"
@@ -254,6 +256,51 @@ EOF
[[ "$output" == *"PASS: Claude VM removed"* ]]
}
# Regression test: a Claude VM bundle modified "now" must survive orphan
# cleanup even when the installed-app scan and mdfind both miss Claude,
# because recent bundles fall inside the 7-day VM age window.
@test "clean_orphaned_app_data keeps recent Claude VM bundle when Claude lookup misses" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/apps.sh"
# Pretend no apps are installed so the Claude install check misses.
scan_installed_apps() {
: > "$1"
}
# Spotlight lookup succeeds but reports nothing.
mdfind() {
return 0
}
# No Claude process is running.
pgrep() {
return 1
}
run_with_timeout() { shift; "$@"; }
# Report the bundle as modified right now (inside the 7-day threshold).
get_file_mtime() { date +%s; }
# Any deletion attempt is a failure for this test.
safe_clean() {
echo "UNEXPECTED:$2"
return 1
}
start_section_spinner() { :; }
stop_section_spinner() { :; }
mkdir -p "$HOME/Library/Caches"
mkdir -p "$HOME/Library/Application Support/Claude/vm_bundles/claudevm.bundle"
echo "vm data" > "$HOME/Library/Application Support/Claude/vm_bundles/claudevm.bundle/rootfs.img"
clean_orphaned_app_data
# The bundle must still exist after cleanup.
if [[ -d "$HOME/Library/Application Support/Claude/vm_bundles/claudevm.bundle" ]]; then
echo "PASS: Recent Claude VM kept"
fi
EOF
[ "$status" -eq 0 ]
[[ "$output" != *"UNEXPECTED:Orphaned Claude workspace VM"* ]]
[[ "$output" == *"PASS: Recent Claude VM kept"* ]]
}
@test "clean_orphaned_app_data keeps Claude VM bundle when Claude is installed" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail

View File

@@ -160,24 +160,50 @@ EOF
}
@test "clean_dev_docker skips when daemon not running" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" MO_DEBUG=1 DRY_RUN=false bash --noprofile --norc <<'EOF'
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=false bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/dev.sh"
start_section_spinner() { :; }
stop_section_spinner() { :; }
run_with_timeout() { return 1; }
clean_tool_cache() { echo "$1"; }
safe_clean() { echo "$2"; }
debug_log() { echo "$*"; }
debug_log() { :; }
docker() { return 1; }
export -f docker
clean_dev_docker
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Docker daemon not running"* ]]
[[ "$output" != *"Docker build cache"* ]]
[[ "$output" == *"Docker unused data · skipped (daemon not running)"* ]]
[[ "$output" == *"Docker BuildX cache"* ]]
[[ "$output" != *"Docker unused data|Docker unused data docker system prune -af --volumes"* ]]
}
# When the stubbed `docker` succeeds for `docker info` (daemon "running"),
# clean_dev_docker is expected to route the prune through clean_tool_cache;
# the stub echoes "$1|$*" so the exact prune command line can be asserted.
@test "clean_dev_docker prunes unused docker data when daemon is running" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=false bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/dev.sh"
start_section_spinner() { :; }
stop_section_spinner() { :; }
run_with_timeout() { shift; "$@"; }
clean_tool_cache() { echo "$1|$*"; }
safe_clean() { :; }
note_activity() { :; }
debug_log() { :; }
docker() {
if [[ "$1" == "info" ]]; then
return 0
fi
return 0
}
export -f docker
clean_dev_docker
EOF
# Asserts label and full command: `docker system prune -af --volumes`.
[ "$status" -eq 0 ]
[[ "$output" == *"Docker unused data|Docker unused data docker system prune -af --volumes"* ]]
}
@test "clean_developer_tools runs key stages" {

View File

@@ -138,6 +138,25 @@ setup() {
rm -rf "$HOME/Projects"
}
# A __pycache__ directory inside a detected Python project (pyproject.toml
# present) should be handed to safe_clean once, as the directory itself —
# never as individual .pyc files. The safe_clean stub echoes "label|path".
@test "clean_project_caches removes pycache directories as single targets" {
mkdir -p "$HOME/Projects/python-app/__pycache__"
touch "$HOME/Projects/python-app/pyproject.toml"
touch "$HOME/Projects/python-app/__pycache__/module.pyc"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/caches.sh"
safe_clean() { echo "$2|$1"; }
clean_project_caches
EOF
# Directory-level target present, per-file .pyc target absent.
[ "$status" -eq 0 ]
[[ "$output" == *"Python bytecode cache|$HOME/Projects/python-app/__pycache__"* ]]
[[ "$output" != *"module.pyc"* ]]
rm -rf "$HOME/Projects"
}
@test "clean_project_caches scans configured roots instead of HOME" {
mkdir -p "$HOME/.config/mole"
mkdir -p "$HOME/CustomProjects/app/.next/cache"
@@ -177,7 +196,8 @@ EOF
[ "$status" -eq 0 ]
[[ "$output" == *"Next.js build cache"* ]]
grep -q -- "-P $HOME/CustomProjects " "$find_log"
! grep -q -- "-P $HOME " "$find_log"
run grep -q -- "-P $HOME " "$find_log"
[ "$status" -eq 1 ]
rm -rf "$HOME/CustomProjects" "$HOME/.config/mole" "$fake_bin" "$find_log"
}
@@ -251,6 +271,7 @@ set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/caches.sh"
MO_TIMEOUT_BIN=""
MO_TIMEOUT_PERL_BIN="${MO_TIMEOUT_PERL_BIN:-$(command -v perl)}"
export MOLE_PROJECT_CACHE_DISCOVERY_TIMEOUT=0.5
export MOLE_PROJECT_CACHE_SCAN_TIMEOUT=0.5
SECONDS=0

View File

@@ -220,6 +220,63 @@ EOF
[[ "$total_kb" -ge 2 ]]
}
# With 101 log files (presumably just over the per-entry cap — confirm against
# lib/clean/user.sh), the scan should switch to "bulk clean" mode: the spinner
# label advertises it, and the stubbed safe_remove must never fire under
# DRY_RUN=true. Uses an isolated HOME so the real Application Support is safe.
@test "clean_application_support_logs uses bulk clean for large Application Support directories" {
local support_home="$HOME/support-appsupport-bulk"
run env HOME="$support_home" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=true bash --noprofile --norc <<'EOF'
set -euo pipefail
mkdir -p "$HOME"
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
start_section_spinner() { echo "SPIN:$1"; }
stop_section_spinner() { :; }
note_activity() { :; }
safe_remove() { echo "REMOVE:$1"; }
update_progress_if_needed() { return 1; }
should_protect_data() { return 1; }
is_critical_system_component() { return 1; }
bytes_to_human() { echo "0B"; }
files_cleaned=0
total_size_cleaned=0
total_items=0
mkdir -p "$HOME/Library/Application Support/adspower_global/logs"
for i in $(seq 1 101); do
touch "$HOME/Library/Application Support/adspower_global/logs/file-$i.log"
done
clean_application_support_logs
rm -rf "$HOME/Library/Application Support"
EOF
# Bulk-clean path taken; no per-file REMOVE: lines may appear in dry-run.
[ "$status" -eq 0 ]
[[ "$output" == *"SPIN:Scanning Application Support... 1/1 [adspower_global, bulk clean]"* ]]
[[ "$output" == *"Application Support logs/caches"* ]]
[[ "$output" != *"151250 items"* ]]
[[ "$output" != *"REMOVE:"* ]]
}
# The capped counter must stop at the cap (101) even though 150 entries exist,
# and must do so without tripping `set -o pipefail` — regression guard for a
# head/find SIGPIPE-style failure mode under strict shell options.
@test "app_support_entry_count_capped stops at cap without failing under pipefail" {
local support_home="$HOME/support-appsupport-cap"
run env HOME="$support_home" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
mkdir -p "$HOME"
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/user.sh"
mkdir -p "$HOME/Library/Application Support/adspower_global/logs"
for i in $(seq 1 150); do
touch "$HOME/Library/Application Support/adspower_global/logs/file-$i.log"
done
count=$(app_support_entry_count_capped "$HOME/Library/Application Support/adspower_global/logs" 1 101)
echo "COUNT=$count"
rm -rf "$HOME/Library/Application Support"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"COUNT=101"* ]]
}
@test "clean_group_container_caches keeps protected caches and cleans non-protected caches" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" DRY_RUN=false /bin/bash --noprofile --norc <<'EOF'
set -euo pipefail

View File

@@ -66,6 +66,9 @@ teardown() {
}
@test "validate_path_for_deletion rejects system directories" {
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '/'"
[ "$status" -eq 1 ]
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '/System'"
[ "$status" -eq 1 ]
@@ -86,6 +89,15 @@ teardown() {
[ "$status" -eq 1 ]
}
# Security boundary: a symlink that resolves to /System must be rejected by
# validate_path_for_deletion (status 1) with a "protected system path" message,
# so symlinks cannot be used to bypass the protected-directory checks.
@test "validate_path_for_deletion rejects symlink to protected system path" {
local link_path="$TEST_DIR/system-link"
ln -s "/System" "$link_path"
run bash -c "source '$PROJECT_ROOT/lib/core/common.sh'; validate_path_for_deletion '$link_path' 2>&1"
[ "$status" -eq 1 ]
[[ "$output" == *"protected system path"* ]]
}
@test "safe_remove successfully removes file" {
local test_file="$TEST_DIR/test_file.txt"
echo "test" > "$test_file"
@@ -134,6 +146,22 @@ teardown() {
[ "$status" -eq 1 ]
}
# Security boundary: safe_sudo_remove must refuse to operate on a symlink even
# when sudo itself would succeed (sudo is stubbed to return 0), preventing
# privileged deletion of a symlink's target. Expects status 1 and the refusal
# message on stderr (merged via 2>&1).
@test "safe_sudo_remove refuses symlink paths" {
local target_dir="$TEST_DIR/real"
local link_dir="$TEST_DIR/link"
mkdir -p "$target_dir"
ln -s "$target_dir" "$link_dir"
run bash -c "
source '$PROJECT_ROOT/lib/core/common.sh'
sudo() { return 0; }
export -f sudo
safe_sudo_remove '$link_dir' 2>&1
"
[ "$status" -eq 1 ]
[[ "$output" == *"Refusing to sudo remove symlink"* ]]
}
@test "safe_find_delete rejects symlinked directory" {
local real_dir="$TEST_DIR/real"
local link_dir="$TEST_DIR/link"

View File

@@ -136,6 +136,24 @@ EOF
[[ "$output" != *"NDK versions"* ]]
}
# Regression: under `set -u` (nounset) an empty DeviceSupport directory must
# not abort the inner shell — e.g. via an unset/empty array expansion. The
# trailing "survived" echo only runs if clean_xcode_device_support returned
# without killing the `set -e` shell.
@test "clean_xcode_device_support handles empty directories under nounset" {
local ds_dir="$HOME/EmptyDeviceSupport"
mkdir -p "$ds_dir"
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/dev.sh"
note_activity() { :; }
safe_clean() { :; }
clean_xcode_device_support "$HOME/EmptyDeviceSupport" "iOS DeviceSupport"
echo "survived"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"survived"* ]]
}
@test "clean_xcode_documentation_cache keeps newest DeveloperDocumentation index" {
local doc_root="$HOME/DocumentationCache"
mkdir -p "$doc_root"

View File

@@ -181,3 +181,21 @@ EOF
[ "$status" -eq 1 ]
[[ "$output" == *"Unknown action"* ]]
}
# When get_lsregister_path returns an empty path, opt_launch_services_rebuild
# must report "lsregister not found" and return normally instead of exiting
# the `set -e` shell — proven by the "survived" echo after the call.
@test "opt_launch_services_rebuild handles missing lsregister without exiting" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/optimize/tasks.sh"
get_lsregister_path() {
echo ""
return 0
}
opt_launch_services_rebuild
echo "survived"
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"lsregister not found"* ]]
[[ "$output" == *"survived"* ]]
}

View File

@@ -683,6 +683,25 @@ EOF
[[ "$status" -eq 0 ]] || [[ "$status" -eq 2 ]]
}
# Regression for `set -u`: with a project present but a zero-size artifact
# (get_dir_size_kb stubbed to 0) and stdin closed (</dev/null), the menu path
# must not crash on an empty options array; it should report that there is
# nothing to purge and exit 0.
@test "clean_project_artifacts: handles empty menu options under set -u" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail
source "$PROJECT_ROOT/lib/core/common.sh"
source "$PROJECT_ROOT/lib/clean/project.sh"
mkdir -p "$HOME/www/test-project/node_modules"
touch "$HOME/www/test-project/package.json"
PURGE_SEARCH_PATHS=("$HOME/www")
get_dir_size_kb() { echo 0; }
clean_project_artifacts </dev/null
EOF
[ "$status" -eq 0 ]
[[ "$output" == *"No artifacts found to purge"* ]]
}
@test "clean_project_artifacts: dry-run does not count failed removals" {
run env HOME="$HOME" PROJECT_ROOT="$PROJECT_ROOT" bash --noprofile --norc <<'EOF'
set -euo pipefail

View File

@@ -114,6 +114,7 @@ EOF
set -euo pipefail
source "$PROJECT_ROOT/lib/core/timeout.sh"
MO_TIMEOUT_BIN=""
MO_TIMEOUT_PERL_BIN="${MO_TIMEOUT_PERL_BIN:-$(command -v perl)}"
SECONDS=0
set +e
run_with_timeout 1 "$FAKE_CMD"

View File

@@ -600,7 +600,7 @@ EOF
@test "get_homebrew_latest_version prefers brew outdated verbose target version" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
MOLE_SKIP_MAIN=1 source "$PROJECT_ROOT/mole"
MOLE_TEST_MODE=1 MOLE_SKIP_MAIN=1 source "$PROJECT_ROOT/mole"
brew() {
if [[ "${1:-}" == "outdated" ]]; then
@@ -625,7 +625,7 @@ EOF
@test "get_homebrew_latest_version parses brew info fallback with heading prefix" {
run bash --noprofile --norc <<'EOF'
set -euo pipefail
MOLE_SKIP_MAIN=1 source "$PROJECT_ROOT/mole"
MOLE_TEST_MODE=1 MOLE_SKIP_MAIN=1 source "$PROJECT_ROOT/mole"
brew() {
if [[ "${1:-}" == "outdated" ]]; then