diff --git a/.github/workflows/atlas-acceptance.yml b/.github/workflows/atlas-acceptance.yml new file mode 100644 index 0000000..57df870 --- /dev/null +++ b/.github/workflows/atlas-acceptance.yml @@ -0,0 +1,23 @@ +name: Atlas Acceptance + +on: + workflow_dispatch: + push: + paths: + - 'Apps/**' + - 'Packages/**' + - 'Helpers/**' + - 'XPC/**' + - 'Testing/**' + - 'scripts/atlas/**' + - 'Docs/Execution/MVP-Acceptance-Matrix.md' + - '.github/workflows/atlas-acceptance.yml' + +jobs: + acceptance: + runs-on: macos-latest + steps: + - uses: actions/checkout@v4 + + - name: Run Atlas acceptance pipeline + run: ./scripts/atlas/full-acceptance.sh diff --git a/.github/workflows/atlas-native.yml b/.github/workflows/atlas-native.yml new file mode 100644 index 0000000..c59e4ed --- /dev/null +++ b/.github/workflows/atlas-native.yml @@ -0,0 +1,31 @@ +name: Atlas Native + +on: + workflow_dispatch: + push: + paths: + - 'Apps/**' + - 'Packages/**' + - 'XPC/**' + - 'Helpers/**' + - 'project.yml' + - 'scripts/atlas/**' + - '.github/workflows/atlas-native.yml' + +jobs: + build-native: + runs-on: macos-latest + steps: + - uses: actions/checkout@v4 + + - name: Build and package Atlas native app + run: ./scripts/atlas/package-native.sh + + - name: Verify DMG can install to the user Applications folder + run: ./scripts/atlas/verify-dmg-install.sh + + - name: Upload native app and installer artifacts + uses: actions/upload-artifact@v4 + with: + name: atlas-native-app + path: dist/native/* diff --git a/Apps/AtlasApp/README.md b/Apps/AtlasApp/README.md new file mode 100644 index 0000000..cf85284 --- /dev/null +++ b/Apps/AtlasApp/README.md @@ -0,0 +1,15 @@ +# AtlasApp + +## Responsibility + +- Main macOS application target +- `NavigationSplitView` shell for the frozen MVP modules +- Shared app-state wiring for search, task center, and route selection +- Dependency handoff into feature packages and worker-backed Smart Clean actions + +## Current Scaffold + +- `AtlasApp.swift` — `@main` entry for the macOS app shell +- `AppShellView.swift` — sidebar navigation, toolbar, and task-center popover +- `AtlasAppModel.swift` — shared scaffold state backed by the application-layer workspace controller +- `TaskCenterView.swift` — global task surface placeholder wired to `History` diff --git a/Apps/AtlasApp/Sources/AtlasApp/AppShellView.swift b/Apps/AtlasApp/Sources/AtlasApp/AppShellView.swift new file mode 100644 index 0000000..d9cd7f6 --- /dev/null +++ b/Apps/AtlasApp/Sources/AtlasApp/AppShellView.swift @@ -0,0 +1,279 @@ +import AtlasDesignSystem +import AtlasDomain +import AtlasFeaturesApps +import AtlasFeaturesHistory +import AtlasFeaturesOverview +import AtlasFeaturesPermissions +import AtlasFeaturesSettings +import AtlasFeaturesSmartClean +import SwiftUI + +struct AppShellView: View { + @ObservedObject var model: AtlasAppModel + + var body: some View { + NavigationSplitView { + List(AtlasRoute.allCases, selection: $model.selection) { route in + SidebarRouteRow(route: route) + .tag(route) + } + .navigationTitle(AtlasL10n.string("app.name")) + .navigationSplitViewColumnWidth(min: AtlasLayout.sidebarMinWidth, ideal: AtlasLayout.sidebarIdealWidth) + .listStyle(.sidebar) + .accessibilityIdentifier("atlas.sidebar") + } detail: { + let route = model.selection ?? .overview + + detailView(for: route) + .id(route) + .transition(.opacity) + .searchable( + text: Binding( + get: { model.searchText(for: route) }, + set: { model.setSearchText($0, for: route) } + ), + prompt: AtlasL10n.string("app.search.prompt.route", route.searchPromptLabel) + ) + .accessibilityHint(AtlasL10n.string("app.search.hint.route", route.searchPromptLabel)) + .toolbar { + ToolbarItemGroup { + Button { + model.openTaskCenter() + } label: { + Label { + Text(AtlasL10n.string("toolbar.taskcenter")) + } icon: { + ZStack(alignment: .topTrailing) { + Image(systemName: AtlasIcon.taskCenter) + .symbolRenderingMode(.hierarchical) + + if activeTaskCount > 0 { + Text(activeTaskCount > 99 ? "99+" : "\(activeTaskCount)") + .font(.caption2.weight(.bold)) + .foregroundStyle(.white) + .padding(.horizontal, activeTaskCount > 9 ? AtlasSpacing.xxs : AtlasSpacing.xs) + .padding(.vertical, 2) + .background(Capsule(style: .continuous).fill(AtlasColor.accent)) + .offset(x: 10, y: -8) + } + } + } + } + .help(AtlasL10n.string("toolbar.taskcenter.help")) + .accessibilityIdentifier("toolbar.taskCenter") + .accessibilityLabel(AtlasL10n.string("toolbar.taskcenter.accessibilityLabel")) + .accessibilityHint(AtlasL10n.string("toolbar.taskcenter.accessibilityHint")) + + Button { + model.navigate(to: .permissions) + Task { + await model.inspectPermissions() + } + } label: { + Label { + Text(AtlasL10n.string("toolbar.permissions")) + } icon: { + Image(systemName: AtlasIcon.permissions) + .symbolRenderingMode(.hierarchical) + } + } + .help(AtlasL10n.string("toolbar.permissions.help")) + .accessibilityIdentifier("toolbar.permissions") + .accessibilityLabel(AtlasL10n.string("toolbar.permissions.accessibilityLabel")) + .accessibilityHint(AtlasL10n.string("toolbar.permissions.accessibilityHint")) + + Button { + model.navigate(to: .settings) + } label: { + Label { + Text(AtlasL10n.string("toolbar.settings")) + } icon: { + Image(systemName: AtlasIcon.settings) + .symbolRenderingMode(.hierarchical) + } + } + .help(AtlasL10n.string("toolbar.settings.help")) + .accessibilityIdentifier("toolbar.settings") + .accessibilityLabel(AtlasL10n.string("toolbar.settings.accessibilityLabel")) + .accessibilityHint(AtlasL10n.string("toolbar.settings.accessibilityHint")) + } + } + .animation(AtlasMotion.slow, value: model.selection) + } + .navigationSplitViewStyle(.balanced) + .task { + await model.refreshHealthSnapshotIfNeeded() + await model.refreshPermissionsIfNeeded() + } + .onChange(of: model.selection, initial: false) { _, selection in + guard selection == .permissions else { + return + } + Task { + await model.inspectPermissions() + } + } + .popover(isPresented: $model.isTaskCenterPresented) { + TaskCenterView( + taskRuns: model.taskCenterTaskRuns, + summary: model.taskCenterSummary + ) { + model.closeTaskCenter() + model.navigate(to: .history) + } + .onExitCommand { + model.closeTaskCenter() + } + } + } + + @ViewBuilder + private func detailView(for route: AtlasRoute) -> some View { + switch route { + case .overview: + OverviewFeatureView( + snapshot: model.filteredSnapshot, + isRefreshingHealthSnapshot: model.isHealthSnapshotRefreshing + ) + case .smartClean: + SmartCleanFeatureView( + findings: model.filteredFindings, + plan: model.currentPlan, + scanSummary: model.latestScanSummary, + scanProgress: model.latestScanProgress, + isScanning: model.isScanRunning, + isExecutingPlan: model.isPlanRunning, + isCurrentPlanFresh: model.isCurrentSmartCleanPlanFresh, + canExecutePlan: model.canExecuteCurrentSmartCleanPlan, + planIssue: model.smartCleanPlanIssue, + onStartScan: { + Task { await model.runSmartCleanScan() } + }, + onRefreshPreview: { + Task { await model.refreshPlanPreview() } + }, + onExecutePlan: { + Task { await model.executeCurrentPlan() } + } + ) + case .apps: + AppsFeatureView( + apps: model.filteredApps, + previewPlan: model.currentAppPreview, + currentPreviewedAppID: model.currentPreviewedAppID, + summary: model.latestAppsSummary, + isRunning: model.isAppActionRunning, + activePreviewAppID: model.activePreviewAppID, + activeUninstallAppID: model.activeUninstallAppID, + onRefreshApps: { + Task { await model.refreshApps() } + }, + onPreviewAppUninstall: { appID in + Task { await model.previewAppUninstall(appID: appID) } + }, + onExecuteAppUninstall: { appID in + Task { await model.executeAppUninstall(appID: appID) } + } + ) + case .history: + HistoryFeatureView( + taskRuns: model.filteredTaskRuns, + recoveryItems: model.filteredRecoveryItems, + restoringItemID: model.restoringRecoveryItemID, + onRestoreItem: { itemID in + Task { await model.restoreRecoveryItem(itemID) } + } + ) + case .permissions: + PermissionsFeatureView( + permissionStates: model.filteredPermissionStates, + summary: model.latestPermissionsSummary, + isRefreshing: model.isPermissionsRefreshing, + onRefresh: { + Task { await model.inspectPermissions() } + }, + onRequestNotificationPermission: { + Task { await model.requestNotificationPermission() } + } + ) + case .settings: + SettingsFeatureView( + settings: model.settings, + onSetLanguage: { language in + Task { await model.setLanguage(language) } + }, + onSetRecoveryRetention: { days in + Task { await model.setRecoveryRetentionDays(days) } + }, + onToggleNotifications: { isEnabled in + Task { await model.setNotificationsEnabled(isEnabled) } + } + ) + } + } + + private var activeTaskCount: Int { + model.snapshot.taskRuns.filter { taskRun in + taskRun.status == .queued || taskRun.status == .running + }.count + } +} + +private struct SidebarRouteRow: View { + let route: AtlasRoute + + var body: some View { + Label { + VStack(alignment: .leading, spacing: AtlasSpacing.xxs) { + Text(route.title) + .font(AtlasTypography.rowTitle) + + Text(route.subtitle) + .font(AtlasTypography.captionSmall) + .foregroundStyle(.secondary) + .lineLimit(2) + } + } icon: { + ZStack { + RoundedRectangle(cornerRadius: AtlasRadius.sm, style: .continuous) + .fill(AtlasColor.brand.opacity(0.1)) + .frame(width: AtlasLayout.sidebarIconSize, height: AtlasLayout.sidebarIconSize) + + Image(systemName: route.systemImage) + .font(.system(size: 14, weight: .semibold)) + .foregroundStyle(AtlasColor.brand) + .accessibilityHidden(true) + } + } + .padding(.vertical, AtlasSpacing.sm) + .contentShape(Rectangle()) + .listRowSeparator(.hidden) + .accessibilityElement(children: .combine) + .accessibilityIdentifier("route.\(route.id)") + .accessibilityLabel("\(route.title). \(route.subtitle)") + .accessibilityHint(AtlasL10n.string("sidebar.route.hint", route.shortcutNumber)) + } +} + +private extension AtlasRoute { + var searchPromptLabel: String { + title + } + + var shortcutNumber: String { + switch self { + case .overview: + return "1" + case .smartClean: + return "2" + case .apps: + return "3" + case .history: + return "4" + case .permissions: + return "5" + case .settings: + return "6" + } + } +} diff --git a/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/Contents.json b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000..d725673 --- /dev/null +++ b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,68 @@ +{ + "images": [ + { + "filename": "icon_16x16.png", + "idiom": "mac", + "scale": "1x", + "size": "16x16" + }, + { + "filename": "icon_32x32.png", + "idiom": "mac", + "scale": "2x", + "size": "16x16" + }, + { + "filename": "icon_32x32.png", + "idiom": "mac", + "scale": "1x", + "size": "32x32" + }, + { + "filename": "icon_64x64.png", + "idiom": "mac", + "scale": "2x", + "size": "32x32" + }, + { + "filename": "icon_128x128.png", + "idiom": "mac", + "scale": "1x", + "size": "128x128" + }, + { + "filename": "icon_256x256.png", + "idiom": "mac", + "scale": "2x", + "size": "128x128" + }, + { + "filename": "icon_256x256.png", + "idiom": "mac", + "scale": "1x", + "size": "256x256" + }, + { + "filename": "icon_512x512.png", + "idiom": "mac", + "scale": "2x", + "size": "256x256" + }, + { + "filename": "icon_512x512.png", + "idiom": "mac", + "scale": "1x", + "size": "512x512" + }, + { + "filename": "icon_1024x1024.png", + "idiom": "mac", + "scale": "2x", + "size": "512x512" + } + ], + "info": { + "author": "atlas-icon-generator", + "version": 1 + } +} \ No newline at end of file diff --git a/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_1024.svg b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_1024.svg new file mode 100644 index 0000000..9643a03 --- /dev/null +++ b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_1024.svg @@ -0,0 +1,87 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_1024x1024.png b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_1024x1024.png new file mode 100644 index 0000000..f991f74 Binary files /dev/null and b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_1024x1024.png differ diff --git a/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_128x128.png b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_128x128.png new file mode 100644 index 0000000..af1e2d0 Binary files /dev/null and b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_128x128.png differ diff --git a/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_16x16.png b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_16x16.png new file mode 100644 index 0000000..00eb943 Binary files /dev/null and b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_16x16.png differ diff --git a/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_256x256.png b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_256x256.png new file mode 100644 index 0000000..7913b34 Binary files /dev/null and b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_256x256.png differ diff --git a/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_32x32.png b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_32x32.png new file mode 100644 index 0000000..70ff87e Binary files /dev/null and b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_32x32.png differ diff --git a/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_512x512.png b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_512x512.png new file mode 100644 index 0000000..8099b72 Binary files /dev/null and b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_512x512.png differ diff --git a/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_64x64.png b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_64x64.png new file mode 100644 index 0000000..b49718f Binary files /dev/null and b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/AppIcon.appiconset/icon_64x64.png differ diff --git a/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/Contents.json b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/Contents.json new file mode 100644 index 0000000..73c0059 --- /dev/null +++ b/Apps/AtlasApp/Sources/AtlasApp/Assets.xcassets/Contents.json @@ -0,0 +1,6 @@ +{ + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/Apps/AtlasApp/Sources/AtlasApp/AtlasApp.swift b/Apps/AtlasApp/Sources/AtlasApp/AtlasApp.swift new file mode 100644 index 0000000..2454dc9 --- /dev/null +++ b/Apps/AtlasApp/Sources/AtlasApp/AtlasApp.swift @@ -0,0 +1,19 @@ +import AtlasDomain +import SwiftUI + +@main +struct AtlasApp: App { + @StateObject private var model = AtlasAppModel() + + var body: some Scene { + WindowGroup(AtlasL10n.string("app.name")) { + AppShellView(model: model) + .environment(\.locale, model.appLanguage.locale) + .frame(minWidth: 1120, minHeight: 720) + } + .commands { + AtlasAppCommands(model: model) + } + .windowStyle(.hiddenTitleBar) + } +} diff --git a/Apps/AtlasApp/Sources/AtlasApp/AtlasAppCommands.swift b/Apps/AtlasApp/Sources/AtlasApp/AtlasAppCommands.swift new file mode 100644 index 0000000..60b4706 --- /dev/null +++ b/Apps/AtlasApp/Sources/AtlasApp/AtlasAppCommands.swift @@ -0,0 +1,87 @@ +import AtlasDomain +import SwiftUI + +struct AtlasAppCommands: Commands { + @ObservedObject var model: AtlasAppModel + + var body: some Commands { + CommandMenu(AtlasL10n.string("commands.navigate.menu")) { + ForEach(AtlasRoute.allCases) { route in + Button(route.title) { + model.navigate(to: route) + } + .keyboardShortcut(route.shortcutKey, modifiers: .command) + } + + Divider() + + Button(model.isTaskCenterPresented ? AtlasL10n.string("commands.taskcenter.close") : AtlasL10n.string("commands.taskcenter.open")) { + model.toggleTaskCenter() + } + .keyboardShortcut("7", modifiers: .command) + } + + CommandMenu(AtlasL10n.string("commands.actions.menu")) { + Button(AtlasL10n.string("commands.actions.refreshCurrent")) { + Task { + await model.refreshCurrentRoute() + } + } + .keyboardShortcut("r", modifiers: .command) + + Button(AtlasL10n.string("commands.actions.runScan")) { + Task { + await model.runSmartCleanScan() + } + } + .keyboardShortcut("r", modifiers: [.command, .shift]) + .disabled(model.isWorkflowBusy) + + Button(AtlasL10n.string("commands.actions.refreshApps")) { + Task { + model.navigate(to: .apps) + await model.refreshApps() + } + } + .keyboardShortcut("a", modifiers: [.command, .option]) + .disabled(model.isWorkflowBusy) + + Button(AtlasL10n.string("commands.actions.refreshPermissions")) { + Task { + model.navigate(to: .permissions) + await model.inspectPermissions() + } + } + .keyboardShortcut("p", modifiers: [.command, .option]) + .disabled(model.isWorkflowBusy) + + Button(AtlasL10n.string("commands.actions.refreshHealth")) { + Task { + model.navigate(to: .overview) + await model.refreshHealthSnapshot() + } + } + .keyboardShortcut("h", modifiers: [.command, .option]) + .disabled(model.isWorkflowBusy) + } + } +} + +private extension AtlasRoute { + var shortcutKey: KeyEquivalent { + switch self { + case .overview: + return "1" + case .smartClean: + return "2" + case .apps: + return "3" + case .history: + return "4" + case .permissions: + return "5" + case .settings: + return "6" + } + } +} diff --git a/Apps/AtlasApp/Sources/AtlasApp/AtlasAppModel.swift b/Apps/AtlasApp/Sources/AtlasApp/AtlasAppModel.swift new file mode 100644 index 0000000..3763c04 --- /dev/null +++ b/Apps/AtlasApp/Sources/AtlasApp/AtlasAppModel.swift @@ -0,0 +1,576 @@ +import AtlasApplication +import AtlasCoreAdapters +import AtlasDomain +import AtlasInfrastructure +import Combine +import Foundation +import SwiftUI +import UserNotifications + +@MainActor +final class AtlasAppModel: ObservableObject { + @Published var selection: AtlasRoute? = .overview + @Published private var searchTextByRoute: [AtlasRoute: String] = [:] + @Published var isTaskCenterPresented = false + @Published private(set) var snapshot: AtlasWorkspaceSnapshot + @Published private(set) var currentPlan: ActionPlan + @Published private(set) var currentAppPreview: ActionPlan? + @Published private(set) var currentPreviewedAppID: UUID? + @Published private(set) var settings: AtlasSettings + @Published private(set) var isHealthSnapshotRefreshing = false + @Published private(set) var isScanRunning = false + @Published private(set) var isPlanRunning = false + @Published private(set) var isPermissionsRefreshing = false + @Published private(set) var isAppActionRunning = false + @Published private(set) var activePreviewAppID: UUID? + @Published private(set) var activeUninstallAppID: UUID? + @Published private(set) var restoringRecoveryItemID: UUID? + @Published private(set) var latestScanSummary: String + @Published private(set) var latestAppsSummary: String + @Published private(set) var latestPermissionsSummary: String + @Published private(set) var latestScanProgress: Double = 0 + @Published private(set) var isCurrentSmartCleanPlanFresh: Bool + @Published private(set) var smartCleanPlanIssue: String? + + private let workspaceController: AtlasWorkspaceController + private let notificationPermissionRequester: @Sendable () async -> Bool + private var didRequestInitialHealthSnapshot = false + private var didRequestInitialPermissionSnapshot = false + + init( + repository: AtlasWorkspaceRepository = AtlasWorkspaceRepository(), + workerService: (any AtlasWorkerServing)? = nil, + notificationPermissionRequester: (@Sendable () async -> Bool)? = nil + ) { + let state = repository.loadState() + self.snapshot = state.snapshot + self.currentPlan = state.currentPlan + self.settings = state.settings + AtlasL10n.setCurrentLanguage(state.settings.language) + self.latestScanSummary = AtlasL10n.string("model.scan.ready") + self.latestAppsSummary = AtlasL10n.string("model.apps.ready") + self.latestPermissionsSummary = AtlasL10n.string("model.permissions.ready") + self.isCurrentSmartCleanPlanFresh = false + self.smartCleanPlanIssue = nil + let directWorker = AtlasScaffoldWorkerService( + repository: repository, + healthSnapshotProvider: MoleHealthAdapter(), + smartCleanScanProvider: MoleSmartCleanAdapter(), + appsInventoryProvider: MacAppsInventoryAdapter(), + helperExecutor: AtlasPrivilegedHelperClient() + ) + let prefersXPCWorker = ProcessInfo.processInfo.environment["ATLAS_PREFER_XPC_WORKER"] == "1" + let defaultWorker: any AtlasWorkerServing = prefersXPCWorker + ? AtlasPreferredWorkerService( + fallbackWorker: directWorker, + allowFallback: true + ) + : directWorker + self.workspaceController = AtlasWorkspaceController( + worker: workerService ?? defaultWorker + ) + self.notificationPermissionRequester = notificationPermissionRequester ?? { + await withCheckedContinuation { continuation in + UNUserNotificationCenter.current().requestAuthorization(options: [.alert, .badge, .sound]) { granted, _ in + continuation.resume(returning: granted) + } + } + } + } + + var appLanguage: AtlasLanguage { + settings.language + } + + func searchText(for route: AtlasRoute) -> String { + searchTextByRoute[route, default: ""] + } + + func setSearchText(_ text: String, for route: AtlasRoute) { + searchTextByRoute[route] = text + } + + var filteredSnapshot: AtlasWorkspaceSnapshot { + var filtered = snapshot + filtered.findings = filter(snapshot.findings, route: .overview) { finding in + [finding.title, finding.detail, AtlasL10n.localizedCategory(finding.category), finding.risk.title] + } + filtered.apps = filter(snapshot.apps, route: .overview) { app in + [app.name, app.bundleIdentifier, app.bundlePath, "\(app.leftoverItems)"] + } + filtered.taskRuns = filter(snapshot.taskRuns, route: .overview) { task in + [task.kind.title, task.status.title, task.summary] + } + filtered.recoveryItems = filter(snapshot.recoveryItems, route: .overview) { item in + [item.title, item.detail, item.originalPath] + } + filtered.permissions = filter(snapshot.permissions, route: .overview) { permission in + [ + permission.kind.title, + permission.rationale, + permissionStatusText(for: permission) + ] + } + return filtered + } + + var filteredFindings: [Finding] { + filter(snapshot.findings, route: .smartClean) { finding in + [finding.title, finding.detail, AtlasL10n.localizedCategory(finding.category), finding.risk.title] + } + } + + var filteredApps: [AppFootprint] { + filter(snapshot.apps, route: .apps) { app in + [app.name, app.bundleIdentifier, app.bundlePath, "\(app.leftoverItems)"] + } + } + + var filteredTaskRuns: [TaskRun] { + filter(snapshot.taskRuns, route: .history) { task in + [task.kind.title, task.status.title, task.summary] + } + } + + var filteredRecoveryItems: [RecoveryItem] { + filter(snapshot.recoveryItems, route: .history) { item in + [item.title, item.detail, item.originalPath] + } + } + + var filteredPermissionStates: [PermissionState] { + filter(snapshot.permissions, route: .permissions) { permission in + [ + permission.kind.title, + permission.rationale, + permissionStatusText(for: permission) + ] + } + } + + var taskCenterTaskRuns: [TaskRun] { + snapshot.taskRuns + } + + var taskCenterSummary: String { + let activeTaskCount = snapshot.taskRuns.filter { taskRun in + taskRun.status == .queued || taskRun.status == .running + }.count + + if activeTaskCount == 0 { + return AtlasL10n.string("model.taskcenter.none") + } + + let key = activeTaskCount == 1 ? "model.taskcenter.active.one" : "model.taskcenter.active.other" + return AtlasL10n.string(key, activeTaskCount) + } + + var isWorkflowBusy: Bool { + isHealthSnapshotRefreshing + || isScanRunning + || isPlanRunning + || isPermissionsRefreshing + || isAppActionRunning + || restoringRecoveryItemID != nil + } + + var canExecuteCurrentSmartCleanPlan: Bool { + !currentPlan.items.isEmpty && isCurrentSmartCleanPlanFresh && currentSmartCleanPlanHasExecutableTargets + } + + var currentSmartCleanPlanHasExecutableTargets: Bool { + let selectedIDs = Set(currentPlan.items.map(\.id)) + let executableFindings = snapshot.findings.filter { selectedIDs.contains($0.id) && !$0.targetPathsDescriptionIsInspectionOnly } + guard !executableFindings.isEmpty else { + return false + } + return executableFindings.allSatisfy { !($0.targetPaths ?? []).isEmpty } + } + + func refreshHealthSnapshotIfNeeded() async { + guard !didRequestInitialHealthSnapshot else { + return + } + + didRequestInitialHealthSnapshot = true + await refreshHealthSnapshot() + } + + func refreshPermissionsIfNeeded() async { + guard !didRequestInitialPermissionSnapshot else { + return + } + + didRequestInitialPermissionSnapshot = true + await inspectPermissions() + } + + func refreshHealthSnapshot() async { + guard !isHealthSnapshotRefreshing else { + return + } + + isHealthSnapshotRefreshing = true + + do { + let output = try await workspaceController.healthSnapshot() + withAnimation(.snappy(duration: 0.24)) { + snapshot = output.snapshot + } + } catch { + latestScanSummary = error.localizedDescription + } + + isHealthSnapshotRefreshing = false + } + + func inspectPermissions() async { + guard !isPermissionsRefreshing else { + return + } + + isPermissionsRefreshing = true + latestPermissionsSummary = AtlasL10n.string("model.permissions.refreshing") + + do { + let output = try await workspaceController.inspectPermissions() + withAnimation(.snappy(duration: 0.24)) { + snapshot = output.snapshot + } + + let grantedCount = output.snapshot.permissions.filter(\.isGranted).count + latestPermissionsSummary = AtlasL10n.string( + output.snapshot.permissions.count == 1 ? "model.permissions.summary.one" : "model.permissions.summary.other", + grantedCount, + output.snapshot.permissions.count + ) + } catch { + latestPermissionsSummary = error.localizedDescription + } + + isPermissionsRefreshing = false + } + + func runSmartCleanScan() async { + guard !isScanRunning else { + return + } + + selection = .smartClean + isScanRunning = true + latestScanSummary = AtlasL10n.string("model.scan.submitting") + latestScanProgress = 0 + + do { + let output = try await workspaceController.startScan() + withAnimation(.snappy(duration: 0.24)) { + snapshot = output.snapshot + currentPlan = output.actionPlan ?? currentPlan + latestScanSummary = output.summary + latestScanProgress = output.progressFraction + isCurrentSmartCleanPlanFresh = output.actionPlan != nil + smartCleanPlanIssue = nil + } + } catch { + latestScanSummary = error.localizedDescription + latestScanProgress = 0 + smartCleanPlanIssue = error.localizedDescription + } + + isScanRunning = false + } + + @discardableResult + func refreshPlanPreview() async -> Bool { + do { + let output = try await workspaceController.previewPlan(findingIDs: snapshot.findings.map(\.id)) + withAnimation(.snappy(duration: 0.24)) { + snapshot = output.snapshot + currentPlan = output.actionPlan + latestScanSummary = output.summary + latestScanProgress = min(max(latestScanProgress, 1), 1) + isCurrentSmartCleanPlanFresh = true + smartCleanPlanIssue = nil + } + return true + } catch { + latestScanSummary = error.localizedDescription + smartCleanPlanIssue = error.localizedDescription + return false + } + } + + func executeCurrentPlan() async { + guard !isPlanRunning, !currentPlan.items.isEmpty else { + return + } + + selection = .smartClean + isPlanRunning = true + + do { + let output = try await workspaceController.executePlan(planID: currentPlan.id) + withAnimation(.snappy(duration: 0.24)) { + snapshot = output.snapshot + latestScanSummary = output.summary + latestScanProgress = output.progressFraction + smartCleanPlanIssue = nil + } + let didRefreshPlan = await refreshPlanPreview() + if !didRefreshPlan { + isCurrentSmartCleanPlanFresh = false + } + } catch { + latestScanSummary = error.localizedDescription + smartCleanPlanIssue = error.localizedDescription + } + + isPlanRunning = false + } + + func refreshApps() async { + guard !isAppActionRunning else { + return + } + + selection = .apps + isAppActionRunning = true + activePreviewAppID = nil + activeUninstallAppID = nil + currentAppPreview = nil + currentPreviewedAppID = nil + latestAppsSummary = AtlasL10n.string("model.apps.refreshing") + + do { + let output = try await workspaceController.listApps() + withAnimation(.snappy(duration: 0.24)) { + snapshot = output.snapshot + latestAppsSummary = output.summary + } + } catch { + latestAppsSummary = error.localizedDescription + } + + isAppActionRunning = false + } + + func previewAppUninstall(appID: UUID) async { + guard !isAppActionRunning else { + return + } + + selection = .apps + isAppActionRunning = true + activePreviewAppID = appID + activeUninstallAppID = nil + + do { + let output = try await workspaceController.previewAppUninstall(appID: appID) + withAnimation(.snappy(duration: 0.24)) { + snapshot = output.snapshot + currentAppPreview = output.actionPlan + currentPreviewedAppID = appID + latestAppsSummary = output.summary + } + } catch { + latestAppsSummary = error.localizedDescription + } + + activePreviewAppID = nil + isAppActionRunning = false + } + + func executeAppUninstall(appID: UUID) async { + guard !isAppActionRunning else { + return + } + + selection = .apps + isAppActionRunning = true + activePreviewAppID = nil + activeUninstallAppID = appID + + do { + let output = try await workspaceController.executeAppUninstall(appID: appID) + withAnimation(.snappy(duration: 0.24)) { + snapshot = output.snapshot + currentAppPreview = nil + currentPreviewedAppID = nil + latestAppsSummary = output.summary + } + } catch { + latestAppsSummary = error.localizedDescription + } + + activeUninstallAppID = nil + isAppActionRunning = false + } + + func restoreRecoveryItem(_ itemID: UUID) async { + guard restoringRecoveryItemID == nil else { + return + } + + restoringRecoveryItemID = itemID + + do { + let output = try await workspaceController.restoreItems(itemIDs: [itemID]) + withAnimation(.snappy(duration: 0.24)) { + snapshot = output.snapshot + latestScanSummary = output.summary + } + await refreshPlanPreview() + } catch { + latestScanSummary = error.localizedDescription + } + + restoringRecoveryItemID = nil + } + + func setRecoveryRetentionDays(_ days: Int) async { + await updateSettings { settings in + settings.recoveryRetentionDays = days + } + } + + func setNotificationsEnabled(_ isEnabled: Bool) async { + if isEnabled, snapshot.permissions.first(where: { $0.kind == .notifications })?.isGranted != true { + _ = await notificationPermissionRequester() + } + await updateSettings { settings in + settings.notificationsEnabled = isEnabled + } + await inspectPermissions() + } + + func requestNotificationPermission() async { + _ = await notificationPermissionRequester() + await inspectPermissions() + } + + func setLanguage(_ language: AtlasLanguage) async { + guard settings.language != language else { + return + } + + await updateSettings { settings in + settings.language = language + settings.acknowledgementText = AtlasL10n.acknowledgement(language: language) + settings.thirdPartyNoticesText = AtlasL10n.thirdPartyNotices(language: language) + } + + AtlasL10n.setCurrentLanguage(language) + refreshLocalizedReadySummaries() + if !snapshot.findings.isEmpty { + await refreshPlanPreview() + } + currentAppPreview = nil + currentPreviewedAppID = nil + } + + func refreshCurrentRoute() async { + switch selection ?? .overview { + case .overview: + await refreshHealthSnapshot() + case .smartClean: + await runSmartCleanScan() + case .apps: + await refreshApps() + case .history: + break + case .permissions: + await inspectPermissions() + case .settings: + break + } + } + + func navigate(to route: AtlasRoute) { + withAnimation(.snappy(duration: 0.2)) { + selection = route + } + } + + func openTaskCenter() { + withAnimation(.snappy(duration: 0.2)) { + isTaskCenterPresented = true + } + } + + func closeTaskCenter() { + withAnimation(.snappy(duration: 0.2)) { + isTaskCenterPresented = false + } + } + + func toggleTaskCenter() { + withAnimation(.snappy(duration: 0.2)) { + isTaskCenterPresented.toggle() + } + } + + private func updateSettings(_ mutate: (inout AtlasSettings) -> Void) async { + var updated = settings + mutate(&updated) + + do { + let output = try await workspaceController.updateSettings(updated) + AtlasL10n.setCurrentLanguage(output.settings.language) + withAnimation(.snappy(duration: 0.2)) { + settings = output.settings + } + } catch { + latestAppsSummary = error.localizedDescription + } + } + + private func refreshLocalizedReadySummaries() { + if !isScanRunning && !isPlanRunning { + latestScanSummary = AtlasL10n.string("model.scan.ready") + } + if !isAppActionRunning { + latestAppsSummary = AtlasL10n.string("model.apps.ready") + } + if !isPermissionsRefreshing { + latestPermissionsSummary = AtlasL10n.string("model.permissions.ready") + } + } + + private func filter( + _ elements: [Element], + route: AtlasRoute, + fields: (Element) -> [String] + ) -> [Element] { + let query = searchText(for: route) + .trimmingCharacters(in: .whitespacesAndNewlines) + .lowercased() + + guard !query.isEmpty else { + return elements + } + + return elements.filter { element in + fields(element) + .joined(separator: " ") + .lowercased() + .contains(query) + } + } +} + +private extension Finding { + var targetPathsDescriptionIsInspectionOnly: Bool { + risk == .advanced || !AtlasSmartCleanExecutionSupport.isFindingExecutionSupported(self) + } +} + +private extension AtlasAppModel { + func permissionStatusText(for permission: PermissionState) -> String { + if permission.isGranted { + return AtlasL10n.string("common.granted") + } + return permission.kind.isRequiredForCurrentWorkflows + ? AtlasL10n.string("permissions.status.required") + : AtlasL10n.string("permissions.status.optional") + } +} diff --git a/Apps/AtlasApp/Sources/AtlasApp/TaskCenterView.swift b/Apps/AtlasApp/Sources/AtlasApp/TaskCenterView.swift new file mode 100644 index 0000000..1b3027d --- /dev/null +++ b/Apps/AtlasApp/Sources/AtlasApp/TaskCenterView.swift @@ -0,0 +1,106 @@ +import AtlasDesignSystem +import AtlasDomain +import SwiftUI + +struct TaskCenterView: View { + let taskRuns: [TaskRun] + let summary: String + let onOpenHistory: () -> Void + + var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + VStack(alignment: .leading, spacing: AtlasSpacing.sm) { + Text(AtlasL10n.string("taskcenter.title")) + .font(AtlasTypography.sectionTitle) + + Text(summary) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + } + + Divider() + + AtlasCallout( + title: taskRuns.isEmpty ? AtlasL10n.string("taskcenter.callout.empty.title") : AtlasL10n.string("taskcenter.callout.active.title"), + detail: taskRuns.isEmpty + ? AtlasL10n.string("taskcenter.callout.empty.detail") + : AtlasL10n.string("taskcenter.callout.active.detail"), + tone: taskRuns.isEmpty ? .neutral : .success, + systemImage: taskRuns.isEmpty ? "clock.badge.questionmark" : "clock.arrow.circlepath" + ) + + if taskRuns.isEmpty { + AtlasEmptyState( + title: AtlasL10n.string("taskcenter.empty.title"), + detail: AtlasL10n.string("taskcenter.empty.detail"), + systemImage: "list.bullet.rectangle.portrait", + tone: .neutral + ) + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ForEach(taskRuns.prefix(5)) { taskRun in + AtlasDetailRow( + title: taskRun.kind.title, + subtitle: taskRun.summary, + footnote: timelineFootnote(for: taskRun), + systemImage: icon(for: taskRun.kind), + tone: taskRun.status.tintTone + ) { + AtlasStatusChip(taskRun.status.title, tone: taskRun.status.tintTone) + } + } + } + } + + Button(action: onOpenHistory) { + Label(AtlasL10n.string("taskcenter.openHistory"), systemImage: "arrow.right.circle.fill") + } + .buttonStyle(.borderedProminent) + .controlSize(.large) + .keyboardShortcut(.defaultAction) + .accessibilityIdentifier("taskcenter.openHistory") + .accessibilityHint(AtlasL10n.string("taskcenter.openHistory.hint")) + } + .padding(AtlasSpacing.xl) + .frame(width: 430) + .accessibilityIdentifier("taskcenter.panel") + } + + private func timelineFootnote(for taskRun: TaskRun) -> String { + let start = AtlasFormatters.shortDate(taskRun.startedAt) + if let finishedAt = taskRun.finishedAt { + return AtlasL10n.string("taskcenter.timeline.finished", start, AtlasFormatters.shortDate(finishedAt)) + } + return AtlasL10n.string("taskcenter.timeline.running", start) + } + + private func icon(for kind: TaskKind) -> String { + switch kind { + case .scan: + return "sparkles" + case .executePlan: + return "play.circle" + case .uninstallApp: + return "trash" + case .restore: + return "arrow.uturn.backward.circle" + case .inspectPermissions: + return "lock.shield" + } + } +} + +private extension TaskStatus { + var tintTone: AtlasTone { + switch self { + case .queued: + return .neutral + case .running: + return .warning + case .completed: + return .success + case .failed, .cancelled: + return .danger + } + } +} diff --git a/Apps/AtlasApp/Tests/AtlasAppTests/AtlasAppModelTests.swift b/Apps/AtlasApp/Tests/AtlasAppTests/AtlasAppModelTests.swift new file mode 100644 index 0000000..c470da5 --- /dev/null +++ b/Apps/AtlasApp/Tests/AtlasAppTests/AtlasAppModelTests.swift @@ -0,0 +1,297 @@ +import XCTest +@testable import AtlasApp +import AtlasApplication +import AtlasDomain +import AtlasInfrastructure + +@MainActor +final class AtlasAppModelTests: XCTestCase { + + func testCurrentSmartCleanPlanStartsAsCachedUntilSessionRefresh() { + let model = AtlasAppModel(repository: makeRepository(), workerService: AtlasScaffoldWorkerService(allowStateOnlyCleanExecution: true)) + + XCTAssertFalse(model.isCurrentSmartCleanPlanFresh) + XCTAssertFalse(model.canExecuteCurrentSmartCleanPlan) + XCTAssertNil(model.smartCleanPlanIssue) + } + + func testFailedSmartCleanScanKeepsCachedPlanAndExposesFailureReason() async { + let repository = makeRepository() + let worker = AtlasScaffoldWorkerService( + repository: repository, + smartCleanScanProvider: FailingSmartCleanProvider() + ) + let model = AtlasAppModel(repository: repository, workerService: worker) + + await model.runSmartCleanScan() + + XCTAssertFalse(model.isCurrentSmartCleanPlanFresh) + XCTAssertFalse(model.canExecuteCurrentSmartCleanPlan) + XCTAssertNotNil(model.smartCleanPlanIssue) + XCTAssertTrue(model.latestScanSummary.contains("Smart Clean scan is unavailable")) + } + + func testRefreshPlanPreviewKeepsPlanNonExecutableWhenFindingsLackTargets() async { + let repository = makeRepository() + let worker = AtlasScaffoldWorkerService(repository: repository, allowStateOnlyCleanExecution: true) + let model = AtlasAppModel(repository: repository, workerService: worker) + + let refreshed = await model.refreshPlanPreview() + + XCTAssertTrue(refreshed) + XCTAssertTrue(model.isCurrentSmartCleanPlanFresh) + XCTAssertFalse(model.canExecuteCurrentSmartCleanPlan) + } + + func testRunSmartCleanScanMarksPlanAsFreshForCurrentSession() async throws { + let repository = makeRepository() + let worker = AtlasScaffoldWorkerService( + repository: repository, + smartCleanScanProvider: FakeSmartCleanProvider(), + allowStateOnlyCleanExecution: true + ) + let model = AtlasAppModel(repository: repository, workerService: worker) + + await model.runSmartCleanScan() + + XCTAssertTrue(model.isCurrentSmartCleanPlanFresh) + XCTAssertNil(model.smartCleanPlanIssue) + XCTAssertTrue(model.canExecuteCurrentSmartCleanPlan) + } + func testRunSmartCleanScanUpdatesSummaryProgressAndPlan() async throws { + let repository = makeRepository() + let worker = AtlasScaffoldWorkerService( + repository: repository, + smartCleanScanProvider: FakeSmartCleanProvider() + ) + let model = AtlasAppModel(repository: repository, workerService: worker) + + await model.runSmartCleanScan() + + XCTAssertEqual(model.snapshot.findings.count, 2) + XCTAssertEqual(model.currentPlan.items.count, 2) + XCTAssertEqual(model.latestScanProgress, 1) + XCTAssertTrue(model.latestScanSummary.contains("2 reclaimable item")) + } + + func testExecuteCurrentPlanMovesFindingsIntoRecovery() async throws { + let repository = makeRepository() + let worker = AtlasScaffoldWorkerService( + repository: repository, + smartCleanScanProvider: FakeSmartCleanProvider(), + allowStateOnlyCleanExecution: true + ) + let model = AtlasAppModel(repository: repository, workerService: worker) + let initialRecoveryCount = model.snapshot.recoveryItems.count + + await model.runSmartCleanScan() + await model.executeCurrentPlan() + + XCTAssertGreaterThan(model.snapshot.recoveryItems.count, initialRecoveryCount) + XCTAssertEqual(model.snapshot.taskRuns.first?.kind, .executePlan) + XCTAssertGreaterThan(model.latestScanProgress, 0) + } + + func testRefreshAppsUsesInventoryProvider() async throws { + let repository = makeRepository() + let worker = AtlasScaffoldWorkerService( + repository: repository, + appsInventoryProvider: FakeInventoryProvider() + ) + let model = AtlasAppModel(repository: repository, workerService: worker) + + await model.refreshApps() + + XCTAssertEqual(model.snapshot.apps.count, 1) + XCTAssertEqual(model.snapshot.apps.first?.name, "Sample App") + XCTAssertEqual(model.latestAppsSummary, AtlasL10n.string("application.apps.loaded.one")) + } + + func testRestoreRecoveryItemReturnsFindingToWorkspace() async throws { + let repository = makeRepository() + let worker = AtlasScaffoldWorkerService(repository: repository, allowStateOnlyCleanExecution: true) + let model = AtlasAppModel(repository: repository, workerService: worker) + + await model.executeCurrentPlan() + let recoveryItemID = try XCTUnwrap(model.snapshot.recoveryItems.first?.id) + let findingsCountAfterExecute = model.snapshot.findings.count + + await model.restoreRecoveryItem(recoveryItemID) + + XCTAssertGreaterThan(model.snapshot.findings.count, findingsCountAfterExecute) + XCTAssertFalse(model.snapshot.recoveryItems.contains(where: { $0.id == recoveryItemID })) + } + + func testSettingsUpdatePersistsThroughWorker() async throws { + let repository = makeRepository() + let permissionInspector = AtlasPermissionInspector( + homeDirectoryURL: FileManager.default.temporaryDirectory, + fullDiskAccessProbeURLs: [URL(fileURLWithPath: "/tmp/fda-probe")], + protectedLocationReader: { _ in false }, + accessibilityStatusProvider: { false }, + notificationsAuthorizationProvider: { false } + ) + let worker = AtlasScaffoldWorkerService( + repository: repository, + permissionInspector: permissionInspector, + allowStateOnlyCleanExecution: true + ) + let model = AtlasAppModel( + repository: repository, + workerService: worker, + notificationPermissionRequester: { true } + ) + + await model.setRecoveryRetentionDays(14) + await model.setNotificationsEnabled(false) + + XCTAssertEqual(model.settings.recoveryRetentionDays, 14) + XCTAssertFalse(model.settings.notificationsEnabled) + XCTAssertEqual(repository.loadSettings().recoveryRetentionDays, 14) + XCTAssertFalse(repository.loadSettings().notificationsEnabled) + } + + func testRefreshCurrentRouteRefreshesAppsWhenAppsSelected() async throws { + let repository = makeRepository() + let worker = AtlasScaffoldWorkerService( + repository: repository, + appsInventoryProvider: FakeInventoryProvider() + ) + let model = AtlasAppModel(repository: repository, workerService: worker) + + model.navigate(to: .apps) + await model.refreshCurrentRoute() + + XCTAssertEqual(model.selection, .apps) + XCTAssertEqual(model.snapshot.apps.count, 1) + XCTAssertEqual(model.snapshot.apps.first?.name, "Sample App") + XCTAssertEqual(model.latestAppsSummary, AtlasL10n.string("application.apps.loaded.one")) + } + + func testSetNotificationsEnabledRequestsNotificationPermissionWhenEnabling() async { + let repository = makeRepository() + let permissionInspector = AtlasPermissionInspector( + homeDirectoryURL: FileManager.default.temporaryDirectory, + fullDiskAccessProbeURLs: [URL(fileURLWithPath: "/tmp/fda-probe")], + protectedLocationReader: { _ in false }, + accessibilityStatusProvider: { false }, + notificationsAuthorizationProvider: { false } + ) + let worker = AtlasScaffoldWorkerService( + repository: repository, + permissionInspector: permissionInspector, + allowStateOnlyCleanExecution: true + ) + let recorder = NotificationPermissionRecorder() + let model = AtlasAppModel( + repository: repository, + workerService: worker, + notificationPermissionRequester: { await recorder.request() } + ) + + await model.setNotificationsEnabled(false) + await model.setNotificationsEnabled(true) + + let callCount = await recorder.callCount() + XCTAssertEqual(callCount, 1) + } + + func testRefreshPermissionsIfNeededUpdatesSnapshotFromWorker() async { + let repository = makeRepository() + let permissionInspector = AtlasPermissionInspector( + homeDirectoryURL: FileManager.default.temporaryDirectory, + fullDiskAccessProbeURLs: [URL(fileURLWithPath: "/tmp/fda-probe")], + protectedLocationReader: { _ in true }, + accessibilityStatusProvider: { true }, + notificationsAuthorizationProvider: { false } + ) + let worker = AtlasScaffoldWorkerService( + repository: repository, + permissionInspector: permissionInspector, + allowStateOnlyCleanExecution: true + ) + let model = AtlasAppModel(repository: repository, workerService: worker) + + await model.refreshPermissionsIfNeeded() + + XCTAssertEqual(model.snapshot.permissions.first(where: { $0.kind == .fullDiskAccess })?.isGranted, true) + XCTAssertEqual(model.snapshot.permissions.first(where: { $0.kind == .accessibility })?.isGranted, true) + XCTAssertEqual(model.snapshot.permissions.first(where: { $0.kind == .notifications })?.isGranted, false) + } + + func testToggleTaskCenterFlipsPresentationState() { + let model = AtlasAppModel(repository: makeRepository(), workerService: AtlasScaffoldWorkerService(allowStateOnlyCleanExecution: true)) + + XCTAssertFalse(model.isTaskCenterPresented) + model.toggleTaskCenter() + XCTAssertTrue(model.isTaskCenterPresented) + model.toggleTaskCenter() + XCTAssertFalse(model.isTaskCenterPresented) + } + + + func testSetLanguagePersistsThroughWorkerAndUpdatesLocalization() async throws { + let repository = makeRepository() + let worker = AtlasScaffoldWorkerService(repository: repository, allowStateOnlyCleanExecution: true) + let model = AtlasAppModel(repository: repository, workerService: worker) + + await model.setLanguage(.en) + + XCTAssertEqual(model.settings.language, .en) + XCTAssertEqual(repository.loadSettings().language, .en) + XCTAssertEqual(AtlasRoute.overview.title, "Overview") + } + + private func makeRepository() -> AtlasWorkspaceRepository { + AtlasWorkspaceRepository( + stateFileURL: FileManager.default.temporaryDirectory + .appendingPathComponent(UUID().uuidString, isDirectory: true) + .appendingPathComponent("workspace-state.json") + ) + } +} + +private struct FakeSmartCleanProvider: AtlasSmartCleanScanProviding { + func collectSmartCleanScan() async throws -> AtlasSmartCleanScanResult { + AtlasSmartCleanScanResult( + findings: [ + Finding(title: "Build Cache", detail: "Temporary build outputs.", bytes: 512_000_000, risk: .safe, category: "Developer", targetPaths: [FileManager.default.homeDirectoryForCurrentUser.appendingPathComponent("Library/Caches/FakeBuildCache.bin").path]), + Finding(title: "Old Runtime", detail: "Unused runtime assets.", bytes: 1_024_000_000, risk: .review, category: "Developer", targetPaths: [FileManager.default.homeDirectoryForCurrentUser.appendingPathComponent("Library/Developer/Xcode/DerivedData/FakeOldRuntime").path]), + ], + summary: "Smart Clean dry run found 2 reclaimable items." + ) + } +} + +private struct FakeInventoryProvider: AtlasAppInventoryProviding { + func collectInstalledApps() async throws -> [AppFootprint] { + [ + AppFootprint( + name: "Sample App", + bundleIdentifier: "com.example.sample", + bundlePath: "/Applications/Sample App.app", + bytes: 2_048_000_000, + leftoverItems: 3 + ) + ] + } +} + +private struct FailingSmartCleanProvider: AtlasSmartCleanScanProviding { + func collectSmartCleanScan() async throws -> AtlasSmartCleanScanResult { + throw NSError(domain: "AtlasAppModelTests", code: 1, userInfo: [NSLocalizedDescriptionKey: "Fixture scan failed."]) + } +} + +private actor NotificationPermissionRecorder { + private var calls = 0 + + func request() -> Bool { + calls += 1 + return true + } + + func callCount() -> Int { + calls + } +} diff --git a/Apps/AtlasAppUITests/AtlasAppUITests.swift b/Apps/AtlasAppUITests/AtlasAppUITests.swift new file mode 100644 index 0000000..2baea66 --- /dev/null +++ b/Apps/AtlasAppUITests/AtlasAppUITests.swift @@ -0,0 +1,85 @@ +import XCTest + +final class AtlasAppUITests: XCTestCase { + override func setUpWithError() throws { + continueAfterFailure = false + } + + func testSidebarShowsFrozenMVPRoutes() { + let app = makeApp() + app.launch() + + XCTAssertTrue(app.windows.firstMatch.waitForExistence(timeout: 5)) + let sidebar = app.outlines["atlas.sidebar"] + XCTAssertTrue(sidebar.waitForExistence(timeout: 5)) + + for routeID in ["overview", "smartClean", "apps", "history", "permissions", "settings"] { + XCTAssertTrue(app.staticTexts["route.\(routeID)"].waitForExistence(timeout: 3), "Missing route: \(routeID)") + } + } + + func testDefaultLanguageIsChineseAndCanSwitchToEnglish() { + let app = makeApp() + app.launch() + + XCTAssertTrue(app.staticTexts["概览"].waitForExistence(timeout: 5)) + app.staticTexts["route.settings"].click() + + let englishButton = app.buttons["English"] + let englishRadio = app.radioButtons["English"] + let didFindEnglishControl = englishButton.waitForExistence(timeout: 3) || englishRadio.waitForExistence(timeout: 3) + XCTAssertTrue(didFindEnglishControl) + if englishButton.exists { + englishButton.click() + XCTAssertTrue(englishButton.exists) + } else { + englishRadio.click() + XCTAssertTrue(englishRadio.exists) + } + } + + func testSmartCleanAndSettingsPrimaryControlsExist() { + let app = makeApp() + app.launch() + + let sidebar = app.outlines["atlas.sidebar"] + XCTAssertTrue(sidebar.waitForExistence(timeout: 5)) + + app.staticTexts["route.smartClean"].click() + XCTAssertTrue(app.buttons["smartclean.runScan"].waitForExistence(timeout: 5)) + XCTAssertTrue(app.buttons["smartclean.refreshPreview"].waitForExistence(timeout: 5)) + XCTAssertFalse(app.buttons["smartclean.executePreview"].waitForExistence(timeout: 2)) + + app.staticTexts["route.settings"].click() + XCTAssertTrue(app.segmentedControls["settings.language"].waitForExistence(timeout: 5) || app.radioGroups["settings.language"].waitForExistence(timeout: 5)) + XCTAssertTrue(app.switches["settings.notifications"].waitForExistence(timeout: 5)) + let recoveryPanelButton = app.buttons["settings.panel.recovery"] + XCTAssertTrue(recoveryPanelButton.waitForExistence(timeout: 5)) + recoveryPanelButton.click() + XCTAssertTrue(app.steppers["settings.recoveryRetention"].waitForExistence(timeout: 5)) + } + + func testKeyboardShortcutsNavigateAndOpenTaskCenter() { + let app = makeApp() + app.launch() + + let window = app.windows.firstMatch + XCTAssertTrue(window.waitForExistence(timeout: 5)) + + window.typeKey("2", modifierFlags: .command) + XCTAssertTrue(app.buttons["smartclean.runScan"].waitForExistence(timeout: 5)) + + window.typeKey("5", modifierFlags: .command) + XCTAssertTrue(app.buttons["permissions.refresh"].waitForExistence(timeout: 5)) + + window.typeKey("7", modifierFlags: .command) + XCTAssertTrue(app.otherElements["taskcenter.panel"].waitForExistence(timeout: 5)) + } + + private func makeApp() -> XCUIApplication { + let app = XCUIApplication() + let stateFile = NSTemporaryDirectory() + UUID().uuidString + "/workspace-state.json" + app.launchEnvironment["ATLAS_STATE_FILE"] = stateFile + return app + } +} diff --git a/Apps/Package.swift b/Apps/Package.swift new file mode 100644 index 0000000..c4812b0 --- /dev/null +++ b/Apps/Package.swift @@ -0,0 +1,43 @@ +// swift-tools-version: 5.10 +import PackageDescription + +let package = Package( + name: "AtlasApps", + platforms: [.macOS(.v14)], + products: [ + .executable(name: "AtlasApp", targets: ["AtlasApp"]), + ], + dependencies: [ + .package(path: "../Packages"), + ], + targets: [ + .executableTarget( + name: "AtlasApp", + dependencies: [ + .product(name: "AtlasApplication", package: "Packages"), + .product(name: "AtlasCoreAdapters", package: "Packages"), + .product(name: "AtlasDesignSystem", package: "Packages"), + .product(name: "AtlasDomain", package: "Packages"), + .product(name: "AtlasFeaturesApps", package: "Packages"), + .product(name: "AtlasFeaturesHistory", package: "Packages"), + .product(name: "AtlasFeaturesOverview", package: "Packages"), + .product(name: "AtlasFeaturesPermissions", package: "Packages"), + .product(name: "AtlasFeaturesSettings", package: "Packages"), + .product(name: "AtlasFeaturesSmartClean", package: "Packages"), + .product(name: "AtlasInfrastructure", package: "Packages"), + ], + path: "AtlasApp/Sources/AtlasApp", + resources: [.process("Assets.xcassets")] + ), + .testTarget( + name: "AtlasAppTests", + dependencies: [ + "AtlasApp", + .product(name: "AtlasApplication", package: "Packages"), + .product(name: "AtlasDomain", package: "Packages"), + .product(name: "AtlasInfrastructure", package: "Packages"), + ], + path: "AtlasApp/Tests/AtlasAppTests" + ), + ] +) diff --git a/Apps/README.md b/Apps/README.md new file mode 100644 index 0000000..48430d9 --- /dev/null +++ b/Apps/README.md @@ -0,0 +1,10 @@ +# Apps + +This directory contains user-facing application targets. + +## Current Entry + +- `AtlasApp/` hosts the main native macOS shell. +- `Package.swift` exposes the app shell as a SwiftPM executable target for local iteration. +- The app shell now wires fallback health, Smart Clean, app inventory, and helper integrations through the structured worker path. +- Root `project.yml` can regenerate `Atlas.xcodeproj` with `xcodegen generate` for native app packaging and installer production. diff --git a/Docs/ADR/ADR-001-Worker-and-Helper-Boundary.md b/Docs/ADR/ADR-001-Worker-and-Helper-Boundary.md new file mode 100644 index 0000000..0ae05f8 --- /dev/null +++ b/Docs/ADR/ADR-001-Worker-and-Helper-Boundary.md @@ -0,0 +1,21 @@ +# ADR-001: Worker and Helper Boundary + +## Status + +Accepted + +## Context + +Atlas for Mac needs long-running scanning and cleanup operations, but must avoid running privileged or shell-oriented logic directly inside the UI process. + +## Decision + +- Use a non-privileged worker process for orchestration and progress streaming. +- Use a separate privileged helper for approved structured actions only. +- Disallow arbitrary shell passthrough from the UI. + +## Consequences + +- Better crash isolation +- Clearer audit boundaries +- More initial setup complexity diff --git a/Docs/ADR/ADR-002-Protocol-and-Adapters.md b/Docs/ADR/ADR-002-Protocol-and-Adapters.md new file mode 100644 index 0000000..148292f --- /dev/null +++ b/Docs/ADR/ADR-002-Protocol-and-Adapters.md @@ -0,0 +1,21 @@ +# ADR-002: Structured Protocol and Adapter Layer + +## Status + +Accepted + +## Context + +Existing upstream capabilities are terminal-oriented and not suitable as a direct contract for a native GUI. + +## Decision + +- Define a structured local JSON protocol. +- Wrap reusable upstream logic behind adapters. +- Keep UI components unaware of script or terminal output format. + +## Consequences + +- Faster GUI iteration +- Safer schema evolution +- Additional adapter maintenance cost diff --git a/Docs/ADR/ADR-003-Workspace-State-and-MVP-Command-Expansion.md b/Docs/ADR/ADR-003-Workspace-State-and-MVP-Command-Expansion.md new file mode 100644 index 0000000..7ef08a8 --- /dev/null +++ b/Docs/ADR/ADR-003-Workspace-State-and-MVP-Command-Expansion.md @@ -0,0 +1,29 @@ +# ADR-003: Workspace State Persistence and MVP Command Expansion + +## Status + +Accepted + +## Context + +Atlas for Mac already had a native shell, worker transport, and upstream-backed adapters for health and Smart Clean dry runs, but several frozen MVP flows still depended on in-memory scaffold state. That left history, recovery, settings, and app uninstall behavior incomplete across launches and weakened the value of the worker boundary. + +## Decision + +- Persist a local JSON-backed workspace state for MVP. +- Store the latest workspace snapshot, current Smart Clean plan, and user settings together. +- Expand the structured worker protocol to cover missing frozen-scope flows: Smart Clean execute, recovery restore, apps list, app uninstall preview/execute, and settings get/set. +- Keep these flows behind the existing application/protocol/worker boundaries instead of adding direct UI-side mutations. + +## Consequences + +- History, recovery, app removal, and settings now survive beyond a single process lifetime. +- The UI can complete more of the MVP through stable worker commands without parsing or mutating raw script state. +- The protocol surface is larger and must stay synchronized with docs and tests. +- Local JSON persistence is acceptable for MVP, but future production hardening may require a more robust store. + +## Alternatives Considered + +- Keep all new flows in memory only — rejected because recovery and settings would reset across launches. +- Let the UI mutate app/history/settings state directly — rejected because it breaks the worker-first architecture. +- Introduce a database immediately — rejected because it adds complexity beyond MVP needs. diff --git a/Docs/ADR/ADR-004-Helper-Executable-and-Native-Packaging.md b/Docs/ADR/ADR-004-Helper-Executable-and-Native-Packaging.md new file mode 100644 index 0000000..a5d3853 --- /dev/null +++ b/Docs/ADR/ADR-004-Helper-Executable-and-Native-Packaging.md @@ -0,0 +1,29 @@ +# ADR-004: Helper Executable and Native Packaging Pipeline + +## Status + +Accepted + +## Context + +Atlas for Mac needed to move beyond a print-only helper stub and legacy CLI release workflows. The MVP required a structured helper execution path for destructive actions plus a native build/package pipeline that could produce a distributable macOS app bundle. + +## Decision + +- Implement the helper as a JSON-driven executable that validates allowlisted target paths before acting. +- Invoke the helper from the worker through a structured client rather than direct UI mutations. +- Build the app with `xcodegen + xcodebuild`, embed the helper binary into `Contents/Helpers/`, then emit `.zip`, `.dmg`, and `.pkg` distribution artifacts during packaging. +- Add a native GitHub Actions workflow that packages the app artifact and can optionally extend to signing/notarization when release credentials are available. + +## Consequences + +- The worker/helper boundary is now implemented as code, not just documentation. +- Local and CI environments can produce a real `.app` bundle, `.zip`, `.dmg`, and `.pkg` installer artifacts for MVP verification, with DMG installation validated into the user Applications folder. +- The helper is still not a fully blessed privileged service, so future release hardening may deepen this path. +- Packaging now depends on Xcode project generation remaining synchronized with `project.yml`. + +## Alternatives Considered + +- Keep the helper as a stub — rejected because uninstall and destructive flows would remain architecturally incomplete. +- Bundle no helper and let the worker mutate files directly — rejected because it weakens privilege boundaries. +- Delay native packaging until release week — rejected because it postpones critical integration risk discovery. diff --git a/Docs/ADR/ADR-005-Localization-and-App-Language-Preference.md b/Docs/ADR/ADR-005-Localization-and-App-Language-Preference.md new file mode 100644 index 0000000..2ca9baa --- /dev/null +++ b/Docs/ADR/ADR-005-Localization-and-App-Language-Preference.md @@ -0,0 +1,29 @@ +# ADR-005: Localization Framework and App-Language Preference + +## Status + +Accepted + +## Context + +Atlas for Mac needed a real multilingual foundation rather than scattered hard-coded English strings. The user requirement was to support Chinese and English first, default to Chinese, and keep the language choice aligned across the app shell, settings, and worker-generated summaries. + +## Decision + +- Add a package-scoped localization layer with structured resources in `AtlasDomain` so the Swift package graph can share one localization source. +- Persist the app-language preference in `AtlasSettings` and default it to `zh-Hans`. +- Inject the selected locale at the app shell while also using the persisted setting to localize worker-generated summaries and settings-derived copy. +- Keep localized legal copy derived from the selected language rather than treating it as ad hoc free text. + +## Consequences + +- The app now supports `简体中文` and `English` with Chinese as the default user experience. +- Settings persistence, protocol payloads, and local workspace state now include the app-language preference. +- UI automation needs stable identifiers rather than relying only on visible text, because visible labels can now change by language. +- Future languages can be added by extending the shared localization resources rather than editing each screen in isolation. + +## Alternatives Considered + +- Use system language only and skip an in-app switch — rejected because the requirement explicitly needed in-app Chinese/English switching. +- Store language only in app-local UI state — rejected because worker-generated summaries and persisted settings copy would drift from the selected language. +- Localize each feature independently without a shared resource layer — rejected because it would create duplication and drift across the package graph. diff --git a/Docs/ADR/ADR-006-Fail-Closed-Execution-Capability.md b/Docs/ADR/ADR-006-Fail-Closed-Execution-Capability.md new file mode 100644 index 0000000..236062d --- /dev/null +++ b/Docs/ADR/ADR-006-Fail-Closed-Execution-Capability.md @@ -0,0 +1,33 @@ +# ADR-006: Fail-Closed Execution Capability + +## Status + +Accepted + +## Context + +Atlas currently mixes real scanning with scaffold/state-based execution in some flows, especially `Smart Clean`. This creates a user trust gap: the product can appear to have cleaned disk space even when a subsequent real scan rediscovers the same data. + +The worker selection path also allowed silent fallback from XPC to the scaffold worker, which could mask infrastructure failures and blur the line between real execution and development fallback behavior. + +## Decision + +- Release-facing execution paths must fail closed when real execution capability is unavailable. +- Silent fallback from XPC to the scaffold worker is opt-in for development only. +- `Smart Clean` scan must reject when the upstream scan adapter fails, instead of silently fabricating findings from scaffold data. +- `Smart Clean` execute must reject while only state-based execution is available, but may execute a real Trash-based path for structured safe targets. +- Recovery may physically restore targets when structured trash-to-original mappings are available. + +## Consequences + +- Users get a truthful failure instead of a misleading success. +- Development and tests can still opt into scaffold fallback and state-only execution explicitly. +- `Smart Clean` execute now supports a partial real execution path for structured safe targets. +- The system now carries structured executable targets and `scan -> execute -> rescan` contract coverage for that subset. +- Broader Smart Clean categories and full physical recovery coverage still need follow-up implementation. + +## Alternatives Considered + +- Keep silent fallback and state-only execution — rejected because it misrepresents execution capability. +- Run `bin/clean.sh` directly for plan execution — rejected because the current upstream command surface is not scoped to the reviewed Atlas plan and would bypass recovery-first guarantees. +- Hide the execute button only in UI — rejected because the trust problem exists in the worker boundary, not only the presentation layer. diff --git a/Docs/ATTRIBUTION.md b/Docs/ATTRIBUTION.md new file mode 100644 index 0000000..711cd43 --- /dev/null +++ b/Docs/ATTRIBUTION.md @@ -0,0 +1,26 @@ +# Attribution + +## Product Branding Rule + +Atlas for Mac is an independent product and does not use the Mole brand in user-facing naming. + +## Upstream Acknowledgement + +The project acknowledges the open-source project `Mole` by `tw93 and contributors` as an upstream inspiration and potential source of reused or adapted code. + +## User-Facing Copy + +Recommended acknowledgement copy: + +> Atlas for Mac includes software derived from the open-source project Mole by tw93 and contributors, used under the MIT License. Atlas for Mac is an independent product and is not affiliated with or endorsed by the original authors. + +## Placement + +- `Settings > Acknowledgements` +- `About > Open Source` +- repository-level third-party notice files +- release bundle notice materials + +## Maintenance Rule + +If upstream-derived code is shipped, keep the copyright notice and MIT license text available in distributed materials. diff --git a/Docs/Architecture.md b/Docs/Architecture.md new file mode 100644 index 0000000..7a43e65 --- /dev/null +++ b/Docs/Architecture.md @@ -0,0 +1,81 @@ +# Architecture + +## High-Level Topology + +- `AtlasApp` — main macOS application shell +- `AtlasWorkerXPC` — non-privileged worker service +- `AtlasPrivilegedHelper` — allowlisted helper executable for structured destructive actions +- `AtlasCoreAdapters` — wrappers around reusable upstream and local system capabilities +- `AtlasStore` — persistence for runs, rules, recovery, settings, diagnostics, and the app-language preference + +## Layering + +### Presentation + +- SwiftUI scenes and views +- Navigation state +- View models or reducers +- App-language selection and locale injection at the app shell + +### Application + +- Use cases such as `StartScan`, `PreviewPlan`, `ExecutePlan`, `RestoreItems` +- App uninstall flows: `ListApps`, `PreviewAppUninstall`, `ExecuteAppUninstall` +- Settings flows: `GetSettings`, `UpdateSettings` + +### Domain + +- `Finding` +- `ActionPlan` +- `ActionItem` +- `TaskRun` +- `RecoveryItem` +- `RecoveryPayload` +- `AppFootprint` +- `PermissionState` +- `AtlasSettings` +- `AtlasLanguage` + +### Infrastructure + +- XPC transport +- JSON-backed workspace state persistence +- Logging and audit events +- Best-effort permission inspection +- Helper executable client +- Process orchestration + +### Execution + +- Upstream adapters: `MoleHealthAdapter`, `MoleSmartCleanAdapter` +- Release and packaged worker flows load upstream shell runtime from bundled `MoleRuntime` resources instead of source-tree paths +- Local adapters: `MacAppsInventoryAdapter` +- Recovery-first state mutation for Smart Clean and app uninstall flows +- Allowlisted helper actions for bundle trashing, restoration, and launch-service removal +- Release-facing execution must fail closed when real worker/adapter/helper capability is unavailable; scaffold fallback is development-only by opt-in +- Smart Clean now supports a real Trash-based execution path for a safe structured subset of user-owned targets, plus physical restoration when recovery mappings are present + +## Process Boundaries + +- UI must not parse shell output directly. +- UI must not execute privileged shell commands directly. +- `AtlasWorkerXPC` owns long-running task orchestration and progress events. +- Direct-distribution builds default to the same real worker implementation in-process; `AtlasWorkerXPC` remains available behind `ATLAS_PREFER_XPC_WORKER=1` for explicit runtime validation. +- `AtlasPrivilegedHelper` accepts structured actions only and validates paths before acting. +- Persistent workspace mutation belongs behind the repository/worker boundary rather than ad hoc UI state. +- UI copy localization is sourced from structured package resources instead of hard-coded per-screen strings. + +## Distribution Direction + +- Distribution target: `Developer ID + Hardened Runtime + Notarization` +- Initial release target: direct distribution, not Mac App Store +- Native packaging currently uses `xcodegen + xcodebuild`, embeds the helper into `Contents/Helpers/`, and emits `.zip`, `.dmg`, and `.pkg` distribution artifacts. +- Local internal packaging now prefers a stable non-ad-hoc app signature when a usable identity is available, so macOS TCC decisions can survive rebuilds more reliably during development. +- If Apple release certificates are unavailable, Atlas can fall back to a repo-managed local signing keychain for stable app-bundle identity; public release artifacts still require `Developer ID`. + +## Security Principles + +- Least privilege by default +- Explain permission need before request +- Prefer `Trash` or recovery-backed restore paths +- Audit all destructive actions diff --git a/Docs/Backlog.md b/Docs/Backlog.md new file mode 100644 index 0000000..0fedf0e --- /dev/null +++ b/Docs/Backlog.md @@ -0,0 +1,212 @@ +# Backlog + +## Board Model + +### Status + +- `Backlog` +- `Ready` +- `In Progress` +- `In Review` +- `Blocked` +- `Done` +- `Frozen` + +### Priority + +- `P0` — required for MVP viability +- `P1` — important but can follow MVP +- `P2` — exploratory or future work + +## MVP Scope + +- `Overview` +- `Smart Clean` +- `Apps` +- `History` +- `Recovery` +- `Permissions` +- `Settings` + +## Deferred to P1 + +- `Storage treemap` +- `Menu Bar` +- `Automation` + +## Epics + +- `EPIC-01` Brand and Compliance +- `EPIC-02` Information Architecture and Interaction Design +- `EPIC-03` Protocol and Domain Model +- `EPIC-04` App Shell and Engineering Scaffold +- `EPIC-05` Scan and Action Plan +- `EPIC-06` Apps and Uninstall +- `EPIC-07` History and Recovery +- `EPIC-08` Permissions and System Integration +- `EPIC-09` Quality and Verification +- `EPIC-10` Packaging, Signing, and Release + +## Now / Next / Later + +### Now + +- Week 1 scope freeze +- Week 2 design freeze for core screens +- Week 3 architecture and protocol freeze + +### Next + +- Week 4 scaffold creation +- Week 5 scan pipeline +- Week 6 action-plan preview and execute path + +### Later + +- Week 7 apps flow +- Week 8 permissions, history, recovery +- Week 9 helper integration +- Week 10 hardening +- Week 11 beta candidate +- Week 12 release-readiness review + +## Seed Issues + +### Week 1 + +- `ATL-001` Freeze naming rules — `Product Agent` +- `ATL-002` Freeze MVP scope — `Product Agent` +- `ATL-003` Define goals and metrics — `Product Agent` +- `ATL-004` Start decision and risk log — `Product Agent` +- `ATL-005` Draft `IA v1` — `UX Agent` +- `ATL-006` Draft three core flows — `UX Agent` +- `ATL-007` Draft page-state matrix — `UX Agent` +- `ATL-008` Define domain models — `Core Agent` +- `ATL-009` Define protocol — `Core Agent` +- `ATL-010` Define task state and errors — `Core Agent` +- `ATL-011` Draft worker/helper boundary — `System Agent` +- `ATL-012` Draft permission matrix — `System Agent` +- `ATL-013` Audit upstream reusable capabilities — `Adapter Agent` +- `ATL-014` Report JSON adaptation blockers — `Adapter Agent` +- `ATL-017` Create acceptance matrix — `QA Agent` +- `ATL-019` Draft attribution docs — `Docs Agent` +- `ATL-020` Week 1 gate review — `Product Agent` + +### Week 2 + +- `ATL-021` `Overview` high-fidelity design — `UX Agent` +- `ATL-022` `Smart Clean` high-fidelity design — `UX Agent` +- `ATL-023` `Apps` high-fidelity design — `UX Agent` +- `ATL-024` Permission explainer sheets — `UX Agent` +- `ATL-025` Freeze `Protocol v1.1` — `Core Agent` +- `ATL-026` Freeze persistence model — `Core Agent` +- `ATL-027` Draft worker XPC interface — `System Agent` +- `ATL-028` Draft helper allowlist — `System Agent` +- `ATL-029` Draft package and target graph — `Mac App Agent` +- `ATL-030` Draft navigation and state model — `Mac App Agent` +- `ATL-031` Draft scan adapter chain — `Adapter Agent` +- `ATL-032` Draft app-footprint adapter chain — `Adapter Agent` +- `ATL-034` MVP acceptance matrix v1 — `QA Agent` +- `ATL-036` Attribution file v1 — `Docs Agent` +- `ATL-037` Third-party notices v1 — `Docs Agent` +- `ATL-040` Week 2 gate review — `Product Agent` + +### Week 3 + +- `ATL-041` Freeze `Architecture v1` — `Core Agent` + `System Agent` +- `ATL-042` Freeze `Protocol Schema v1` — `Core Agent` +- `ATL-043` Freeze error registry — `Core Agent` +- `ATL-044` Freeze task state machine — `Core Agent` +- `ATL-045` Freeze persistence model — `Core Agent` +- `ATL-046` Freeze worker XPC method set — `System Agent` +- `ATL-047` Freeze helper action allowlist — `System Agent` +- `ATL-048` Freeze helper validation rules — `System Agent` +- `ATL-049` Freeze app-shell route map — `Mac App Agent` +- `ATL-050` Freeze package dependency graph — `Mac App Agent` +- `ATL-052` Freeze scan adapter path — `Adapter Agent` +- `ATL-053` Freeze apps list adapter path — `Adapter Agent` +- `ATL-056` Draft contract test suite — `QA Agent` +- `ATL-060` Week 3 gate review — `Product Agent` + +## Post-MVP Polish Track + +### Current Status + +- `Complete` — UI audit completed with explicit `P0 / P1 / P2` remediation directions in `Docs/Execution/UI-Audit-2026-03-08.md`. +- `Complete` — frozen MVP workflows are implemented end to end. +- `Complete` — post-MVP polish for trust, hierarchy, loading states, keyboard flow, and accessibility. +- `Complete` — Chinese-first bilingual localization framework with persisted app-language switching. +- `Open` — manual localization QA and release-signing/notarization remain as the main next steps. + +### Focus + +- Make the existing MVP feel safe, clear, and native before expanding scope. +- Prioritize first-use trust, smooth feedback, and visual consistency across the frozen MVP modules. +- Keep polish work inside `Overview`, `Smart Clean`, `Apps`, `History`, `Recovery`, `Permissions`, and `Settings`. + +### Epics + +- `EPIC-11` First-Run Activation and Permission Trust +- `EPIC-12` Smart Clean Explainability and Execution Confidence +- `EPIC-13` Apps Uninstall Confidence and Recovery Clarity +- `EPIC-14` Visual System and Interaction Consistency +- `EPIC-15` Perceived Performance and State Coverage + +### Now / Next / Later + +#### Now + +- Run manual bilingual QA on a clean machine +- Validate first-launch behavior with a fresh workspace-state file +- Prepare signed packaging inputs if external distribution is needed + +#### Next + +- Add additional supported languages only after translation QA and copy governance are in place +- Revisit post-beta manual polish items that require human UX review rather than more structural engineering work +- Convert the current unsigned packaging flow into a signed and notarized release path + +#### Later + +- Extend localization coverage to future deferred modules when scope reopens +- Add localization linting or snapshot checks if the language matrix expands +- Revisit copy tone and translation review during release hardening + +### Seed Issues + +#### Polish Week 1 + +- `ATL-101` Audit state coverage for all MVP screens — `UX Agent` +- `ATL-102` Define polish scorecard and acceptance targets — `Product Agent` +- `ATL-103` Refresh shared design tokens and card hierarchy — `Mac App Agent` +- `ATL-104` Polish `Smart Clean` scan controls, preview hierarchy, and execution feedback — `Mac App Agent` +- `ATL-105` Polish `Apps` uninstall preview, leftovers messaging, and recovery cues — `Mac App Agent` +- `ATL-106` Rewrite trust-critical copy for permissions, destructive actions, and restore paths — `UX Agent` +- `ATL-107` Add loading, empty, error, and partial-permission states to the primary screens — `Mac App Agent` +- `ATL-108` Add narrow UI verification for first-run, scan, and uninstall flows — `QA Agent` +- `ATL-110` Polish Week 1 gate review — `Product Agent` + +#### Polish Week 2 + +- `ATL-111` Tighten `Overview` information density and recommendation ranking — `UX Agent` +- `ATL-112` Improve `History` readability and restore confidence markers — `Mac App Agent` +- `ATL-113` Improve `Permissions` guidance for limited mode and just-in-time prompts — `UX Agent` +- `ATL-114` Normalize cross-screen action labels, confirmation sheets, and completion summaries — `Docs Agent` +- `ATL-115` Measure perceived latency and remove avoidable visual jumps in core flows — `QA Agent` +- `ATL-116` Polish Week 2 gate review — `Product Agent` + +## Definition of Ready + +- Scope is clear and bounded +- Dependencies are listed +- Owner Agent is assigned +- Acceptance criteria are testable +- Deliverable format is known + +## Definition of Done + +- Acceptance criteria are satisfied +- Relevant docs are updated +- Decision log is updated if scope or architecture changed +- Risks and blockers are recorded +- Handoff notes are attached diff --git a/Docs/COPY_GUIDELINES.md b/Docs/COPY_GUIDELINES.md new file mode 100644 index 0000000..4627bd0 --- /dev/null +++ b/Docs/COPY_GUIDELINES.md @@ -0,0 +1,53 @@ +# Copy Guidelines + +## Tone + +- Calm +- Direct +- Reassuring +- Technical only when necessary + +## Product Voice + +- Explain what happened first. +- Explain impact second. +- Offer a next step every time. +- Avoid fear-based maintenance language. + +## Good Patterns + +- `Results may be incomplete without Full Disk Access.` +- `You can keep using limited mode and grant access later.` +- `Most selected actions are recoverable.` + +## Avoid + +- `Critical error` +- `Illegal operation` +- `You must allow this` +- `Your Mac is at risk` + +## CTA Style + +- Use clear verbs: `Retry`, `Open System Settings`, `Review Plan`, `Restore` +- Avoid generic CTA labels such as `OK` and `Continue` + +## Glossary + +- `Scan` — read-only analysis that collects findings; it never removes anything by itself. +- `Cleanup Plan` / `Uninstall Plan` — the actionable set of reviewed steps Atlas proposes from current findings. +- `Review` — the user checks the plan before it runs. Avoid using `preview` as the primary noun when the UI is really showing a plan. +- `Run Plan` / `Run Uninstall` — apply a reviewed plan. Use this for the action that changes the system. +- `Reclaimable Space` — the estimated space the current plan can free. Make it explicit when the value recalculates after execution. +- `Recoverable` — Atlas can restore the result from History while the retention window is still open. +- `App Footprint` — the current disk space an app uses. +- `Leftover Files` — extra support files, caches, or launch items related to an app uninstall. +- `Limited Mode` — Atlas works with partial permissions and asks for more access only when a specific workflow needs it. + +## Consistency Rules + +- Prefer `plan` over `preview` when referring to the actionable object the user can run. +- Use `review` for the decision step before execution, not for the execution step itself. +- If a button opens macOS settings, label it `Open System Settings` instead of implying Atlas grants access directly. +- Distinguish `current plan` from `remaining items after execution` whenever reclaimable-space values can change. +- Keep permission language calm and reversible: explain what access unlocks, whether it can wait, and what the next step is. diff --git a/Docs/DECISIONS.md b/Docs/DECISIONS.md new file mode 100644 index 0000000..b08bf3d --- /dev/null +++ b/Docs/DECISIONS.md @@ -0,0 +1,61 @@ +# Decision Log + +## Frozen Decisions + +### D-001 Naming + +- Internal product name: `Atlas for Mac` +- User-facing naming must not use `Mole` + +### D-002 Open-Source Attribution + +- Atlas for Mac is an independent product +- Upstream attribution must acknowledge Mole by tw93 and contributors +- Shipped materials must include MIT notice when upstream-derived code is distributed + +### D-003 Distribution + +- MVP distribution target is direct distribution +- Use `Developer ID + Hardened Runtime + Notarization` +- Do not target Mac App Store for MVP + +### D-004 MVP Scope + +- In scope: `Overview`, `Smart Clean`, `Apps`, `History`, `Recovery`, `Permissions`, `Settings` +- Out of MVP: `Storage treemap`, `Menu Bar`, `Automation` + +### D-005 Process Boundaries + +- UI must not parse terminal output directly +- Privileged actions must go through a structured helper boundary +- Worker owns long-running orchestration and progress streaming + +### D-006 MVP Persistence and Command Surface + +- MVP workspace state is persisted locally as a structured JSON store +- Settings, history, recovery, Smart Clean execute, and app uninstall flows use structured worker commands +- UI state should reflect repository-backed worker results instead of direct mutation + +### D-007 Helper Execution and Native Packaging + +- Destructive helper actions use a structured executable boundary with path validation +- Native MVP packaging uses `xcodegen + xcodebuild`, then embeds the helper into the app bundle +- Signing and notarization remain optional release-time steps driven by credentials +- Internal packaging should prefer a stable local app-signing identity over ad hoc signing whenever possible so macOS permission state does not drift across rebuilds + +### D-008 App Language Preference and Localization + +- MVP now supports `简体中文` and `English` through a persisted app-language preference +- The default app language is `简体中文` +- User-facing shell copy is localized through package-scoped resources instead of hard-coded per-screen strings +- The language preference is stored alongside other settings so worker-generated summaries stay aligned with UI language + +### D-009 Execution Capability Honesty + +- User-facing execution flows must fail closed when real disk-backed execution is unavailable +- Atlas must not silently fall back to scaffold behavior for release-facing cleanup execution +- Smart Clean execute must not claim success until real filesystem side effects are implemented + +## Update Rule + +Add a new decision entry whenever product scope, protocol, privilege boundaries, release route, or recovery model changes. diff --git a/Docs/DESIGN_SPEC.md b/Docs/DESIGN_SPEC.md new file mode 100644 index 0000000..f68b4db --- /dev/null +++ b/Docs/DESIGN_SPEC.md @@ -0,0 +1,878 @@ +# Atlas for Mac — Design Specification v2 + +> **Status**: Ready for implementation +> **Brand Token 文件**: `Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasBrand.swift` (已创建并编译通过) + +--- + +## 1. Brand Identity + +### 1.1 品牌概念:Calm Authority(沉稳的权威感) + +Atlas — 如同制图师为你的系统绘制地形图。精确、可信、从容不迫。 + +### 1.2 色彩体系 + +| Token | Light Mode | Dark Mode | 用途 | +|-------|-----------|-----------|------| +| `AtlasColor.brand` | `#0F766E` 深青绿 | `#148F85` 亮青绿 | 主色调、主要按钮、激活状态 | +| `AtlasColor.accent` | `#34D399` 清新薄荷绿 | `#52E2B5` 明亮薄荷绿 | 高亮、徽章、品牌点缀 | +| `AtlasColor.success` | systemGreen | systemGreen | 安全、已授权、已完成 | +| `AtlasColor.warning` | systemOrange | systemOrange | 需审查、运行中 | +| `AtlasColor.danger` | systemRed | systemRed | 失败、高级风险 | +| `AtlasColor.card` | controlBackgroundColor | controlBackgroundColor | 卡片基底 | +| `AtlasColor.cardRaised` | `white @ 65%` | `white @ 6%` | 浮起卡片的玻璃质感层 | +| `AtlasColor.border` | `primary @ 8%` | `primary @ 8%` | 普通卡片描边 | +| `AtlasColor.borderEmphasis` | `primary @ 14%` | `primary @ 14%` | 高亮卡片/焦点态描边 | + +### 1.3 字体标尺 + +| Token | 定义 | 使用场景 | +|-------|------|---------| +| `AtlasTypography.heroMetric` | 40pt bold rounded | Dashboard 最重要的单一数值 | +| `AtlasTypography.screenTitle` | 34pt bold rounded | 每个屏幕的大标题 | +| `AtlasTypography.cardMetric` | 28pt bold rounded | 网格中的指标卡数值 | +| `AtlasTypography.sectionTitle` | title3 semibold | InfoCard 内的分区标题 | +| `AtlasTypography.label` | subheadline semibold | 指标标题、侧边栏主文本 | +| `AtlasTypography.rowTitle` | headline | DetailRow 标题 | +| `AtlasTypography.body` | subheadline | 正文说明 | +| `AtlasTypography.caption` | caption semibold | Chip、脚注、overline | + +### 1.4 间距网格 (4pt base) + +| Token | 值 | 场景 | +|-------|-----|------| +| `AtlasSpacing.xxs` | 4pt | 最小内边距 | +| `AtlasSpacing.xs` | 6pt | Chip 内边距 | +| `AtlasSpacing.sm` | 8pt | 行间距紧凑 | +| `AtlasSpacing.md` | 12pt | 元素间默认间距 | +| `AtlasSpacing.lg` | 16pt | 卡片内边距、分区间距 | +| `AtlasSpacing.xl` | 20pt | 宽卡片内边距 | +| `AtlasSpacing.xxl` | 24pt | 屏幕级垂直节奏 | +| `AtlasSpacing.screenH` | 28pt | 屏幕水平边距 | +| `AtlasSpacing.section` | 32pt | 大分区间隔 | + +### 1.5 圆角 + +| Token | 值 | 场景 | +|-------|-----|------| +| `AtlasRadius.sm` | 8pt | Chip、Tag | +| `AtlasRadius.md` | 12pt | Callout、内嵌卡片 | +| `AtlasRadius.lg` | 16pt | DetailRow、紧凑卡片 | +| `AtlasRadius.xl` | 20pt | 标准 InfoCard/MetricCard | +| `AtlasRadius.xxl` | 24pt | 高亮/英雄卡片 | + +### 1.6 三级高程(Elevation) + +| 级别 | 阴影 | 圆角 | 描边 | 用途 | +|------|------|------|------|------| +| `.flat` | 无 | 16pt | 4% opacity | 嵌套内容、行内子卡片 | +| `.raised` | r18 y10 @5% | 20pt | 8% opacity | 默认卡片(AtlasInfoCard/MetricCard) | +| `.prominent` | r28 y16 @9% + 内发光 | 24pt | 12% opacity, 1.5pt | 英雄指标、主操作区 | + +### 1.7 动画曲线 + +| Token | 值 | 场景 | +|-------|-----|------| +| `AtlasMotion.fast` | snappy 0.15s | hover、按压、chip | +| `AtlasMotion.standard` | snappy 0.22s | 选择、切换、卡片状态 | +| `AtlasMotion.slow` | snappy 0.35s | 页面转场、英雄揭示 | +| `AtlasMotion.spring` | spring(0.45, 0.7) | 完成庆祝、弹性反馈 | + +### 1.8 按钮层级 + +| 样式 | 外观 | 场景 | +|------|------|------| +| `.atlasPrimary` | 品牌色填充胶囊 + 投影 + 按压缩放 | 每屏唯一最重要 CTA | +| `.atlasSecondary` | 品牌色描边胶囊 + 淡底 | 辅助操作 | +| `.atlasGhost` | 纯文字 + hover 淡底 | 低频操作 | + +--- + +## 2. 设计系统组件迁移 + +> 所有修改在 `AtlasDesignSystem.swift` 中进行。`AtlasBrand.swift` 已包含新 Token,不需要修改。 + +### 2.1 AtlasScreen — 约束阅读宽度 + 移除冗余 overline + +**文件**: `Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasDesignSystem.swift` + +**当前问题**: +- line 100: `.frame(maxWidth: .infinity)` 导致宽窗口下文本行过长 +- line 109: 每屏都显示 "Atlas for Mac" overline,冗余 + +**改动**: + +```swift +// body 中 ScrollView 内的 VStack 改为: +ScrollView { + VStack(alignment: .leading, spacing: AtlasSpacing.xxl) { + header + content + } + .frame(maxWidth: AtlasLayout.maxReadingWidth, alignment: .leading) + .padding(.horizontal, AtlasSpacing.screenH) + .padding(.vertical, AtlasSpacing.xxl) + .frame(maxWidth: .infinity, alignment: .leading) // 外层居中容器 +} +``` + +**header 改为**: +- 移除 "Atlas for Mac" overline(line 109-113 整块删除) +- 使用 `AtlasTypography.screenTitle` 替换 line 117 的硬编码字号 + +```swift +private var header: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.sm) { + Text(title) + .font(AtlasTypography.screenTitle) + + Text(subtitle) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } +} +``` + +### 2.2 AtlasMetricCard — 支持 elevation 参数 + 使用 Token + +**文件**: `AtlasDesignSystem.swift` + +**改动**: +- 新增 `elevation: AtlasElevation = .raised` 参数 +- 替换 line 165 硬编码字号为 `AtlasTypography.cardMetric` +- 替换 line 160 硬编码字号为 `AtlasTypography.label` +- 替换 line 175 硬编码 `padding(18)` 为 `padding(AtlasSpacing.xl)` +- 替换 line 176-177 的 `cardBackground`/`cardBorder` 为 `atlasCardBackground`/`atlasCardBorder`(传入 elevation) + +```swift +public struct AtlasMetricCard: View { + private let title: String + private let value: String + private let detail: String + private let tone: AtlasTone + private let systemImage: String? + private let elevation: AtlasElevation // 新增 + + public init( + title: String, + value: String, + detail: String, + tone: AtlasTone = .neutral, + systemImage: String? = nil, + elevation: AtlasElevation = .raised // 新增 + ) { + self.title = title + self.value = value + self.detail = detail + self.tone = tone + self.systemImage = systemImage + self.elevation = elevation + } + + public var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + HStack(alignment: .center, spacing: AtlasSpacing.md) { + if let systemImage { + Image(systemName: systemImage) + .font(.headline) + .foregroundStyle(tone.tint) + .accessibilityHidden(true) + } + Text(title) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + } + + Text(value) + .font(elevation == .prominent ? AtlasTypography.heroMetric : AtlasTypography.cardMetric) + .foregroundStyle(.primary) + .contentTransition(.numericText()) + + Text(detail) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(AtlasSpacing.xl) + .background(atlasCardBackground(tone: tone, elevation: elevation)) + .overlay(atlasCardBorder(tone: tone, elevation: elevation)) + .accessibilityElement(children: .ignore) + .accessibilityLabel(Text(title)) + .accessibilityValue(Text(value)) + .accessibilityHint(Text(detail)) + } +} +``` + +### 2.3 AtlasInfoCard — 使用 Token + +**文件**: `AtlasDesignSystem.swift` + +**改动**: +- 替换 line 204 `spacing: 18` → `AtlasSpacing.xl` +- 替换 line 209 `.title3.weight(.semibold)` → `AtlasTypography.sectionTitle` +- 替换 line 214 `.subheadline` → `AtlasTypography.body` +- 替换 line 224 `padding(22)` → `padding(AtlasSpacing.xxl)` +- 替换 line 225-226 为 `atlasCardBackground`/`atlasCardBorder` + +### 2.4 AtlasCallout — 使用 Token + +**文件**: `AtlasDesignSystem.swift` + +**改动**: +- 替换 line 249 `spacing: 14` → `AtlasSpacing.lg` +- 替换 line 256 `spacing: 6` → `AtlasSpacing.xs` +- 替换 line 258 `.headline` → `AtlasTypography.rowTitle` +- 替换 line 261 `.subheadline` → `AtlasTypography.body` +- 替换 line 266 `padding(16)` → `padding(AtlasSpacing.lg)` +- 替换 line 269 `cornerRadius: 16` → `AtlasRadius.lg` +- 替换 line 273 `cornerRadius: 16` → `AtlasRadius.lg` + +### 2.5 AtlasDetailRow — 使用 Token + 添加 hover 效果 + +**文件**: `AtlasDesignSystem.swift` + +**改动**: +- line 307 `spacing: 14` → `AtlasSpacing.lg` +- line 312 `frame(width: 36, height: 36)` → `frame(width: AtlasLayout.sidebarIconSize + 4, height: AtlasLayout.sidebarIconSize + 4)` +- line 321 `spacing: 6` → `AtlasSpacing.xs` +- line 338 `Spacer(minLength: 16)` → `Spacer(minLength: AtlasSpacing.lg)` +- line 343 `padding(16)` → `padding(AtlasSpacing.lg)` +- line 345-347 替换为 `.fill(AtlasColor.cardRaised)` 并使用 `AtlasRadius.lg` +- line 350 `Color.primary.opacity(0.06)` → `AtlasColor.border` +- **新增**: 在 `.overlay` 之后添加 `.atlasHover()` + +### 2.6 AtlasStatusChip — 使用 Token + +**文件**: `AtlasDesignSystem.swift` + +**改动**: +- line 421 `.caption.weight(.semibold)` → `AtlasTypography.caption` +- line 422 `padding(.horizontal, 10)` → `padding(.horizontal, AtlasSpacing.md)` +- line 423 `padding(.vertical, 6)` → `padding(.vertical, AtlasSpacing.xs)` + +### 2.7 AtlasEmptyState — 更有个性 + +**文件**: `AtlasDesignSystem.swift` + +**改动**: +- 图标容器从 56x56 放大到 72x72 +- 圆形背景改为渐变填充 +- 添加外圈装饰环 +- 增加整体 padding + +```swift +public var body: some View { + VStack(spacing: AtlasSpacing.lg) { + ZStack { + // 外圈装饰环 + Circle() + .strokeBorder(tone.border, lineWidth: 0.5) + .frame(width: 80, height: 80) + + // 渐变填充背景 + Circle() + .fill( + LinearGradient( + colors: [tone.softFill, tone.softFill.opacity(0.3)], + startPoint: .topLeading, + endPoint: .bottomTrailing + ) + ) + .frame(width: 72, height: 72) + + Image(systemName: systemImage) + .font(.system(size: 28, weight: .semibold)) + .foregroundStyle(tone.tint) + .accessibilityHidden(true) + } + + VStack(spacing: AtlasSpacing.xs) { + Text(title) + .font(AtlasTypography.rowTitle) + + Text(detail) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .multilineTextAlignment(.center) + .fixedSize(horizontal: false, vertical: true) + } + } + .frame(maxWidth: .infinity) + .padding(AtlasSpacing.section) + .background( + RoundedRectangle(cornerRadius: AtlasRadius.xl, style: .continuous) + .fill(Color.primary.opacity(0.03)) + ) + .overlay( + RoundedRectangle(cornerRadius: AtlasRadius.xl, style: .continuous) + .strokeBorder(Color.primary.opacity(0.06), lineWidth: 1) + ) + .accessibilityElement(children: .ignore) + .accessibilityLabel(Text(title)) + .accessibilityValue(Text(detail)) +} +``` + +### 2.8 AtlasLoadingState — 添加脉冲动画 + 使用 Token + +**文件**: `AtlasDesignSystem.swift` + +**改动**: + +```swift +public struct AtlasLoadingState: View { + private let title: String + private let detail: String + private let progress: Double? + @State private var pulsePhase = false + + public init(title: String, detail: String, progress: Double? = nil) { + self.title = title + self.detail = detail + self.progress = progress + } + + public var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + HStack(spacing: AtlasSpacing.md) { + ProgressView() + .controlSize(.small) + .accessibilityHidden(true) + + Text(title) + .font(AtlasTypography.rowTitle) + } + + Text(detail) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + + if let progress { + ProgressView(value: progress, total: 1) + .controlSize(.large) + } + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(AtlasSpacing.xl) + .background( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .fill(Color.primary.opacity(pulsePhase ? 0.05 : 0.03)) + ) + .overlay( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .strokeBorder(Color.primary.opacity(0.08), lineWidth: 1) + ) + .onAppear { + withAnimation(.easeInOut(duration: 1.5).repeatForever(autoreverses: true)) { + pulsePhase = true + } + } + .accessibilityElement(children: .ignore) + .accessibilityLabel(Text(title)) + .accessibilityValue(Text(progress.map { "\(Int(($0 * 100).rounded())) percent complete" } ?? detail)) + .accessibilityHint(Text(detail)) + } +} +``` + +### 2.9 删除旧的私有辅助函数 + +**文件**: `AtlasDesignSystem.swift` + +删除 line 540-560 的旧 `cardBackground` 和 `cardBorder` 函数。它们被 `AtlasBrand.swift` 中的 `atlasCardBackground` 和 `atlasCardBorder` 替代。 + +**注意**: 确保所有引用点都已迁移到新函数后再删除。也删除旧的 `AtlasPalette` 枚举(line 66-73),因为它被 `AtlasColor` 替代。对 `AtlasScreen` 中引用 `AtlasPalette.canvasTop`/`canvasBottom` 的地方,改为 `AtlasColor.canvasTop`/`AtlasColor.canvasBottom`。 + +--- + +## 3. App Shell 改进 + +### 3.1 侧边栏行视觉升级 + +**文件**: `Apps/AtlasApp/Sources/AtlasApp/AppShellView.swift` + +**当前** (line 162-186): 标准 Label + VStack,无视觉亮点。 + +**改为**: + +```swift +private struct SidebarRouteRow: View { + let route: AtlasRoute + + var body: some View { + Label { + VStack(alignment: .leading, spacing: AtlasSpacing.xxs) { + Text(route.title) + .font(AtlasTypography.rowTitle) + + Text(route.subtitle) + .font(AtlasTypography.captionSmall) + .foregroundStyle(.secondary) + .lineLimit(2) + } + } icon: { + // Apple System Settings 风格:圆角矩形图标背景 + ZStack { + RoundedRectangle(cornerRadius: AtlasRadius.sm, style: .continuous) + .fill(AtlasColor.brand.opacity(0.1)) + .frame(width: AtlasLayout.sidebarIconSize, height: AtlasLayout.sidebarIconSize) + + Image(systemName: route.systemImage) + .font(.system(size: 14, weight: .semibold)) + .foregroundStyle(AtlasColor.brand) + .accessibilityHidden(true) + } + } + .padding(.vertical, AtlasSpacing.sm) + .contentShape(Rectangle()) + .listRowSeparator(.hidden) + .accessibilityElement(children: .combine) + .accessibilityIdentifier("route.\(route.id)") + .accessibilityLabel("\(route.title). \(route.subtitle)") + .accessibilityHint(AtlasL10n.string("sidebar.route.hint", route.shortcutNumber)) + } +} +``` + +### 3.2 工具栏图标增强 + +**文件**: `AppShellView.swift` + +**当前** (line 28-61): 标准 toolbar 按钮,无视觉层次。 + +**改动**: +- 对所有 toolbar `Image(systemName:)` 添加 `.symbolRenderingMode(.hierarchical)` +- 给 TaskCenter 按钮添加活跃任务计数徽章 + +```swift +ToolbarItemGroup { + Button { + model.openTaskCenter() + } label: { + Label(AtlasL10n.string("toolbar.taskcenter"), systemImage: AtlasIcon.taskCenter) + .symbolRenderingMode(.hierarchical) + } + // ... 其他修饰符不变 + + Button { + model.navigate(to: .permissions) + Task { await model.inspectPermissions() } + } label: { + Label(AtlasL10n.string("toolbar.permissions"), systemImage: AtlasIcon.permissions) + .symbolRenderingMode(.hierarchical) + } + // ... 其他修饰符不变 + + Button { + model.navigate(to: .settings) + } label: { + Label(AtlasL10n.string("toolbar.settings"), systemImage: AtlasIcon.settings) + .symbolRenderingMode(.hierarchical) + } + // ... 其他修饰符不变 +} +``` + +### 3.3 详情页转场动画 + +**文件**: `AppShellView.swift` + +**当前** (line 24): `detailView(for:)` 无转场效果。 + +**改动**: 在 detail 闭包中添加视图标识和转场: + +```swift +} detail: { + detailView(for: model.selection ?? .overview) + .id(model.selection) // 关键:强制视图切换时触发转场 + .transition(.opacity) + .searchable(...) + .toolbar { ... } + .animation(AtlasMotion.slow, value: model.selection) +} +``` + +--- + +## 4. Feature Screen 改进 + +### 4.1 OverviewFeatureView — 英雄指标 + 共享列定义 + +**文件**: `Packages/AtlasFeaturesOverview/Sources/AtlasFeaturesOverview/OverviewFeatureView.swift` + +**改动 1** — 英雄指标差异化 (line 31-53): + +将"可回收空间"指标升级为 `.prominent` 高程,其余保持 `.raised`: + +```swift +LazyVGrid(columns: AtlasLayout.metricColumns, spacing: AtlasSpacing.lg) { + AtlasMetricCard( + title: AtlasL10n.string("overview.metric.reclaimable.title"), + value: AtlasFormatters.byteCount(snapshot.reclaimableSpaceBytes), + detail: AtlasL10n.string("overview.metric.reclaimable.detail"), + tone: .success, + systemImage: "sparkles", + elevation: .prominent // 英雄指标 + ) + AtlasMetricCard( + title: AtlasL10n.string("overview.metric.findings.title"), + value: "\(snapshot.findings.count)", + detail: AtlasL10n.string("overview.metric.findings.detail"), + tone: .neutral, + systemImage: "line.3.horizontal.decrease.circle" + // elevation 默认 .raised + ) + AtlasMetricCard( + title: AtlasL10n.string("overview.metric.permissions.title"), + value: "\(grantedPermissionCount)/\(snapshot.permissions.count)", + detail: grantedPermissionCount == snapshot.permissions.count + ? AtlasL10n.string("overview.metric.permissions.ready") + : AtlasL10n.string("overview.metric.permissions.limited"), + tone: grantedPermissionCount == snapshot.permissions.count ? .success : .warning, + systemImage: "lock.shield" + // elevation 默认 .raised + ) +} +``` + +**改动 2** — 删除私有 `columns` 属性 (line 185-191),全部替换为 `AtlasLayout.metricColumns`。 + +**改动 3** — 所有 `spacing: 16` 替换为 `AtlasSpacing.lg`,所有 `spacing: 12` 替换为 `AtlasSpacing.md`。 + +### 4.2 SmartCleanFeatureView — 解决双 CTA 竞争 + +**文件**: `Packages/AtlasFeaturesSmartClean/Sources/AtlasFeaturesSmartClean/SmartCleanFeatureView.swift` + +**核心问题**: line 85 和 line 112 同时使用 `.borderedProminent`,导致两个主要按钮视觉权重相同。 + +**改动**: 根据当前状态动态切换按钮层级。 + +```swift +HStack(spacing: AtlasSpacing.md) { + // Run Scan 按钮 + Button(action: onStartScan) { + Label(AtlasL10n.string("smartclean.action.runScan"), systemImage: "sparkles") + } + .buttonStyle(plan.items.isEmpty ? .atlasPrimary : .atlasSecondary) + .disabled(isScanning || isExecutingPlan) + .keyboardShortcut(plan.items.isEmpty ? .defaultAction : KeyEquivalent("s"), modifiers: plan.items.isEmpty ? [] : [.command, .option]) + .accessibilityIdentifier("smartclean.runScan") + .accessibilityHint(AtlasL10n.string("smartclean.action.runScan.hint")) + + // Refresh Preview 按钮 + Button(action: onRefreshPreview) { + Label(AtlasL10n.string("smartclean.action.refreshPreview"), systemImage: "arrow.clockwise") + } + .buttonStyle(.atlasGhost) + .disabled(isScanning || isExecutingPlan) + .accessibilityIdentifier("smartclean.refreshPreview") + .accessibilityHint(AtlasL10n.string("smartclean.action.refreshPreview.hint")) + + Spacer() + + // Execute 按钮 — 仅当 plan 有内容时为主要按钮 + Button(action: onExecutePlan) { + Label(AtlasL10n.string("smartclean.action.execute"), systemImage: "play.fill") + } + .buttonStyle(plan.items.isEmpty ? .atlasSecondary : .atlasPrimary) + .disabled(isScanning || isExecutingPlan || plan.items.isEmpty) + .keyboardShortcut(plan.items.isEmpty ? nil : .defaultAction) + .accessibilityIdentifier("smartclean.executePreview") + .accessibilityHint(AtlasL10n.string("smartclean.action.execute.hint")) +} +``` + +> **注意**: `.keyboardShortcut` 条件赋值在 SwiftUI 中需要用 `if/else` 包裹两个完整的 `Button`,不能直接三元。保持现有的 `Group { if ... else ... }` 结构,但把内部的 `.buttonStyle` 改为条件化。 + +**实际可编译方案**(考虑 SwiftUI 限制): + +```swift +HStack(spacing: AtlasSpacing.md) { + Group { + if plan.items.isEmpty { + Button(action: onStartScan) { + Label(AtlasL10n.string("smartclean.action.runScan"), systemImage: "sparkles") + } + .keyboardShortcut(.defaultAction) + } else { + Button(action: onStartScan) { + Label(AtlasL10n.string("smartclean.action.runScan"), systemImage: "sparkles") + } + } + } + .buttonStyle(plan.items.isEmpty ? .borderedProminent : .bordered) // 关键改动 + .controlSize(.large) + .disabled(isScanning || isExecutingPlan) + .accessibilityIdentifier("smartclean.runScan") + + Button(action: onRefreshPreview) { + Label(AtlasL10n.string("smartclean.action.refreshPreview"), systemImage: "arrow.clockwise") + } + .buttonStyle(.bordered) + .controlSize(.large) + .disabled(isScanning || isExecutingPlan) + .accessibilityIdentifier("smartclean.refreshPreview") + + Spacer() + + Group { + if !plan.items.isEmpty { + Button(action: onExecutePlan) { + Label(AtlasL10n.string("smartclean.action.execute"), systemImage: "play.fill") + } + .keyboardShortcut(.defaultAction) + } else { + Button(action: onExecutePlan) { + Label(AtlasL10n.string("smartclean.action.execute"), systemImage: "play.fill") + } + } + } + .buttonStyle(!plan.items.isEmpty ? .borderedProminent : .bordered) // 关键改动 + .controlSize(.large) + .disabled(isScanning || isExecutingPlan || plan.items.isEmpty) + .accessibilityIdentifier("smartclean.executePreview") +} +``` + +**额外改动**: 删除私有 `columns` (line 231-237),替换为 `AtlasLayout.metricColumns`。所有 `spacing: 16` → `AtlasSpacing.lg`。 + +### 4.3 AppsFeatureView — 行内按钮水平化 + +**文件**: `Packages/AtlasFeaturesApps/Sources/AtlasFeaturesApps/AppsFeatureView.swift` + +**当前问题**: line 181-208 的 trailing 区域是 VStack,包含 byteCount + chip + HStack(两个按钮),导致每行非常高。 + +**改动**: 将 trailing 重构为更紧凑的布局: + +```swift +// line 181 trailing 改为: +VStack(alignment: .trailing, spacing: AtlasSpacing.sm) { + HStack(spacing: AtlasSpacing.sm) { + AtlasStatusChip( + AtlasL10n.string("apps.list.row.leftovers", app.leftoverItems), + tone: app.leftoverItems > 0 ? .warning : .success + ) + Text(AtlasFormatters.byteCount(app.bytes)) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + } + + HStack(spacing: AtlasSpacing.sm) { + Button(activePreviewAppID == app.id ? AtlasL10n.string("apps.preview.running") : AtlasL10n.string("apps.preview.action")) { + onPreviewAppUninstall(app.id) + } + .buttonStyle(.bordered) + .controlSize(.small) + .disabled(isRunning) + + Button(activeUninstallAppID == app.id ? AtlasL10n.string("apps.uninstall.running") : AtlasL10n.string("apps.uninstall.action")) { + onExecuteAppUninstall(app.id) + } + .buttonStyle(.borderedProminent) + .controlSize(.small) + .disabled(isRunning) + } +} +``` + +**额外改动**: 删除私有 `columns`,替换为 `AtlasLayout.metricColumns`。 + +### 4.4 SettingsFeatureView — 轻量化设置页 + +**文件**: `Packages/AtlasFeaturesSettings/Sources/AtlasFeaturesSettings/SettingsFeatureView.swift` + +**当前问题**: 5 个 `AtlasInfoCard` 连续堆叠,视觉过重。 + +**改动**: +1. **General 区域** (line 35): 保留 `AtlasInfoCard`,不变 +2. **Exclusions 区域** (line 118): 保留,不变 +3. **Trust & Transparency** (line 143): 保留,不变 +4. **Acknowledgement** (line 177): 改为 `DisclosureGroup` +5. **Notices** (line 187): 改为 `DisclosureGroup` + +```swift +// 替换 line 177-195 的两个 AtlasInfoCard 为: +AtlasInfoCard( + title: AtlasL10n.string("settings.legal.title"), // 新增合并标题:"法律信息" + subtitle: AtlasL10n.string("settings.legal.subtitle") +) { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + DisclosureGroup(AtlasL10n.string("settings.acknowledgement.title")) { + Text(settings.acknowledgementText) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .textSelection(.enabled) + .padding(.top, AtlasSpacing.sm) + } + + Divider() + + DisclosureGroup(AtlasL10n.string("settings.notices.title")) { + Text(settings.thirdPartyNoticesText) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .textSelection(.enabled) + .padding(.top, AtlasSpacing.sm) + } + } +} +``` + +> **注意**: 需要在 Localizable.strings 中新增 `settings.legal.title` 和 `settings.legal.subtitle` 两个 key。中文值分别为 "法律信息" 和 "致谢与第三方声明"。英文值分别为 "Legal" 和 "Acknowledgements and third-party notices"。 + +### 4.5 PermissionsFeatureView — 添加授权入口 + +**文件**: `Packages/AtlasFeaturesPermissions/Sources/AtlasFeaturesPermissions/PermissionsFeatureView.swift` + +**当前问题**: 未授权的权限行只显示 "Needed Later" chip,无操作入口。 + +**改动**: 在 line 109-113 的 trailing 区域添加条件按钮: + +```swift +// line 109 trailing 改为: +VStack(alignment: .trailing, spacing: AtlasSpacing.sm) { + AtlasStatusChip( + state.isGranted ? AtlasL10n.string("common.granted") : AtlasL10n.string("common.neededLater"), + tone: state.isGranted ? .success : .warning + ) + + if !state.isGranted { + Button(AtlasL10n.string("permissions.grant.action")) { + openSystemPreferences(for: state.kind) + } + .buttonStyle(.bordered) + .controlSize(.small) + } +} +``` + +添加跳转函数: + +```swift +private func openSystemPreferences(for kind: PermissionKind) { + let urlString: String + switch kind { + case .fullDiskAccess: + urlString = "x-apple.systempreferences:com.apple.preference.security?Privacy_AllFiles" + case .accessibility: + urlString = "x-apple.systempreferences:com.apple.preference.security?Privacy_Accessibility" + case .notifications: + urlString = "x-apple.systempreferences:com.apple.preference.security?Privacy_Notifications" + } + if let url = URL(string: urlString) { + NSWorkspace.shared.open(url) + } +} +``` + +**额外改动**: 删除私有 `columns`,替换为 `AtlasLayout.metricColumns`。 + +### 4.6 HistoryFeatureView — 使用 Token + +**文件**: `Packages/AtlasFeaturesHistory/Sources/AtlasFeaturesHistory/HistoryFeatureView.swift` + +**改动**: 仅 Token 替换,无结构性变化。 +- 所有 `spacing: 12` → `AtlasSpacing.md` +- 所有 `spacing: 10` → `AtlasSpacing.md` + +### 4.7 TaskCenterView — 使用 Token + 添加分隔线 + +**文件**: `Apps/AtlasApp/Sources/AtlasApp/TaskCenterView.swift` + +**改动**: +- line 11 `spacing: 18` → `AtlasSpacing.xl` +- line 12 `spacing: 8` → `AtlasSpacing.sm` +- line 14 `.title2.weight(.semibold)` → `AtlasTypography.sectionTitle` +- line 17 `.subheadline` → `AtlasTypography.body` +- line 38 `spacing: 10` → `AtlasSpacing.md` +- line 62 `padding(20)` → `padding(AtlasSpacing.xl)` +- 在标题和 callout 之间添加 `Divider()` + +--- + +## 5. 全局搜索替换清单 + +以下是可以安全地在所有 Feature View 文件中批量替换的模式: + +| 搜索 | 替换 | 范围 | +|------|------|------| +| `spacing: 16)` (在 LazyVGrid/VStack 中) | `spacing: AtlasSpacing.lg)` | 所有 Feature View | +| `spacing: 12)` (在 VStack 中) | `spacing: AtlasSpacing.md)` | 所有 Feature View | +| `spacing: 8)` (在 VStack 中) | `spacing: AtlasSpacing.sm)` | 所有 Feature View | +| `spacing: 10)` | `spacing: AtlasSpacing.md)` | TaskCenterView | +| `.font(.subheadline)` (非 `.weight`) | `.font(AtlasTypography.body)` | 所有文件 | +| `.font(.subheadline.weight(.semibold))` | `.font(AtlasTypography.label)` | 所有文件 | +| `.font(.headline)` | `.font(AtlasTypography.rowTitle)` | 所有文件(非 icon 处) | +| `.font(.caption.weight(.semibold))` | `.font(AtlasTypography.caption)` | 所有文件 | +| 私有 `columns` 属性 | `AtlasLayout.metricColumns` | Overview/SmartClean/Apps/Permissions | + +--- + +## 6. 新增本地化字符串 + +在 `zh-Hans.lproj/Localizable.strings` 和 `en.lproj/Localizable.strings` 中添加: + +| Key | 中文 | English | +|-----|------|---------| +| `settings.legal.title` | 法律信息 | Legal | +| `settings.legal.subtitle` | 致谢与第三方声明 | Acknowledgements and third-party notices | +| `permissions.grant.action` | 前往授权 | Grant Access | + +--- + +## 7. 实施顺序 + +### Phase 1 — 设计系统核心迁移 +1. 在 `AtlasDesignSystem.swift` 中删除 `AtlasPalette`,所有引用改为 `AtlasColor.*` +2. 删除旧的 `cardBackground`/`cardBorder` 函数,所有引用改为 `atlasCardBackground`/`atlasCardBorder` +3. 用 Token 重写 `AtlasScreen`(§2.1) +4. 用 Token 重写 `AtlasMetricCard`(§2.2) +5. 用 Token 重写 `AtlasInfoCard`(§2.3) +6. 用 Token 重写 `AtlasCallout`(§2.4) +7. 用 Token 重写 `AtlasDetailRow`(§2.5) +8. 用 Token 重写 `AtlasStatusChip`(§2.6) +9. 用 Token 重写 `AtlasEmptyState`(§2.7) +10. 用 Token 重写 `AtlasLoadingState`(§2.8) + +### Phase 2 — App Shell +11. 侧边栏行升级(§3.1) +12. 工具栏图标增强(§3.2) +13. 详情页转场动画(§3.3) + +### Phase 3 — Feature Screen 优化 +14. Overview 英雄指标(§4.1) +15. SmartClean 双 CTA 修复(§4.2) +16. Apps 行内按钮(§4.3) +17. Settings 轻量化(§4.4) +18. Permissions 授权入口(§4.5) +19. History Token 替换(§4.6) +20. TaskCenter Token 替换(§4.7) + +### Phase 4 — 全局清理 +21. 批量替换 spacing/font 硬编码(§5) +22. 新增本地化字符串(§6) +23. 编译验证 + 全量 UI 测试 + +--- + +## 8. 文件清单 + +| 文件 | 改动类型 | +|------|---------| +| `Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasBrand.swift` | ✅ 已创建 | +| `Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasDesignSystem.swift` | 重构 | +| `Apps/AtlasApp/Sources/AtlasApp/AppShellView.swift` | 修改 | +| `Apps/AtlasApp/Sources/AtlasApp/TaskCenterView.swift` | 修改 | +| `Packages/AtlasFeaturesOverview/Sources/.../OverviewFeatureView.swift` | 修改 | +| `Packages/AtlasFeaturesSmartClean/Sources/.../SmartCleanFeatureView.swift` | 修改 | +| `Packages/AtlasFeaturesApps/Sources/.../AppsFeatureView.swift` | 修改 | +| `Packages/AtlasFeaturesHistory/Sources/.../HistoryFeatureView.swift` | 修改 | +| `Packages/AtlasFeaturesPermissions/Sources/.../PermissionsFeatureView.swift` | 修改 | +| `Packages/AtlasFeaturesSettings/Sources/.../SettingsFeatureView.swift` | 修改 | +| `Packages/AtlasDomain/Sources/.../Resources/zh-Hans.lproj/Localizable.strings` | 新增 3 个 key | +| `Packages/AtlasDomain/Sources/.../Resources/en.lproj/Localizable.strings` | 新增 3 个 key | diff --git a/Docs/ErrorCodes.md b/Docs/ErrorCodes.md new file mode 100644 index 0000000..f1522a8 --- /dev/null +++ b/Docs/ErrorCodes.md @@ -0,0 +1,37 @@ +# Error Codes + +## Principles + +- Use stable machine-readable codes. +- Map each code to a user-facing title, body, and next step. +- Separate recoverable conditions from fatal conditions. + +## Registry + +- `permission_denied` +- `permission_limited` +- `admin_required` +- `path_protected` +- `path_not_found` +- `action_not_allowed` +- `helper_unavailable` +- `execution_unavailable` +- `worker_crashed` +- `protocol_mismatch` +- `partial_failure` +- `task_cancelled` +- `restore_expired` +- `restore_conflict` +- `idempotency_conflict` + +## Mapping Rules + +- Use inline presentation for row-level issues. +- Use banners for limited access and incomplete results. +- Use sheets for permission and destructive confirmation flows. +- Use result pages for partial success, cancellation, and recovery outcomes. + +## Format + +- User-visible format recommendation: `ATLAS--` +- Example: `ATLAS-EXEC-004` diff --git a/Docs/Execution/Beta-Acceptance-Checklist.md b/Docs/Execution/Beta-Acceptance-Checklist.md new file mode 100644 index 0000000..9e7b812 --- /dev/null +++ b/Docs/Execution/Beta-Acceptance-Checklist.md @@ -0,0 +1,125 @@ +# Beta Acceptance Checklist + +## Goal + +Provide a release-facing checklist for deciding whether Atlas for Mac is ready to enter or exit the beta phase. + +## Scope + +This checklist applies to the frozen MVP modules: + +- `Overview` +- `Smart Clean` +- `Apps` +- `History` +- `Recovery` +- `Permissions` +- `Settings` + +## Entry Criteria + +Before starting beta acceptance, confirm all of the following: + +- [ ] `swift test --package-path Packages` passes +- [ ] `swift test --package-path Apps` passes +- [ ] `./scripts/atlas/full-acceptance.sh` passes +- [ ] `dist/native/Atlas for Mac.app` is freshly built +- [ ] `dist/native/Atlas-for-Mac.dmg` is freshly built +- [ ] `dist/native/Atlas-for-Mac.pkg` is freshly built +- [ ] `Docs/Execution/MVP-Acceptance-Matrix.md` is up to date +- [ ] Known blockers are documented in `Docs/RISKS.md` + +## Build & Artifact Checks + +- [ ] App bundle opens from `dist/native/Atlas for Mac.app` +- [ ] DMG mounts successfully +- [ ] DMG contains `Atlas for Mac.app` +- [ ] DMG contains `Applications` shortcut +- [ ] PKG expands with `pkgutil --expand` +- [ ] SHA256 file exists and matches current artifacts +- [ ] Embedded helper exists at `Contents/Helpers/AtlasPrivilegedHelper` +- [ ] Embedded XPC service exists at `Contents/XPCServices/AtlasWorkerXPC.xpc` +- [ ] `./scripts/atlas/verify-bundle-contents.sh` passes + +## Functional Beta Checks + +### Overview +- [ ] App launches to a visible primary window +- [ ] Sidebar navigation shows all frozen MVP routes +- [ ] Overview displays health summary cards +- [ ] Overview displays reclaimable space summary +- [ ] Overview displays recent activity without crash + +### Smart Clean +- [ ] User can open `Smart Clean` +- [ ] User can run scan +- [ ] User can refresh preview +- [ ] User can execute preview +- [ ] Execution updates `History` +- [ ] Execution creates `Recovery` entries for recoverable items + +### Apps +- [ ] User can open `Apps` +- [ ] User can refresh app footprints +- [ ] User can preview uninstall +- [ ] User can execute uninstall +- [ ] Uninstall updates `History` +- [ ] Uninstall creates `Recovery` entry + +### History / Recovery +- [ ] History shows recent task runs +- [ ] Recovery shows recoverable items after destructive flows +- [ ] User can restore a recovery item +- [ ] Restored item disappears from recovery list + +### Permissions +- [ ] Permissions screen opens without crash +- [ ] User can refresh permission status +- [ ] Permission cards render all expected states + +### Settings +- [ ] Settings screen opens without crash +- [ ] User can change recovery retention +- [ ] User can toggle notifications +- [ ] Settings persist after relaunch +- [ ] Acknowledgement copy is visible +- [ ] Third-party notices copy is visible + +## Install Checks + +### DMG Path +- [ ] DMG install validation passes with `KEEP_INSTALLED_APP=1 ./scripts/atlas/verify-dmg-install.sh` +- [ ] Installed app exists at `~/Applications/Atlas for Mac.app` +- [ ] Installed app launches successfully + +### PKG Path +- [ ] PKG builds successfully +- [ ] PKG expands successfully +- [ ] PKG signing status is known (`Developer ID signed`, `ad hoc signed`, or `unsigned`) + +## Native UI Automation Checks + +- [ ] `./scripts/atlas/ui-automation-preflight.sh` passes on the validating machine +- [ ] `./scripts/atlas/run-ui-automation.sh` passes +- [ ] UI smoke confirms sidebar routes and primary Smart Clean / Settings controls + +## Beta Exit Criteria + +Mark beta candidate as ready only if all are true: + +- [ ] No P0 functional blocker remains open +- [ ] No P0 crash-on-launch or crash-on-primary-workflow remains open +- [ ] All frozen MVP workflows complete end to end +- [ ] Install path has been validated on the current candidate build +- [ ] Known unsupported areas are explicitly documented +- [ ] Release-signing status is explicit + +## Sign-off + +| Role | Name | Status | Notes | +|------|------|--------|-------| +| `QA Agent` | | Pending | | +| `Mac App Agent` | | Pending | | +| `System Agent` | | Pending | | +| `Release Agent` | | Pending | | +| `Product Agent` | | Pending | | diff --git a/Docs/Execution/Beta-Gate-Review.md b/Docs/Execution/Beta-Gate-Review.md new file mode 100644 index 0000000..b07c9cc --- /dev/null +++ b/Docs/Execution/Beta-Gate-Review.md @@ -0,0 +1,95 @@ +# Beta Gate Review + +## Gate + +- `Beta Candidate` + +## Review Date + +- `2026-03-07` + +## Scope Reviewed + +- `Overview` +- `Smart Clean` +- `Apps` +- `History` +- `Recovery` +- `Permissions` +- `Settings` +- native packaging and install flow +- native UI smoke coverage +- Chinese-first app-language switching and localized shell copy + +## Readiness Checklist + +- [x] Required P0 tasks complete +- [x] Docs updated +- [x] Risks reviewed +- [x] Open questions below threshold for internal beta +- [x] Next-stage inputs available + +## Evidence Reviewed + +- `Docs/Execution/MVP-Acceptance-Matrix.md` +- `Docs/Execution/Beta-Acceptance-Checklist.md` +- `Docs/Execution/Manual-Test-SOP.md` +- `scripts/atlas/full-acceptance.sh` +- `scripts/atlas/run-ui-automation.sh` +- `scripts/atlas/signing-preflight.sh` + +## Automated Validation Summary + +- `swift test --package-path Packages` — pass +- `swift test --package-path Apps` — pass +- `./scripts/atlas/full-acceptance.sh` — pass +- `./scripts/atlas/run-ui-automation.sh` — pass on a machine with Accessibility trust (`4` UI tests, including language switching) +- `./scripts/atlas/verify-dmg-install.sh` — pass +- `./scripts/atlas/verify-app-launch.sh` — pass +- `./scripts/atlas/package-native.sh` — pass + +## Beta Assessment + +### Product Functionality + +- Core frozen MVP workflows are complete end to end. +- Recovery-first behavior is visible in both Smart Clean and Apps flows. +- Settings and permission refresh flows are functional. +- The app now defaults to `简体中文` and supports switching to `English` through persisted settings. + +### Packaging and Installability + +- `.app`, `.zip`, `.dmg`, and `.pkg` artifacts are produced successfully. +- Native packaging has been rerun successfully after the localization work. +- DMG installation into `~/Applications/Atlas for Mac.app` is validated. +- Installed app launch is validated. + +### Test Coverage + +- Shared package tests are green. +- App-layer tests are green. +- Native UI smoke is green on a machine with Accessibility trust. +- Manual beta checklist and SOP are now present for human validation. + +## Blockers + +- Public signed/notarized distribution is still blocked by missing Apple release credentials: + - `Developer ID Application` + - `Developer ID Installer` + - `ATLAS_NOTARY_PROFILE` + +## Decision + +- `Pass with Conditions` + +## Conditions + +- Internal beta / trusted-user beta can proceed with the current ad hoc-signed local artifacts. +- Public beta or broad external distribution must wait until signing and notarization credentials are available and the release packaging path is re-run. + +## Follow-up Actions + +- Obtain Apple signing and notarization credentials. +- Re-run `./scripts/atlas/signing-preflight.sh`. +- Re-run `./scripts/atlas/package-native.sh` with signing/notarization environment variables. +- Validate signed DMG / PKG install behavior on a clean machine. diff --git a/Docs/Execution/Current-Status-2026-03-07.md b/Docs/Execution/Current-Status-2026-03-07.md new file mode 100644 index 0000000..7217742 --- /dev/null +++ b/Docs/Execution/Current-Status-2026-03-07.md @@ -0,0 +1,69 @@ +# Current Engineering Status — 2026-03-07 + +## Overall Status + +- Product state: `Frozen MVP complete` +- Experience state: `Post-MVP polish pass complete` +- Localization state: `Chinese-first bilingual framework complete` +- Packaging state: `Unsigned native artifacts refreshed` +- Release state: `Internal beta ready, public release still gated by signing/notarization credentials` + +## What Is Complete + +### Frozen MVP Workflows + +- `Overview` +- `Smart Clean` +- `Apps` +- `History` +- `Recovery` +- `Permissions` +- `Settings` + +### Productization and Polish + +- Shared SwiftUI design-system uplift landed +- Empty/loading/error/trust states were strengthened across the MVP shell +- Keyboard navigation and command shortcuts landed for the main shell +- Accessibility semantics and stable UI-automation identifiers landed +- Native UI smoke is green on a trusted local machine + +### Localization + +- Shared localization framework added across the Swift package graph +- Supported app languages: `简体中文`, `English` +- Default app language: `简体中文` +- User language preference now persists through `AtlasSettings` +- Worker-generated summaries and settings-driven copy now follow the selected app language + +### Packaging + +- Native `.app`, `.zip`, `.dmg`, and `.pkg` artifacts build successfully +- Latest local packaging rerun completed after localization work +- Current artifact directory: `dist/native/` + +## Validation Snapshot + +- `swift build --package-path Packages` — pass +- `swift build --package-path Apps` — pass +- `swift test --package-path Packages` — pass (`23` tests) +- `swift test --package-path Apps` — pass (`8` tests) +- `./scripts/atlas/run-ui-automation.sh` — environment-conditional on the current machine; standalone repro confirms current timeout is machine-level, not Atlas-specific +- `./scripts/atlas/package-native.sh` — pass +- `./scripts/atlas/full-acceptance.sh` — pass with documented UI-automation environment condition + +## Current Blockers + +- `Smart Clean` execute now supports a real Trash-based path for structured safe targets, and those targets can be physically restored. Full disk-backed coverage is still incomplete, and unsupported targets fail closed. See `Docs/Execution/Execution-Chain-Audit-2026-03-09.md`. +- Silent fallback from XPC to the scaffold worker can mask execution-path failures in user-facing flows. See `Docs/Execution/Execution-Chain-Audit-2026-03-09.md`. +- Public signed distribution is still blocked by missing Apple release credentials: + - `Developer ID Application` + - `Developer ID Installer` + - `ATLAS_NOTARY_PROFILE` + +## Recommended Next Steps + +1. Run a dedicated manual localization QA pass for Chinese and English on a clean machine. +2. Reinstall the latest packaged app and verify first-launch language behavior with a fresh state file. +3. Re-check macOS UI automation on a clean/trusted machine if native XCUITest evidence is needed without the current environment condition. +4. If release-ready output is needed, obtain signing/notarization credentials and rerun native packaging. diff --git a/Docs/Execution/Execution-Chain-Audit-2026-03-09.md b/Docs/Execution/Execution-Chain-Audit-2026-03-09.md new file mode 100644 index 0000000..5caa314 --- /dev/null +++ b/Docs/Execution/Execution-Chain-Audit-2026-03-09.md @@ -0,0 +1,146 @@ +# Execution Chain Audit — 2026-03-09 + +## Scope + +This audit reviews the user-visible execution path for: + +- `Smart Clean` scan +- `Smart Clean` execute +- `Apps` uninstall preview / execute +- `Recovery` restore +- worker selection and fallback behavior + +## Summary + +Atlas currently ships a mixed execution model: + +- `Smart Clean` scan is backed by a real upstream dry-run adapter. +- `Apps` inventory is backed by a real local inventory adapter. +- `App uninstall` can invoke the packaged helper for the main app bundle path. +- `Smart Clean` execute now supports a real Trash-based execution path for a safe subset of structured user-owned cleanup targets, but broader execution coverage is still incomplete. +- `Restore` is currently state rehydration, not physical file restoration. +- Worker submission can silently fall back from XPC to the scaffold worker, which makes execution capability look stronger than it really is. + +## End-to-End Chain + +### 1. UI and App Model + +- `Apps/AtlasApp/Sources/AtlasApp/AtlasAppModel.swift:190` starts Smart Clean scan through `workspaceController.startScan()`. +- `Apps/AtlasApp/Sources/AtlasApp/AtlasAppModel.swift:230` runs the current Smart Clean plan through `workspaceController.executePlan(planID:)`. +- `Apps/AtlasApp/Sources/AtlasApp/AtlasAppModel.swift:245` immediately refreshes the plan after execution, so the UI shows the remaining plan rather than the just-executed plan. + +### 2. Application Layer + +- `Packages/AtlasApplication/Sources/AtlasApplication/AtlasApplication.swift:281` maps scan requests into structured worker requests. +- `Packages/AtlasApplication/Sources/AtlasApplication/AtlasApplication.swift:319` maps plan execution into a worker request and trusts the returned snapshot/events. +- The application layer does not distinguish between “state-only execution” and “real filesystem side effects”. + +### 3. Worker Selection + +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasXPCTransport.swift:272` defines `AtlasPreferredWorkerService`. +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasXPCTransport.swift:288` submits to XPC first. +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasXPCTransport.swift:291` silently falls back to `AtlasScaffoldWorkerService` on any XPC error. + +## Real vs Scaffold Classification + +### Real or Mostly Real + +#### Smart Clean scan + +- `XPC/AtlasWorkerXPC/Sources/AtlasWorkerXPC/main.swift:5` wires `MoleSmartCleanAdapter` into the worker. +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift:319` uses the configured `smartCleanScanProvider` when available. +- `Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MoleSmartCleanAdapter.swift:12` runs the upstream `bin/clean.sh --dry-run` flow and parses findings. + +Result: +- The scan result can reflect the actual machine state. + +#### Apps list + +- `XPC/AtlasWorkerXPC/Sources/AtlasWorkerXPC/main.swift:8` wires `MacAppsInventoryAdapter` into the worker. +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift:495` refreshes app inventory from the real adapter when available. + +Result: +- App footprint listing is grounded in real local inventory. + +#### App uninstall bundle removal + +- `XPC/AtlasWorkerXPC/Sources/AtlasWorkerXPC/main.swift:9` wires the helper client into the worker. +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift:533` checks whether the bundle path exists. +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift:552` invokes the helper with `AtlasHelperAction(kind: .trashItems, targetPath: app.bundlePath)`. +- `Helpers/AtlasPrivilegedHelper/Sources/AtlasPrivilegedHelperCore/HelperActionExecutor.swift:35` supports `trashItems`. + +Result: +- The main `.app` bundle path can be moved through the helper boundary. + +### Mixed Real / Incomplete + +#### Smart Clean execute + +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift:383` begins Smart Clean plan execution. +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift:414` removes selected findings from the in-memory snapshot. +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift:416` recalculates reclaimable space from the remaining findings only. +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift:417` rebuilds the current plan from the remaining findings. + +What is now real: +- Structured scan findings can carry concrete `targetPaths`. +- Safe user-owned targets are moved to Trash during execution. +- `scan -> execute -> rescan` is now covered for a file-backed safe target path. + +What is still missing: +- Broad execution coverage for all Smart Clean categories. +- A helper-backed strategy for protected or privileged Smart Clean targets. +- A physical restoration flow that mirrors the new real Trash-based execution path. + +User-visible consequence: +- Safe structured targets can now disappear on the next real scan. Unsupported targets fail closed instead of pretending to be cleaned. + +#### Restore + +- `Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift:445` restores items by re-inserting stored payloads into Atlas state. +- No physical restore operation is performed against the filesystem. + +User-visible consequence: +- Recovery currently restores Atlas’s structured workspace model, not a verified on-disk artifact. + +## Protocol and Domain Gaps + +### Current protocol shape + +- `Packages/AtlasProtocol/Sources/AtlasProtocol/AtlasProtocol.swift:92` only allows helper actions such as `trashItems` and `removeLaunchService`. +- `Packages/AtlasDomain/Sources/AtlasDomain/AtlasDomain.swift:109` defines `ActionItem.Kind` values such as `removeCache`, `removeApp`, `archiveFile`, and `inspectPermission`. + +Gap: +- `ActionItem.Kind` communicates user intent, but it does not carry the executable path set or helper-ready structured target information required to make Smart Clean execution real. + +## Risks + +### R-011 Smart Clean Execution Trust Gap + +- Severity: `High` +- Area: `Execution / UX / Trust` +- Risk: The UI presents Smart Clean execution as if it performs disk cleanup, but the current worker only mutates Atlas state for Smart Clean items. +- User impact: Users can believe cleanup succeeded even when the next scan rediscovers the same disk usage. +- Recommended action: Make execution capability explicit and block release-facing trust claims until Smart Clean execution is backed by real side effects. + +### R-012 Silent Fallback Masks Capability Loss + +- Severity: `High` +- Area: `System / Execution` +- Risk: Silent fallback from XPC to the scaffold worker can hide worker/XPC failures and blur the line between real execution and fallback behavior. +- User impact: Local execution may look successful even when the primary worker path is unavailable. +- Recommended action: Remove or narrow silent fallback in user-facing execution paths and surface a concrete error when real execution infrastructure is unavailable. + +## Recommended Fix Order + +1. Remove silent fallback for release-facing execution flows or gate it behind an explicit development-only mode. +2. Introduce executable structured targets for Smart Clean action items so the worker can perform real side effects. +3. Route Smart Clean destructive actions through the helper boundary where privilege or safety validation is required. +4. Add `scan -> execute -> rescan` contract coverage proving real disk impact. +5. Separate “logical recovery in Atlas state” from “physical file restoration” in both UI copy and implementation. + +## Acceptance Criteria for the Follow-up Fix + +- Running Smart Clean on real findings reduces those findings on a subsequent real scan. +- If the worker/helper cannot perform the action, the user sees a clear failure rather than a silent fallback success. +- History records only claim completion when the filesystem side effect actually happened. +- Recovery messaging distinguishes between physical restoration and model restoration until both are truly implemented. diff --git a/Docs/Execution/MVP-Acceptance-Matrix.md b/Docs/Execution/MVP-Acceptance-Matrix.md new file mode 100644 index 0000000..7de0757 --- /dev/null +++ b/Docs/Execution/MVP-Acceptance-Matrix.md @@ -0,0 +1,58 @@ +# MVP Acceptance Matrix + +## Goal + +Track the frozen Atlas for Mac MVP against user-visible acceptance criteria, automated coverage, and manual verification needs. + +## Scope + +- `Overview` +- `Smart Clean` +- `Apps` +- `History` +- `Recovery` +- `Permissions` +- `Settings` + +## Matrix + +| Module | Acceptance Criterion | Automated Coverage | Manual Verification | Status | +|--------|----------------------|--------------------|---------------------|--------| +| `Overview` | Shows health snapshot, reclaimable space, permissions summary, and recent activity | `swift test --package-path Packages`, `AtlasApplicationTests`, native build | Launch app and confirm overview renders without crash | covered | +| `Smart Clean` | User can scan, preview, and execute a recovery-first cleanup plan | `AtlasApplicationTests`, `AtlasInfrastructureTests`, `AtlasAppTests` | Launch app, run scan, review lanes, execute preview | covered | +| `Apps` | User can refresh apps, preview uninstall, and execute uninstall through worker flow | `AtlasApplicationTests`, `AtlasInfrastructureTests`, `AtlasAppTests`, `MacAppsInventoryAdapterTests` | Launch app, preview uninstall, execute uninstall, confirm history updates | covered | +| `History` | User can inspect runs and restore recovery items | `AtlasInfrastructureTests`, `AtlasAppTests` | Launch app, restore an item, verify it disappears from recovery list | covered | +| `Recovery` | Destructive flows create structured recovery items with expiry | `AtlasInfrastructureTests` | Inspect history/recovery entries after execute or uninstall | covered | +| `Permissions` | User can refresh best-effort macOS permission states | package tests + app build | Launch app, refresh permissions, inspect cards | partial-manual | +| `Settings` | User can update recovery retention and notifications and persist them | `AtlasApplicationTests`, `AtlasAppTests` | Relaunch app and verify settings remain persisted | covered | +| Packaging | App produces `.zip`, `.dmg`, `.pkg` | `scripts/atlas/package-native.sh` | Inspect output artifacts | covered | +| Installation | User can install from DMG into Applications | `scripts/atlas/verify-dmg-install.sh` | Open DMG and drag app to Applications | covered | +| Signed Distribution | Installer is signed and notarized | `scripts/atlas/signing-preflight.sh` + packaging with credentials | Verify Gatekeeper-friendly install on a clean machine | blocked-by-credentials | +| UI smoke | Sidebar and primary controls are automatable through native UI tests | `scripts/atlas/run-ui-automation.sh` | Run on a trusted local machine or CI agent with automation enabled | covered | + +## Required Manual Scenarios + +### Scenario 1: Smart Clean end-to-end +1. Launch the app. +2. Open `Smart Clean`. +3. Run scan. +4. Refresh preview. +5. Execute preview. +6. Confirm `History` and `Recovery` update. + +### Scenario 2: App uninstall end-to-end +1. Open `Apps`. +2. Refresh app footprints. +3. Preview uninstall for one app. +4. Execute uninstall. +5. Confirm the item appears in `History` / `Recovery`. + +### Scenario 3: DMG install verification +1. Build distribution artifacts. +2. Open `Atlas-for-Mac.dmg`. +3. Copy `Atlas for Mac.app` to `Applications`. +4. Launch the installed app. + +## Current Blocking Item + +- Signed/notarized public distribution remains blocked by missing Apple Developer release credentials. diff --git a/Docs/Execution/Manual-Test-SOP.md b/Docs/Execution/Manual-Test-SOP.md new file mode 100644 index 0000000..6436009 --- /dev/null +++ b/Docs/Execution/Manual-Test-SOP.md @@ -0,0 +1,196 @@ +# Manual Test SOP + +## Goal + +Provide a repeatable manual test procedure for Atlas for Mac beta validation on a real macOS machine. + +## Intended Tester + +- Internal QA +- Product owner +- Developer performing release candidate validation + +## Test Environment Preparation + +### Machine Requirements +- macOS 14 or newer +- Ability to grant Accessibility permission to the terminal and Xcode when UI automation is used +- Access to `dist/native` artifacts from the current candidate build + +### Clean Start Checklist +- Quit all running Atlas for Mac instances +- Remove old local install if testing a clean DMG install: + - `~/Applications/Atlas for Mac.app` +- Clear temporary validation folders if needed: + - `.build/atlas-native/` + - `.build/atlas-dmg-verify/` +- Ensure the build under test is freshly produced + +## Preflight Commands + +Run these before starting manual validation: + +```bash +swift test --package-path Packages +swift test --package-path Apps +./scripts/atlas/package-native.sh +./scripts/atlas/verify-bundle-contents.sh +KEEP_INSTALLED_APP=1 ./scripts/atlas/verify-dmg-install.sh +./scripts/atlas/verify-app-launch.sh +./scripts/atlas/ui-automation-preflight.sh || true +./scripts/atlas/run-ui-automation.sh || true +``` + +## Manual Test Logging Rules + +For every issue found, record: + +- build timestamp +- artifact used (`.app`, `.dmg`, `.pkg`) +- screen name +- exact steps +- expected result +- actual result +- screenshot or screen recording if possible +- whether it blocks the beta exit criteria + +## Scenario SOP + +### SOP-01 Launch and Navigation +1. Open `Atlas for Mac.app`. +2. Confirm the main window appears. +3. Confirm sidebar routes are visible: + - `Overview` + - `Smart Clean` + - `Apps` + - `History` + - `Permissions` + - `Settings` +4. Switch through all routes once. +5. Confirm no crash or blank screen occurs. + +**Pass condition** +- All routes render and navigation remains responsive. + +### SOP-02 Smart Clean Workflow +Reference: `Docs/Execution/Smart-Clean-Manual-Verification-2026-03-09.md` for disposable local fixtures and rescan/restore verification. + +1. Open `Smart Clean`. +2. Click `Run Scan`. +3. Wait for summary and progress to update. +4. Click `Refresh Preview`. +5. Review `Safe`, `Review`, and `Advanced` sections. +6. Click `Execute Preview`. +7. Open `History`. +8. Confirm a new execution record exists. +9. Confirm `Recovery` shows new entries. + +**Pass condition** +- Scan, preview, and execute complete without crash and leave history/recovery evidence. + +### SOP-03 Apps Workflow +1. Open `Apps`. +2. Click `Refresh App Footprints`. +3. Pick one app and click `Preview`. +4. Review the uninstall preview. +5. Click `Uninstall` for the selected app. +6. Open `History`. +7. Confirm an uninstall task run exists. +8. Confirm `Recovery` includes the app recovery entry. + +**Pass condition** +- Preview and uninstall flow complete through worker-backed behavior. + +### SOP-04 Recovery Restore Workflow +1. Open `History`. +2. In `Recovery`, choose one item. +3. Click `Restore`. +4. Confirm the item disappears from the recovery list. +5. Return to the relevant screen (`Smart Clean` or `Apps`) and confirm state reflects the restore. + +**Pass condition** +- Recovery restore succeeds and updates visible state. + +### SOP-05 Permissions Workflow +1. Open `Permissions`. +2. Click `Refresh Permission Status`. +3. Confirm cards render for: + - `Full Disk Access` + - `Accessibility` + - `Notifications` +4. If you enable `Full Disk Access`, fully quit and reopen Atlas, then confirm `Refresh Permission Status` can reflect the new state. +5. Confirm the page does not hang or crash if some permissions are missing. + +**Pass condition** +- Best-effort permission inspection returns a stable UI state, and Full Disk Access guidance matches the real macOS relaunch requirement. + +### SOP-06 Settings Workflow +1. Open `Settings`. +2. Change recovery retention. +3. Toggle notifications. +4. Quit the app. +5. Relaunch the app. +6. Return to `Settings`. +7. Confirm both values persisted. +8. Confirm acknowledgement and third-party notices text are visible. + +**Pass condition** +- Settings persist across relaunch and trust content is visible. + +### SOP-07 DMG Install Path +1. Double-click `dist/native/Atlas-for-Mac.dmg`. +2. Drag `Atlas for Mac.app` into `Applications`. +3. Launch the installed app from `Applications`. +4. Confirm the main window appears. + +**Pass condition** +- DMG install path behaves like a normal user install. + +### SOP-08 PKG Verification Path +1. Run `pkgutil --expand dist/native/Atlas-for-Mac.pkg dist/native/pkg-expand-manual`. +2. Confirm the package expands without error. +3. Run `pkgutil --check-signature dist/native/Atlas-for-Mac.pkg`. +4. Record whether the current build is `Developer ID signed`, `ad hoc signed`, or `unsigned`. + +**Pass condition** +- PKG structure is valid and signing state is explicitly recorded. + +### SOP-09 Native UI Smoke +1. Run `./scripts/atlas/ui-automation-preflight.sh`. +2. If the machine is trusted for Accessibility, run `./scripts/atlas/run-ui-automation.sh`. +3. Confirm the UI smoke test suite passes. + +**Pass condition** +- Native UI smoke passes on a machine with proper macOS automation permissions. + +## Failure Classification + +### P0 +- App does not launch +- Primary workflow crashes +- Smart Clean or Apps core flow cannot complete +- Recovery restore fails consistently + +### P1 +- Non-blocking but severe UX issue +- Persistent visual corruption on a core screen +- Packaging/install issue with a documented workaround + +### P2 +- Minor UI issue +- Copy inconsistency +- Non-core polish regression + +## Final Tester Output + +At the end of a run, summarize: + +- build under test +- artifact path used +- scenarios executed +- pass/fail for each scenario +- P0 / P1 / P2 issues found +- recommendation: + - `Pass` + - `Pass with Conditions` + - `Fail` diff --git a/Docs/Execution/Polish-Week-01.md b/Docs/Execution/Polish-Week-01.md new file mode 100644 index 0000000..e98034d --- /dev/null +++ b/Docs/Execution/Polish-Week-01.md @@ -0,0 +1,36 @@ +# Polish Week 1 Plan + +## Goal + +Establish a shared polish baseline and improve the two most trust-sensitive MVP flows: `Smart Clean` and `Apps`. + +## Must Deliver + +- MVP state audit covering default, loading, empty, partial-permission, success, and failure states +- Shared polish baseline for spacing, card hierarchy, CTA priority, status tone, and destructive-action language +- `Smart Clean` improvements for scan controls, preview readability, execution confidence, and result continuity +- `Apps` improvements for uninstall preview clarity, leftover visibility, and recovery confidence +- Narrow verification for first-run, scan, preview, execute, uninstall, and restore-adjacent flows + +## Day Plan + +- `Day 1` Audit all frozen MVP routes and record the missing states and trust gaps +- `Day 2` Tighten shared design-system primitives and copy rules before page-specific tweaks +- `Day 3` Polish `Smart Clean` from scan initiation through preview and execute feedback +- `Day 4` Polish `Apps` from refresh through uninstall preview and completion messaging +- `Day 5` Run focused verification and hold a gate review for Week 2 polish work + +## Owner Tasks + +- `Product Agent` define the polish scorecard and keep work inside the frozen MVP scope +- `UX Agent` close wording, hierarchy, and permission-guidance gaps in trust-critical surfaces +- `Mac App Agent` implement design-system and feature-level refinements for `Smart Clean` and `Apps` +- `QA Agent` verify the state matrix and catch visual or flow regressions in the primary paths +- `Docs Agent` keep backlog, execution notes, and follow-up risks in sync with the week output + +## Exit Criteria + +- `Smart Clean` and `Apps` read clearly without requiring implementation knowledge +- Primary CTAs are obvious, secondary actions are quieter, and destructive actions feel reversible +- The top-level screens no longer fall back to generic empty or ambiguous progress states in core flows +- Week 2 can focus on `Overview`, `History`, and `Permissions` without reopening Week 1 trust issues diff --git a/Docs/Execution/Release-Signing.md b/Docs/Execution/Release-Signing.md new file mode 100644 index 0000000..73d52b9 --- /dev/null +++ b/Docs/Execution/Release-Signing.md @@ -0,0 +1,70 @@ +# Release Signing and Notarization + +## Goal + +Turn Atlas for Mac from an installable local build into a publicly distributable macOS release. + +## Required Credentials + +- `Developer ID Application` certificate for app signing +- `Developer ID Installer` certificate for installer signing +- `notarytool` keychain profile for notarization + +## Environment Variables Used by Packaging + +- `ATLAS_CODESIGN_IDENTITY` +- `ATLAS_CODESIGN_KEYCHAIN` +- `ATLAS_INSTALLER_SIGN_IDENTITY` +- `ATLAS_NOTARY_PROFILE` + +## Stable Local Signing + +For local development machines that do not have Apple release certificates yet, provision a stable app-signing identity once: + +```bash +./scripts/atlas/ensure-local-signing-identity.sh +``` + +After that, `./scripts/atlas/package-native.sh` automatically prefers this local identity over ad hoc signing. This keeps the installed app bundle identity stable enough for macOS permission prompts and TCC decisions to behave consistently across rebuilds. + +Notes: + +- This local identity is only for internal/dev packaging. +- `.pkg` signing and notarization still require Apple `Developer ID Installer` and `notarytool` credentials. +- The local identity is stored in a dedicated keychain at `~/Library/Keychains/AtlasLocalSigning.keychain-db` unless overridden by env vars. + +## Preflight + +Run: + +```bash +./scripts/atlas/signing-preflight.sh +``` + +If preflight passes, the current machine is ready for signed packaging. + +## Signed Packaging + +Run: + +```bash +ATLAS_CODESIGN_IDENTITY="Developer ID Application: ()" \ +ATLAS_INSTALLER_SIGN_IDENTITY="Developer ID Installer: ()" \ +ATLAS_NOTARY_PROFILE="" \ +./scripts/atlas/package-native.sh +``` + +This signs the app bundle, emits `.zip`, `.dmg`, and `.pkg`, submits artifacts for notarization, and staples results when credentials are available. + +## Install Verification + +After packaging, validate the DMG installation path with: + +```bash +KEEP_INSTALLED_APP=1 ./scripts/atlas/verify-dmg-install.sh +``` + +## Current Repo State + +- Internal packaging can now use a stable local app-signing identity instead of ad hoc signing. +- Signed/notarized release artifacts remain blocked only by missing Apple release credentials on this machine. diff --git a/Docs/Execution/Smart-Clean-Execution-Coverage-2026-03-09.md b/Docs/Execution/Smart-Clean-Execution-Coverage-2026-03-09.md new file mode 100644 index 0000000..e250ec0 --- /dev/null +++ b/Docs/Execution/Smart-Clean-Execution-Coverage-2026-03-09.md @@ -0,0 +1,142 @@ +# Smart Clean Execution Coverage — 2026-03-09 + +## Goal + +Explain, in user-facing and release-facing terms, what `Smart Clean` can execute for real today, what still fails closed, and how recovery behaves for executed items. + +This document is intentionally simpler than `Docs/Execution/Execution-Chain-Audit-2026-03-09.md`. It is meant to support product, UX, QA, and release communication. + +## Current Position + +`Smart Clean` no longer presents a misleading “success” when only scaffold/state-based execution is available. + +The current behavior is now: + +- real scan when the upstream clean workflow succeeds +- real execution for a safe structured subset of targets +- physical restoration for executed items when recovery mappings are present +- explicit failure for unsupported or unstructured targets +- the UI now distinguishes cached plans from current-session verified plans and blocks execution until a plan is refreshed in the current session + +## What Runs for Real Today + +`Smart Clean` can physically move supported targets to Trash when the scan adapter returns structured execution targets. + +### Supported direct Trash targets + +These user-owned targets can be moved to Trash directly by the worker when they are returned as structured `targetPaths`: + +- `~/Library/Caches/*` +- `~/Library/Logs/*` +- `~/Library/Suggestions/*` +- `~/Library/Messages/Caches/*` +- `~/Library/Developer/Xcode/DerivedData/*` +- `~/.npm/*` +- `~/.npm_cache/*` +- `~/.oh-my-zsh/cache/*` +- paths containing: + - `__pycache__` + - `.next/cache` + - `component_crx_cache` + - `GoogleUpdater` + - `CoreSimulator.log` +- `.pyc` files under the current user home directory + +### Supported helper-backed targets + +Targets under these allowlisted roots can run through the helper boundary: + +- `/Applications/*` +- `~/Applications/*` +- `~/Library/LaunchAgents/*` +- `/Library/LaunchAgents/*` +- `/Library/LaunchDaemons/*` + +## What Does Not Run Yet + +The following categories remain incomplete unless they resolve to the supported structured targets above: + +- broader `System` cleanup paths +- partially aggregated dry-run results that do not yet carry executable sub-paths +- categories that only expose a summary concept rather than concrete target paths +- any Smart Clean item that requires a more privileged or more specific restore model than the current Trash-backed flow supports + +For these items, Atlas should fail closed rather than claim completion. + +## User-Facing Meaning + +### Cached vs current plan + +Atlas can persist the last generated Smart Clean plan across launches. That cached plan is useful for orientation, but it is not treated as directly executable until the current session successfully runs a fresh scan or plan update. + +The UI now makes this explicit by: + +- marking cached plans as previous results +- disabling `Run Plan` until the plan is revalidated +- showing which plan steps can run directly and which remain review-only + + +### When execution succeeds + +It means: + +- Atlas had concrete target paths for the selected plan items +- those targets were actually moved to Trash +- recovery records were created with enough information to support physical restoration for those targets + +It does **not** mean: + +- every Smart Clean category is fully implemented +- every reviewed item is physically restorable in every environment +- privileged or protected targets are universally supported yet + +### When execution is rejected + +It means Atlas is protecting trust by refusing to report a cleanup it cannot prove. + +Typical reasons: + +- the scan adapter did not produce executable targets for the item +- the current path falls outside the supported execution allowlist +- the required worker/helper capability is unavailable + +## Recovery Model + +### Physical restore available + +When a recovery item contains structured `restoreMappings`, Atlas can move the trashed item back to its original path. + +This is currently the most trustworthy recovery path because it corresponds to a real on-disk side effect. + +### Model-only restore still possible + +Older or unstructured recovery records may only restore Atlas’s internal workspace model. + +That means: + +- the item can reappear in Atlas UI state +- the underlying file may not be physically restored on disk + +## Product Messaging Guidance + +Use these statements consistently in user-facing communication: + +- `Smart Clean runs real cleanup only for supported items in the current plan.` +- `Unsupported items stay review-only until Atlas can execute them safely.` +- `Recoverable items can be restored when a recovery path is available.` +- `If Atlas cannot prove the cleanup step, it should fail instead of claiming success.` + +Avoid saying: + +- `Smart Clean cleans everything it scans` +- `All recoverable items can always be restored physically` +- `Execution succeeded` when the action only changed in-app state + +## Release Gate Recommendation + +Do not describe `Smart Clean` as fully disk-backed until all major frozen-MVP Smart Clean categories support: + +- structured execution targets +- real filesystem side effects +- physical recovery where promised +- `scan -> execute -> rescan` verification diff --git a/Docs/Execution/Smart-Clean-Manual-Verification-2026-03-09.md b/Docs/Execution/Smart-Clean-Manual-Verification-2026-03-09.md new file mode 100644 index 0000000..77a2f9c --- /dev/null +++ b/Docs/Execution/Smart-Clean-Manual-Verification-2026-03-09.md @@ -0,0 +1,142 @@ +# Smart Clean Manual Verification — 2026-03-09 + +## Goal + +Provide a fast, repeatable, machine-local verification flow for the current `Smart Clean` real-execution subset and physical recovery path. + +Use this when you want to verify real cleanup behavior on your own Mac without relying on arbitrary personal files. + +## Fixture Helper + +Use the helper script below to create disposable files only in currently supported Smart Clean execution areas: + +- `scripts/atlas/smart-clean-manual-fixtures.sh` + +Supported commands: + +```bash +./scripts/atlas/smart-clean-manual-fixtures.sh create +./scripts/atlas/smart-clean-manual-fixtures.sh status +./scripts/atlas/smart-clean-manual-fixtures.sh cleanup +``` + +## What the Helper Creates + +The helper creates disposable fixtures under these locations: + +- `~/Library/Caches/AtlasExecutionFixturesCache` +- `~/Library/Logs/AtlasExecutionFixturesLogs` +- `~/Library/Developer/Xcode/DerivedData/AtlasExecutionFixturesDerivedData` +- `~/Library/Caches/AtlasExecutionFixturesPycache` + +These locations are chosen because the current Smart Clean implementation can execute and restore them for real. + +## Verification Steps + +### 1. Prepare fixtures + +```bash +./scripts/atlas/smart-clean-manual-fixtures.sh create +``` + +Expected: +- The script prints the created roots and files. +- `status` shows non-zero size under all four fixture roots. + +### 2. Confirm upstream dry-run sees the fixtures + +```bash +bash bin/clean.sh --dry-run +``` + +Expected: +- The dry-run output or `~/.config/mole/clean-list.txt` reflects the fixture size increase under one or more higher-level roots such as: + - `~/Library/Caches` + - `~/Library/Logs` + - `~/Library/Developer/Xcode/DerivedData` +- The fixture helper `status` output gives you the exact on-disk paths to compare before and after execution. + +### 3. Run Smart Clean scan in the app + +- Open `Atlas for Mac` +- Go to `Smart Clean` +- Click `Run Scan` +- If needed, use the app search field and search for related visible terms such as `DerivedData`, `cache`, `logs`, or the exact original path shown by the helper script. + +Expected: +- A cleanup plan is generated. +- At least one fixture-backed item appears in the plan or filtered findings. +- `Estimated Space` / `预计释放空间` is non-zero. + +### 4. Execute the plan + +- Review the plan. +- Click `Run Plan` / `执行计划`. + +Expected: +- Execution completes successfully for supported fixture items. +- The app creates recovery entries. +- Atlas does not silently claim success for unsupported items. + +### 5. Verify physical side effects + +```bash +./scripts/atlas/smart-clean-manual-fixtures.sh status +``` + +Expected: +- Executed fixture files no longer exist at their original paths. +- The corresponding recovery entry exists inside the app. + +### 6. Verify scan → execute → rescan + +- Run another Smart Clean scan in the app. + +Expected: +- The executed fixture-backed items are no longer rediscovered. +- Estimated space drops accordingly. + +### 7. Verify physical restore + +- Go to `History` / `Recovery` +- Restore the executed fixture-backed item(s) + +Then run: + +```bash +./scripts/atlas/smart-clean-manual-fixtures.sh status +``` + +Expected: +- The restored file or directory is back at its original path. +- If the restored item is still reclaimable, a fresh scan can rediscover it. + +### 8. Clean up after verification + +```bash +./scripts/atlas/smart-clean-manual-fixtures.sh cleanup +``` + +Expected: +- All disposable fixture roots are removed. + +## Failure Interpretation + +### Expected failure + +If a scanned item is outside the currently supported structured safe subset, Atlas should fail closed instead of pretending to clean it. + +### Unexpected failure + +Treat these as regressions: + +- fixture files remain in place after a reported successful execution +- fixture items reappear immediately on rescan even though the original files are gone +- restore reports success but the original files do not return +- Smart Clean claims success when no executable targets exist + +## Recommended Companion Docs + +- `Docs/Execution/Smart-Clean-Execution-Coverage-2026-03-09.md` +- `Docs/Execution/Smart-Clean-QA-Checklist-2026-03-09.md` +- `Docs/Execution/Execution-Chain-Audit-2026-03-09.md` diff --git a/Docs/Execution/Smart-Clean-QA-Checklist-2026-03-09.md b/Docs/Execution/Smart-Clean-QA-Checklist-2026-03-09.md new file mode 100644 index 0000000..1f935fe --- /dev/null +++ b/Docs/Execution/Smart-Clean-QA-Checklist-2026-03-09.md @@ -0,0 +1,102 @@ +# Smart Clean QA Checklist — 2026-03-09 + +## Goal + +Provide a focused acceptance checklist for validating the current `Smart Clean` real-execution subset and physical recovery path. + +## Preconditions + +- Use a local machine where `Smart Clean` scan can run successfully +- Start from a fresh or known workspace-state file when possible +- Prefer disposable cache/test paths under the current user home directory + +## Test Matrix + +### 1. Real scan still works + +- [ ] Run `Smart Clean` scan +- [ ] Confirm the page shows a non-empty cleanup plan for at least one supported target +- [ ] Confirm the plan shows `Estimated Space` / `预计释放空间` + +Expected: +- Scan completes successfully +- The current cleanup plan is generated from real findings + +### 2. Real execution for safe target + +Recommended sample targets: +- a disposable file under `~/Library/Caches/...` +- a disposable `__pycache__` directory +- a disposable file under `~/Library/Developer/Xcode/DerivedData/...` + +Steps: +- [ ] Create a disposable target under a supported path +- [ ] Run scan and confirm the target appears in the plan +- [ ] Run the plan +- [ ] Confirm the target disappears from its original path +- [ ] Confirm a recovery entry is created + +Expected: +- Execution is accepted +- The file or directory is moved to Trash +- History and recovery both update + +### 3. Scan → execute → rescan + +- [ ] Run scan and note the item count / estimated space for the test target +- [ ] Run the plan for that target +- [ ] Run a fresh scan again + +Expected: +- The previously executed target no longer appears in scan results +- Estimated space decreases accordingly +- The app does not rediscover the same target immediately unless the target still exists physically + +### 4. Physical restore + +- [ ] Select the recovery item created from the executed target +- [ ] Run restore +- [ ] Confirm the file or directory returns to its original path +- [ ] Run scan again if relevant + +Expected: +- Restore succeeds +- The item reappears at the original path +- If the restored item still qualifies as reclaimable, a new scan can rediscover it + +### 5. Unsupported target fails closed + +Use an item that is scanned but not currently covered by the structured safe execution subset. + +- [ ] Run scan until the unsupported item appears in the plan +- [ ] Attempt execution + +Expected: +- Atlas rejects execution instead of claiming success +- The rejection reason explains that executable cleanup targets are unavailable or unsupported +- No misleading drop in reclaimable space is shown as if cleanup succeeded + +### 6. Worker/XPC fallback honesty + +- [ ] Simulate or observe worker unavailability in a development environment without `ATLAS_ALLOW_SCAFFOLD_FALLBACK=1` +- [ ] Attempt a release-facing execution action + +Expected: +- Atlas surfaces a concrete failure +- It does not silently fall back to scaffold behavior and report success + +## Regression Checks + +- [ ] `Apps` uninstall still works for bundle removal +- [ ] Existing app/package tests remain green +- [ ] `clean.sh --dry-run` still starts and exports cleanup lists successfully + +## Pass Criteria + +A Smart Clean execution change is acceptable only if all of the following are true: + +- supported targets are physically moved to Trash +- executed targets disappear on the next scan +- recovery can physically restore executed targets when mappings are present +- unsupported targets fail closed +- the UI does not imply broader execution coverage than what is actually implemented diff --git a/Docs/Execution/UI-Audit-2026-03-08.md b/Docs/Execution/UI-Audit-2026-03-08.md new file mode 100644 index 0000000..9519761 --- /dev/null +++ b/Docs/Execution/UI-Audit-2026-03-08.md @@ -0,0 +1,361 @@ +# UI Audit — 2026-03-08 + +## Scope + +This audit reviews the current Atlas for Mac frozen-MVP shell after the post-MVP polish and bilingual localization work. + +Audited surfaces: + +- `Overview` +- `Smart Clean` +- `Apps` +- `History` +- `Permissions` +- `Settings` +- `Task Center` +- app shell navigation, toolbar, keyboard shortcuts, and shared design system + +## Audit Method + +The review combines: + +- current product IA and copy guidance +- the shared SwiftUI design-system implementation +- screen-by-screen source review +- SwiftUI-focused UI guidance for hierarchy, keyboard flow, and accessibility + +Evidence anchors: + +- `Docs/IA.md` +- `Docs/COPY_GUIDELINES.md` +- `Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasDesignSystem.swift` +- `Apps/AtlasApp/Sources/AtlasApp/AppShellView.swift` +- `Apps/AtlasApp/Sources/AtlasApp/AtlasAppCommands.swift` +- feature screen implementations under `Packages/AtlasFeatures*/Sources/*` + +## Executive Summary + +Atlas for Mac has moved beyond MVP-shell quality and now reads as a real product. The UX is coherent, trust-aware, keyboard-aware, and bilingual. The strongest improvements are in information clarity, reversibility cues, and consistency of shared surfaces. + +The current gap is no longer “is this usable?” but “does this feel premium and native enough for a polished Mac utility?” + +### Current Assessment + +- Information architecture: `Strong` +- Trust and safety framing: `Strong` +- State coverage: `Strong` +- Accessibility and keyboard support: `Strong` +- Visual hierarchy depth: `Moderate` +- Density and reading rhythm: `Moderate` +- Secondary-surface polish: `Moderate` +- Premium native feel: `Moderate` + +## What Is Working Well + +### Product Clarity + +- The app shell presents a stable frozen-MVP navigation model. +- `Overview`, `Smart Clean`, and `Apps` now tell a coherent story rather than reading like disconnected feature demos. +- Trust-sensitive actions are consistently framed as preview-first and recovery-aware. + +### Interaction Model + +- Main routes are keyboard reachable. +- Core task triggers are available from both screen UI and command menus. +- Task Center behaves like a real secondary control surface, not just a debug panel. + +### Accessibility and Localization Foundations + +- Shared UI primitives now expose meaningful accessibility labels and hints. +- Stable identifiers make UI automation resilient even when visible text changes by language. +- Chinese-first localization with English switching is now structurally correct, not just cosmetic. + +## Primary Issues + +### P0 — Highest Priority + +#### 1. Card Hierarchy Is Still Too Flat + +Most major pages rely on the same card weight and spacing rhythm. This keeps the product tidy, but it reduces scan efficiency because too many sections feel equally important. + +Impact: + +- Users need more effort to identify the single most important panel on a page. +- High-signal guidance competes visually with secondary detail. + +Best-practice direction: + +- Establish one dominant “hero” block per screen. +- Reduce visual competition among secondary sections. +- Reserve stronger elevation/tone for the first action-worthy surface. + +Recommended changes: + +- `Overview`: promote the top health / reclaimable / next-step zone into a more dominant summary block. +- `Smart Clean`: make the scan / execute area the unmistakable primary zone. +- `Apps`: make uninstall preview or inventory summary visually dominant depending on state. + +#### 2. Reading Width Is Too Loose on Large Windows + +Pages currently stretch very comfortably, but on wide desktop windows the reading path becomes longer than necessary. + +Impact: + +- Long explanatory copy becomes harder to scan. +- Secondary cards feel visually disconnected. + +Best-practice direction: + +- Introduce a content-width ceiling for main reading regions. +- Let metric clusters stretch, but keep explanatory sections tighter. + +Recommended changes: + +- Add a constrained content container inside `AtlasScreen`. +- Allow dense metric rows to use more width than narrative sections. + +#### 3. Smart Clean Still Feels Like Two Primary CTA Zones + +The `Run Scan` and `Execute Preview` actions are logically distinct, but visually they still compete for primary importance. + +Impact: + +- The next best action is not always instantly obvious. +- The page feels more tool-like than guided. + +Best-practice direction: + +- Only one dominant primary action should exist at a time. +- The primary CTA should depend on state: + - no preview → `Run Scan` + - actionable preview → `Execute Preview` + +Recommended changes: + +- Downgrade the non-primary action to bordered / secondary treatment in each state. +- Keep `Refresh Preview` secondary at all times. + +#### 4. Settings Is Correct but Too Heavy + +The screen is comprehensive, but it reads as a long structured form rather than a calm preference center. + +Impact: + +- Lower discoverability of the most important controls. +- Legal / trust text visually outweighs active preferences. + +Best-practice direction: + +- Split into clearer subsections or collapsible regions. +- Keep active settings short and above long-form informational content. + +Recommended changes: + +- Separate into `General`, `Language`, `Trust`, and `Notices` visual groups. +- Move long acknowledgement text behind expansion or a deeper detail view. + +### P1 — Important Next Improvements + +#### 5. Sidebar Density Is Slightly Too High for Daily Use + +The two-line route treatment helps onboarding, but the constant subtitle presence adds noise once the user already understands the product. + +Recommended changes: + +- Reduce subtitle prominence. +- Consider showing subtitle only on selection, hover, or in onboarding mode. + +#### 6. Secondary Surfaces Trail the Primary Routes + +`Task Center` and some lower-priority sections now work well, but still feel more functional than premium. + +Recommended changes: + +- Tighten spacing and emphasis rules for popovers and secondary panels. +- Add a stronger visual relationship between summary text and follow-up action. + +#### 7. Typography Scale Could Be More Intentional + +The type hierarchy is good, but still somewhat conservative for a desktop utility with a lot of summary-driven UX. + +Recommended changes: + +- Slightly enlarge the primary summary tier. +- Slightly quiet secondary captions and helper text. +- Keep a more visible difference between page title, section title, and row title. + +#### 8. Cross-Screen Density Rules Need One More Pass + +Some screens are comfortably airy, others slightly dense. + +Recommended changes: + +- Standardize vertical rhythm for: + - card header to body spacing + - row spacing inside cards + - gap between stacked cards + +### P2 — Valuable but Not Immediate + +#### 9. Native Delight Layer + +The app is stable and clear, but not yet especially memorable. + +Potential upgrades: + +- more refined hover transitions +- better selected-state polish in the sidebar +- subtle page-entry choreography +- richer system-native empty-state illustration language + +#### 10. Progressive Disclosure for Advanced Users + +Future polish can separate mainstream users from power users without expanding scope. + +Potential upgrades: + +- compact vs comfortable density mode +- “advanced detail” toggles in `Smart Clean` and `Apps` +- richer developer-specific explanations where relevant + +## Screen-by-Screen Notes + +### Overview + +Strengths: + +- Clear high-level summary +- Good trust framing +- Useful activity surface + +Main issue: + +- Too many blocks feel equally important + +Priority: + +- `P0` + +### Smart Clean + +Strengths: + +- Strong preview-first structure +- Good risk grouping +- Good recoverability language + +Main issue: + +- CTA hierarchy still needs stronger state-based dominance + +Priority: + +- `P0` + +### Apps + +Strengths: + +- Good trust framing for uninstall +- Good leftover visibility +- Good preview-before-execute structure + +Main issue: + +- Preview state and inventory state should diverge more visually + +Priority: + +- `P0` + +### History + +Strengths: + +- Good audit and recovery framing +- Rows are readable and trustworthy + +Main issue: + +- Could feel more timeline-like and less card-list-like + +Priority: + +- `P1` + +### Permissions +n +Strengths: + +- Limited-mode messaging is strong +- Permission rationale now feels respectful and clear + +Main issue: + +- Still somewhat uniform visually; could use stronger “what to do now” emphasis + +Priority: + +- `P1` + +### Settings + +Strengths: + +- Good scope coverage +- Language switching is correctly placed +- Trust information is discoverable + +Main issue: + +- Too long and text-heavy for a premium settings surface + +Priority: + +- `P0` + +### Task Center + +Strengths: + +- Useful and keyboard friendly +- Clear bridge into History + +Main issue: + +- Still visually closer to a utility panel than a polished product surface + +Priority: + +- `P1` + +## Recommended Execution Order + +### Wave 1 + +- Reduce card hierarchy flatness +- Introduce content-width ceiling +- Make `Smart Clean` a single-primary-action screen per state +- Reduce `Settings` reading burden + +### Wave 2 + +- Refine sidebar density +- Upgrade `Task Center` and other secondary surfaces +- Tighten typography and spacing rules + +### Wave 3 + +- Add native delight polish +- Add advanced progressive disclosure where it improves clarity + +## Done When + +This UI audit is considered addressed when: + +- each major screen has an obvious primary focus region +- each state has one clearly dominant next action +- reading width feels intentionally controlled on large windows +- `Settings` no longer feels like a long documentation page +- secondary surfaces feel visually consistent with the main shell +- the product feels recognizably “Mac-native polished,” not just “well-organized SwiftUI” diff --git a/Docs/Execution/UI-Automation-Blocker.md b/Docs/Execution/UI-Automation-Blocker.md new file mode 100644 index 0000000..1acc1af --- /dev/null +++ b/Docs/Execution/UI-Automation-Blocker.md @@ -0,0 +1,92 @@ +# UI Automation Blocker + +## Status + +Investigated and resolved locally after granting Accessibility trust to the calling process. + +## Goal + +Add native macOS UI automation for `AtlasApp` using Xcode/XCTest automation targets. + +## What Was Tried + +### Attempt 1: Main project native UI testing +- Added stable accessibility identifiers to the app UI for sidebar and primary controls. +- Tried a generated UI-testing bundle path from the main project. +- Tried a host-linked unit-test bundle path to probe `XCUIApplication` support. + +### Result +- `bundle.unit-test` is not valid for `XCUIApplication`; XCTest rejects that path. +- The main-project UI-testing setup remained noisy and unsuitable as a stable repository default. + +### Attempt 2: Independent minimal repro +- Built a standalone repro under `Testing/XCUITestRepro/` with: + - one minimal SwiftUI app target + - one UI test target + - one test using `XCUIApplication` +- Generated the project with `xcodegen` +- Ran: + +```bash +xcodebuild test \ + -project Testing/XCUITestRepro/XCUITestRepro.xcodeproj \ + -scheme XCUITestRepro \ + -destination 'platform=macOS' +``` + +### Result +- The minimal repro builds, signs, launches the UI test runner, and gets farther than the main-project experiment. +- It then fails with: + - `Timed out while enabling automation mode.` + +## Conclusion + +- The dominant blocker is now identified as local macOS UI automation enablement, not Atlas business logic. +- Specifically, the current shell process is not trusted for Accessibility APIs, which is consistent with macOS UI automation bootstrap failure. +- After granting Accessibility trust to the terminal process, both the standalone repro and the Atlas main-project UI smoke tests succeed locally. + +## Evidence + +### Local permission check + +```bash +swift -e 'import ApplicationServices; print(AXIsProcessTrusted())' +``` + +Initial result on this machine before granting Accessibility trust: + +```text +false +``` + +Current result after granting Accessibility trust: + +```text +true +``` + +### Minimal repro location + +- `Testing/XCUITestRepro/project.yml` +- `Testing/XCUITestRepro/App/XCUITestReproApp.swift` +- `Testing/XCUITestRepro/UITests/XCUITestReproUITests.swift` + +### Preflight helper + +- `scripts/atlas/ui-automation-preflight.sh` + +## Outcome + +- `scripts/atlas/ui-automation-preflight.sh` now passes on this machine. +- `Testing/XCUITestRepro/` UI tests pass. +- Atlas main-project UI smoke tests pass through `scripts/atlas/run-ui-automation.sh`. + +## Remaining Constraint + +- Native UI automation still depends on Accessibility trust being granted for the process that runs `xcodebuild`. On a new machine, run the preflight first. + +## 2026-03-08 Update + +- The current machine can still hit `Timed out while enabling automation mode.` even when `AXIsProcessTrusted()` returns `true`. +- The standalone repro under `Testing/XCUITestRepro/` reproduced the same failure on 2026-03-08, which confirms the blocker is currently machine-level / environment-level rather than Atlas-product-specific. +- `scripts/atlas/run-ui-automation.sh` now retries after cleanup, and `scripts/atlas/full-acceptance.sh` now classifies the failure against the standalone repro before failing the product acceptance run. diff --git a/Docs/Execution/UI-Copy-Walkthrough-2026-03-09.md b/Docs/Execution/UI-Copy-Walkthrough-2026-03-09.md new file mode 100644 index 0000000..3c5592e --- /dev/null +++ b/Docs/Execution/UI-Copy-Walkthrough-2026-03-09.md @@ -0,0 +1,213 @@ +# UI Copy Walkthrough — 2026-03-09 + +## Goal + +This checklist translates the current Atlas UI copy system into a page-by-page review guide so future edits keep the same business meaning, terminology, and user-facing tone. + +This document assumes the frozen MVP scope: + +- `Overview` +- `Smart Clean` +- `Apps` +- `History` +- `Permissions` +- `Settings` +- supporting surfaces such as `Task Center`, toolbar, command menus, and route labels + +## Core Glossary + +Use these terms consistently across product UI, docs, QA, and release notes. + +- `Scan` — read-only analysis that collects findings. It never changes the system by itself. +- `Cleanup Plan` — the reviewed set of cleanup steps Atlas proposes from current findings. +- `Uninstall Plan` — the reviewed set of uninstall steps Atlas proposes for one app. +- `Review` — the human confirmation step before a plan runs. +- `Run Plan` / `Run Uninstall` — the action that applies a reviewed plan. +- `Estimated Space` / `预计释放空间` — the amount the current plan can free. It may decrease after execution because the plan is rebuilt from remaining items. +- `Recoverable` — Atlas can restore the result while the retention window is still open. +- `App Footprint` — the current disk space an app uses. +- `Leftover Files` — related support files, caches, or launch items shown before uninstall. +- `Limited Mode` — Atlas works with partial permissions and asks for more access only when a specific workflow needs it. + +## Global Rules + +### Meaning + +- Always explain what the user is looking at before suggesting an action. +- Distinguish `current plan` from `remaining items after execution`. +- Use `plan` as the primary noun for actionable work. Avoid relying on `preview` alone when the object is something the user can run. +- If the action opens macOS settings, say `Open System Settings` / `打开系统设置`. + +### Tone + +- Calm +- Direct +- Reassuring +- Technical only when necessary + +### CTA Rules + +- Prefer explicit verbs: `Run Scan`, `Update Plan`, `Run Plan`, `Review Plan`, `Restore`, `Open System Settings` +- Avoid vague actions such as `Continue`, `Process`, `Confirm`, `Do It` +- Secondary actions must still communicate outcome, not just mechanism + +## Surface Checklist + +### Navigation + +#### `Overview` + +Primary promise: +- Users understand current system state, estimated space opportunity, and the next safe step. + +Copy checks: +- Route subtitle must mention `health`, `estimated space`, and `next safe step` +- Main metric must say `Estimated Space` / `预计释放空间`, not a vague size label +- If a number can change after execution, the detail copy must say so explicitly + +Reject if: +- It says `reclaimable` without clarifying it comes from the current plan +- It implies cleanup has already happened when it is only estimated + +#### `Smart Clean` + +Primary promise: +- Users scan first, review the cleanup plan second, and run it only when ready. + +Copy checks: +- Screen subtitle must express the order: `scan → plan → run` +- The primary object on the page must be called `Cleanup Plan` / `清理计划` +- The primary execution CTA must say `Run Plan` / `执行计划` +- The plan-size metric must say `Estimated Space` / `预计释放空间` +- Empty states must say `No cleanup plan yet` / `还没有清理计划` +- Result copy after execution must not imply the remaining plan is the same as the one that just ran + +Reject if: +- The UI mixes `preview`, `plan`, and `execution` as if they were the same concept +- The primary CTA implies execution when the user is only rebuilding the plan +- The metric label can be mistaken for already-freed space + +#### `Apps` + +Primary promise: +- Users inspect app footprint, leftover files, and the uninstall plan before removal. + +Copy checks: +- `Preview` should be a review verb, not the main object noun +- The actionable object must be `Uninstall Plan` / `卸载计划` +- `Footprint` and `Leftover Files` must remain distinct concepts +- The destructive CTA must say `Run Uninstall` / `执行卸载` +- Row footnotes should identify leftovers clearly and avoid generic file language + +Reject if: +- App size and uninstall result size are described with the same noun without context +- `Preview` is used as the label for something the user is actually about to run +- Leftovers are described as errors or threats + +#### `History` + +Primary promise: +- Users can understand what happened and what can still be restored. + +Copy checks: +- Timeline language must distinguish `ran`, `finished`, and `still in progress` +- Recovery copy must mention the retention window where relevant +- Restore CTA and hints must make reversibility explicit + +Reject if: +- It sounds like recovery is permanent +- It hides the time window for restoration + +#### `Permissions` + +Primary promise: +- Users understand why access matters, whether it can wait, and how to proceed safely. + +Copy checks: +- The screen must frame permissions as optional until needed by a concrete workflow +- `Not Needed Yet` / `暂不需要` is preferred over pressure-heavy phrases +- The settings-opening CTA must say `Open System Settings` / `打开系统设置` +- Per-permission support text must explain when the permission matters, not just what it is + +Reject if: +- It implies Atlas itself grants access +- It pressures the user with mandatory or fear-based wording +- It mentions system scope without user-facing benefit + +#### `Settings` + +Primary promise: +- Users adjust preferences and review trust/legal information in one calm surface. + +Copy checks: +- Active preferences must read like operational controls, not legal copy +- Legal and trust text must stay descriptive and low-pressure +- Exclusions must clearly say they stay out of scans and plans +- Recovery retention wording must describe what remains recoverable and for how long + +Reject if: +- Legal copy dominates action-oriented settings +- Exclusions sound like deletions or irreversible removals + +### Supporting Surfaces + +#### `Task Center` + +Primary promise: +- Users see recent task activity and know when to open History. + +Copy checks: +- Empty state must name concrete actions that populate the timeline +- Active state must point to History for the full audit trail +- Use `task activity`, `timeline`, `audit trail`, and `recovery items` consistently + +Reject if: +- It uses internal terms such as queue/event payload/job object + +#### Toolbar and Commands + +Primary promise: +- Users understand what happens immediately when they click a global command. + +Copy checks: +- `Permissions` global action should say `check status`, not just `refresh` +- `Task Center` should describe recent activity, not background internals +- Command labels should mirror the current screen vocabulary (`Run Plan`, `Check Permission Status`, `Refresh Current Screen`) + +Reject if: +- Global actions use different verbs than in-page actions for the same behavior + +## State-by-State Review Checklist + +Use this table whenever copy changes on any screen. + +| State | Must explain | Must avoid | +|------|--------------|------------| +| Loading | What Atlas is doing right now | vague spinner-only language | +| Empty | Why the page is empty and what action repopulates it | blame, dead ends | +| Ready | What the user can review now | implying work already ran | +| Executing | What is currently being applied | silent destructive behavior | +| Completed | What finished and what changed | overstating certainty or permanence | +| Recoverable | What can still be restored and for how long | implying indefinite restore availability | +| Limited mode | What still works and when more access might help | coercive permission language | + +## Fast Acceptance Pass + +A copy change is ready to ship when all of the following are true: + +- Every primary surface has one clear noun for the object the user is acting on +- Every destructive CTA names the actual action outcome +- Every permission CTA names the real system destination +- Every reclaimable-space metric says whether it is estimated and whether it recalculates +- Recovery language always mentions reversibility or the retention window where relevant +- Chinese and English versions communicate the same product model, not just literal translations + +## Recommended Use + +Use this walkthrough when: + +- editing `Localizable.strings` +- reviewing new screens or empty states +- preparing beta QA scripts +- checking regression after feature or IA changes +- writing release notes that reference Smart Clean, Apps, History, or Permissions diff --git a/Docs/Execution/Week-01.md b/Docs/Execution/Week-01.md new file mode 100644 index 0000000..39d393c --- /dev/null +++ b/Docs/Execution/Week-01.md @@ -0,0 +1,38 @@ +# Week 1 Plan + +## Goal + +Freeze product scope, naming, compliance, and core definitions. + +## Must Deliver + +- `PRD v1` +- `IA v1` +- `Protocol v1` +- `Permission strategy v1` +- `Attribution and third-party notices v1` +- Decision and risk logs + +## Day Plan + +- `Day 1` Kickoff, naming, MVP scope, decision-log creation +- `Day 2` IA, key flows, page states, permission outline +- `Day 3` Protocol, state machine, system-boundary outline +- `Day 4` Resolve open questions +- `Day 5` Gate review and freeze + +## Owner Tasks + +- `Product Agent` freeze scope, goals, metrics, non-goals +- `UX Agent` draft IA and core flows +- `Core Agent` define models and protocol +- `System Agent` define permission matrix and boundaries +- `Adapter Agent` audit upstream capabilities +- `QA Agent` define acceptance matrix +- `Docs Agent` draft attribution and notice files + +## Exit Criteria + +- All P0 scope questions resolved +- Week 2 inputs are frozen +- Decision log updated diff --git a/Docs/Execution/Week-02.md b/Docs/Execution/Week-02.md new file mode 100644 index 0000000..f7507fd --- /dev/null +++ b/Docs/Execution/Week-02.md @@ -0,0 +1,35 @@ +# Week 2 Plan + +## Goal + +Freeze high-fidelity design input and refine schemas required for implementation. + +## Must Deliver + +- High-fidelity drafts for `Overview`, `Smart Clean`, and `Apps` +- Permission explainer sheets +- `Protocol v1.1` +- Engineering scaffold proposal +- MVP acceptance matrix v1 + +## Day Plan + +- `Day 1` Review low-fidelity flows and lock layout direction +- `Day 2` Produce `Overview` and `Smart Clean` high-fidelity drafts +- `Day 3` Produce `Apps` high-fidelity draft and uninstall preview details +- `Day 4` Freeze package and target graph +- `Day 5` Gate review + +## Owner Tasks + +- `UX Agent` deliver design screens and state variants +- `Mac App Agent` define shell, navigation, and dependency wiring +- `Core Agent` refine protocol and persistence models +- `System Agent` refine XPC and helper interfaces +- `Adapter Agent` define scan and app-footprint chains +- `QA Agent` refine acceptance matrix + +## Exit Criteria + +- Three key screens are ready for implementation slicing +- Protocol and persistence models are stable enough for Week 3 freeze diff --git a/Docs/Execution/Week-03.md b/Docs/Execution/Week-03.md new file mode 100644 index 0000000..7d05ac9 --- /dev/null +++ b/Docs/Execution/Week-03.md @@ -0,0 +1,35 @@ +# Week 3 Plan + +## Goal + +Freeze architecture, protocol, worker/helper boundaries, and scaffold inputs. + +## Must Deliver + +- `Architecture v1` +- `Protocol Schema v1` +- `Task State Machine v1` +- `Error Registry v1` +- `Worker XPC` interface +- `Privileged Helper` action allowlist +- Package and target dependency graph + +## Day Plan + +- `Day 1` Confirm minimum data required by the three core screens +- `Day 2` Freeze schema, state machine, error mapping +- `Day 3` Freeze worker/helper interfaces and validations +- `Day 4` Freeze scaffold structure and dependency graph +- `Day 5` Gate review for Week 4 engineering start + +## Owner Tasks + +- `Core Agent` freeze protocol, state, persistence +- `System Agent` freeze worker/helper interfaces +- `Mac App Agent` freeze shell and package graph +- `Adapter Agent` freeze MVP adapter paths +- `QA Agent` define contract and boundary test coverage + +## Exit Criteria + +- Week 4 can start implementation without unresolved P0 architecture blockers diff --git a/Docs/HELP_CENTER_OUTLINE.md b/Docs/HELP_CENTER_OUTLINE.md new file mode 100644 index 0000000..3b19e9c --- /dev/null +++ b/Docs/HELP_CENTER_OUTLINE.md @@ -0,0 +1,43 @@ +# Help Center Outline + +## Getting Started + +- What Atlas for Mac does +- What Atlas for Mac does not do +- First scan walkthrough +- Understanding permissions + +## Smart Clean + +- What Smart Clean can execute today +- Why some items stay review-only +- What the risk groups mean +- Why some items are rebuildable +- How exclusions and rules work +- What happens when a task partially fails + +## Apps + +- How uninstall preview works +- Why leftovers appear after app removal +- Background items and login items + +## History and Recovery + +- Where to find past runs +- When recovery restores a file physically +- Which actions are recoverable +- What happens when recovery expires + +## Permissions + +- Full Disk Access, including when a relaunch is required before status updates +- Admin-authorized actions +- Notifications and background monitoring + +## Troubleshooting + +- Scan results look incomplete +- A task was cancelled or failed +- A restore cannot return to the original path +- How to export diagnostics diff --git a/Docs/IA.md b/Docs/IA.md new file mode 100644 index 0000000..877871b --- /dev/null +++ b/Docs/IA.md @@ -0,0 +1,68 @@ +# Information Architecture + +## Primary Navigation + +- `Overview` +- `Smart Clean` +- `Apps` +- `History` +- `Permissions` +- `Settings` + +## MVP Navigation Notes + +- `History` contains recovery entry points. +- `Settings` contains acknowledgements and third-party notices. +- `Storage` remains scaffolded at the package layer but is not part of the frozen MVP app shell. + +## Screen Responsibilities + +### Overview + +- Health summary +- Reclaimable space summary +- Top issues +- Recommended actions +- Recent activity + +### Smart Clean + +- Scan initiation +- Findings grouped by `Safe`, `Review`, `Advanced` +- Selection and preview +- Execution and result summary + +### Apps + +- Installed app list +- Footprint details +- Leftovers and background item visibility +- Uninstall preview and execution + +### History + +- Timeline of runs +- Task detail view +- Recovery item access + +### Permissions + +- Permission status cards +- Guidance and system-settings links + +### Settings + +- General preferences +- App language +- Rules and exclusions +- Recovery retention +- Notifications +- Acknowledgements + +## Global Surfaces + +- Toolbar search +- Task center +- Confirmation sheets +- Error detail sheet +- Permission explainer sheet diff --git a/Docs/PRD.md b/Docs/PRD.md new file mode 100644 index 0000000..9dd3ac7 --- /dev/null +++ b/Docs/PRD.md @@ -0,0 +1,70 @@ +# PRD + +## Product + +- Working name: `Atlas for Mac` +- Category: `Mac Maintenance Workspace` +- Target platform: `macOS` + +## Positioning + +Atlas for Mac is a native desktop maintenance application that helps users understand why their Mac is slow, full, or disorganized, then take safe and explainable action. + +## Product Goals + +- Help users complete a safe space-recovery decision in minutes. +- Turn scanning into an explainable action plan. +- Unify cleanup, uninstall, permissions, history, and recovery into one workflow. +- Prefer reversible actions over permanent deletion. +- Support heavy Mac users and developer-oriented cleanup scenarios. + +## Non-Goals + +- No anti-malware suite in MVP. +- No Mac App Store release in MVP. +- No full automation rule engine in MVP. +- No advanced storage treemap in MVP. +- No menu bar utility in MVP. + +## Target Users + +- Heavy Mac users with persistent disk pressure. +- Developers with Xcode, simulators, containers, package-manager caches, and build artifacts. +- Creative users with large local media libraries. +- Cautious mainstream users who want safer maintenance than terminal tools. + +## MVP Modules + +- `Overview` +- `Smart Clean` +- `Apps` +- `History` +- `Recovery` +- `Permissions` +- `Settings` + +## Core Differentiators + +- Explainable cleanup recommendations +- Recovery-first execution model +- Unified maintenance workflow +- Developer-aware cleanup coverage +- Least-privilege permission design + +## Success Metrics + +- First scan completion rate +- Scan-to-execution conversion rate +- Permission completion rate +- Recovery success rate +- Task success rate +- User-visible space reclaimed + +## MVP Acceptance Summary + +- Users can run a scan without granting all permissions up front. +- Findings are grouped by risk and explained before execution. +- Users can preview app uninstall footprint before removal. +- Every destructive task produces a history record. +- Recoverable actions expose a restoration path. +- The app includes a visible open-source acknowledgement and third-party notices page. diff --git a/Docs/Protocol.md b/Docs/Protocol.md new file mode 100644 index 0000000..094f2de --- /dev/null +++ b/Docs/Protocol.md @@ -0,0 +1,145 @@ +# Local Protocol + +## Goals + +- Provide a stable local contract between UI, worker, and helper components. +- Avoid parsing terminal-oriented text output. +- Support progress, execution, history, recovery, settings, and helper handoff. + +## Protocol Version + +- Current implementation version: `0.3.0` + +## UI ↔ Worker Commands + +- `health.snapshot` +- `permissions.inspect` +- `scan.start` +- `plan.preview` +- `plan.execute` +- `recovery.restore` +- `apps.list` +- `apps.uninstall.preview` +- `apps.uninstall.execute` +- `settings.get` +- `settings.set` + +## Worker ↔ Helper Models + +### `AtlasHelperAction` + +- `id` +- `kind` +- `targetPath` +- `destinationPath` (required for restore-style actions) + +### `AtlasHelperActionResult` + +- `action` +- `success` +- `message` +- `resolvedPath` + +## Response Payloads + +- `accepted(task)` +- `health(snapshot)` +- `permissions(permissionStates)` +- `apps(appFootprints)` +- `preview(actionPlan)` +- `settings(settings)` +- `rejected(code, reason)` + +### Error Codes in Current Use + +- `unsupportedCommand` +- `permissionRequired` +- `helperUnavailable` +- `executionUnavailable` +- `invalidSelection` + +## Event Payloads + +- `taskProgress(taskID, completed, total)` +- `taskFinished(taskRun)` +- `permissionUpdated(permissionState)` + +## Core Schemas + +### Finding + +- `id` +- `category` +- `title` +- `detail` +- `bytes` +- `risk` +- `targetPaths` (optional structured execution targets derived from the scan adapter) + +### ActionPlan + +- `id` +- `title` +- `items` +- `estimatedBytes` + +### TaskRun + +- `id` +- `kind` +- `status` +- `summary` +- `startedAt` +- `finishedAt` + +### AppFootprint + +- `id` +- `name` +- `bundleIdentifier` +- `bundlePath` +- `bytes` +- `leftoverItems` + +### RecoveryItem + +- `id` +- `title` +- `detail` +- `originalPath` +- `bytes` +- `deletedAt` +- `expiresAt` +- `payload` +- `restoreMappings` (optional original-path ↔ trashed-path records for physical restoration) + +### AtlasSettings + +- `recoveryRetentionDays` +- `notificationsEnabled` +- `excludedPaths` +- `language` +- `acknowledgementText` +- `thirdPartyNoticesText` + +## Protocol Rules + +- Progress must be monotonic. +- Rejected requests return a stable code plus a user-facing reason. +- Destructive flows must end in a history record. +- Recoverable flows must produce structured recovery items. +- Helper actions must remain allowlisted structured actions, never arbitrary command strings. + +## Current Implementation Note + +- `health.snapshot` is backed by `lib/check/health_json.sh` through `MoleHealthAdapter`. +- `scan.start` is backed by `bin/clean.sh --dry-run` through `MoleSmartCleanAdapter` when the upstream workflow succeeds. If it cannot complete, the worker now rejects the request instead of silently fabricating scan results. +- `apps.list` is backed by `MacAppsInventoryAdapter`, which scans local app bundles and derives leftover counts. +- The worker persists a local JSON-backed workspace state containing the latest snapshot, current Smart Clean plan, and settings, including the persisted app-language preference. +- Atlas localizes user-facing shell copy through a package-scoped resource bundle and uses the persisted language to keep summaries and settings text aligned. +- App uninstall can invoke the packaged or development helper executable through structured JSON actions. +- Structured Smart Clean findings can now carry executable target paths, and a safe subset of those targets can be moved to Trash and physically restored later. +- The app shell communicates with the worker over structured XPC `Data` payloads that encode Atlas request and result envelopes. + +- `executePlan` is fail-closed for unsupported targets, but now supports a real Trash-based execution path for a safe structured subset of Smart Clean items. +- `recovery.restore` can physically restore items when `restoreMappings` are present; otherwise it falls back to model rehydration only. diff --git a/Docs/README.md b/Docs/README.md new file mode 100644 index 0000000..4f65310 --- /dev/null +++ b/Docs/README.md @@ -0,0 +1,58 @@ +# Atlas for Mac Docs + +This directory contains the working product, design, engineering, and compliance documents for the Atlas for Mac desktop application. + +## Principles + +- Atlas for Mac is an independent product. +- The project does not use the Mole brand in user-facing naming. +- The project may reuse or adapt parts of the upstream Mole codebase under the MIT License. +- User-facing flows should prefer explainability, reversibility, and least privilege. + +## Document Map + +- `PRD.md` — product requirements and MVP scope +- `IA.md` — information architecture and navigation model +- `Architecture.md` — application architecture and process boundaries +- `Protocol.md` — local JSON protocol and core schemas +- `TaskStateMachine.md` — task lifecycle rules +- `ErrorCodes.md` — user-facing and system error registry +- `ROADMAP.md` — 12-week MVP execution plan +- `Backlog.md` — epics, issue seeds, and board conventions +- `DECISIONS.md` — frozen product and architecture decisions +- `RISKS.md` — active project risk register +- `Execution/` — weekly execution plans, status snapshots, beta checklists, gate reviews, manual test SOPs, and release execution notes +- `Execution/Current-Status-2026-03-07.md` — current engineering status snapshot +- `Execution/UI-Audit-2026-03-08.md` — UI design audit and prioritized remediation directions +- `Execution/UI-Copy-Walkthrough-2026-03-09.md` — page-by-page UI copy glossary, consistency checklist, and acceptance guide +- `Execution/Execution-Chain-Audit-2026-03-09.md` — end-to-end review of real vs scaffold execution paths and release-facing trust gaps +- `Execution/Smart-Clean-Execution-Coverage-2026-03-09.md` — user-facing summary of what Smart Clean can execute for real today +- `Execution/Smart-Clean-QA-Checklist-2026-03-09.md` — QA checklist for scan, execute, rescan, and physical restore validation +- `Execution/Smart-Clean-Manual-Verification-2026-03-09.md` — local-machine fixture workflow for validating real Smart Clean execution and restore +- `Templates/` — issue, epic, ADR, gate, and handoff templates +- `WORKSPACE_LAYOUT.md` — planned repository and module structure +- `HELP_CENTER_OUTLINE.md` — help center structure +- `COPY_GUIDELINES.md` — product voice and UI copy rules +- `ATTRIBUTION.md` — upstream acknowledgement strategy +- `THIRD_PARTY_NOTICES.md` — third-party notices and license references +- `ADR/` — architecture decision records +- `Sequence/` — flow-level engineering sequence documents + +## Ownership + +- Product decisions: `Product Agent` +- Interaction and content design: `UX Agent` +- App implementation: `Mac App Agent` +- Protocol and domain model: `Core Agent` +- XPC and privileged integration: `System Agent` +- Upstream adaptation: `Adapter Agent` +- Verification: `QA Agent` +- Distribution and release: `Release Agent` +- Compliance and docs: `Docs Agent` + +## Update Rules + +- Update `PRD.md` before changing MVP scope. +- Update `Protocol.md` and `TaskStateMachine.md` together when task lifecycle or schema changes. +- Add or update an ADR for any process-boundary, privilege, or storage decision. +- Keep `ATTRIBUTION.md` and `THIRD_PARTY_NOTICES.md` in sync with shipped code. diff --git a/Docs/RISKS.md b/Docs/RISKS.md new file mode 100644 index 0000000..099c965 --- /dev/null +++ b/Docs/RISKS.md @@ -0,0 +1,98 @@ +# Risk Register + +## R-001 XPC and Helper Complexity + +- Impact: High +- Probability: Medium +- Owner: `System Agent` +- Risk: Worker/helper setup and privilege boundaries may delay implementation. +- Mitigation: Complete architecture and helper allowlist freeze before scaffold build. + +## R-002 Upstream Adapter Instability + +- Impact: High +- Probability: High +- Owner: `Adapter Agent` +- Risk: Existing upstream commands may not expose stable structured data. +- Mitigation: Add adapter normalization layer and rewrite hot paths if JSON mapping is brittle. + +## R-003 Permission Friction + +- Impact: High +- Probability: Medium +- Owner: `UX Agent` +- Risk: Aggressive permission prompts may reduce activation. +- Mitigation: Use just-in-time prompts and support limited mode. + +## R-004 Recovery Trust Gap + +- Impact: High +- Probability: Medium +- Owner: `Core Agent` +- Risk: Users may not trust destructive actions without clear rollback behavior. +- Mitigation: Prefer reversible actions and preserve detailed history. + +## R-005 Scope Creep + +- Impact: High +- Probability: High +- Owner: `Product Agent` +- Risk: P1 features may leak into MVP. +- Mitigation: Freeze MVP scope and require explicit decision-log updates for scope changes. + +## R-006 Signing and Notarization Surprises + +- Impact: High +- Probability: Medium +- Owner: `Release Agent` +- Risk: Helper signing or notarization may fail late in the schedule. +- Mitigation: Validate packaging flow before feature-complete milestone. Current repo now includes native build/package scripts and CI workflow, but signing and notarization still depend on release credentials. + +## R-007 Experience Polish Drift + +- Impact: High +- Probability: High +- Owner: `Mac App Agent` +- Risk: MVP screens may continue to diverge in spacing, CTA hierarchy, and state handling as teams polish pages independently. +- Mitigation: Route visual and interaction changes through shared design-system components before page-level tweaks land. + +## R-008 Trust Gap in Destructive Flows + +- Impact: High +- Probability: Medium +- Owner: `UX Agent` +- Risk: Users may still hesitate to run `Smart Clean` or uninstall actions if recovery, review, and consequence messaging stay too subtle. +- Mitigation: Make recoverability, risk level, and next-step guidance visible at decision points and in completion states. + +## R-009 State Coverage Debt + +- Impact: High +- Probability: Medium +- Owner: `QA Agent` +- Risk: Loading, empty, partial-permission, and failure states may feel unfinished even when the happy path is functional. +- Mitigation: Require state-matrix coverage for primary screens before additional visual polish is considered complete. + + +## R-010 Localization Drift + +- Impact: Medium +- Probability: Medium +- Owner: `Docs Agent` +- Risk: Newly added Chinese and English strings may drift between UI, worker summaries, and future screens if copy changes bypass the shared localization layer. +- Mitigation: Keep user-facing shell copy in shared localization resources and require bilingual QA before release-facing packaging. + +## R-011 Smart Clean Execution Trust Gap + +- Impact: High +- Probability: High +- Owner: `System Agent` +- Risk: `Smart Clean` execution now supports a real Trash-based path for a safe subset of targets, but unsupported or unstructured findings still cannot be executed and must fail closed. Physical restore also remains partial and depends on structured recovery mappings. +- Mitigation: Add real Smart Clean execution targets and block release-facing execution claims until `scan -> execute -> rescan` proves real disk impact. + +## R-012 Silent Worker Fallback Masks Execution Capability + +- Impact: High +- Probability: Medium +- Owner: `System Agent` +- Risk: Silent fallback from XPC to the scaffold worker can make user-facing execution appear successful even when the primary worker path is unavailable. +- Mitigation: Restrict fallback to explicit development mode or surface a concrete error when real execution infrastructure is unavailable. diff --git a/Docs/ROADMAP.md b/Docs/ROADMAP.md new file mode 100644 index 0000000..18c362c --- /dev/null +++ b/Docs/ROADMAP.md @@ -0,0 +1,59 @@ +# MVP Roadmap + +## Timeline + +### Week 1 + +- Freeze MVP scope +- Freeze naming, compliance, and acknowledgement strategy +- Freeze product goals and success metrics + +### Week 2 + +- Freeze IA and high-fidelity design input for key screens +- Freeze interaction states and permission explainers + +### Week 3 + +- Freeze architecture, protocol, state machine, and helper boundaries + +### Week 4 + +- Create engineering scaffold and mock-data application shell + +### Week 5 + +- Ship scan initiation and result pipeline + +### Week 6 + +- Ship action-plan preview and cleanup execution path + +### Week 7 + +- Ship apps list and uninstall preview flow + +### Week 8 + +- Ship permissions center, history, and recovery views + +### Week 9 + +- Integrate privileged helper path and audit trail + +### Week 10 + +- Run quality, regression, and performance hardening + +### Week 11 + +- Produce beta candidate and packaging pipeline + +### Week 12 + +- Internal beta wrap-up and release-readiness review + +## MVP Scope + +- In scope: `Overview`, `Smart Clean`, `Apps`, `History`, `Recovery`, `Permissions` +- Deferred to P1: `Storage treemap`, `Menu Bar`, `Automation` diff --git a/Docs/Sequence/execute-flow.md b/Docs/Sequence/execute-flow.md new file mode 100644 index 0000000..a68065a --- /dev/null +++ b/Docs/Sequence/execute-flow.md @@ -0,0 +1,21 @@ +# Execute Flow + +## Actors + +- User +- AtlasApp +- AtlasWorkerClient +- AtlasWorkerXPC +- AtlasPrivilegedHelper +- AtlasStore + +## Sequence + +1. User previews a plan and confirms execution. +2. App sends `task.execute`. +3. Worker splits actions into privileged and non-privileged work. +4. Worker performs non-privileged actions directly. +5. Worker submits allowlisted privileged actions to helper when needed. +6. Worker streams progress, warnings, and per-item results. +7. Worker persists task result and recoverable items. +8. App renders a result page with success, warnings, and recovery actions. diff --git a/Docs/Sequence/restore-flow.md b/Docs/Sequence/restore-flow.md new file mode 100644 index 0000000..ed81f39 --- /dev/null +++ b/Docs/Sequence/restore-flow.md @@ -0,0 +1,19 @@ +# Restore Flow + +## Actors + +- User +- AtlasApp +- AtlasWorkerClient +- AtlasWorkerXPC +- AtlasPrivilegedHelper +- AtlasStore + +## Sequence + +1. User selects one or more recovery items. +2. App sends `recovery.restore`. +3. Worker validates recovery windows and target conflicts. +4. Worker restores items directly or via helper when required. +5. Worker persists restore result and updates recovery status. +6. App renders restored, failed, or expired outcomes. diff --git a/Docs/Sequence/scan-flow.md b/Docs/Sequence/scan-flow.md new file mode 100644 index 0000000..fe4e5d2 --- /dev/null +++ b/Docs/Sequence/scan-flow.md @@ -0,0 +1,21 @@ +# Scan Flow + +## Actors + +- User +- AtlasApp +- AtlasWorkerClient +- AtlasWorkerXPC +- AtlasCoreAdapters +- AtlasStore + +## Sequence + +1. User starts a scan. +2. App sends `scan.start`. +3. Worker validates scope and permissions. +4. Worker invokes one or more adapters. +5. Worker streams progress events. +6. Worker aggregates findings and summary. +7. Worker persists scan summary. +8. App renders grouped findings or a limited-results banner. diff --git a/Docs/THIRD_PARTY_NOTICES.md b/Docs/THIRD_PARTY_NOTICES.md new file mode 100644 index 0000000..5105dfc --- /dev/null +++ b/Docs/THIRD_PARTY_NOTICES.md @@ -0,0 +1,24 @@ +# Third-Party Notices + +This project includes planning for third-party and upstream open-source acknowledgements. + +## Upstream Project + +- Project: `Mole` +- Source: `https://github.com/tw93/mole` +- License: `MIT` +- Copyright: `tw93` + +## Distribution Rule + +If Atlas for Mac ships code derived from upstream Mole sources, the applicable copyright notice and MIT license text must be included in copies or substantial portions of the software. + +## Notice Template + +```text +This product includes software derived from the open-source project Mole by tw93 and contributors, used under the MIT License. +``` + +## Future Additions + +Add all additional third-party libraries, bundled binaries, or copied source components here before release. diff --git a/Docs/TaskStateMachine.md b/Docs/TaskStateMachine.md new file mode 100644 index 0000000..91827b4 --- /dev/null +++ b/Docs/TaskStateMachine.md @@ -0,0 +1,68 @@ +# Task State Machine + +## Task Types + +- `scan` +- `execute_clean` +- `execute_uninstall` +- `restore` +- `inspect_permissions` +- `health_snapshot` + +## Main States + +- `draft` +- `submitted` +- `validating` +- `awaiting_permission` +- `queued` +- `running` +- `cancelling` +- `completed` +- `partial_failed` +- `failed` +- `cancelled` +- `expired` + +## Terminal States + +- `completed` +- `partial_failed` +- `failed` +- `cancelled` +- `expired` + +## Core Transition Rules + +- `draft -> submitted` +- `submitted -> validating` +- `validating -> awaiting_permission | queued | failed` +- `awaiting_permission -> queued | cancelled | failed` +- `queued -> running | cancelled` +- `running -> cancelling | completed | partial_failed | failed` +- `cancelling -> cancelled` + +## Action Item States + +- `pending` +- `running` +- `succeeded` +- `skipped` +- `failed` +- `cancelled` + +## Guarantees + +- Terminal states are immutable. +- Progress must not move backwards. +- Destructive tasks must be audited. +- Recoverable tasks must leave structured recovery entries until restored or expired. +- Repeated write requests must honor idempotency rules when those flows become externally reentrant. + +## Current MVP Notes + +- `scan` emits monotonic progress and finishes with a preview-ready plan when the upstream scan adapter succeeds; otherwise the request should fail rather than silently fabricate findings. +- `execute_clean` must not report completion in release-facing flows unless real cleanup side effects have been applied. Unsupported or unstructured targets should fail closed. +- `execute_uninstall` removes an app from the current workspace view and creates a recovery entry. +- `restore` can physically restore items when structured recovery mappings are present, and can still rehydrate a `Finding` or an `AppFootprint` into Atlas state from the recovery payload. +- User-visible task summaries and settings-driven text should reflect the persisted app-language preference when generated. diff --git a/Docs/Templates/ADR_TEMPLATE.md b/Docs/Templates/ADR_TEMPLATE.md new file mode 100644 index 0000000..e9b4ddb --- /dev/null +++ b/Docs/Templates/ADR_TEMPLATE.md @@ -0,0 +1,24 @@ +# ADR-XXX: Title + +## Status + +- Proposed +- Accepted +- Superseded + +## Context + +Describe the problem, tradeoffs, and constraints. + +## Decision + +State the architectural decision clearly. + +## Consequences + +List positive, negative, and follow-on consequences. + +## Alternatives Considered + +- Alternative A +- Alternative B diff --git a/Docs/Templates/AGENT_HANDOFF_TEMPLATE.md b/Docs/Templates/AGENT_HANDOFF_TEMPLATE.md new file mode 100644 index 0000000..f57222b --- /dev/null +++ b/Docs/Templates/AGENT_HANDOFF_TEMPLATE.md @@ -0,0 +1,23 @@ +# Agent Handoff Template + +## Completed + +- What is done? + +## Changed Artifacts + +- Files or documents changed + +## Open Questions + +- Question 1 +- Question 2 + +## Risks and Blockers + +- Blocker 1 +- Risk 1 + +## Recommended Next Step + +What should the next Agent do immediately? diff --git a/Docs/Templates/EPIC_TEMPLATE.md b/Docs/Templates/EPIC_TEMPLATE.md new file mode 100644 index 0000000..4f71312 --- /dev/null +++ b/Docs/Templates/EPIC_TEMPLATE.md @@ -0,0 +1,43 @@ +# Epic Template + +## Epic Name + +## Problem Statement + +What user or product problem does this epic solve? + +## User Outcome + +What should users be able to do after this epic is complete? + +## In Scope + +- Item 1 +- Item 2 + +## Out of Scope + +- Item 1 +- Item 2 + +## Dependencies + +- Decision log entries +- Related epics +- Blocking technical capabilities + +## Milestones + +- Milestone 1 +- Milestone 2 +- Milestone 3 + +## Success Metrics + +- Metric 1 +- Metric 2 + +## Risks + +- Risk 1 +- Risk 2 diff --git a/Docs/Templates/GATE_REVIEW_TEMPLATE.md b/Docs/Templates/GATE_REVIEW_TEMPLATE.md new file mode 100644 index 0000000..6a0ce4b --- /dev/null +++ b/Docs/Templates/GATE_REVIEW_TEMPLATE.md @@ -0,0 +1,32 @@ +# Gate Review Template + +## Gate + +- `Week 1` +- `Week 2` +- `Week 3` +- `Feature Complete` +- `Beta Candidate` + +## Readiness Checklist + +- [ ] Required P0 tasks complete +- [ ] Docs updated +- [ ] Risks reviewed +- [ ] Open questions below threshold +- [ ] Next-stage inputs available + +## Blockers + +- Blocker 1 + +## Decision + +- `Pass` +- `Pass with Conditions` +- `Fail` + +## Follow-up Actions + +- Action 1 +- Action 2 diff --git a/Docs/Templates/ISSUE_TEMPLATE.md b/Docs/Templates/ISSUE_TEMPLATE.md new file mode 100644 index 0000000..096d00e --- /dev/null +++ b/Docs/Templates/ISSUE_TEMPLATE.md @@ -0,0 +1,56 @@ +# Issue Template + +## Summary + +Describe the intended outcome in one sentence. + +## Type + +- `Feature` +- `Design` +- `Architecture` +- `Protocol` +- `System` +- `QA` +- `Release` +- `Docs` + +## Owner Agent + +Assign exactly one primary owner. + +## Priority + +- `P0` +- `P1` +- `P2` + +## Dependencies + +List blocking issues, decisions, or documents. + +## Scope + +What is included in this issue? + +## Non-Scope + +What should not be done as part of this issue? + +## Acceptance Criteria + +- Criterion 1 +- Criterion 2 +- Criterion 3 + +## Deliverables + +- Document, schema, design file, code path, or test artifact + +## Risks + +List known implementation or coordination risks. + +## Handoff Notes + +Capture what the next Agent needs to know. diff --git a/Docs/WORKSPACE_LAYOUT.md b/Docs/WORKSPACE_LAYOUT.md new file mode 100644 index 0000000..1b6d36c --- /dev/null +++ b/Docs/WORKSPACE_LAYOUT.md @@ -0,0 +1,65 @@ +# Workspace Layout + +## Top-Level Directories + +- `Apps/` — user-facing app targets +- `Packages/` — shared Swift packages +- `XPC/` — XPC service targets +- `Helpers/` — privileged helper targets +- `MenuBar/` — deferred menu-bar target area +- `Testing/` — shared testing support and future test targets +- `Docs/` — product, design, engineering, and compliance documents + +## Planned Module Layout + +### App Shell + +- `Apps/AtlasApp/` +- `Apps/Package.swift` + +### Shared Packages + +- `Packages/Package.swift` +- `Packages/AtlasDesignSystem/` +- `Packages/AtlasDomain/` +- `Packages/AtlasApplication/` +- `Packages/AtlasProtocol/` +- `Packages/AtlasInfrastructure/` +- `Packages/AtlasCoreAdapters/` +- `Packages/AtlasFeaturesOverview/` +- `Packages/AtlasFeaturesSmartClean/` +- `Packages/AtlasFeaturesApps/` +- `Packages/AtlasFeaturesStorage/` +- `Packages/AtlasFeaturesHistory/` +- `Packages/AtlasFeaturesPermissions/` +- `Packages/AtlasFeaturesSettings/` + +### Services + +- `XPC/Package.swift` +- `Helpers/Package.swift` +- `XPC/AtlasWorkerXPC/` +- `Helpers/AtlasPrivilegedHelper/` + +### Deferred Targets + +- `MenuBar/AtlasMenuBar/` + +### Test Support + +- `Testing/Package.swift` +- `Testing/AtlasTestingSupport/` + +## Current Scaffold Conventions + +- `Apps/Package.swift` hosts the main `AtlasApp` executable target. +- `Packages/Package.swift` hosts shared library products with sources under `Packages/*/Sources/*`. +- `XPC/Package.swift` and `Helpers/Package.swift` host the worker and helper executable stubs. +- Root `project.yml` also generates an `AtlasWorkerXPC` macOS `xpc-service` target for the app bundle. +- `Testing/Package.swift` hosts shared fixtures and future contract-test helpers. +- `MenuBar/` remains README-only until deferred P1 scope is explicitly reopened. +- Root `project.yml` generates `Atlas.xcodeproj` through `xcodegen` for the native app shell. + +## Rule + +Create implementation files inside these directories rather than introducing new top-level structures unless an ADR records the change. Keep `project.yml` as the source of truth for regenerating `Atlas.xcodeproj`. diff --git a/Helpers/AtlasPrivilegedHelper/README.md b/Helpers/AtlasPrivilegedHelper/README.md new file mode 100644 index 0000000..a2384ae --- /dev/null +++ b/Helpers/AtlasPrivilegedHelper/README.md @@ -0,0 +1,14 @@ +# AtlasPrivilegedHelper + +## Responsibility + +- Execute allowlisted structured actions only +- Validate target paths before execution +- Return structured JSON results to the worker boundary + +## Current Actions + +- `trashItems` +- `restoreItem` +- `removeLaunchService` +- `repairOwnership` (reserved placeholder for future privileged expansion) diff --git a/Helpers/AtlasPrivilegedHelper/Sources/AtlasPrivilegedHelper/main.swift b/Helpers/AtlasPrivilegedHelper/Sources/AtlasPrivilegedHelper/main.swift new file mode 100644 index 0000000..013c6d4 --- /dev/null +++ b/Helpers/AtlasPrivilegedHelper/Sources/AtlasPrivilegedHelper/main.swift @@ -0,0 +1,36 @@ +import AtlasPrivilegedHelperCore +import AtlasProtocol +import Foundation + +@main +struct AtlasPrivilegedHelperMain { + static func main() { + if CommandLine.arguments.contains("--action-json") { + runJSONActionMode() + return + } + + let actions = AtlasPrivilegedActionKind.allCases.map(\.rawValue).joined(separator: ", ") + print("AtlasPrivilegedHelper ready") + print("Allowlisted actions: \(actions)") + } + + private static func runJSONActionMode() { + let inputData = FileHandle.standardInput.readDataToEndOfFile() + let decoder = JSONDecoder() + let encoder = JSONEncoder() + encoder.outputFormatting = [.sortedKeys] + + do { + let action = try decoder.decode(AtlasHelperAction.self, from: inputData) + let result = try AtlasPrivilegedHelperActionExecutor().perform(action) + FileHandle.standardOutput.write(try encoder.encode(result)) + } catch { + let fallbackAction = AtlasHelperAction(kind: .trashItems, targetPath: "") + let result = AtlasHelperActionResult(action: fallbackAction, success: false, message: error.localizedDescription) + if let data = try? encoder.encode(result) { + FileHandle.standardOutput.write(data) + } + } + } +} diff --git a/Helpers/AtlasPrivilegedHelper/Sources/AtlasPrivilegedHelperCore/HelperActionExecutor.swift b/Helpers/AtlasPrivilegedHelper/Sources/AtlasPrivilegedHelperCore/HelperActionExecutor.swift new file mode 100644 index 0000000..7b302cc --- /dev/null +++ b/Helpers/AtlasPrivilegedHelper/Sources/AtlasPrivilegedHelperCore/HelperActionExecutor.swift @@ -0,0 +1,204 @@ +import AtlasProtocol +import Foundation +import Darwin + +public struct AtlasPrivilegedHelperActionExecutor { + private let fileManager: FileManager + private let allowedRoots: [String] + private let currentUserID: UInt32 + private let currentGroupID: UInt32 + private let homeDirectoryURL: URL + + public init( + fileManager: FileManager = .default, + allowedRoots: [String]? = nil, + currentUserID: UInt32 = getuid(), + currentGroupID: UInt32 = getgid(), + homeDirectoryURL: URL = FileManager.default.homeDirectoryForCurrentUser + ) { + self.fileManager = fileManager + self.currentUserID = currentUserID + self.currentGroupID = currentGroupID + self.homeDirectoryURL = homeDirectoryURL + self.allowedRoots = allowedRoots ?? [ + URL(fileURLWithPath: "/Applications", isDirectory: true).path, + homeDirectoryURL.appendingPathComponent("Applications", isDirectory: true).path, + homeDirectoryURL.appendingPathComponent("Library/LaunchAgents", isDirectory: true).path, + URL(fileURLWithPath: "/Library/LaunchAgents", isDirectory: true).path, + URL(fileURLWithPath: "/Library/LaunchDaemons", isDirectory: true).path, + ] + } + + public func perform(_ action: AtlasHelperAction) throws -> AtlasHelperActionResult { + let targetURL = URL(fileURLWithPath: action.targetPath).resolvingSymlinksInPath() + let destinationURL = action.destinationPath.map { URL(fileURLWithPath: $0).resolvingSymlinksInPath() } + try validate(action: action, targetURL: targetURL, destinationURL: destinationURL) + + switch action.kind { + case .trashItems: + var trashedURL: NSURL? + try fileManager.trashItem(at: targetURL, resultingItemURL: &trashedURL) + return AtlasHelperActionResult( + action: action, + success: true, + message: "Moved item to Trash.", + resolvedPath: (trashedURL as URL?)?.path + ) + case .restoreItem: + guard let destinationURL else { + throw HelperValidationError.invalidRestoreDestination(nil) + } + try fileManager.createDirectory(at: destinationURL.deletingLastPathComponent(), withIntermediateDirectories: true) + try fileManager.moveItem(at: targetURL, to: destinationURL) + return AtlasHelperActionResult( + action: action, + success: true, + message: "Restored item from Trash.", + resolvedPath: destinationURL.path + ) + case .removeLaunchService: + try fileManager.removeItem(at: targetURL) + return AtlasHelperActionResult( + action: action, + success: true, + message: "Removed launch service file.", + resolvedPath: targetURL.path + ) + case .repairOwnership: + return try repairOwnership(for: action, targetURL: targetURL) + } + } + + private func repairOwnership(for action: AtlasHelperAction, targetURL: URL) throws -> AtlasHelperActionResult { + let targets = try ownershipTargets(for: targetURL) + var updatedCount = 0 + var failedPaths: [String] = [] + + for url in targets { + do { + let attributes = try fileManager.attributesOfItem(atPath: url.path) + let ownerID = attributes[.ownerAccountID] as? NSNumber + let groupID = attributes[.groupOwnerAccountID] as? NSNumber + let alreadyOwned = ownerID?.uint32Value == currentUserID && groupID?.uint32Value == currentGroupID + + if !alreadyOwned { + try fileManager.setAttributes([ + .ownerAccountID: NSNumber(value: currentUserID), + .groupOwnerAccountID: NSNumber(value: currentGroupID), + ], ofItemAtPath: url.path) + updatedCount += 1 + } + } catch { + failedPaths.append(url.path) + } + } + + if !failedPaths.isEmpty { + throw HelperValidationError.repairOwnershipFailed(failedPaths) + } + + let message: String + if updatedCount == 0 { + message = "Ownership already matched the current user." + } else { + message = "Repaired ownership for \(updatedCount) item\(updatedCount == 1 ? "" : "s")." + } + + return AtlasHelperActionResult( + action: action, + success: true, + message: message, + resolvedPath: targetURL.path + ) + } + + private func ownershipTargets(for rootURL: URL) throws -> [URL] { + var urls: [URL] = [rootURL] + + let values = try rootURL.resourceValues(forKeys: [.isDirectoryKey, .isSymbolicLinkKey]) + guard values.isDirectory == true, values.isSymbolicLink != true else { + return urls + } + + if let enumerator = fileManager.enumerator( + at: rootURL, + includingPropertiesForKeys: [.isDirectoryKey, .isSymbolicLinkKey], + options: [.skipsHiddenFiles] + ) { + for case let url as URL in enumerator { + let resourceValues = try? url.resourceValues(forKeys: [.isSymbolicLinkKey]) + if resourceValues?.isSymbolicLink == true { + continue + } + urls.append(url) + } + } + + return urls + } + + private func validate(action: AtlasHelperAction, targetURL: URL, destinationURL: URL?) throws { + guard fileManager.fileExists(atPath: targetURL.path) else { + throw HelperValidationError.pathNotFound(targetURL.path) + } + + let isAllowed = { (url: URL) in + allowedRoots.contains { root in + url.path == root || url.path.hasPrefix(root + "/") + } + } + + switch action.kind { + case .trashItems, .removeLaunchService, .repairOwnership: + guard isAllowed(targetURL) else { + throw HelperValidationError.pathNotAllowed(targetURL.path) + } + case .restoreItem: + let trashRoot = homeDirectoryURL.appendingPathComponent(".Trash", isDirectory: true).path + guard targetURL.path == trashRoot || targetURL.path.hasPrefix(trashRoot + "/") else { + throw HelperValidationError.pathNotAllowed(targetURL.path) + } + guard let destinationURL else { + throw HelperValidationError.invalidRestoreDestination(nil) + } + guard isAllowed(destinationURL) else { + throw HelperValidationError.invalidRestoreDestination(destinationURL.path) + } + if fileManager.fileExists(atPath: destinationURL.path) { + throw HelperValidationError.restoreDestinationExists(destinationURL.path) + } + } + + if action.kind == .removeLaunchService { + guard targetURL.pathExtension == "plist" else { + throw HelperValidationError.invalidLaunchServicePath(targetURL.path) + } + } + } +} + +enum HelperValidationError: LocalizedError { + case pathNotFound(String) + case pathNotAllowed(String) + case invalidLaunchServicePath(String) + case invalidRestoreDestination(String?) + case restoreDestinationExists(String) + case repairOwnershipFailed([String]) + + var errorDescription: String? { + switch self { + case let .pathNotFound(path): + return "Target path not found: \(path)" + case let .pathNotAllowed(path): + return "Target path is outside the helper allowlist: \(path)" + case let .invalidLaunchServicePath(path): + return "Launch service removal requires a plist path: \(path)" + case let .invalidRestoreDestination(path): + return "Restore destination is invalid: \(path ?? "")" + case let .restoreDestinationExists(path): + return "Restore destination already exists: \(path)" + case let .repairOwnershipFailed(paths): + return "Failed to repair ownership for: \(paths.joined(separator: ", "))" + } + } +} diff --git a/Helpers/AtlasPrivilegedHelper/Tests/AtlasPrivilegedHelperTests/AtlasPrivilegedHelperTests.swift b/Helpers/AtlasPrivilegedHelper/Tests/AtlasPrivilegedHelperTests/AtlasPrivilegedHelperTests.swift new file mode 100644 index 0000000..1ff7a80 --- /dev/null +++ b/Helpers/AtlasPrivilegedHelper/Tests/AtlasPrivilegedHelperTests/AtlasPrivilegedHelperTests.swift @@ -0,0 +1,51 @@ +import XCTest +@testable import AtlasPrivilegedHelperCore +import AtlasProtocol + +final class AtlasPrivilegedHelperTests: XCTestCase { + func testRepairOwnershipSucceedsForAllowedCurrentUserFile() throws { + let root = makeAllowedRoot() + let fileURL = root.appendingPathComponent("Sample.txt") + try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true) + try Data("sample".utf8).write(to: fileURL) + + let executor = AtlasPrivilegedHelperActionExecutor(allowedRoots: [root.path]) + let result = try executor.perform(AtlasHelperAction(kind: .repairOwnership, targetPath: fileURL.path)) + + XCTAssertTrue(result.success) + XCTAssertEqual(result.resolvedPath, fileURL.path) + } + + func testRestoreItemMovesTrashedFileBackToAllowedDestination() throws { + let home = FileManager.default.temporaryDirectory.appendingPathComponent(UUID().uuidString, isDirectory: true) + let trash = home.appendingPathComponent(".Trash", isDirectory: true) + let root = home.appendingPathComponent("Applications", isDirectory: true) + let sourceURL = trash.appendingPathComponent("Sample.app", isDirectory: true) + let destinationURL = root.appendingPathComponent("Sample.app", isDirectory: true) + try FileManager.default.createDirectory(at: sourceURL, withIntermediateDirectories: true) + try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true) + + let executor = AtlasPrivilegedHelperActionExecutor(allowedRoots: [root.path], homeDirectoryURL: home) + let result = try executor.perform(AtlasHelperAction(kind: .restoreItem, targetPath: sourceURL.path, destinationPath: destinationURL.path)) + + XCTAssertTrue(result.success) + XCTAssertEqual(result.resolvedPath, destinationURL.path) + XCTAssertTrue(FileManager.default.fileExists(atPath: destinationURL.path)) + } + + func testRemoveLaunchServiceRejectsNonPlistPath() throws { + let root = makeAllowedRoot() + let fileURL = root.appendingPathComponent("not-a-plist.txt") + try FileManager.default.createDirectory(at: root, withIntermediateDirectories: true) + try Data("sample".utf8).write(to: fileURL) + + let executor = AtlasPrivilegedHelperActionExecutor(allowedRoots: [root.path]) + + XCTAssertThrowsError(try executor.perform(AtlasHelperAction(kind: .removeLaunchService, targetPath: fileURL.path))) + } + + private func makeAllowedRoot() -> URL { + FileManager.default.temporaryDirectory + .appendingPathComponent(UUID().uuidString, isDirectory: true) + } +} diff --git a/Helpers/Package.swift b/Helpers/Package.swift new file mode 100644 index 0000000..8d8ef78 --- /dev/null +++ b/Helpers/Package.swift @@ -0,0 +1,36 @@ +// swift-tools-version: 5.10 +import PackageDescription + +let package = Package( + name: "AtlasHelpers", + platforms: [.macOS(.v14)], + products: [ + .library(name: "AtlasPrivilegedHelperCore", targets: ["AtlasPrivilegedHelperCore"]), + .executable(name: "AtlasPrivilegedHelper", targets: ["AtlasPrivilegedHelper"]), + ], + dependencies: [ + .package(path: "../Packages"), + ], + targets: [ + .target( + name: "AtlasPrivilegedHelperCore", + dependencies: [ + .product(name: "AtlasProtocol", package: "Packages"), + ], + path: "AtlasPrivilegedHelper/Sources/AtlasPrivilegedHelperCore" + ), + .executableTarget( + name: "AtlasPrivilegedHelper", + dependencies: [ + "AtlasPrivilegedHelperCore", + .product(name: "AtlasProtocol", package: "Packages"), + ], + path: "AtlasPrivilegedHelper/Sources/AtlasPrivilegedHelper" + ), + .testTarget( + name: "AtlasPrivilegedHelperTests", + dependencies: ["AtlasPrivilegedHelperCore", .product(name: "AtlasProtocol", package: "Packages")], + path: "AtlasPrivilegedHelper/Tests/AtlasPrivilegedHelperTests" + ), + ] +) diff --git a/Helpers/README.md b/Helpers/README.md new file mode 100644 index 0000000..964a8fb --- /dev/null +++ b/Helpers/README.md @@ -0,0 +1,9 @@ +# Helpers + +This directory contains helper targets for Atlas for Mac. + +## Current Entry + +- `AtlasPrivilegedHelper/` contains the allowlisted helper executable. +- The helper accepts structured JSON actions and validates target paths before execution. +- `Package.swift` exposes the helper as a SwiftPM executable target for local development and packaging. diff --git a/MenuBar/AtlasMenuBar/README.md b/MenuBar/AtlasMenuBar/README.md new file mode 100644 index 0000000..d844867 --- /dev/null +++ b/MenuBar/AtlasMenuBar/README.md @@ -0,0 +1,10 @@ +# AtlasMenuBar + +## Responsibility + +- Menu-bar entry point for P1 +- Lightweight health summary and quick-entry actions + +## Status + +- Deferred until after MVP diff --git a/MenuBar/README.md b/MenuBar/README.md new file mode 100644 index 0000000..594814e --- /dev/null +++ b/MenuBar/README.md @@ -0,0 +1,3 @@ +# MenuBar + +This directory contains planned menu-bar targets and helpers. diff --git a/Packages/AtlasApplication/README.md b/Packages/AtlasApplication/README.md new file mode 100644 index 0000000..71cbe4f --- /dev/null +++ b/Packages/AtlasApplication/README.md @@ -0,0 +1,19 @@ +# AtlasApplication + +## Responsibility + +- Use cases and orchestration interfaces +- Structured application-layer coordination between the app shell and worker boundary + +## Planned Use Cases + +- `StartScan` +- `PreviewPlan` +- `ExecutePlan` +- `RestoreItems` +- `InspectPermissions` + +## Current Scaffold + +- `AtlasWorkspaceController` turns structured worker responses into app-facing scan, preview, and permission outputs. +- `AtlasWorkerServing` defines the worker boundary without leaking UI concerns into infrastructure. diff --git a/Packages/AtlasApplication/Sources/AtlasApplication/AtlasApplication.swift b/Packages/AtlasApplication/Sources/AtlasApplication/AtlasApplication.swift new file mode 100644 index 0000000..8e08cba --- /dev/null +++ b/Packages/AtlasApplication/Sources/AtlasApplication/AtlasApplication.swift @@ -0,0 +1,549 @@ +import AtlasDomain +import AtlasProtocol +import Foundation + +public struct AtlasWorkspaceSnapshot: Codable, Hashable, Sendable { + public var reclaimableSpaceBytes: Int64 + public var findings: [Finding] + public var apps: [AppFootprint] + public var taskRuns: [TaskRun] + public var recoveryItems: [RecoveryItem] + public var permissions: [PermissionState] + public var healthSnapshot: AtlasHealthSnapshot? + + public init( + reclaimableSpaceBytes: Int64, + findings: [Finding], + apps: [AppFootprint], + taskRuns: [TaskRun], + recoveryItems: [RecoveryItem], + permissions: [PermissionState], + healthSnapshot: AtlasHealthSnapshot? = nil + ) { + self.reclaimableSpaceBytes = reclaimableSpaceBytes + self.findings = findings + self.apps = apps + self.taskRuns = taskRuns + self.recoveryItems = recoveryItems + self.permissions = permissions + self.healthSnapshot = healthSnapshot + } +} + +public struct AtlasWorkspaceState: Codable, Hashable, Sendable { + public var snapshot: AtlasWorkspaceSnapshot + public var currentPlan: ActionPlan + public var settings: AtlasSettings + + public init(snapshot: AtlasWorkspaceSnapshot, currentPlan: ActionPlan, settings: AtlasSettings) { + self.snapshot = snapshot + self.currentPlan = currentPlan + self.settings = settings + } +} + +public enum AtlasScaffoldWorkspace { + public static func state(language: AtlasLanguage = AtlasL10n.currentLanguage) -> AtlasWorkspaceState { + let snapshot = AtlasWorkspaceSnapshot( + reclaimableSpaceBytes: AtlasScaffoldFixtures.findings(language: language).map(\.bytes).reduce(0, +), + findings: AtlasScaffoldFixtures.findings(language: language), + apps: AtlasScaffoldFixtures.apps, + taskRuns: AtlasScaffoldFixtures.taskRuns(language: language), + recoveryItems: AtlasScaffoldFixtures.recoveryItems(language: language), + permissions: AtlasScaffoldFixtures.permissions(language: language), + healthSnapshot: AtlasScaffoldFixtures.healthSnapshot(language: language) + ) + + return AtlasWorkspaceState( + snapshot: snapshot, + currentPlan: makeInitialPlan(from: snapshot.findings), + settings: AtlasScaffoldFixtures.settings(language: language) + ) + } + + public static func snapshot(language: AtlasLanguage = AtlasL10n.currentLanguage) -> AtlasWorkspaceSnapshot { + state(language: language).snapshot + } + + private static func makeInitialPlan(from findings: [Finding]) -> ActionPlan { + let items = findings.map { finding in + ActionItem( + id: finding.id, + title: finding.risk == .advanced + ? AtlasL10n.string("application.plan.inspectPrivileged", finding.title) + : AtlasL10n.string("application.plan.reviewFinding", finding.title), + detail: finding.detail, + kind: finding.category == "Apps" ? .removeApp : (finding.risk == .advanced ? .inspectPermission : .removeCache), + recoverable: finding.risk != .advanced + ) + } + + let titleKey = findings.count == 1 ? "application.plan.reviewSelected.one" : "application.plan.reviewSelected.other" + return ActionPlan( + title: AtlasL10n.string(titleKey, findings.count), + items: items, + estimatedBytes: findings.map(\.bytes).reduce(0, +) + ) + } +} + +public protocol AtlasHealthSnapshotProviding: Sendable { + func collectHealthSnapshot() async throws -> AtlasHealthSnapshot +} + +public struct AtlasSmartCleanScanResult: Codable, Hashable, Sendable { + public var findings: [Finding] + public var summary: String + + public init(findings: [Finding], summary: String) { + self.findings = findings + self.summary = summary + } +} + +public protocol AtlasSmartCleanScanProviding: Sendable { + func collectSmartCleanScan() async throws -> AtlasSmartCleanScanResult +} + + +public protocol AtlasAppInventoryProviding: Sendable { + func collectInstalledApps() async throws -> [AppFootprint] +} + +public struct AtlasWorkerCommandResult: Codable, Hashable, Sendable { + public var request: AtlasRequestEnvelope + public var response: AtlasResponseEnvelope + public var events: [AtlasEventEnvelope] + public var snapshot: AtlasWorkspaceSnapshot + public var previewPlan: ActionPlan? + + public init( + request: AtlasRequestEnvelope, + response: AtlasResponseEnvelope, + events: [AtlasEventEnvelope], + snapshot: AtlasWorkspaceSnapshot, + previewPlan: ActionPlan? = nil + ) { + self.request = request + self.response = response + self.events = events + self.snapshot = snapshot + self.previewPlan = previewPlan + } +} + +public protocol AtlasWorkerServing: Sendable { + func submit(_ request: AtlasRequestEnvelope) async throws -> AtlasWorkerCommandResult +} + +public enum AtlasWorkspaceControllerError: LocalizedError, Sendable { + case rejected(code: AtlasProtocolErrorCode, reason: String) + case unexpectedResponse(String) + + public var errorDescription: String? { + switch self { + case let .rejected(code, reason): + return AtlasL10n.string("application.error.workerRejected", code.rawValue, reason) + case let .unexpectedResponse(reason): + return reason + } + } +} + +public struct AtlasPermissionInspectionOutput: Sendable { + public var snapshot: AtlasWorkspaceSnapshot + public var events: [AtlasEventEnvelope] + + public init(snapshot: AtlasWorkspaceSnapshot, events: [AtlasEventEnvelope]) { + self.snapshot = snapshot + self.events = events + } +} + +public struct AtlasHealthSnapshotOutput: Sendable { + public var snapshot: AtlasWorkspaceSnapshot + public var healthSnapshot: AtlasHealthSnapshot + + public init(snapshot: AtlasWorkspaceSnapshot, healthSnapshot: AtlasHealthSnapshot) { + self.snapshot = snapshot + self.healthSnapshot = healthSnapshot + } +} + +public struct AtlasPlanPreviewOutput: Sendable { + public var snapshot: AtlasWorkspaceSnapshot + public var actionPlan: ActionPlan + public var summary: String + + public init(snapshot: AtlasWorkspaceSnapshot, actionPlan: ActionPlan, summary: String) { + self.snapshot = snapshot + self.actionPlan = actionPlan + self.summary = summary + } +} + +public struct AtlasScanOutput: Sendable { + public var snapshot: AtlasWorkspaceSnapshot + public var actionPlan: ActionPlan? + public var events: [AtlasEventEnvelope] + public var progressFraction: Double + public var summary: String + + public init( + snapshot: AtlasWorkspaceSnapshot, + actionPlan: ActionPlan?, + events: [AtlasEventEnvelope], + progressFraction: Double, + summary: String + ) { + self.snapshot = snapshot + self.actionPlan = actionPlan + self.events = events + self.progressFraction = progressFraction + self.summary = summary + } +} + +public struct AtlasTaskActionOutput: Sendable { + public var snapshot: AtlasWorkspaceSnapshot + public var events: [AtlasEventEnvelope] + public var progressFraction: Double + public var summary: String + + public init( + snapshot: AtlasWorkspaceSnapshot, + events: [AtlasEventEnvelope], + progressFraction: Double, + summary: String + ) { + self.snapshot = snapshot + self.events = events + self.progressFraction = progressFraction + self.summary = summary + } +} + +public struct AtlasAppsOutput: Sendable { + public var snapshot: AtlasWorkspaceSnapshot + public var apps: [AppFootprint] + public var summary: String + + public init(snapshot: AtlasWorkspaceSnapshot, apps: [AppFootprint], summary: String) { + self.snapshot = snapshot + self.apps = apps + self.summary = summary + } +} + +public struct AtlasSettingsOutput: Sendable { + public var settings: AtlasSettings + + public init(settings: AtlasSettings) { + self.settings = settings + } +} + +public struct AtlasWorkspaceController: Sendable { + private let worker: any AtlasWorkerServing + + public init(worker: any AtlasWorkerServing) { + self.worker = worker + } + + public func healthSnapshot() async throws -> AtlasHealthSnapshotOutput { + let request = HealthSnapshotUseCase().makeRequest() + let result = try await worker.submit(request) + + switch result.response.response { + case let .health(healthSnapshot): + return AtlasHealthSnapshotOutput(snapshot: result.snapshot, healthSnapshot: healthSnapshot) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected health response for healthSnapshot.") + } + } + + public func inspectPermissions() async throws -> AtlasPermissionInspectionOutput { + let request = InspectPermissionsUseCase().makeRequest() + let result = try await worker.submit(request) + + switch result.response.response { + case .permissions: + return AtlasPermissionInspectionOutput(snapshot: result.snapshot, events: result.events) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected permissions response for inspectPermissions.") + } + } + + public func startScan(taskID: UUID = UUID()) async throws -> AtlasScanOutput { + let request = StartScanUseCase().makeRequest(taskID: taskID) + let result = try await worker.submit(request) + + switch result.response.response { + case .accepted: + return AtlasScanOutput( + snapshot: result.snapshot, + actionPlan: result.previewPlan, + events: result.events, + progressFraction: progressFraction(from: result.events), + summary: summary(from: result.events, fallback: AtlasL10n.string("application.scan.completed")) + ) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected accepted response for startScan.") + } + } + + public func previewPlan(findingIDs: [UUID]) async throws -> AtlasPlanPreviewOutput { + let request = PreviewPlanUseCase().makeRequest(findingIDs: findingIDs) + let result = try await worker.submit(request) + + switch result.response.response { + case let .preview(plan): + return AtlasPlanPreviewOutput( + snapshot: result.snapshot, + actionPlan: plan, + summary: AtlasL10n.string(plan.items.count == 1 ? "application.preview.updated.one" : "application.preview.updated.other", plan.items.count) + ) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected preview response for previewPlan.") + } + } + + public func executePlan(planID: UUID) async throws -> AtlasTaskActionOutput { + let request = ExecutePlanUseCase().makeRequest(planID: planID) + let result = try await worker.submit(request) + + switch result.response.response { + case .accepted: + return AtlasTaskActionOutput( + snapshot: result.snapshot, + events: result.events, + progressFraction: progressFraction(from: result.events), + summary: summary(from: result.events, fallback: AtlasL10n.string("application.plan.executed")) + ) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected accepted response for executePlan.") + } + } + + public func restoreItems(taskID: UUID = UUID(), itemIDs: [UUID]) async throws -> AtlasTaskActionOutput { + let request = RestoreItemsUseCase().makeRequest(taskID: taskID, itemIDs: itemIDs) + let result = try await worker.submit(request) + + switch result.response.response { + case .accepted: + return AtlasTaskActionOutput( + snapshot: result.snapshot, + events: result.events, + progressFraction: progressFraction(from: result.events), + summary: summary(from: result.events, fallback: AtlasL10n.string("application.recovery.completed")) + ) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected accepted response for restoreItems.") + } + } + + public func listApps() async throws -> AtlasAppsOutput { + let request = AppsListUseCase().makeRequest() + let result = try await worker.submit(request) + + switch result.response.response { + case let .apps(apps): + return AtlasAppsOutput( + snapshot: result.snapshot, + apps: apps, + summary: AtlasL10n.string(apps.count == 1 ? "application.apps.loaded.one" : "application.apps.loaded.other", apps.count) + ) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected apps response for listApps.") + } + } + + public func previewAppUninstall(appID: UUID) async throws -> AtlasPlanPreviewOutput { + let request = PreviewAppUninstallUseCase().makeRequest(appID: appID) + let result = try await worker.submit(request) + + switch result.response.response { + case let .preview(plan): + return AtlasPlanPreviewOutput( + snapshot: result.snapshot, + actionPlan: plan, + summary: AtlasL10n.string("application.apps.previewUpdated", plan.title) + ) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected preview response for previewAppUninstall.") + } + } + + public func executeAppUninstall(appID: UUID) async throws -> AtlasTaskActionOutput { + let request = ExecuteAppUninstallUseCase().makeRequest(appID: appID) + let result = try await worker.submit(request) + + switch result.response.response { + case .accepted: + return AtlasTaskActionOutput( + snapshot: result.snapshot, + events: result.events, + progressFraction: progressFraction(from: result.events), + summary: summary(from: result.events, fallback: AtlasL10n.string("application.apps.uninstallCompleted")) + ) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected accepted response for executeAppUninstall.") + } + } + + public func settings() async throws -> AtlasSettingsOutput { + let request = SettingsGetUseCase().makeRequest() + let result = try await worker.submit(request) + + switch result.response.response { + case let .settings(settings): + return AtlasSettingsOutput(settings: settings) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected settings response for settings.") + } + } + + public func updateSettings(_ settings: AtlasSettings) async throws -> AtlasSettingsOutput { + let request = SettingsSetUseCase().makeRequest(settings: settings) + let result = try await worker.submit(request) + + switch result.response.response { + case let .settings(settings): + return AtlasSettingsOutput(settings: settings) + case let .rejected(code, reason): + throw AtlasWorkspaceControllerError.rejected(code: code, reason: reason) + default: + throw AtlasWorkspaceControllerError.unexpectedResponse("Expected settings response for updateSettings.") + } + } + + private func progressFraction(from events: [AtlasEventEnvelope]) -> Double { + let fractions = events.compactMap { event -> Double? in + guard case let .taskProgress(_, completed, total) = event.event, total > 0 else { + return nil + } + + return Double(completed) / Double(total) + } + + return fractions.last ?? 0 + } + + private func summary(from events: [AtlasEventEnvelope], fallback: String) -> String { + for event in events.reversed() { + if case let .taskFinished(taskRun) = event.event { + return taskRun.summary + } + } + + return fallback + } +} + +public struct HealthSnapshotUseCase: Sendable { + public init() {} + + public func makeRequest() -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .healthSnapshot) + } +} + +public struct StartScanUseCase: Sendable { + public init() {} + + public func makeRequest(taskID: UUID = UUID()) -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .startScan(taskID: taskID)) + } +} + +public struct InspectPermissionsUseCase: Sendable { + public init() {} + + public func makeRequest() -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .inspectPermissions) + } +} + +public struct PreviewPlanUseCase: Sendable { + public init() {} + + public func makeRequest(taskID: UUID = UUID(), findingIDs: [UUID]) -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .previewPlan(taskID: taskID, findingIDs: findingIDs)) + } +} + +public struct ExecutePlanUseCase: Sendable { + public init() {} + + public func makeRequest(planID: UUID) -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .executePlan(planID: planID)) + } +} + +public struct RestoreItemsUseCase: Sendable { + public init() {} + + public func makeRequest(taskID: UUID = UUID(), itemIDs: [UUID]) -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .restoreItems(taskID: taskID, itemIDs: itemIDs)) + } +} + +public struct AppsListUseCase: Sendable { + public init() {} + + public func makeRequest() -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .appsList) + } +} + +public struct PreviewAppUninstallUseCase: Sendable { + public init() {} + + public func makeRequest(appID: UUID) -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .previewAppUninstall(appID: appID)) + } +} + +public struct ExecuteAppUninstallUseCase: Sendable { + public init() {} + + public func makeRequest(appID: UUID) -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .executeAppUninstall(appID: appID)) + } +} + +public struct SettingsGetUseCase: Sendable { + public init() {} + + public func makeRequest() -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .settingsGet) + } +} + +public struct SettingsSetUseCase: Sendable { + public init() {} + + public func makeRequest(settings: AtlasSettings) -> AtlasRequestEnvelope { + AtlasRequestEnvelope(command: .settingsSet(settings)) + } +} diff --git a/Packages/AtlasApplication/Tests/AtlasApplicationTests/AtlasApplicationTests.swift b/Packages/AtlasApplication/Tests/AtlasApplicationTests/AtlasApplicationTests.swift new file mode 100644 index 0000000..1fe85b1 --- /dev/null +++ b/Packages/AtlasApplication/Tests/AtlasApplicationTests/AtlasApplicationTests.swift @@ -0,0 +1,177 @@ +import XCTest +@testable import AtlasApplication +import AtlasDomain +import AtlasProtocol + +final class AtlasApplicationTests: XCTestCase { + func testStartScanUsesWorkerEventsToBuildProgressAndSummary() async throws { + let taskID = UUID(uuidString: "20000000-0000-0000-0000-000000000001") ?? UUID() + let request = AtlasRequestEnvelope(command: .startScan(taskID: taskID)) + let finishedRun = TaskRun( + id: taskID, + kind: .scan, + status: .completed, + summary: "Scanned 4 finding groups and prepared a Smart Clean preview.", + startedAt: request.issuedAt, + finishedAt: Date() + ) + let result = AtlasWorkerCommandResult( + request: request, + response: AtlasResponseEnvelope( + requestID: request.id, + response: .accepted(task: AtlasTaskDescriptor(taskID: taskID, kind: .scan)) + ), + events: [ + AtlasEventEnvelope(event: .taskProgress(taskID: taskID, completed: 1, total: 4)), + AtlasEventEnvelope(event: .taskProgress(taskID: taskID, completed: 4, total: 4)), + AtlasEventEnvelope(event: .taskFinished(finishedRun)), + ], + snapshot: AtlasScaffoldWorkspace.snapshot(), + previewPlan: AtlasScaffoldWorkspace.state().currentPlan + ) + let controller = AtlasWorkspaceController(worker: FakeWorker(result: result)) + + let output = try await controller.startScan(taskID: taskID) + + XCTAssertEqual(output.progressFraction, 1) + XCTAssertEqual(output.summary, finishedRun.summary) + XCTAssertEqual(output.actionPlan?.items.count, AtlasScaffoldWorkspace.state().currentPlan.items.count) + XCTAssertEqual(output.snapshot.findings.count, AtlasScaffoldWorkspace.snapshot().findings.count) + } + + func testPreviewPlanReturnsStructuredPlanFromWorkerResponse() async throws { + let plan = AtlasScaffoldWorkspace.state().currentPlan + let request = AtlasRequestEnvelope(command: .previewPlan(taskID: UUID(), findingIDs: AtlasScaffoldFixtures.findings.map(\.id))) + let result = AtlasWorkerCommandResult( + request: request, + response: AtlasResponseEnvelope(requestID: request.id, response: .preview(plan)), + events: [], + snapshot: AtlasScaffoldWorkspace.snapshot(), + previewPlan: plan + ) + let controller = AtlasWorkspaceController(worker: FakeWorker(result: result)) + + let output = try await controller.previewPlan(findingIDs: AtlasScaffoldFixtures.findings.map(\.id)) + + XCTAssertEqual(output.actionPlan.title, plan.title) + XCTAssertEqual(output.actionPlan.estimatedBytes, plan.estimatedBytes) + } + + func testExecutePlanUsesWorkerEventsToBuildSummary() async throws { + let plan = AtlasScaffoldWorkspace.state().currentPlan + let taskID = UUID(uuidString: "20000000-0000-0000-0000-000000000002") ?? UUID() + let request = AtlasRequestEnvelope(command: .executePlan(planID: plan.id)) + let finishedRun = TaskRun( + id: taskID, + kind: .executePlan, + status: .completed, + summary: "Moved 2 Smart Clean items into recovery.", + startedAt: request.issuedAt, + finishedAt: Date() + ) + let result = AtlasWorkerCommandResult( + request: request, + response: AtlasResponseEnvelope( + requestID: request.id, + response: .accepted(task: AtlasTaskDescriptor(taskID: taskID, kind: .executePlan)) + ), + events: [ + AtlasEventEnvelope(event: .taskProgress(taskID: taskID, completed: 1, total: 3)), + AtlasEventEnvelope(event: .taskProgress(taskID: taskID, completed: 3, total: 3)), + AtlasEventEnvelope(event: .taskFinished(finishedRun)), + ], + snapshot: AtlasScaffoldWorkspace.snapshot(), + previewPlan: nil + ) + let controller = AtlasWorkspaceController(worker: FakeWorker(result: result)) + + let output = try await controller.executePlan(planID: plan.id) + + XCTAssertEqual(output.progressFraction, 1) + XCTAssertEqual(output.summary, finishedRun.summary) + } + + func testListAppsReturnsStructuredAppFootprints() async throws { + let apps = AtlasScaffoldFixtures.apps + let request = AtlasRequestEnvelope(command: .appsList) + let result = AtlasWorkerCommandResult( + request: request, + response: AtlasResponseEnvelope(requestID: request.id, response: .apps(apps)), + events: [], + snapshot: AtlasScaffoldWorkspace.snapshot(), + previewPlan: nil + ) + let controller = AtlasWorkspaceController(worker: FakeWorker(result: result)) + + let output = try await controller.listApps() + + XCTAssertEqual(output.apps.count, apps.count) + XCTAssertEqual(output.snapshot.apps.count, apps.count) + } + + func testSettingsUpdateReturnsStructuredSettings() async throws { + var updated = AtlasScaffoldFixtures.settings + updated.recoveryRetentionDays = 14 + let request = AtlasRequestEnvelope(command: .settingsSet(updated)) + let result = AtlasWorkerCommandResult( + request: request, + response: AtlasResponseEnvelope(requestID: request.id, response: .settings(updated)), + events: [], + snapshot: AtlasScaffoldWorkspace.snapshot(), + previewPlan: nil + ) + let controller = AtlasWorkspaceController(worker: FakeWorker(result: result)) + + let output = try await controller.updateSettings(updated) + + XCTAssertEqual(output.settings.recoveryRetentionDays, 14) + } + + func testHealthSnapshotReturnsStructuredOverviewData() async throws { + let healthSnapshot = AtlasScaffoldFixtures.healthSnapshot + let request = AtlasRequestEnvelope(command: .healthSnapshot) + let result = AtlasWorkerCommandResult( + request: request, + response: AtlasResponseEnvelope(requestID: request.id, response: .health(healthSnapshot)), + events: [], + snapshot: AtlasScaffoldWorkspace.snapshot(), + previewPlan: nil + ) + let controller = AtlasWorkspaceController(worker: FakeWorker(result: result)) + + let output = try await controller.healthSnapshot() + + XCTAssertEqual(output.healthSnapshot.optimizations.count, healthSnapshot.optimizations.count) + XCTAssertEqual(output.healthSnapshot.diskUsedPercent, healthSnapshot.diskUsedPercent) + } + + func testInspectPermissionsPropagatesUpdatedSnapshot() async throws { + let permissions = AtlasScaffoldFixtures.permissions + let request = AtlasRequestEnvelope(command: .inspectPermissions) + let result = AtlasWorkerCommandResult( + request: request, + response: AtlasResponseEnvelope(requestID: request.id, response: .permissions(permissions)), + events: permissions.map { AtlasEventEnvelope(event: .permissionUpdated($0)) }, + snapshot: AtlasScaffoldWorkspace.snapshot(), + previewPlan: nil + ) + let controller = AtlasWorkspaceController(worker: FakeWorker(result: result)) + + let output = try await controller.inspectPermissions() + + XCTAssertEqual(output.snapshot.permissions.count, permissions.count) + XCTAssertEqual(output.events.count, permissions.count) + } +} + +private actor FakeWorker: AtlasWorkerServing { + let result: AtlasWorkerCommandResult + + init(result: AtlasWorkerCommandResult) { + self.result = result + } + + func submit(_ request: AtlasRequestEnvelope) async throws -> AtlasWorkerCommandResult { + result + } +} diff --git a/Packages/AtlasCoreAdapters/README.md b/Packages/AtlasCoreAdapters/README.md new file mode 100644 index 0000000..f5181d6 --- /dev/null +++ b/Packages/AtlasCoreAdapters/README.md @@ -0,0 +1,11 @@ +# AtlasCoreAdapters + +## Responsibility + +- Wrap reusable upstream and local system capabilities behind structured interfaces + +## Current Adapters + +- `MoleHealthAdapter` wraps `lib/check/health_json.sh` and returns structured overview health data. +- `MoleSmartCleanAdapter` wraps `bin/clean.sh --dry-run` behind a temporary state directory and parses reclaimable findings for Smart Clean. +- `MacAppsInventoryAdapter` scans local application bundles, estimates footprint size, and derives leftover counts for the `Apps` MVP workflow. diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/AtlasCoreAdapters.swift b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/AtlasCoreAdapters.swift new file mode 100644 index 0000000..f2cd7e6 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/AtlasCoreAdapters.swift @@ -0,0 +1,44 @@ +import AtlasInfrastructure +import AtlasProtocol +import Foundation + +public struct AtlasLegacyAdapterDescriptor: Identifiable, Hashable, Sendable { + public var id: String { name } + public var name: String + public var capability: String + public var status: AtlasCapabilityStatus + + public init(name: String, capability: String, status: AtlasCapabilityStatus) { + self.name = name + self.capability = capability + self.status = status + } +} + +public enum AtlasCoreAdapterCatalog { + public static func defaultDescriptors( + status: AtlasCapabilityStatus = AtlasCapabilityStatus() + ) -> [AtlasLegacyAdapterDescriptor] { + [ + AtlasLegacyAdapterDescriptor( + name: "MoleScanAdapter", + capability: "Structured Smart Clean scanning bridge", + status: status + ), + AtlasLegacyAdapterDescriptor( + name: "MoleAppsAdapter", + capability: "Installed apps and leftovers inspection bridge", + status: status + ), + AtlasLegacyAdapterDescriptor( + name: "MoleStatusAdapter", + capability: "Overview health and diagnostics bridge", + status: status + ), + ] + } + + public static func bootstrapEvent(taskID: UUID = UUID()) -> AtlasEventEnvelope { + AtlasEventEnvelope(event: .taskProgress(taskID: taskID, completed: 0, total: 1)) + } +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MacAppsInventoryAdapter.swift b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MacAppsInventoryAdapter.swift new file mode 100644 index 0000000..df91810 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MacAppsInventoryAdapter.swift @@ -0,0 +1,103 @@ +import AtlasApplication +import AtlasDomain +import Foundation + +public struct MacAppsInventoryAdapter: AtlasAppInventoryProviding { + private let searchRoots: [URL] + private let homeDirectoryURL: URL + + public init( + searchRoots: [URL]? = nil, + homeDirectoryURL: URL = FileManager.default.homeDirectoryForCurrentUser + ) { + self.homeDirectoryURL = homeDirectoryURL + self.searchRoots = searchRoots ?? [ + URL(fileURLWithPath: "/Applications", isDirectory: true), + homeDirectoryURL.appendingPathComponent("Applications", isDirectory: true), + ] + } + + public func collectInstalledApps() async throws -> [AppFootprint] { + var apps: [AppFootprint] = [] + var seenPaths = Set() + + for root in searchRoots where FileManager.default.fileExists(atPath: root.path) { + let entries = (try? FileManager.default.contentsOfDirectory( + at: root, + includingPropertiesForKeys: [.isApplicationKey, .isDirectoryKey], + options: [.skipsHiddenFiles] + )) ?? [] + + for entry in entries where entry.pathExtension == "app" { + let standardizedPath = entry.resolvingSymlinksInPath().path + guard seenPaths.insert(standardizedPath).inserted else { continue } + if let app = makeAppFootprint(for: entry) { + apps.append(app) + } + } + } + + return apps.sorted { lhs, rhs in + if lhs.bytes == rhs.bytes { + return lhs.name.localizedCaseInsensitiveCompare(rhs.name) == .orderedAscending + } + return lhs.bytes > rhs.bytes + } + } + + private func makeAppFootprint(for appURL: URL) -> AppFootprint? { + guard let bundle = Bundle(url: appURL) else { return nil } + + let name = bundle.object(forInfoDictionaryKey: "CFBundleDisplayName") as? String + ?? bundle.object(forInfoDictionaryKey: "CFBundleName") as? String + ?? appURL.deletingPathExtension().lastPathComponent + + let bundleIdentifier = bundle.bundleIdentifier ?? "unknown.\(name.replacingOccurrences(of: " ", with: "-").lowercased())" + let bytes = allocatedSize(for: appURL) + let leftoverItems = leftoverPaths(for: name, bundleIdentifier: bundleIdentifier).filter { + FileManager.default.fileExists(atPath: $0.path) + }.count + + return AppFootprint( + name: name, + bundleIdentifier: bundleIdentifier, + bundlePath: appURL.path, + bytes: bytes, + leftoverItems: leftoverItems + ) + } + + private func leftoverPaths(for appName: String, bundleIdentifier: String) -> [URL] { + [ + homeDirectoryURL.appendingPathComponent("Library/Application Support/\(appName)", isDirectory: true), + homeDirectoryURL.appendingPathComponent("Library/Application Support/\(bundleIdentifier)", isDirectory: true), + homeDirectoryURL.appendingPathComponent("Library/Caches/\(bundleIdentifier)", isDirectory: true), + homeDirectoryURL.appendingPathComponent("Library/Containers/\(bundleIdentifier)", isDirectory: true), + homeDirectoryURL.appendingPathComponent("Library/Preferences/\(bundleIdentifier).plist"), + homeDirectoryURL.appendingPathComponent("Library/Saved Application State/\(bundleIdentifier).savedState", isDirectory: true), + homeDirectoryURL.appendingPathComponent("Library/LaunchAgents/\(bundleIdentifier).plist"), + ] + } + + private func allocatedSize(for url: URL) -> Int64 { + if let values = try? url.resourceValues(forKeys: [.totalFileAllocatedSizeKey, .fileAllocatedSizeKey]), + let size = values.totalFileAllocatedSize ?? values.fileAllocatedSize { + return Int64(size) + } + + var total: Int64 = 0 + if let enumerator = FileManager.default.enumerator( + at: url, + includingPropertiesForKeys: [.isRegularFileKey, .totalFileAllocatedSizeKey, .fileAllocatedSizeKey], + options: [.skipsHiddenFiles] + ) { + for case let fileURL as URL in enumerator { + let values = try? fileURL.resourceValues(forKeys: [.isRegularFileKey, .totalFileAllocatedSizeKey, .fileAllocatedSizeKey]) + guard values?.isRegularFile == true else { continue } + let size = values?.totalFileAllocatedSize ?? values?.fileAllocatedSize ?? 0 + total += Int64(size) + } + } + return total + } +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MoleHealthAdapter.swift b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MoleHealthAdapter.swift new file mode 100644 index 0000000..3dd8d6a --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MoleHealthAdapter.swift @@ -0,0 +1,109 @@ +import AtlasApplication +import AtlasDomain +import Foundation + +public struct MoleHealthAdapter: AtlasHealthSnapshotProviding { + private let scriptURL: URL + private let decoder = JSONDecoder() + + public init(scriptURL: URL? = nil) { + self.scriptURL = scriptURL ?? Self.defaultScriptURL + } + + public func collectHealthSnapshot() async throws -> AtlasHealthSnapshot { + let output = try runHealthScript() + let payload = try decoder.decode(HealthJSONPayload.self, from: output) + return payload.atlasSnapshot + } + + private func runHealthScript() throws -> Data { + let process = Process() + process.executableURL = URL(fileURLWithPath: "/bin/bash") + process.arguments = [scriptURL.path] + + let stdout = Pipe() + let stderr = Pipe() + process.standardOutput = stdout + process.standardError = stderr + + try process.run() + process.waitUntilExit() + + let errorData = stderr.fileHandleForReading.readDataToEndOfFile() + guard process.terminationStatus == 0 else { + let message = String(data: errorData, encoding: .utf8) ?? "unknown error" + throw MoleHealthAdapterError.commandFailed(message) + } + + return stdout.fileHandleForReading.readDataToEndOfFile() + } + + private static var defaultScriptURL: URL { + MoleRuntimeLocator.url(for: "lib/check/health_json.sh") + } +} + +private enum MoleHealthAdapterError: LocalizedError { + case commandFailed(String) + + var errorDescription: String? { + switch self { + case let .commandFailed(message): + return "Mole health adapter failed: \(message)" + } + } +} + +private struct HealthJSONPayload: Decodable { + let memoryUsedGB: Double + let memoryTotalGB: Double + let diskUsedGB: Double + let diskTotalGB: Double + let diskUsedPercent: Double + let uptimeDays: Double + let optimizations: [OptimizationPayload] + + enum CodingKeys: String, CodingKey { + case memoryUsedGB = "memory_used_gb" + case memoryTotalGB = "memory_total_gb" + case diskUsedGB = "disk_used_gb" + case diskTotalGB = "disk_total_gb" + case diskUsedPercent = "disk_used_percent" + case uptimeDays = "uptime_days" + case optimizations + } + + var atlasSnapshot: AtlasHealthSnapshot { + let fallbackMemoryTotalGB = Double(ProcessInfo.processInfo.physicalMemory) / (1024 * 1024 * 1024) + let normalizedMemoryTotalGB = memoryTotalGB > 0 ? memoryTotalGB : fallbackMemoryTotalGB + let normalizedUptimeDays = uptimeDays > 0 ? uptimeDays : (ProcessInfo.processInfo.systemUptime / 86_400) + + return AtlasHealthSnapshot( + memoryUsedGB: memoryUsedGB, + memoryTotalGB: normalizedMemoryTotalGB, + diskUsedGB: diskUsedGB, + diskTotalGB: diskTotalGB, + diskUsedPercent: diskUsedPercent, + uptimeDays: normalizedUptimeDays, + optimizations: optimizations.map(\.atlasOptimization) + ) + } +} + +private struct OptimizationPayload: Decodable { + let category: String + let name: String + let description: String + let action: String + let safe: Bool + + var atlasOptimization: AtlasOptimizationRecommendation { + AtlasOptimizationRecommendation( + category: category, + name: name, + detail: description, + action: action, + isSafe: safe + ) + } +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MoleRuntimeLocator.swift b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MoleRuntimeLocator.swift new file mode 100644 index 0000000..9ee3d82 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MoleRuntimeLocator.swift @@ -0,0 +1,22 @@ +import Foundation + +enum MoleRuntimeLocator { + static func runtimeURL() -> URL { + if let bundled = Bundle.module.resourceURL?.appendingPathComponent("MoleRuntime", isDirectory: true), + FileManager.default.fileExists(atPath: bundled.path) { + return bundled + } + + let sourceURL = URL(fileURLWithPath: #filePath) + return sourceURL + .deletingLastPathComponent() + .deletingLastPathComponent() + .deletingLastPathComponent() + .deletingLastPathComponent() + .deletingLastPathComponent() + } + + static func url(for relativePath: String) -> URL { + runtimeURL().appendingPathComponent(relativePath) + } +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MoleSmartCleanAdapter.swift b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MoleSmartCleanAdapter.swift new file mode 100644 index 0000000..06ed691 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/MoleSmartCleanAdapter.swift @@ -0,0 +1,331 @@ +import AtlasApplication +import AtlasDomain +import Foundation + +public struct MoleSmartCleanAdapter: AtlasSmartCleanScanProviding { + private let cleanScriptURL: URL + + public init(cleanScriptURL: URL? = nil) { + self.cleanScriptURL = cleanScriptURL ?? Self.defaultCleanScriptURL + } + + public func collectSmartCleanScan() async throws -> AtlasSmartCleanScanResult { + let stateDirectory = URL(fileURLWithPath: NSTemporaryDirectory()) + .appendingPathComponent("atlas-smart-clean-\(UUID().uuidString)", isDirectory: true) + try FileManager.default.createDirectory(at: stateDirectory, withIntermediateDirectories: true) + + let exportFileURL = stateDirectory.appendingPathComponent("clean-list.txt") + let detailedExportFileURL = stateDirectory.appendingPathComponent("clean-list-detailed.tsv") + let output = try runDryRun(stateDirectory: stateDirectory, exportFileURL: exportFileURL, detailedExportFileURL: detailedExportFileURL) + let findings = Self.parseDetailedFindings(from: detailedExportFileURL).isEmpty + ? Self.parseFindings(from: output) + : Self.parseDetailedFindings(from: detailedExportFileURL) + let summary = findings.isEmpty + ? "Smart Clean dry run found no reclaimable items from the upstream clean workflow." + : "Smart Clean dry run found \(findings.count) reclaimable item\(findings.count == 1 ? "" : "s")." + return AtlasSmartCleanScanResult(findings: findings, summary: summary) + } + + private func runDryRun(stateDirectory: URL, exportFileURL: URL, detailedExportFileURL: URL) throws -> String { + let process = Process() + process.executableURL = URL(fileURLWithPath: "/bin/bash") + process.arguments = [cleanScriptURL.path, "--dry-run"] + + var environment = ProcessInfo.processInfo.environment + environment["MO_NO_OPLOG"] = "1" + environment["MOLE_STATE_DIR"] = stateDirectory.path + environment["MOLE_EXPORT_LIST_FILE"] = exportFileURL.path + environment["MOLE_DETAILED_EXPORT_FILE"] = detailedExportFileURL.path + process.environment = environment + + let stdout = Pipe() + let stderr = Pipe() + process.standardOutput = stdout + process.standardError = stderr + + try process.run() + process.waitUntilExit() + + let outputData = stdout.fileHandleForReading.readDataToEndOfFile() + let errorData = stderr.fileHandleForReading.readDataToEndOfFile() + let output = String(data: outputData, encoding: .utf8) ?? "" + + guard process.terminationStatus == 0 else { + let message = String(data: errorData, encoding: .utf8) ?? "unknown error" + throw MoleSmartCleanAdapterError.commandFailed(message) + } + + return output + } + + static func parseDetailedFindings(from exportFileURL: URL) -> [Finding] { + guard let content = try? String(contentsOf: exportFileURL), !content.isEmpty else { + return [] + } + + struct Entry { + let section: String + let path: String + let sizeKB: Int64 + } + + let entries: [Entry] = content + .split(whereSeparator: \.isNewline) + .compactMap { rawLine in + let parts = rawLine.split(separator: "\t", omittingEmptySubsequences: false) + guard parts.count == 3 else { return nil } + guard let sizeKB = Int64(parts[2]) else { return nil } + return Entry(section: String(parts[0]), path: String(parts[1]), sizeKB: sizeKB) + } + + guard !entries.isEmpty else { + return [] + } + + let homePath = FileManager.default.homeDirectoryForCurrentUser.path + var parentCounts: [String: Int] = [:] + for entry in entries { + let parentPath = URL(fileURLWithPath: entry.path).deletingLastPathComponent().path + let key = entry.section + "\u{001F}" + parentPath + parentCounts[key, default: 0] += 1 + } + + struct Group { + var section: String + var displayPath: String + var bytes: Int64 + var targetPaths: [String] + var childCount: Int + var order: Int + } + + var groups: [String: Group] = [:] + var order = 0 + + for entry in entries { + let parentPath = URL(fileURLWithPath: entry.path).deletingLastPathComponent().path + let parentKey = entry.section + "\u{001F}" + parentPath + let shouldGroupByParent = parentCounts[parentKey, default: 0] > 1 && parentPath != homePath + let displayPath = shouldGroupByParent ? parentPath : entry.path + let groupKey = entry.section + "\u{001F}" + displayPath + if groups[groupKey] == nil { + groups[groupKey] = Group( + section: entry.section, + displayPath: displayPath, + bytes: 0, + targetPaths: [], + childCount: 0, + order: order + ) + order += 1 + } + groups[groupKey]!.bytes += entry.sizeKB * 1024 + groups[groupKey]!.targetPaths.append(entry.path) + groups[groupKey]!.childCount += 1 + } + + return groups.values + .sorted { lhs, rhs in + if lhs.bytes == rhs.bytes { return lhs.order < rhs.order } + return lhs.bytes > rhs.bytes + } + .map { group in + Finding( + title: makeDetailedTitle(for: group.displayPath, section: group.section), + detail: makeDetailedDetail(for: group.displayPath, section: group.section, childCount: group.childCount), + bytes: group.bytes, + risk: riskLevel(for: group.section, title: group.displayPath), + category: group.section, + targetPaths: group.targetPaths + ) + } + } + + private static func makeDetailedTitle(for displayPath: String, section: String) -> String { + let url = URL(fileURLWithPath: displayPath) + let path = displayPath.lowercased() + let last = url.lastPathComponent + let parent = url.deletingLastPathComponent().lastPathComponent + + if path.contains("/google/chrome/default") { return "Chrome cache" } + if path.contains("component_crx_cache") { return "Chrome component cache" } + if path.contains("googleupdater") { return "Google Updater cache" } + if path.contains("deriveddata") { return "Xcode DerivedData" } + if path.contains("/__pycache__") || last == "__pycache__" { return "Python bytecode cache" } + if path.contains("/.next/cache") { return "Next.js build cache" } + if path.contains("/.npm/") || path.hasSuffix("/.npm") || path.contains("_cacache") { return "npm cache" } + if path.contains("/.npm_cache/_npx") { return "npm npx cache" } + if path.contains("/.npm_cache/_logs") { return "npm logs" } + if path.contains("/.oh-my-zsh/cache") { return "Oh My Zsh cache" } + if last == "Caches" { return section == "User essentials" ? "User app caches" : "Caches" } + if last == "Logs" { return "App logs" } + if last == "Attachments" { return "Messages attachment previews" } + if last == FileManager.default.homeDirectoryForCurrentUser.lastPathComponent { return section } + if last == "Default" && !parent.isEmpty { return parent } + return last.replacingOccurrences(of: "_", with: " ") + } + + private static func makeDetailedDetail(for displayPath: String, section: String, childCount: Int) -> String { + if childCount > 1 { + return "\(displayPath) • \(childCount) items from \(section)" + } + return "\(displayPath) • \(section)" + } + + static func parseFindings(from output: String) -> [Finding] { + let cleanedOutput = stripANSI(from: output) + let lines = cleanedOutput + .split(whereSeparator: \.isNewline) + .map { String($0).trimmingCharacters(in: .whitespacesAndNewlines) } + + var currentSection = "Smart Clean" + var pendingRuntimeVolumeIndex: Int? + var findings: [Finding] = [] + var seenKeys = Set() + + for line in lines where !line.isEmpty { + if line.hasPrefix("➤ ") { + currentSection = String(line.dropFirst(2)).trimmingCharacters(in: .whitespacesAndNewlines) + pendingRuntimeVolumeIndex = nil + continue + } + + if line.hasPrefix("→ ") { + if let finding = makeFinding(from: line, section: currentSection) { + let key = "\(finding.category)|\(finding.title)|\(finding.bytes)" + if seenKeys.insert(key).inserted { + findings.append(finding) + if finding.title == "Xcode runtime volumes" { + pendingRuntimeVolumeIndex = findings.indices.last + } else { + pendingRuntimeVolumeIndex = nil + } + } + } + continue + } + + if line.hasPrefix("• Runtime volumes total:"), let index = pendingRuntimeVolumeIndex, + let bytes = parseRuntimeVolumeUnusedBytes(from: line) { + findings[index].bytes = bytes + findings[index].detail = line + pendingRuntimeVolumeIndex = nil + } + } + + return findings.sorted { lhs, rhs in + if lhs.bytes == rhs.bytes { return lhs.title < rhs.title } + return lhs.bytes > rhs.bytes + } + } + + private static func makeFinding(from line: String, section: String) -> Finding? { + let content = line.replacingOccurrences(of: "→ ", with: "") + let bytes = parseSize(from: content) ?? 0 + let title = normalizeTitle(parseTitle(from: content)) + guard !title.isEmpty else { return nil } + let detail = parseDetail(from: content, fallbackSection: section) + let risk = riskLevel(for: section, title: title) + return Finding(title: title, detail: detail, bytes: bytes, risk: risk, category: section) + } + + + private static func normalizeTitle(_ title: String) -> String { + if title.hasPrefix("Would remove ") { + return String(title.dropFirst("Would remove ".count)).trimmingCharacters(in: .whitespacesAndNewlines) + } + return title + } + + private static func parseTitle(from content: String) -> String { + let separators = [" · ", ","] + for separator in separators { + if let range = content.range(of: separator) { + return String(content[.. String { + if let range = content.range(of: " · ") { + return String(content[range.upperBound...]).trimmingCharacters(in: .whitespacesAndNewlines) + } + if let range = content.range(of: ",") { + return String(content[range.upperBound...]).trimmingCharacters(in: .whitespacesAndNewlines) + } + return "Detected from the upstream \(fallbackSection) dry-run preview." + } + + private static func riskLevel(for section: String, title: String) -> RiskLevel { + let normalized = "\(section) \(title)".lowercased() + if normalized.contains("launch agent") || normalized.contains("system service") || normalized.contains("orphan") { + return .advanced + } + if normalized.contains("application") || normalized.contains("large file") || normalized.contains("device backup") || normalized.contains("runtime") || normalized.contains("simulator") { + return .review + } + return .safe + } + + private static func parseRuntimeVolumeUnusedBytes(from line: String) -> Int64? { + guard let match = line.range(of: #"unused\s+([0-9.]+(?:B|KB|MB|GB|TB))"#, options: .regularExpression) else { + return nil + } + let token = String(line[match]).replacingOccurrences(of: "unused", with: "").trimmingCharacters(in: .whitespaces) + return parseByteCount(token) + } + + private static func parseSize(from content: String) -> Int64? { + if let range = content.range(of: #"([0-9.]+(?:B|KB|MB|GB|TB))\s+dry"#, options: .regularExpression) { + let token = String(content[range]).replacingOccurrences(of: "dry", with: "").trimmingCharacters(in: .whitespaces) + return parseByteCount(token) + } + if let range = content.range(of: #"would clean\s+([0-9.]+(?:B|KB|MB|GB|TB))"#, options: .regularExpression) { + let token = String(content[range]).replacingOccurrences(of: "would clean", with: "").trimmingCharacters(in: .whitespaces) + return parseByteCount(token) + } + if let range = content.range(of: #",\s*([0-9.]+(?:B|KB|MB|GB|TB))(?:\s+dry)?$"#, options: .regularExpression) { + let token = String(content[range]).replacingOccurrences(of: ",", with: "").replacingOccurrences(of: "dry", with: "").trimmingCharacters(in: .whitespaces) + return parseByteCount(token) + } + if let range = content.range(of: #"\(([0-9.]+(?:B|KB|MB|GB|TB))\)"#, options: .regularExpression) { + let token = String(content[range]).trimmingCharacters(in: CharacterSet(charactersIn: "()")) + return parseByteCount(token) + } + return nil + } + + private static func parseByteCount(_ token: String) -> Int64? { + let cleaned = token.uppercased().replacingOccurrences(of: " ", with: "") + let units: [(String, Double)] = [("TB", 1024 * 1024 * 1024 * 1024), ("GB", 1024 * 1024 * 1024), ("MB", 1024 * 1024), ("KB", 1024), ("B", 1)] + for (suffix, multiplier) in units { + if cleaned.hasSuffix(suffix) { + let valueString = String(cleaned.dropLast(suffix.count)) + guard let value = Double(valueString) else { return nil } + return Int64(value * multiplier) + } + } + return nil + } + + private static func stripANSI(from text: String) -> String { + let pattern = String("\u{001B}") + "\\[[0-9;]*m" + return text.replacingOccurrences(of: pattern, with: "", options: .regularExpression) + } + + private static var defaultCleanScriptURL: URL { + MoleRuntimeLocator.url(for: "bin/clean.sh") + } +} + +private enum MoleSmartCleanAdapterError: LocalizedError { + case commandFailed(String) + + var errorDescription: String? { + switch self { + case let .commandFailed(message): + return "Mole Smart Clean adapter failed: \(message)" + } + } +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/analyze.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/analyze.sh new file mode 100755 index 0000000..f699113 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/analyze.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# Mole - Analyze command. +# Runs the Go disk analyzer UI. +# Uses bundled analyze-go binary. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +GO_BIN="$SCRIPT_DIR/analyze-go" +if [[ -x "$GO_BIN" ]]; then + exec "$GO_BIN" "$@" +fi + +echo "Bundled analyzer binary not found. Please reinstall Mole or run mo update to restore it." >&2 +exit 1 diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/check.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/check.sh new file mode 100755 index 0000000..24e4594 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/check.sh @@ -0,0 +1,101 @@ +#!/bin/bash + +set -euo pipefail + +# Fix locale issues (similar to Issue #83) +export LC_ALL=C +export LANG=C + +# Load common functions +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +source "$SCRIPT_DIR/lib/core/common.sh" +source "$SCRIPT_DIR/lib/core/sudo.sh" +source "$SCRIPT_DIR/lib/manage/update.sh" +source "$SCRIPT_DIR/lib/manage/autofix.sh" + +source "$SCRIPT_DIR/lib/check/all.sh" + +cleanup_all() { + stop_inline_spinner 2> /dev/null || true + stop_sudo_session + cleanup_temp_files +} + +handle_interrupt() { + cleanup_all + exit 130 +} + +main() { + # Register unified cleanup handler + trap cleanup_all EXIT + trap handle_interrupt INT TERM + + if [[ -t 1 ]]; then + clear + fi + + printf '\n' + + # Create temp files for parallel execution + local updates_file=$(mktemp_file) + local health_file=$(mktemp_file) + local security_file=$(mktemp_file) + local config_file=$(mktemp_file) + + # Run all checks in parallel with spinner + if [[ -t 1 ]]; then + echo -ne "${PURPLE_BOLD}System Check${NC} " + start_inline_spinner "Running checks..." + else + echo -e "${PURPLE_BOLD}System Check${NC}" + echo "" + fi + + # Parallel execution + { + check_all_updates > "$updates_file" 2>&1 & + check_system_health > "$health_file" 2>&1 & + check_all_security > "$security_file" 2>&1 & + check_all_config > "$config_file" 2>&1 & + wait + } + + if [[ -t 1 ]]; then + stop_inline_spinner + printf '\n' + fi + + # Display results + echo -e "${BLUE}${ICON_ARROW}${NC} System updates" + cat "$updates_file" + + printf '\n' + echo -e "${BLUE}${ICON_ARROW}${NC} System health" + cat "$health_file" + + printf '\n' + echo -e "${BLUE}${ICON_ARROW}${NC} Security posture" + cat "$security_file" + + printf '\n' + echo -e "${BLUE}${ICON_ARROW}${NC} Configuration" + cat "$config_file" + + # Show suggestions + show_suggestions + + # Ask about auto-fix + if ask_for_auto_fix; then + perform_auto_fix + fi + + # Ask about updates + if ask_for_updates; then + perform_updates + fi + + printf '\n' +} + +main "$@" diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/clean.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/clean.sh new file mode 100755 index 0000000..0a3ac5a --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/clean.sh @@ -0,0 +1,1134 @@ +#!/bin/bash +# Mole - Clean command. +# Runs cleanup modules with optional sudo. +# Supports dry-run and whitelist. + +set -euo pipefail + +export LC_ALL=C +export LANG=C + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/../lib/core/common.sh" + +source "$SCRIPT_DIR/../lib/core/sudo.sh" +source "$SCRIPT_DIR/../lib/clean/brew.sh" +source "$SCRIPT_DIR/../lib/clean/caches.sh" +source "$SCRIPT_DIR/../lib/clean/apps.sh" +source "$SCRIPT_DIR/../lib/clean/dev.sh" +source "$SCRIPT_DIR/../lib/clean/app_caches.sh" +source "$SCRIPT_DIR/../lib/clean/hints.sh" +source "$SCRIPT_DIR/../lib/clean/system.sh" +source "$SCRIPT_DIR/../lib/clean/user.sh" + +SYSTEM_CLEAN=false +DRY_RUN=false +PROTECT_FINDER_METADATA=false +IS_M_SERIES=$([[ "$(uname -m)" == "arm64" ]] && echo "true" || echo "false") + +EXPORT_LIST_FILE="${MOLE_EXPORT_LIST_FILE:-$MOLE_STATE_DIR/clean-list.txt}" +DETAILED_EXPORT_FILE="${MOLE_DETAILED_EXPORT_FILE:-}" +CURRENT_SECTION="" +readonly PROTECTED_SW_DOMAINS=( + "capcut.com" + "photopea.com" + "pixlr.com" +) + +declare -a WHITELIST_PATTERNS=() +WHITELIST_WARNINGS=() +if [[ -f "$HOME/.config/mole/whitelist" ]]; then + while IFS= read -r line; do + # shellcheck disable=SC2295 + line="${line#"${line%%[![:space:]]*}"}" + # shellcheck disable=SC2295 + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" || "$line" =~ ^# ]] && continue + + [[ "$line" == ~* ]] && line="${line/#~/$HOME}" + line="${line//\$HOME/$HOME}" + line="${line//\$\{HOME\}/$HOME}" + if [[ "$line" =~ \.\. ]]; then + WHITELIST_WARNINGS+=("Path traversal not allowed: $line") + continue + fi + + if [[ "$line" != "$FINDER_METADATA_SENTINEL" ]]; then + if [[ ! "$line" =~ ^[a-zA-Z0-9/_.@\ *-]+$ ]]; then + WHITELIST_WARNINGS+=("Invalid path format: $line") + continue + fi + + if [[ "$line" != /* ]]; then + WHITELIST_WARNINGS+=("Must be absolute path: $line") + continue + fi + fi + + if [[ "$line" =~ // ]]; then + WHITELIST_WARNINGS+=("Consecutive slashes: $line") + continue + fi + + case "$line" in + / | /System | /System/* | /bin | /bin/* | /sbin | /sbin/* | /usr/bin | /usr/bin/* | /usr/sbin | /usr/sbin/* | /etc | /etc/* | /var/db | /var/db/*) + WHITELIST_WARNINGS+=("Protected system path: $line") + continue + ;; + esac + + duplicate="false" + if [[ ${#WHITELIST_PATTERNS[@]} -gt 0 ]]; then + for existing in "${WHITELIST_PATTERNS[@]}"; do + if [[ "$line" == "$existing" ]]; then + duplicate="true" + break + fi + done + fi + [[ "$duplicate" == "true" ]] && continue + WHITELIST_PATTERNS+=("$line") + done < "$HOME/.config/mole/whitelist" +else + WHITELIST_PATTERNS=("${DEFAULT_WHITELIST_PATTERNS[@]}") +fi + +# Expand whitelist patterns once to avoid repeated tilde expansion in hot loops. +expand_whitelist_patterns() { + if [[ ${#WHITELIST_PATTERNS[@]} -gt 0 ]]; then + local -a EXPANDED_PATTERNS + EXPANDED_PATTERNS=() + for pattern in "${WHITELIST_PATTERNS[@]}"; do + local expanded="${pattern/#\~/$HOME}" + EXPANDED_PATTERNS+=("$expanded") + done + WHITELIST_PATTERNS=("${EXPANDED_PATTERNS[@]}") + fi +} +expand_whitelist_patterns + +if [[ ${#WHITELIST_PATTERNS[@]} -gt 0 ]]; then + for entry in "${WHITELIST_PATTERNS[@]}"; do + if [[ "$entry" == "$FINDER_METADATA_SENTINEL" ]]; then + PROTECT_FINDER_METADATA=true + break + fi + done +fi + +# Section tracking and summary counters. +total_items=0 +TRACK_SECTION=0 +SECTION_ACTIVITY=0 +files_cleaned=0 +total_size_cleaned=0 +whitelist_skipped_count=0 +PROJECT_ARTIFACT_HINT_DETECTED=false +PROJECT_ARTIFACT_HINT_COUNT=0 +PROJECT_ARTIFACT_HINT_TRUNCATED=false +PROJECT_ARTIFACT_HINT_EXAMPLES=() +PROJECT_ARTIFACT_HINT_ESTIMATED_KB=0 +PROJECT_ARTIFACT_HINT_ESTIMATE_SAMPLES=0 +PROJECT_ARTIFACT_HINT_ESTIMATE_PARTIAL=false + +# shellcheck disable=SC2329 +note_activity() { + if [[ "${TRACK_SECTION:-0}" == "1" ]]; then + SECTION_ACTIVITY=1 + fi +} + +CLEANUP_DONE=false +# shellcheck disable=SC2329 +cleanup() { + local signal="${1:-EXIT}" + local exit_code="${2:-$?}" + + if [[ "$CLEANUP_DONE" == "true" ]]; then + return 0 + fi + CLEANUP_DONE=true + + stop_inline_spinner 2> /dev/null || true + + cleanup_temp_files + + stop_sudo_session + + show_cursor +} + +trap 'cleanup EXIT $?' EXIT +trap 'cleanup INT 130; exit 130' INT +trap 'cleanup TERM 143; exit 143' TERM + +start_section() { + TRACK_SECTION=1 + SECTION_ACTIVITY=0 + CURRENT_SECTION="$1" + echo "" + echo -e "${PURPLE_BOLD}${ICON_ARROW} $1${NC}" + + if [[ "$DRY_RUN" == "true" ]]; then + ensure_user_file "$EXPORT_LIST_FILE" + echo "" >> "$EXPORT_LIST_FILE" + echo "=== $1 ===" >> "$EXPORT_LIST_FILE" + fi +} + +end_section() { + stop_section_spinner + + if [[ "${TRACK_SECTION:-0}" == "1" && "${SECTION_ACTIVITY:-0}" == "0" ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Nothing to clean" + fi + TRACK_SECTION=0 +} + +# shellcheck disable=SC2329 +normalize_paths_for_cleanup() { + local -a input_paths=("$@") + local -a unique_paths=() + + for path in "${input_paths[@]}"; do + local normalized="${path%/}" + [[ -z "$normalized" ]] && normalized="$path" + local found=false + if [[ ${#unique_paths[@]} -gt 0 ]]; then + for existing in "${unique_paths[@]}"; do + if [[ "$existing" == "$normalized" ]]; then + found=true + break + fi + done + fi + [[ "$found" == "true" ]] || unique_paths+=("$normalized") + done + + local sorted_paths + if [[ ${#unique_paths[@]} -gt 0 ]]; then + sorted_paths=$(printf '%s\n' "${unique_paths[@]}" | awk '{print length "|" $0}' | LC_ALL=C sort -n | cut -d'|' -f2-) + else + sorted_paths="" + fi + + local -a result_paths=() + while IFS= read -r path; do + [[ -z "$path" ]] && continue + local is_child=false + if [[ ${#result_paths[@]} -gt 0 ]]; then + for kept in "${result_paths[@]}"; do + if [[ "$path" == "$kept" || "$path" == "$kept"/* ]]; then + is_child=true + break + fi + done + fi + [[ "$is_child" == "true" ]] || result_paths+=("$path") + done <<< "$sorted_paths" + + if [[ ${#result_paths[@]} -gt 0 ]]; then + printf '%s\n' "${result_paths[@]}" + fi +} + +# shellcheck disable=SC2329 +get_cleanup_path_size_kb() { + local path="$1" + + if [[ -f "$path" && ! -L "$path" ]]; then + if command -v stat > /dev/null 2>&1; then + local bytes + bytes=$(stat -f%z "$path" 2> /dev/null || echo "0") + if [[ "$bytes" =~ ^[0-9]+$ && "$bytes" -gt 0 ]]; then + echo $(((bytes + 1023) / 1024)) + return 0 + fi + fi + fi + + if [[ -L "$path" ]]; then + if command -v stat > /dev/null 2>&1; then + local bytes + bytes=$(stat -f%z "$path" 2> /dev/null || echo "0") + if [[ "$bytes" =~ ^[0-9]+$ && "$bytes" -gt 0 ]]; then + echo $(((bytes + 1023) / 1024)) + else + echo 0 + fi + return 0 + fi + fi + + get_path_size_kb "$path" +} + +# Classification helper for cleanup risk levels +# shellcheck disable=SC2329 +classify_cleanup_risk() { + local description="$1" + local path="${2:-}" + + # HIGH RISK: System files, preference files, require sudo + if [[ "$description" =~ [Ss]ystem || "$description" =~ [Ss]udo || "$path" =~ ^/System || "$path" =~ ^/Library ]]; then + echo "HIGH|System files or requires admin access" + return + fi + + # HIGH RISK: Preference files that might affect app functionality + if [[ "$description" =~ [Pp]reference || "$path" =~ /Preferences/ ]]; then + echo "HIGH|Preference files may affect app settings" + return + fi + + # MEDIUM RISK: Installers, large files, app bundles + if [[ "$description" =~ [Ii]nstaller || "$description" =~ [Aa]pp.*[Bb]undle || "$description" =~ [Ll]arge ]]; then + echo "MEDIUM|Installer packages or app data" + return + fi + + # MEDIUM RISK: Old backups, downloads + if [[ "$description" =~ [Bb]ackup || "$description" =~ [Dd]ownload || "$description" =~ [Oo]rphan ]]; then + echo "MEDIUM|Backup or downloaded files" + return + fi + + # LOW RISK: Caches, logs, temporary files (automatically regenerated) + if [[ "$description" =~ [Cc]ache || "$description" =~ [Ll]og || "$description" =~ [Tt]emp || "$description" =~ [Tt]humbnail ]]; then + echo "LOW|Cache/log files, automatically regenerated" + return + fi + + # DEFAULT: MEDIUM + echo "MEDIUM|User data files" +} + +# shellcheck disable=SC2329 +safe_clean() { + if [[ $# -eq 0 ]]; then + return 0 + fi + + local description + local -a targets + + if [[ $# -eq 1 ]]; then + description="$1" + targets=("$1") + else + description="${*: -1}" + targets=("${@:1:$#-1}") + fi + + local -a valid_targets=() + for target in "${targets[@]}"; do + # Optimization: If target is a glob literal and parent dir missing, skip it. + if [[ "$target" == *"*"* && ! -e "$target" ]]; then + local base_path="${target%%\**}" + local parent_dir + if [[ "$base_path" == */ ]]; then + parent_dir="${base_path%/}" + else + parent_dir=$(dirname "$base_path") + fi + + if [[ ! -d "$parent_dir" ]]; then + # debug_log "Skipping nonexistent parent: $parent_dir for $target" + continue + fi + fi + valid_targets+=("$target") + done + + if [[ ${#valid_targets[@]} -gt 0 ]]; then + targets=("${valid_targets[@]}") + else + targets=() + fi + if [[ ${#targets[@]} -eq 0 ]]; then + return 0 + fi + + local removed_any=0 + local total_size_kb=0 + local total_count=0 + local skipped_count=0 + local removal_failed_count=0 + local permission_start=${MOLE_PERMISSION_DENIED_COUNT:-0} + + local show_scan_feedback=false + if [[ ${#targets[@]} -gt 20 && -t 1 ]]; then + show_scan_feedback=true + stop_section_spinner + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning ${#targets[@]} items..." + fi + + local -a existing_paths=() + for path in "${targets[@]}"; do + local skip=false + + if should_protect_path "$path"; then + skip=true + skipped_count=$((skipped_count + 1)) + log_operation "clean" "SKIPPED" "$path" "protected" + fi + + [[ "$skip" == "true" ]] && continue + + if is_path_whitelisted "$path"; then + skip=true + skipped_count=$((skipped_count + 1)) + log_operation "clean" "SKIPPED" "$path" "whitelist" + fi + [[ "$skip" == "true" ]] && continue + [[ -e "$path" ]] && existing_paths+=("$path") + done + + if [[ "$show_scan_feedback" == "true" ]]; then + stop_section_spinner + fi + + debug_log "Cleaning: $description, ${#existing_paths[@]} items" + + # Enhanced debug output with risk level and details + if [[ "${MO_DEBUG:-}" == "1" && ${#existing_paths[@]} -gt 0 ]]; then + # Determine risk level for this cleanup operation + local risk_info + risk_info=$(classify_cleanup_risk "$description" "${existing_paths[0]}") + local risk_level="${risk_info%%|*}" + local risk_reason="${risk_info#*|}" + + debug_operation_start "$description" + debug_risk_level "$risk_level" "$risk_reason" + debug_operation_detail "Item count" "${#existing_paths[@]}" + + # Log sample of files (first 10) with details + if [[ ${#existing_paths[@]} -le 10 ]]; then + debug_operation_detail "Files to be removed" "All files listed below" + else + debug_operation_detail "Files to be removed" "Showing first 10 of ${#existing_paths[@]} files" + fi + fi + + if [[ $skipped_count -gt 0 ]]; then + whitelist_skipped_count=$((whitelist_skipped_count + skipped_count)) + fi + + if [[ ${#existing_paths[@]} -eq 0 ]]; then + return 0 + fi + + if [[ ${#existing_paths[@]} -gt 1 ]]; then + local -a normalized_paths=() + while IFS= read -r path; do + [[ -n "$path" ]] && normalized_paths+=("$path") + done < <(normalize_paths_for_cleanup "${existing_paths[@]}") + + if [[ ${#normalized_paths[@]} -gt 0 ]]; then + existing_paths=("${normalized_paths[@]}") + else + existing_paths=() + fi + fi + + local show_spinner=false + if [[ ${#existing_paths[@]} -gt 10 ]]; then + show_spinner=true + local total_paths=${#existing_paths[@]} + if [[ -t 1 ]]; then MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning items..."; fi + fi + + local cleaning_spinner_started=false + + # For larger batches, precompute sizes in parallel for better UX/stat accuracy. + if [[ ${#existing_paths[@]} -gt 3 ]]; then + local temp_dir + temp_dir=$(create_temp_dir) + + local dir_count=0 + local sample_size=$((${#existing_paths[@]} > 20 ? 20 : ${#existing_paths[@]})) + local max_sample=$((${#existing_paths[@]} * 20 / 100)) + [[ $max_sample -gt $sample_size ]] && sample_size=$max_sample + + for ((i = 0; i < sample_size && i < ${#existing_paths[@]}; i++)); do + [[ -d "${existing_paths[i]}" ]] && ((dir_count++)) + done + + # Heuristic: mostly files -> sequential stat is faster than subshells. + if [[ $dir_count -lt 5 && ${#existing_paths[@]} -gt 20 ]]; then + if [[ -t 1 && "$show_spinner" == "false" ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning items..." + show_spinner=true + fi + + local idx=0 + local last_progress_update + last_progress_update=$(get_epoch_seconds) + for path in "${existing_paths[@]}"; do + local size + size=$(get_cleanup_path_size_kb "$path") + [[ ! "$size" =~ ^[0-9]+$ ]] && size=0 + + if [[ "$size" -gt 0 ]]; then + echo "$size 1" > "$temp_dir/result_${idx}" + else + echo "0 0" > "$temp_dir/result_${idx}" + fi + + idx=$((idx + 1)) + if [[ $((idx % 20)) -eq 0 && "$show_spinner" == "true" && -t 1 ]]; then + update_progress_if_needed "$idx" "${#existing_paths[@]}" last_progress_update 1 || true + last_progress_update=$(get_epoch_seconds) + fi + done + else + local -a pids=() + local idx=0 + local completed=0 + local last_progress_update + last_progress_update=$(get_epoch_seconds) + local total_paths=${#existing_paths[@]} + + if [[ ${#existing_paths[@]} -gt 0 ]]; then + for path in "${existing_paths[@]}"; do + ( + local size + size=$(get_cleanup_path_size_kb "$path") + [[ ! "$size" =~ ^[0-9]+$ ]] && size=0 + local tmp_file="$temp_dir/result_${idx}.$$" + if [[ "$size" -gt 0 ]]; then + echo "$size 1" > "$tmp_file" + else + echo "0 0" > "$tmp_file" + fi + mv "$tmp_file" "$temp_dir/result_${idx}" 2> /dev/null || true + ) & + pids+=($!) + idx=$((idx + 1)) + + if ((${#pids[@]} >= MOLE_MAX_PARALLEL_JOBS)); then + wait "${pids[0]}" 2> /dev/null || true + pids=("${pids[@]:1}") + completed=$((completed + 1)) + + if [[ "$show_spinner" == "true" && -t 1 ]]; then + update_progress_if_needed "$completed" "$total_paths" last_progress_update 2 || true + fi + fi + done + fi + + if [[ ${#pids[@]} -gt 0 ]]; then + for pid in "${pids[@]}"; do + wait "$pid" 2> /dev/null || true + completed=$((completed + 1)) + + if [[ "$show_spinner" == "true" && -t 1 ]]; then + update_progress_if_needed "$completed" "$total_paths" last_progress_update 2 || true + fi + done + fi + fi + + # Read results back in original order. + # Start spinner for cleaning phase + if [[ "$DRY_RUN" != "true" && ${#existing_paths[@]} -gt 0 && -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Cleaning..." + cleaning_spinner_started=true + fi + idx=0 + if [[ ${#existing_paths[@]} -gt 0 ]]; then + for path in "${existing_paths[@]}"; do + local result_file="$temp_dir/result_${idx}" + if [[ -f "$result_file" ]]; then + read -r size count < "$result_file" 2> /dev/null || true + local removed=0 + if [[ "$DRY_RUN" != "true" ]]; then + if safe_remove "$path" true; then + removed=1 + fi + else + removed=1 + fi + + if [[ $removed -eq 1 ]]; then + if [[ "$size" -gt 0 ]]; then + total_size_kb=$((total_size_kb + size)) + fi + total_count=$((total_count + 1)) + removed_any=1 + else + if [[ -e "$path" && "$DRY_RUN" != "true" ]]; then + removal_failed_count=$((removal_failed_count + 1)) + fi + fi + fi + idx=$((idx + 1)) + done + fi + + else + # Start spinner for cleaning phase (small batch) + if [[ "$DRY_RUN" != "true" && ${#existing_paths[@]} -gt 0 && -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Cleaning..." + cleaning_spinner_started=true + fi + local idx=0 + if [[ ${#existing_paths[@]} -gt 0 ]]; then + for path in "${existing_paths[@]}"; do + local size_kb + size_kb=$(get_cleanup_path_size_kb "$path") + [[ ! "$size_kb" =~ ^[0-9]+$ ]] && size_kb=0 + + local removed=0 + if [[ "$DRY_RUN" != "true" ]]; then + if safe_remove "$path" true; then + removed=1 + fi + else + removed=1 + fi + + if [[ $removed -eq 1 ]]; then + if [[ "$size_kb" -gt 0 ]]; then + total_size_kb=$((total_size_kb + size_kb)) + fi + total_count=$((total_count + 1)) + removed_any=1 + else + if [[ -e "$path" && "$DRY_RUN" != "true" ]]; then + removal_failed_count=$((removal_failed_count + 1)) + fi + fi + idx=$((idx + 1)) + done + fi + fi + + if [[ "$show_spinner" == "true" || "$cleaning_spinner_started" == "true" ]]; then + stop_inline_spinner + fi + + local permission_end=${MOLE_PERMISSION_DENIED_COUNT:-0} + # Track permission failures in debug output (avoid noisy user warnings). + if [[ $permission_end -gt $permission_start && $removed_any -eq 0 ]]; then + debug_log "Permission denied while cleaning: $description" + fi + if [[ $removal_failed_count -gt 0 && "$DRY_RUN" != "true" ]]; then + debug_log "Skipped $removal_failed_count items, permission denied or in use, for: $description" + fi + + if [[ $removed_any -eq 1 ]]; then + # Stop spinner before output + stop_section_spinner + + local size_human + size_human=$(bytes_to_human "$((total_size_kb * 1024))") + + local label="$description" + if [[ ${#targets[@]} -gt 1 ]]; then + label+=" ${#targets[@]} items" + fi + + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $label${NC}, ${YELLOW}$size_human dry${NC}" + + local paths_temp + paths_temp=$(create_temp_file) + + idx=0 + if [[ ${#existing_paths[@]} -gt 0 ]]; then + for path in "${existing_paths[@]}"; do + local size=0 + + if [[ -n "${temp_dir:-}" && -f "$temp_dir/result_${idx}" ]]; then + read -r size count < "$temp_dir/result_${idx}" 2> /dev/null || true + else + size=$(get_cleanup_path_size_kb "$path" 2> /dev/null || echo "0") + fi + + [[ "$size" == "0" || -z "$size" ]] && { + idx=$((idx + 1)) + continue + } + + echo "$(dirname "$path")|$size|$path" >> "$paths_temp" + if [[ -n "$DETAILED_EXPORT_FILE" ]]; then + printf "%s\t%s\t%s\n" "$CURRENT_SECTION" "$path" "$size" >> "$DETAILED_EXPORT_FILE" + fi + idx=$((idx + 1)) + done + fi + + # Group dry-run paths by parent for a compact export list. + if [[ -f "$paths_temp" && -s "$paths_temp" ]]; then + sort -t'|' -k1,1 "$paths_temp" | awk -F'|' ' + { + parent = $1 + size = $2 + path = $3 + + parent_size[parent] += size + if (parent_count[parent] == 0) { + parent_first[parent] = path + } + parent_count[parent]++ + } + END { + for (parent in parent_size) { + if (parent_count[parent] > 1) { + printf "%s|%d|%d\n", parent, parent_size[parent], parent_count[parent] + } else { + printf "%s|%d|1\n", parent_first[parent], parent_size[parent] + } + } + } + ' | while IFS='|' read -r display_path total_size child_count; do + local size_human + size_human=$(bytes_to_human "$((total_size * 1024))") + if [[ $child_count -gt 1 ]]; then + echo "$display_path # $size_human, $child_count items" >> "$EXPORT_LIST_FILE" + else + echo "$display_path # $size_human" >> "$EXPORT_LIST_FILE" + fi + done + fi + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} $label${NC}, ${GREEN}$size_human${NC}" + fi + files_cleaned=$((files_cleaned + total_count)) + total_size_cleaned=$((total_size_cleaned + total_size_kb)) + total_items=$((total_items + 1)) + note_activity + fi + + return 0 +} + +start_cleanup() { + # Set current command for operation logging + export MOLE_CURRENT_COMMAND="clean" + log_operation_session_start "clean" + + if [[ -t 1 ]]; then + printf '\033[2J\033[H' + fi + printf '\n' + echo -e "${PURPLE_BOLD}Clean Your Mac${NC}" + echo "" + + if [[ "$DRY_RUN" != "true" && -t 0 ]]; then + echo -e "${GRAY}${ICON_WARNING} Use --dry-run to preview, --whitelist to manage protected paths${NC}" + fi + + if [[ "$DRY_RUN" == "true" ]]; then + echo -e "${YELLOW}Dry Run Mode${NC}, Preview only, no deletions" + echo "" + + ensure_user_file "$EXPORT_LIST_FILE" + if [[ -n "$DETAILED_EXPORT_FILE" ]]; then + mkdir -p "$(dirname "$DETAILED_EXPORT_FILE")" + : > "$DETAILED_EXPORT_FILE" + fi + cat > "$EXPORT_LIST_FILE" << EOF +# Mole Cleanup Preview - $(date '+%Y-%m-%d %H:%M:%S') +# +# How to protect files: +# 1. Copy any path below to ~/.config/mole/whitelist +# 2. Run: mo clean --whitelist +# +# Example: +# /Users/*/Library/Caches/com.example.app +# + +EOF + + # Preview system section when sudo is already cached (no password prompt). + if has_sudo_session; then + SYSTEM_CLEAN=true + echo -e "${GREEN}${ICON_SUCCESS}${NC} Admin access available, system preview included" + echo "" + else + SYSTEM_CLEAN=false + echo -e "${GRAY}${ICON_WARNING} System caches need sudo, run ${NC}sudo -v && mo clean --dry-run${GRAY} for full preview${NC}" + echo "" + fi + return + fi + + if [[ -t 0 ]]; then + if has_sudo_session; then + SYSTEM_CLEAN=true + echo -e "${GREEN}${ICON_SUCCESS}${NC} Admin access already available" + echo "" + else + echo -ne "${PURPLE}${ICON_ARROW}${NC} System caches need sudo. ${GREEN}Enter${NC} continue, ${GRAY}Space${NC} skip: " + + local choice + choice=$(read_key) + + # ESC/Q aborts, Space skips, Enter enables system cleanup. + if [[ "$choice" == "QUIT" ]]; then + echo -e " ${GRAY}Canceled${NC}" + exit 0 + fi + + if [[ "$choice" == "SPACE" ]]; then + echo -e " ${GRAY}Skipped${NC}" + echo "" + SYSTEM_CLEAN=false + elif [[ "$choice" == "ENTER" ]]; then + printf "\r\033[K" # Clear the prompt line + if ensure_sudo_session "System cleanup requires admin access"; then + SYSTEM_CLEAN=true + echo -e "${GREEN}${ICON_SUCCESS}${NC} Admin access granted" + echo "" + else + SYSTEM_CLEAN=false + echo "" + echo -e "${YELLOW}Authentication failed${NC}, continuing with user-level cleanup" + fi + else + SYSTEM_CLEAN=false + echo -e " ${GRAY}Skipped${NC}" + echo "" + fi + fi + else + echo "" + echo "Running in non-interactive mode" + if has_sudo_session; then + SYSTEM_CLEAN=true + echo " ${ICON_LIST} System-level cleanup enabled, sudo session active" + else + SYSTEM_CLEAN=false + echo " ${ICON_LIST} System-level cleanup skipped, requires sudo" + fi + echo " ${ICON_LIST} User-level cleanup will proceed automatically" + echo "" + fi +} + +perform_cleanup() { + # Test mode skips expensive scans and returns minimal output. + local test_mode_enabled=false + if [[ "${MOLE_TEST_MODE:-0}" == "1" ]]; then + test_mode_enabled=true + if [[ "$DRY_RUN" == "true" ]]; then + echo -e "${YELLOW}Dry Run Mode${NC}, Preview only, no deletions" + echo "" + fi + echo -e "${GREEN}${ICON_LIST}${NC} User app cache" + if [[ ${#WHITELIST_PATTERNS[@]} -gt 0 ]]; then + local -a expanded_defaults + expanded_defaults=() + for default in "${DEFAULT_WHITELIST_PATTERNS[@]}"; do + expanded_defaults+=("${default/#\~/$HOME}") + done + local has_custom=false + for pattern in "${WHITELIST_PATTERNS[@]}"; do + local is_default=false + local normalized_pattern="${pattern%/}" + for default in "${expanded_defaults[@]}"; do + local normalized_default="${default%/}" + [[ "$normalized_pattern" == "$normalized_default" ]] && is_default=true && break + done + [[ "$is_default" == "false" ]] && has_custom=true && break + done + [[ "$has_custom" == "true" ]] && echo -e "${GREEN}${ICON_SUCCESS}${NC} Protected items found" + fi + if [[ "$DRY_RUN" == "true" ]]; then + echo "" + echo "Potential space: 0.00GB" + fi + total_items=1 + files_cleaned=0 + total_size_cleaned=0 + fi + + if [[ "$test_mode_enabled" == "false" ]]; then + echo -e "${BLUE}${ICON_ADMIN}${NC} $(detect_architecture) | Free space: $(get_free_space)" + fi + + if [[ "$test_mode_enabled" == "true" ]]; then + local summary_heading="Test mode complete" + local -a summary_details + summary_details=() + summary_details+=("Test mode - no actual cleanup performed") + print_summary_block "$summary_heading" "${summary_details[@]}" + printf '\n' + return 0 + fi + + # Pre-check TCC permissions to avoid mid-run prompts. + check_tcc_permissions + + if [[ ${#WHITELIST_PATTERNS[@]} -gt 0 ]]; then + local predefined_count=0 + local custom_count=0 + + for pattern in "${WHITELIST_PATTERNS[@]}"; do + local is_predefined=false + for default in "${DEFAULT_WHITELIST_PATTERNS[@]}"; do + local expanded_default="${default/#\~/$HOME}" + if [[ "$pattern" == "$expanded_default" ]]; then + is_predefined=true + break + fi + done + + if [[ "$is_predefined" == "true" ]]; then + predefined_count=$((predefined_count + 1)) + else + custom_count=$((custom_count + 1)) + fi + done + + if [[ $custom_count -gt 0 || $predefined_count -gt 0 ]]; then + local summary="" + [[ $predefined_count -gt 0 ]] && summary+="$predefined_count core" + [[ $custom_count -gt 0 && $predefined_count -gt 0 ]] && summary+=" + " + [[ $custom_count -gt 0 ]] && summary+="$custom_count custom" + summary+=" patterns active" + + echo -e "${BLUE}${ICON_SUCCESS}${NC} Whitelist: $summary" + + if [[ "$DRY_RUN" == "true" ]]; then + for pattern in "${WHITELIST_PATTERNS[@]}"; do + [[ "$pattern" == "$FINDER_METADATA_SENTINEL" ]] && continue + echo -e " ${GRAY}${ICON_SUBLIST}${NC} ${GRAY}${pattern}${NC}" + done + fi + fi + fi + + if [[ -t 1 && "$DRY_RUN" != "true" ]]; then + local fda_status=0 + has_full_disk_access + fda_status=$? + if [[ $fda_status -eq 1 ]]; then + echo "" + echo -e "${GRAY}${ICON_REVIEW}${NC} ${GRAY}Grant Full Disk Access to your terminal in System Settings for best results${NC}" + fi + fi + + total_items=0 + files_cleaned=0 + total_size_cleaned=0 + + local had_errexit=0 + [[ $- == *e* ]] && had_errexit=1 + + # Allow per-section failures without aborting the full run. + set +e + + # ===== 1. System ===== + if [[ "$SYSTEM_CLEAN" == "true" ]]; then + start_section "System" + clean_deep_system + clean_local_snapshots + end_section + fi + + if [[ ${#WHITELIST_WARNINGS[@]} -gt 0 ]]; then + echo "" + for warning in "${WHITELIST_WARNINGS[@]}"; do + echo -e " ${GRAY}${ICON_WARNING}${NC} Whitelist: $warning" + done + fi + + # ===== 2. User essentials ===== + start_section "User essentials" + clean_user_essentials + clean_finder_metadata + scan_external_volumes + end_section + + # ===== 3. App caches (merged sandboxed and standard app caches) ===== + start_section "App caches" + clean_app_caches + end_section + + # ===== 4. Browsers ===== + start_section "Browsers" + clean_browsers + end_section + + # ===== 5. Cloud & Office ===== + start_section "Cloud & Office" + clean_cloud_storage + clean_office_applications + end_section + + # ===== 6. Developer tools (merged CLI and GUI tooling) ===== + start_section "Developer tools" + clean_developer_tools + end_section + + # ===== 7. Applications ===== + start_section "Applications" + clean_user_gui_applications + end_section + + # ===== 8. Virtualization ===== + start_section "Virtualization" + clean_virtualization_tools + end_section + + # ===== 9. Application Support ===== + start_section "Application Support" + clean_application_support_logs + end_section + + # ===== 10. Orphaned data ===== + start_section "Orphaned data" + clean_orphaned_app_data + clean_orphaned_system_services + clean_orphaned_launch_agents + end_section + + # ===== 11. Apple Silicon ===== + clean_apple_silicon_caches + + # ===== 12. Device backups ===== + start_section "Device backups" + check_ios_device_backups + end_section + + # ===== 13. Time Machine ===== + start_section "Time Machine" + clean_time_machine_failed_backups + end_section + + # ===== 14. Large files ===== + start_section "Large files" + check_large_file_candidates + end_section + + # ===== 15. System Data clues ===== + start_section "System Data clues" + show_system_data_hint_notice + end_section + + # ===== 16. Project artifacts ===== + start_section "Project artifacts" + show_project_artifact_hint_notice + end_section + + # ===== Final summary ===== + echo "" + + local summary_heading="" + local summary_status="success" + if [[ "$DRY_RUN" == "true" ]]; then + summary_heading="Dry run complete - no changes made" + else + summary_heading="Cleanup complete" + fi + + local -a summary_details=() + + if [[ $total_size_cleaned -gt 0 ]]; then + local freed_size_human + freed_size_human=$(bytes_to_human_kb "$total_size_cleaned") + + if [[ "$DRY_RUN" == "true" ]]; then + local stats="Potential space: ${GREEN}${freed_size_human}${NC}" + [[ $files_cleaned -gt 0 ]] && stats+=" | Items: $files_cleaned" + [[ $total_items -gt 0 ]] && stats+=" | Categories: $total_items" + summary_details+=("$stats") + + { + echo "" + echo "# ============================================" + echo "# Summary" + echo "# ============================================" + echo "# Potential cleanup: ${freed_size_human}" + echo "# Items: $files_cleaned" + echo "# Categories: $total_items" + } >> "$EXPORT_LIST_FILE" + + summary_details+=("Detailed file list: ${GRAY}$EXPORT_LIST_FILE${NC}") + summary_details+=("Use ${GRAY}mo clean --whitelist${NC} to add protection rules") + else + local summary_line="Space freed: ${GREEN}${freed_size_human}${NC}" + + if [[ $files_cleaned -gt 0 && $total_items -gt 0 ]]; then + summary_line+=" | Items cleaned: $files_cleaned | Categories: $total_items" + elif [[ $files_cleaned -gt 0 ]]; then + summary_line+=" | Items cleaned: $files_cleaned" + elif [[ $total_items -gt 0 ]]; then + summary_line+=" | Categories: $total_items" + fi + + summary_details+=("$summary_line") + + # Movie comparison only if >= 1GB (1048576 KB) + if ((total_size_cleaned >= 1048576)); then + local freed_gb=$((total_size_cleaned / 1048576)) + local movies=$((freed_gb * 10 / 45)) + + if [[ $movies -gt 0 ]]; then + if [[ $movies -eq 1 ]]; then + summary_details+=("Equivalent to ~$movies 4K movie of storage.") + else + summary_details+=("Equivalent to ~$movies 4K movies of storage.") + fi + fi + fi + + local final_free_space + final_free_space=$(get_free_space) + summary_details+=("Free space now: $final_free_space") + fi + else + summary_status="info" + if [[ "$DRY_RUN" == "true" ]]; then + summary_details+=("No significant reclaimable space detected, system already clean.") + else + summary_details+=("System was already clean; no additional space freed.") + fi + summary_details+=("Free space now: $(get_free_space)") + fi + + if [[ $had_errexit -eq 1 ]]; then + set -e + fi + + # Log session end with summary + log_operation_session_end "clean" "$files_cleaned" "$total_size_cleaned" + + print_summary_block "$summary_heading" "${summary_details[@]}" + printf '\n' +} + +main() { + for arg in "$@"; do + case "$arg" in + "--help" | "-h") + show_clean_help + exit 0 + ;; + "--debug") + export MO_DEBUG=1 + ;; + "--dry-run" | "-n") + DRY_RUN=true + export MOLE_DRY_RUN=1 + ;; + "--whitelist") + source "$SCRIPT_DIR/../lib/manage/whitelist.sh" + manage_whitelist "clean" + exit 0 + ;; + esac + done + + start_cleanup + hide_cursor + perform_cleanup + show_cursor + exit 0 +} + +main "$@" diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/completion.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/completion.sh new file mode 100755 index 0000000..a575929 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/completion.sh @@ -0,0 +1,292 @@ +#!/bin/bash + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" + +source "$ROOT_DIR/lib/core/common.sh" +source "$ROOT_DIR/lib/core/commands.sh" + +command_names=() +for entry in "${MOLE_COMMANDS[@]}"; do + command_names+=("${entry%%:*}") +done +command_words="${command_names[*]}" + +emit_zsh_subcommands() { + for entry in "${MOLE_COMMANDS[@]}"; do + printf " '%s:%s'\n" "${entry%%:*}" "${entry#*:}" + done +} + +emit_fish_completions() { + local cmd="$1" + for entry in "${MOLE_COMMANDS[@]}"; do + local name="${entry%%:*}" + local desc="${entry#*:}" + printf 'complete -c %s -n "__fish_mole_no_subcommand" -a %s -d "%s"\n' "$cmd" "$name" "$desc" + done + + printf '\n' + printf 'complete -c %s -n "not __fish_mole_no_subcommand" -a bash -d "generate bash completion" -n "__fish_see_subcommand_path completion"\n' "$cmd" + printf 'complete -c %s -n "not __fish_mole_no_subcommand" -a zsh -d "generate zsh completion" -n "__fish_see_subcommand_path completion"\n' "$cmd" + printf 'complete -c %s -n "not __fish_mole_no_subcommand" -a fish -d "generate fish completion" -n "__fish_see_subcommand_path completion"\n' "$cmd" +} + +if [[ $# -gt 0 ]]; then + normalized_args=() + for arg in "$@"; do + case "$arg" in + "--dry-run" | "-n") + export MOLE_DRY_RUN=1 + ;; + *) + normalized_args+=("$arg") + ;; + esac + done + if [[ ${#normalized_args[@]} -gt 0 ]]; then + set -- "${normalized_args[@]}" + else + set -- + fi +fi + +# Auto-install mode when run without arguments +if [[ $# -eq 0 ]]; then + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + echo -e "${YELLOW}${ICON_DRY_RUN} DRY RUN MODE${NC}, shell config files will not be modified" + echo "" + fi + + # Detect current shell + current_shell="${SHELL##*/}" + if [[ -z "$current_shell" ]]; then + current_shell="$(ps -p "$PPID" -o comm= 2> /dev/null | awk '{print $1}')" + fi + + completion_name="" + if command -v mole > /dev/null 2>&1; then + completion_name="mole" + elif command -v mo > /dev/null 2>&1; then + completion_name="mo" + fi + + case "$current_shell" in + bash) + config_file="${HOME}/.bashrc" + [[ -f "${HOME}/.bash_profile" ]] && config_file="${HOME}/.bash_profile" + # shellcheck disable=SC2016 + completion_line='if output="$('"$completion_name"' completion bash 2>/dev/null)"; then eval "$output"; fi' + ;; + zsh) + config_file="${HOME}/.zshrc" + # shellcheck disable=SC2016 + completion_line='if output="$('"$completion_name"' completion zsh 2>/dev/null)"; then eval "$output"; fi' + ;; + fish) + config_file="${HOME}/.config/fish/config.fish" + # shellcheck disable=SC2016 + completion_line='set -l output ('"$completion_name"' completion fish 2>/dev/null); and echo "$output" | source' + ;; + *) + log_error "Unsupported shell: $current_shell" + echo " mole completion " + exit 1 + ;; + esac + + if [[ -z "$completion_name" ]]; then + if [[ -f "$config_file" ]] && grep -Eq "(^# Mole shell completion$|(mole|mo)[[:space:]]+completion)" "$config_file" 2> /dev/null; then + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + echo -e "${GRAY}${ICON_REVIEW} [DRY RUN] Would remove stale completion entries from $config_file${NC}" + echo "" + else + original_mode="" + original_mode="$(stat -f '%Mp%Lp' "$config_file" 2> /dev/null || true)" + temp_file="$(mktemp)" + grep -Ev "(^# Mole shell completion$|(mole|mo)[[:space:]]+completion)" "$config_file" > "$temp_file" || true + mv "$temp_file" "$config_file" + if [[ -n "$original_mode" ]]; then + chmod "$original_mode" "$config_file" 2> /dev/null || true + fi + echo -e "${GREEN}${ICON_SUCCESS}${NC} Removed stale completion entries from $config_file" + echo "" + fi + fi + log_error "mole not found in PATH, install Mole before enabling completion" + exit 1 + fi + + # Check if already installed and normalize to latest line + if [[ -f "$config_file" ]] && grep -Eq "(mole|mo)[[:space:]]+completion" "$config_file" 2> /dev/null; then + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + echo -e "${GRAY}${ICON_REVIEW} [DRY RUN] Would normalize completion entry in $config_file${NC}" + echo "" + exit 0 + fi + + original_mode="" + original_mode="$(stat -f '%Mp%Lp' "$config_file" 2> /dev/null || true)" + temp_file="$(mktemp)" + grep -Ev "(^# Mole shell completion$|(mole|mo)[[:space:]]+completion)" "$config_file" > "$temp_file" || true + mv "$temp_file" "$config_file" + if [[ -n "$original_mode" ]]; then + chmod "$original_mode" "$config_file" 2> /dev/null || true + fi + { + echo "" + echo "# Mole shell completion" + echo "$completion_line" + } >> "$config_file" + echo "" + echo -e "${GREEN}${ICON_SUCCESS}${NC} Shell completion updated in $config_file" + echo "" + exit 0 + fi + + # Prompt user for installation + echo "" + echo -e "${GRAY}Will add to ${config_file}:${NC}" + echo " $completion_line" + echo "" + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + echo -e "${GREEN}${ICON_SUCCESS}${NC} Dry run complete, no changes made" + exit 0 + fi + + echo -ne "${PURPLE}${ICON_ARROW}${NC} Enable completion for ${GREEN}${current_shell}${NC}? ${GRAY}Enter confirm / Q cancel${NC}: " + IFS= read -r -s -n1 key || key="" + drain_pending_input + echo "" + + case "$key" in + $'\e' | [Qq] | [Nn]) + echo -e "${YELLOW}Cancelled${NC}" + exit 0 + ;; + "" | $'\n' | $'\r' | [Yy]) ;; + *) + log_error "Invalid key" + exit 1 + ;; + esac + + # Create config file if it doesn't exist + if [[ ! -f "$config_file" ]]; then + mkdir -p "$(dirname "$config_file")" + touch "$config_file" + fi + + # Remove previous Mole completion lines to avoid duplicates + if [[ -f "$config_file" ]]; then + original_mode="" + original_mode="$(stat -f '%Mp%Lp' "$config_file" 2> /dev/null || true)" + temp_file="$(mktemp)" + grep -Ev "(^# Mole shell completion$|(mole|mo)[[:space:]]+completion)" "$config_file" > "$temp_file" || true + mv "$temp_file" "$config_file" + if [[ -n "$original_mode" ]]; then + chmod "$original_mode" "$config_file" 2> /dev/null || true + fi + fi + + # Add completion line + { + echo "" + echo "# Mole shell completion" + echo "$completion_line" + } >> "$config_file" + + echo -e "${GREEN}${ICON_SUCCESS}${NC} Completion added to $config_file" + echo "" + echo "" + echo -e "${GRAY}To activate now:${NC}" + echo -e " ${GREEN}source $config_file${NC}" + exit 0 +fi + +case "$1" in + bash) + cat << EOF +_mole_completions() +{ + local cur_word prev_word + cur_word="\${COMP_WORDS[\$COMP_CWORD]}" + prev_word="\${COMP_WORDS[\$COMP_CWORD-1]}" + + if [ "\$COMP_CWORD" -eq 1 ]; then + COMPREPLY=( \$(compgen -W "$command_words" -- "\$cur_word") ) + else + case "\$prev_word" in + completion) + COMPREPLY=( \$(compgen -W "bash zsh fish" -- "\$cur_word") ) + ;; + *) + COMPREPLY=() + ;; + esac + fi +} + +complete -F _mole_completions mole mo +EOF + ;; + zsh) + printf '#compdef mole mo\n\n' + printf '_mole() {\n' + printf ' local -a subcommands\n' + printf ' subcommands=(\n' + emit_zsh_subcommands + printf ' )\n' + printf " _describe 'subcommand' subcommands\n" + printf '}\n\n' + printf 'compdef _mole mole mo\n' + ;; + fish) + printf '# Completions for mole\n' + emit_fish_completions mole + printf '\n# Completions for mo (alias)\n' + emit_fish_completions mo + printf '\nfunction __fish_mole_no_subcommand\n' + printf ' for i in (commandline -opc)\n' + # shellcheck disable=SC2016 + printf ' if contains -- $i %s\n' "$command_words" + printf ' return 1\n' + printf ' end\n' + printf ' end\n' + printf ' return 0\n' + printf 'end\n\n' + printf 'function __fish_see_subcommand_path\n' + printf ' string match -q -- "completion" (commandline -opc)[1]\n' + printf 'end\n' + ;; + *) + cat << 'EOF' +Usage: mole completion [bash|zsh|fish] + +Setup shell tab completion for mole and mo commands. + +Auto-install: + mole completion # Auto-detect shell and install + mole completion --dry-run # Preview config changes without writing files + +Manual install: + mole completion bash # Generate bash completion script + mole completion zsh # Generate zsh completion script + mole completion fish # Generate fish completion script + +Examples: + # Auto-install (recommended) + mole completion + + # Manual install - Bash + eval "$(mole completion bash)" + + # Manual install - Zsh + eval "$(mole completion zsh)" + + # Manual install - Fish + mole completion fish | source +EOF + exit 1 + ;; +esac diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/installer.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/installer.sh new file mode 100755 index 0000000..864404a --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/installer.sh @@ -0,0 +1,725 @@ +#!/bin/bash +# Mole - Installer command +# Find and remove installer files - .dmg, .pkg, .mpkg, .iso, .xip, .zip + +set -euo pipefail + +# shellcheck disable=SC2154 +# External variables set by menu_paginated.sh and environment +declare MOLE_SELECTION_RESULT +declare MOLE_INSTALLER_SCAN_MAX_DEPTH + +export LC_ALL=C +export LANG=C + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/../lib/core/common.sh" +source "$SCRIPT_DIR/../lib/ui/menu_paginated.sh" + +cleanup() { + if [[ "${IN_ALT_SCREEN:-0}" == "1" ]]; then + leave_alt_screen + IN_ALT_SCREEN=0 + fi + show_cursor + cleanup_temp_files +} +trap cleanup EXIT +trap 'trap - EXIT; cleanup; exit 130' INT TERM + +# Scan configuration +readonly INSTALLER_SCAN_MAX_DEPTH_DEFAULT=2 +readonly INSTALLER_SCAN_PATHS=( + "$HOME/Downloads" + "$HOME/Desktop" + "$HOME/Documents" + "$HOME/Public" + "$HOME/Library/Downloads" + "/Users/Shared" + "/Users/Shared/Downloads" + "$HOME/Library/Caches/Homebrew" + "$HOME/Library/Mobile Documents/com~apple~CloudDocs/Downloads" + "$HOME/Library/Containers/com.apple.mail/Data/Library/Mail Downloads" + "$HOME/Library/Application Support/Telegram Desktop" + "$HOME/Downloads/Telegram Desktop" +) +readonly MAX_ZIP_ENTRIES=50 +ZIP_LIST_CMD=() +IN_ALT_SCREEN=0 + +if command -v zipinfo > /dev/null 2>&1; then + ZIP_LIST_CMD=(zipinfo -1) +elif command -v unzip > /dev/null 2>&1; then + ZIP_LIST_CMD=(unzip -Z -1) +fi + +TERMINAL_WIDTH=0 + +# Check for installer payloads inside ZIP - check first N entries for installer patterns +is_installer_zip() { + local zip="$1" + local cap="$MAX_ZIP_ENTRIES" + + [[ ${#ZIP_LIST_CMD[@]} -gt 0 ]] || return 1 + + if ! "${ZIP_LIST_CMD[@]}" "$zip" 2> /dev/null | + head -n "$cap" | + awk ' + /\.(app|pkg|dmg|xip)(\/|$)/ { found=1; exit 0 } + END { exit found ? 0 : 1 } + '; then + return 1 + fi + + return 0 +} + +handle_candidate_file() { + local file="$1" + + [[ -L "$file" ]] && return 0 # Skip symlinks explicitly + case "$file" in + *.dmg | *.pkg | *.mpkg | *.iso | *.xip) + echo "$file" + ;; + *.zip) + [[ -r "$file" ]] || return 0 + if is_installer_zip "$file" 2> /dev/null; then + echo "$file" + fi + ;; + esac +} + +scan_installers_in_path() { + local path="$1" + local max_depth="${MOLE_INSTALLER_SCAN_MAX_DEPTH:-$INSTALLER_SCAN_MAX_DEPTH_DEFAULT}" + + [[ -d "$path" ]] || return 0 + + local file + + if command -v fd > /dev/null 2>&1; then + while IFS= read -r file; do + handle_candidate_file "$file" + done < <( + fd --no-ignore --hidden --type f --max-depth "$max_depth" \ + -e dmg -e pkg -e mpkg -e iso -e xip -e zip \ + . "$path" 2> /dev/null || true + ) + else + while IFS= read -r file; do + handle_candidate_file "$file" + done < <( + find "$path" -maxdepth "$max_depth" -type f \ + \( -name '*.dmg' -o -name '*.pkg' -o -name '*.mpkg' \ + -o -name '*.iso' -o -name '*.xip' -o -name '*.zip' \) \ + 2> /dev/null || true + ) + fi +} + +scan_all_installers() { + for path in "${INSTALLER_SCAN_PATHS[@]}"; do + scan_installers_in_path "$path" + done +} + +# Initialize stats +declare -i total_deleted=0 +declare -i total_size_freed_kb=0 + +# Global arrays for installer data +declare -a INSTALLER_PATHS=() +declare -a INSTALLER_SIZES=() +declare -a INSTALLER_SOURCES=() +declare -a DISPLAY_NAMES=() + +# Get source directory display name - for example "Downloads" or "Desktop" +get_source_display() { + local file_path="$1" + local dir_path="${file_path%/*}" + + # Match against known paths and return friendly names + case "$dir_path" in + "$HOME/Downloads"*) echo "Downloads" ;; + "$HOME/Desktop"*) echo "Desktop" ;; + "$HOME/Documents"*) echo "Documents" ;; + "$HOME/Public"*) echo "Public" ;; + "$HOME/Library/Downloads"*) echo "Library" ;; + "/Users/Shared"*) echo "Shared" ;; + "$HOME/Library/Caches/Homebrew"*) echo "Homebrew" ;; + "$HOME/Library/Mobile Documents/com~apple~CloudDocs/Downloads"*) echo "iCloud" ;; + "$HOME/Library/Containers/com.apple.mail"*) echo "Mail" ;; + *"Telegram Desktop"*) echo "Telegram" ;; + *) echo "${dir_path##*/}" ;; + esac +} + +get_terminal_width() { + if [[ $TERMINAL_WIDTH -le 0 ]]; then + TERMINAL_WIDTH=$(tput cols 2> /dev/null || echo 80) + fi + echo "$TERMINAL_WIDTH" +} + +# Format installer display with alignment - similar to purge command +format_installer_display() { + local filename="$1" + local size_str="$2" + local source="$3" + + # Terminal width for alignment + local terminal_width + terminal_width=$(get_terminal_width) + local fixed_width=24 # Reserve for size and source + local available_width=$((terminal_width - fixed_width)) + + # Bounds check: 20-40 chars for filename + [[ $available_width -lt 20 ]] && available_width=20 + [[ $available_width -gt 40 ]] && available_width=40 + + # Truncate filename if needed + local truncated_name + truncated_name=$(truncate_by_display_width "$filename" "$available_width") + local current_width + current_width=$(get_display_width "$truncated_name") + local char_count=${#truncated_name} + local padding=$((available_width - current_width)) + local printf_width=$((char_count + padding)) + + # Format: "filename size | source" + printf "%-*s %8s | %-10s" "$printf_width" "$truncated_name" "$size_str" "$source" +} + +# Collect all installers with their metadata +collect_installers() { + # Clear previous results + INSTALLER_PATHS=() + INSTALLER_SIZES=() + INSTALLER_SOURCES=() + DISPLAY_NAMES=() + + # Start scanning with spinner + if [[ -t 1 ]]; then + start_inline_spinner "Scanning for installers..." + fi + + # Start debug session + debug_operation_start "Collect Installers" "Scanning for redundant installer files" + + # Scan all paths, deduplicate, and sort results + local -a all_files=() + + while IFS= read -r file; do + [[ -z "$file" ]] && continue + all_files+=("$file") + debug_file_action "Found installer" "$file" + done < <(scan_all_installers | sort -u) + + if [[ -t 1 ]]; then + stop_inline_spinner + fi + + if [[ ${#all_files[@]} -eq 0 ]]; then + if [[ "${IN_ALT_SCREEN:-0}" != "1" ]]; then + echo -e "${GREEN}${ICON_SUCCESS}${NC} Great! No installer files to clean" + fi + return 1 + fi + + # Calculate sizes with spinner + if [[ -t 1 ]]; then + start_inline_spinner "Calculating sizes..." + fi + + # Process each installer + for file in "${all_files[@]}"; do + # Calculate file size + local file_size=0 + if [[ -f "$file" ]]; then + file_size=$(get_file_size "$file") + fi + + # Get source directory + local source + source=$(get_source_display "$file") + + # Format human readable size + local size_human + size_human=$(bytes_to_human "$file_size") + + # Get display filename - strip Homebrew hash prefix if present + local display_name + display_name=$(basename "$file") + if [[ "$source" == "Homebrew" ]]; then + # Homebrew names often look like: sha256--name--version + # Strip the leading hash if it matches [0-9a-f]{64}-- + if [[ "$display_name" =~ ^[0-9a-f]{64}--(.*) ]]; then + display_name="${BASH_REMATCH[1]}" + fi + fi + + # Format display with alignment + local display + display=$(format_installer_display "$display_name" "$size_human" "$source") + + # Store installer data in parallel arrays + INSTALLER_PATHS+=("$file") + INSTALLER_SIZES+=("$file_size") + INSTALLER_SOURCES+=("$source") + DISPLAY_NAMES+=("$display") + done + + if [[ -t 1 ]]; then + stop_inline_spinner + fi + return 0 +} + +# Installer selector with Select All / Invert support +select_installers() { + local -a items=("$@") + local total_items=${#items[@]} + local clear_line=$'\r\033[2K' + + if [[ $total_items -eq 0 ]]; then + return 1 + fi + + # Calculate items per page based on terminal height + _get_items_per_page() { + local term_height=24 + if [[ -t 0 ]] || [[ -t 2 ]]; then + term_height=$(stty size < /dev/tty 2> /dev/null | awk '{print $1}') + fi + if [[ -z "$term_height" || $term_height -le 0 ]]; then + if command -v tput > /dev/null 2>&1; then + term_height=$(tput lines 2> /dev/null || echo "24") + else + term_height=24 + fi + fi + local reserved=6 + local available=$((term_height - reserved)) + if [[ $available -lt 3 ]]; then + echo 3 + elif [[ $available -gt 50 ]]; then + echo 50 + else + echo "$available" + fi + } + + local items_per_page=$(_get_items_per_page) + local cursor_pos=0 + local top_index=0 + + # Initialize selection (all unselected by default) + local -a selected=() + for ((i = 0; i < total_items; i++)); do + selected[i]=false + done + + local original_stty="" + if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then + original_stty=$(stty -g 2> /dev/null || echo "") + fi + + restore_terminal() { + trap - EXIT INT TERM + if [[ "${IN_ALT_SCREEN:-0}" == "1" ]]; then + leave_alt_screen + IN_ALT_SCREEN=0 + fi + show_cursor + if [[ -n "${original_stty:-}" ]]; then + stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || true + fi + } + + handle_interrupt() { + restore_terminal + exit 130 + } + + draw_menu() { + items_per_page=$(_get_items_per_page) + + local max_top_index=0 + if [[ $total_items -gt $items_per_page ]]; then + max_top_index=$((total_items - items_per_page)) + fi + if [[ $top_index -gt $max_top_index ]]; then + top_index=$max_top_index + fi + if [[ $top_index -lt 0 ]]; then + top_index=0 + fi + + local visible_count=$((total_items - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + if [[ $cursor_pos -gt $((visible_count - 1)) ]]; then + cursor_pos=$((visible_count - 1)) + fi + if [[ $cursor_pos -lt 0 ]]; then + cursor_pos=0 + fi + + printf "\033[H" + + # Calculate selected size and count + local selected_size=0 + local selected_count=0 + for ((i = 0; i < total_items; i++)); do + if [[ ${selected[i]} == true ]]; then + selected_size=$((selected_size + ${INSTALLER_SIZES[i]:-0})) + ((selected_count++)) + fi + done + local selected_human + selected_human=$(bytes_to_human "$selected_size") + + # Show position indicator if scrolling is needed + local scroll_indicator="" + if [[ $total_items -gt $items_per_page ]]; then + local current_pos=$((top_index + cursor_pos + 1)) + scroll_indicator=" ${GRAY}[${current_pos}/${total_items}]${NC}" + fi + + printf "${PURPLE_BOLD}Select Installers to Remove${NC}%s ${GRAY}, ${selected_human}, ${selected_count} selected${NC}\n" "$scroll_indicator" + printf "%s\n" "$clear_line" + + # Calculate visible range + local end_index=$((top_index + visible_count)) + + # Draw only visible items + for ((i = top_index; i < end_index; i++)); do + local checkbox="$ICON_EMPTY" + [[ ${selected[i]} == true ]] && checkbox="$ICON_SOLID" + local rel_pos=$((i - top_index)) + if [[ $rel_pos -eq $cursor_pos ]]; then + printf "%s${CYAN}${ICON_ARROW} %s %s${NC}\n" "$clear_line" "$checkbox" "${items[i]}" + else + printf "%s %s %s\n" "$clear_line" "$checkbox" "${items[i]}" + fi + done + + # Fill empty slots + local items_shown=$visible_count + for ((i = items_shown; i < items_per_page; i++)); do + printf "%s\n" "$clear_line" + done + + printf "%s\n" "$clear_line" + printf "%s${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space Select | Enter Confirm | A All | I Invert | Q Quit${NC}\n" "$clear_line" + } + + trap restore_terminal EXIT + trap handle_interrupt INT TERM + stty -echo -icanon intr ^C 2> /dev/null || true + hide_cursor + if [[ -t 1 ]]; then + printf "\033[2J\033[H" >&2 + fi + + # Main loop + while true; do + draw_menu + + IFS= read -r -s -n1 key || key="" + case "$key" in + $'\x1b') + IFS= read -r -s -n1 -t 1 key2 || key2="" + if [[ "$key2" == "[" ]]; then + IFS= read -r -s -n1 -t 1 key3 || key3="" + case "$key3" in + A) # Up arrow + if [[ $cursor_pos -gt 0 ]]; then + ((cursor_pos--)) + elif [[ $top_index -gt 0 ]]; then + ((top_index--)) + fi + ;; + B) # Down arrow + local absolute_index=$((top_index + cursor_pos)) + local last_index=$((total_items - 1)) + if [[ $absolute_index -lt $last_index ]]; then + local visible_count=$((total_items - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then + ((cursor_pos++)) + elif [[ $((top_index + visible_count)) -lt $total_items ]]; then + ((top_index++)) + fi + fi + ;; + esac + else + # ESC alone + restore_terminal + return 1 + fi + ;; + " ") # Space - toggle current item + local idx=$((top_index + cursor_pos)) + if [[ ${selected[idx]} == true ]]; then + selected[idx]=false + else + selected[idx]=true + fi + ;; + "a" | "A") # Select all + for ((i = 0; i < total_items; i++)); do + selected[i]=true + done + ;; + "i" | "I") # Invert selection + for ((i = 0; i < total_items; i++)); do + if [[ ${selected[i]} == true ]]; then + selected[i]=false + else + selected[i]=true + fi + done + ;; + "q" | "Q" | $'\x03') # Quit or Ctrl-C + restore_terminal + return 1 + ;; + "" | $'\n' | $'\r') # Enter - confirm + MOLE_SELECTION_RESULT="" + for ((i = 0; i < total_items; i++)); do + if [[ ${selected[i]} == true ]]; then + [[ -n "$MOLE_SELECTION_RESULT" ]] && MOLE_SELECTION_RESULT+="," + MOLE_SELECTION_RESULT+="$i" + fi + done + restore_terminal + return 0 + ;; + esac + done +} + +# Show menu for user selection +show_installer_menu() { + if [[ ${#DISPLAY_NAMES[@]} -eq 0 ]]; then + return 1 + fi + + echo "" + + MOLE_SELECTION_RESULT="" + if ! select_installers "${DISPLAY_NAMES[@]}"; then + return 1 + fi + + return 0 +} + +# Delete selected installers +delete_selected_installers() { + # Parse selection indices + local -a selected_indices=() + [[ -n "$MOLE_SELECTION_RESULT" ]] && IFS=',' read -ra selected_indices <<< "$MOLE_SELECTION_RESULT" + + if [[ ${#selected_indices[@]} -eq 0 ]]; then + return 1 + fi + + # Calculate total size for confirmation + local confirm_size=0 + for idx in "${selected_indices[@]}"; do + if [[ "$idx" =~ ^[0-9]+$ ]] && [[ $idx -lt ${#INSTALLER_SIZES[@]} ]]; then + confirm_size=$((confirm_size + ${INSTALLER_SIZES[$idx]:-0})) + fi + done + local confirm_human + confirm_human=$(bytes_to_human "$confirm_size") + + # Show files to be deleted + echo -e "${PURPLE_BOLD}Files to be removed:${NC}" + for idx in "${selected_indices[@]}"; do + if [[ "$idx" =~ ^[0-9]+$ ]] && [[ $idx -lt ${#INSTALLER_PATHS[@]} ]]; then + local file_path="${INSTALLER_PATHS[$idx]}" + local file_size="${INSTALLER_SIZES[$idx]}" + local size_human + size_human=$(bytes_to_human "$file_size") + echo -e " ${GREEN}${ICON_SUCCESS}${NC} $(basename "$file_path") ${GRAY}, ${size_human}${NC}" + fi + done + + # Confirm deletion + echo "" + echo -ne "${PURPLE}${ICON_ARROW}${NC} Delete ${#selected_indices[@]} installers, ${confirm_human} ${GREEN}Enter${NC} confirm, ${GRAY}ESC${NC} cancel: " + + IFS= read -r -s -n1 confirm || confirm="" + case "$confirm" in + $'\e' | q | Q) + return 1 + ;; + "" | $'\n' | $'\r') + printf "\r\033[K" # Clear prompt line + echo "" # Single line break + ;; + *) + return 1 + ;; + esac + + # Delete each selected installer with spinner + total_deleted=0 + total_size_freed_kb=0 + + if [[ -t 1 ]]; then + start_inline_spinner "Removing installers..." + fi + + for idx in "${selected_indices[@]}"; do + if [[ ! "$idx" =~ ^[0-9]+$ ]] || [[ $idx -ge ${#INSTALLER_PATHS[@]} ]]; then + continue + fi + + local file_path="${INSTALLER_PATHS[$idx]}" + local file_size="${INSTALLER_SIZES[$idx]}" + + # Validate path before deletion + if ! validate_path_for_deletion "$file_path"; then + continue + fi + + # Delete the file + if safe_remove "$file_path" true; then + total_size_freed_kb=$((total_size_freed_kb + ((file_size + 1023) / 1024))) + total_deleted=$((total_deleted + 1)) + fi + done + + if [[ -t 1 ]]; then + stop_inline_spinner + fi + + return 0 +} + +# Perform the installers cleanup +perform_installers() { + # Enter alt screen for scanning and selection + if [[ -t 1 ]]; then + enter_alt_screen + IN_ALT_SCREEN=1 + printf "\033[2J\033[H" >&2 + fi + + # Collect installers + if ! collect_installers; then + if [[ -t 1 ]]; then + leave_alt_screen + IN_ALT_SCREEN=0 + fi + printf '\n' + echo -e "${GREEN}${ICON_SUCCESS}${NC} Great! No installer files to clean" + printf '\n' + return 2 # Nothing to clean + fi + + # Show menu + if ! show_installer_menu; then + if [[ -t 1 ]]; then + leave_alt_screen + IN_ALT_SCREEN=0 + fi + return 1 # User cancelled + fi + + # Leave alt screen before deletion (so confirmation and results are on main screen) + if [[ -t 1 ]]; then + leave_alt_screen + IN_ALT_SCREEN=0 + fi + + # Delete selected + if ! delete_selected_installers; then + return 1 + fi + + return 0 +} + +show_summary() { + local summary_heading="Installers cleaned" + local -a summary_details=() + local dry_run_mode="${MOLE_DRY_RUN:-0}" + + if [[ "$dry_run_mode" == "1" ]]; then + summary_heading="Dry run complete - no changes made" + fi + + if [[ $total_deleted -gt 0 ]]; then + local freed_mb + freed_mb=$(echo "$total_size_freed_kb" | awk '{printf "%.2f", $1/1024}') + + if [[ "$dry_run_mode" == "1" ]]; then + summary_details+=("Would remove ${GREEN}$total_deleted${NC} installers, free ${GREEN}${freed_mb}MB${NC}") + else + summary_details+=("Removed ${GREEN}$total_deleted${NC} installers, freed ${GREEN}${freed_mb}MB${NC}") + summary_details+=("Your Mac is cleaner now!") + fi + else + summary_details+=("No installers were removed") + fi + + print_summary_block "$summary_heading" "${summary_details[@]}" + printf '\n' +} + +main() { + for arg in "$@"; do + case "$arg" in + "--help" | "-h") + show_installer_help + exit 0 + ;; + "--debug") + export MO_DEBUG=1 + ;; + "--dry-run" | "-n") + export MOLE_DRY_RUN=1 + ;; + *) + echo "Unknown option: $arg" + exit 1 + ;; + esac + done + + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + echo -e "${YELLOW}${ICON_DRY_RUN} DRY RUN MODE${NC}, No installer files will be removed" + printf '\n' + fi + + hide_cursor + perform_installers + local exit_code=$? + show_cursor + + case $exit_code in + 0) + show_summary + ;; + 1) + printf '\n' + ;; + 2) + # Already handled by collect_installers + ;; + esac + + return 0 +} + +# Only run main if not in test mode +if [[ "${MOLE_TEST_MODE:-0}" != "1" ]]; then + main "$@" +fi diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/optimize.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/optimize.sh new file mode 100755 index 0000000..1a24451 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/optimize.sh @@ -0,0 +1,521 @@ +#!/bin/bash +# Mole - Optimize command. +# Runs system maintenance checks and fixes. +# Supports dry-run where applicable. + +set -euo pipefail + +# Fix locale issues. +export LC_ALL=C +export LANG=C + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +source "$SCRIPT_DIR/lib/core/common.sh" + +# Clean temp files on exit. +trap cleanup_temp_files EXIT INT TERM +source "$SCRIPT_DIR/lib/core/sudo.sh" +source "$SCRIPT_DIR/lib/manage/update.sh" +source "$SCRIPT_DIR/lib/manage/autofix.sh" +source "$SCRIPT_DIR/lib/optimize/maintenance.sh" +source "$SCRIPT_DIR/lib/optimize/tasks.sh" +source "$SCRIPT_DIR/lib/check/health_json.sh" +source "$SCRIPT_DIR/lib/check/all.sh" +source "$SCRIPT_DIR/lib/manage/whitelist.sh" + +print_header() { + printf '\n' + echo -e "${PURPLE_BOLD}Optimize and Check${NC}" +} + +run_system_checks() { + # Skip checks in dry-run mode. + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + return 0 + fi + + unset AUTO_FIX_SUMMARY AUTO_FIX_DETAILS + unset MOLE_SECURITY_FIXES_SHOWN + unset MOLE_SECURITY_FIXES_SKIPPED + echo "" + + check_all_updates + echo "" + + check_system_health + echo "" + + check_all_security + if ask_for_security_fixes; then + perform_security_fixes + fi + if [[ "${MOLE_SECURITY_FIXES_SKIPPED:-}" != "true" ]]; then + echo "" + fi + + check_all_config + echo "" + + show_suggestions + + if ask_for_updates; then + perform_updates + fi + if ask_for_auto_fix; then + perform_auto_fix + fi +} + +show_optimization_summary() { + local safe_count="${OPTIMIZE_SAFE_COUNT:-0}" + local confirm_count="${OPTIMIZE_CONFIRM_COUNT:-0}" + if ((safe_count == 0 && confirm_count == 0)) && [[ -z "${AUTO_FIX_SUMMARY:-}" ]]; then + return + fi + + local summary_title + local -a summary_details=() + local total_applied=$((safe_count + confirm_count)) + + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + summary_title="Dry Run Complete, No Changes Made" + summary_details+=("Would apply ${YELLOW}${total_applied:-0}${NC} optimizations") + summary_details+=("Run without ${YELLOW}--dry-run${NC} to apply these changes") + else + summary_title="Optimization and Check Complete" + + # Build statistics summary + local -a stats=() + local cache_kb="${OPTIMIZE_CACHE_CLEANED_KB:-0}" + local db_count="${OPTIMIZE_DATABASES_COUNT:-0}" + local config_count="${OPTIMIZE_CONFIGS_REPAIRED:-0}" + + if [[ "$cache_kb" =~ ^[0-9]+$ ]] && [[ "$cache_kb" -gt 0 ]]; then + local cache_human=$(bytes_to_human "$((cache_kb * 1024))") + stats+=("${cache_human} cache cleaned") + fi + + if [[ "$db_count" =~ ^[0-9]+$ ]] && [[ "$db_count" -gt 0 ]]; then + stats+=("${db_count} databases optimized") + fi + + if [[ "$config_count" =~ ^[0-9]+$ ]] && [[ "$config_count" -gt 0 ]]; then + stats+=("${config_count} configs repaired") + fi + + # Build first summary line with most important stat only + local key_stat="" + if [[ "$cache_kb" =~ ^[0-9]+$ ]] && [[ "$cache_kb" -gt 0 ]]; then + local cache_human=$(bytes_to_human "$((cache_kb * 1024))") + key_stat="${cache_human} cache cleaned" + elif [[ "$db_count" =~ ^[0-9]+$ ]] && [[ "$db_count" -gt 0 ]]; then + key_stat="${db_count} databases optimized" + elif [[ "$config_count" =~ ^[0-9]+$ ]] && [[ "$config_count" -gt 0 ]]; then + key_stat="${config_count} configs repaired" + fi + + if [[ -n "$key_stat" ]]; then + summary_details+=("Applied ${GREEN}${total_applied:-0}${NC} optimizations, ${key_stat}") + else + summary_details+=("Applied ${GREEN}${total_applied:-0}${NC} optimizations, all services tuned") + fi + + local summary_line3="" + if [[ -n "${AUTO_FIX_SUMMARY:-}" ]]; then + summary_line3="${AUTO_FIX_SUMMARY}" + if [[ -n "${AUTO_FIX_DETAILS:-}" ]]; then + local detail_join + detail_join=$(echo "${AUTO_FIX_DETAILS}" | paste -sd ", " -) + [[ -n "$detail_join" ]] && summary_line3+=": ${detail_join}" + fi + summary_details+=("$summary_line3") + fi + summary_details+=("System fully optimized") + fi + + print_summary_block "$summary_title" "${summary_details[@]}" +} + +show_system_health() { + local health_json="$1" + + local mem_used=$(echo "$health_json" | jq -r '.memory_used_gb // 0' 2> /dev/null || echo "0") + local mem_total=$(echo "$health_json" | jq -r '.memory_total_gb // 0' 2> /dev/null || echo "0") + local disk_used=$(echo "$health_json" | jq -r '.disk_used_gb // 0' 2> /dev/null || echo "0") + local disk_total=$(echo "$health_json" | jq -r '.disk_total_gb // 0' 2> /dev/null || echo "0") + local disk_percent=$(echo "$health_json" | jq -r '.disk_used_percent // 0' 2> /dev/null || echo "0") + local uptime=$(echo "$health_json" | jq -r '.uptime_days // 0' 2> /dev/null || echo "0") + + mem_used=${mem_used:-0} + mem_total=${mem_total:-0} + disk_used=${disk_used:-0} + disk_total=${disk_total:-0} + disk_percent=${disk_percent:-0} + uptime=${uptime:-0} + + printf "${ICON_ADMIN} System %.0f/%.0f GB RAM | %.0f/%.0f GB Disk | Uptime %.0fd\n" \ + "$mem_used" "$mem_total" "$disk_used" "$disk_total" "$uptime" +} + +parse_optimizations() { + local health_json="$1" + echo "$health_json" | jq -c '.optimizations[]' 2> /dev/null +} + +announce_action() { + local name="$1" + local desc="$2" + local kind="$3" + + if [[ "${FIRST_ACTION:-true}" == "true" ]]; then + export FIRST_ACTION=false + else + echo "" + fi + echo -e "${BLUE}${ICON_ARROW} ${name}${NC}" +} + +touchid_configured() { + local pam_file="/etc/pam.d/sudo" + [[ -f "$pam_file" ]] && grep -q "pam_tid.so" "$pam_file" 2> /dev/null +} + +touchid_supported() { + if command -v bioutil > /dev/null 2>&1; then + if bioutil -r 2> /dev/null | grep -qi "Touch ID"; then + return 0 + fi + fi + + # Fallback: Apple Silicon Macs usually have Touch ID. + if [[ "$(uname -m)" == "arm64" ]]; then + return 0 + fi + return 1 +} + +cleanup_path() { + local raw_path="$1" + local label="$2" + + local expanded_path="${raw_path/#\~/$HOME}" + if [[ ! -e "$expanded_path" ]]; then + echo -e "${GREEN}${ICON_SUCCESS}${NC} $label" + return + fi + if should_protect_path "$expanded_path"; then + echo -e "${GRAY}${ICON_WARNING}${NC} Protected $label" + return + fi + + local size_kb + size_kb=$(get_path_size_kb "$expanded_path") + local size_display="" + if [[ "$size_kb" =~ ^[0-9]+$ && "$size_kb" -gt 0 ]]; then + size_display=$(bytes_to_human "$((size_kb * 1024))") + fi + + local removed=false + if safe_remove "$expanded_path" true; then + removed=true + elif request_sudo_access "Removing $label requires admin access"; then + if safe_sudo_remove "$expanded_path"; then + removed=true + fi + fi + + if [[ "$removed" == "true" ]]; then + if [[ -n "$size_display" ]]; then + echo -e "${GREEN}${ICON_SUCCESS}${NC} $label${NC}, ${GREEN}${size_display}${NC}" + else + echo -e "${GREEN}${ICON_SUCCESS}${NC} $label" + fi + else + echo -e "${GRAY}${ICON_WARNING}${NC} Skipped $label${NC}" + echo -e "${GRAY}${ICON_REVIEW}${NC} ${GRAY}Grant Full Disk Access to your terminal, then retry${NC}" + fi +} + +ensure_directory() { + local raw_path="$1" + local expanded_path="${raw_path/#\~/$HOME}" + ensure_user_dir "$expanded_path" +} + +declare -a SECURITY_FIXES=() + +collect_security_fix_actions() { + SECURITY_FIXES=() + if [[ "${FIREWALL_DISABLED:-}" == "true" ]]; then + if ! is_whitelisted "firewall"; then + SECURITY_FIXES+=("firewall|Enable macOS firewall") + fi + fi + if [[ "${GATEKEEPER_DISABLED:-}" == "true" ]]; then + if ! is_whitelisted "gatekeeper"; then + SECURITY_FIXES+=("gatekeeper|Enable Gatekeeper, app download protection") + fi + fi + if touchid_supported && ! touchid_configured; then + if ! is_whitelisted "check_touchid"; then + SECURITY_FIXES+=("touchid|Enable Touch ID for sudo") + fi + fi + + ((${#SECURITY_FIXES[@]} > 0)) +} + +ask_for_security_fixes() { + if ! collect_security_fix_actions; then + return 1 + fi + + echo "" + echo -e "${BLUE}SECURITY FIXES${NC}" + for entry in "${SECURITY_FIXES[@]}"; do + IFS='|' read -r _ label <<< "$entry" + echo -e " ${ICON_LIST} $label" + done + echo "" + export MOLE_SECURITY_FIXES_SHOWN=true + echo -ne "${GRAY}${ICON_REVIEW}${NC} ${YELLOW}Apply now?${NC} ${GRAY}Enter confirm / Space cancel${NC}: " + + local key + if ! key=$(read_key); then + export MOLE_SECURITY_FIXES_SKIPPED=true + echo -e "\n ${GRAY}${ICON_WARNING}${NC} Security fixes skipped" + echo "" + return 1 + fi + + if [[ "$key" == "ENTER" ]]; then + echo "" + return 0 + else + export MOLE_SECURITY_FIXES_SKIPPED=true + echo -e "\n ${GRAY}${ICON_WARNING}${NC} Security fixes skipped" + echo "" + return 1 + fi +} + +apply_firewall_fix() { + if sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate on > /dev/null 2>&1; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Firewall enabled" + FIREWALL_DISABLED=false + return 0 + fi + echo -e " ${GRAY}${ICON_WARNING}${NC} Failed to enable firewall, check permissions" + return 1 +} + +apply_gatekeeper_fix() { + if sudo spctl --master-enable 2> /dev/null; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Gatekeeper enabled" + GATEKEEPER_DISABLED=false + return 0 + fi + echo -e " ${GRAY}${ICON_WARNING}${NC} Failed to enable Gatekeeper" + return 1 +} + +apply_touchid_fix() { + if "$SCRIPT_DIR/bin/touchid.sh" enable; then + return 0 + fi + return 1 +} + +perform_security_fixes() { + if ! ensure_sudo_session "Security changes require admin access"; then + echo -e "${GRAY}${ICON_WARNING}${NC} Skipped security fixes, sudo denied" + return 1 + fi + + local applied=0 + for entry in "${SECURITY_FIXES[@]}"; do + IFS='|' read -r action _ <<< "$entry" + case "$action" in + firewall) + apply_firewall_fix && ((applied++)) + ;; + gatekeeper) + apply_gatekeeper_fix && ((applied++)) + ;; + touchid) + apply_touchid_fix && ((applied++)) + ;; + esac + done + + if ((applied > 0)); then + log_success "Security settings updated" + fi + SECURITY_FIXES=() +} + +cleanup_all() { + stop_inline_spinner 2> /dev/null || true + stop_sudo_session + cleanup_temp_files + # Log session end + log_operation_session_end "optimize" "${OPTIMIZE_SAFE_COUNT:-0}" "0" +} + +handle_interrupt() { + cleanup_all + exit 130 +} + +main() { + # Set current command for operation logging + export MOLE_CURRENT_COMMAND="optimize" + + local health_json + for arg in "$@"; do + case "$arg" in + "--help" | "-h") + show_optimize_help + exit 0 + ;; + "--debug") + export MO_DEBUG=1 + ;; + "--dry-run") + export MOLE_DRY_RUN=1 + ;; + "--whitelist") + manage_whitelist "optimize" + exit 0 + ;; + esac + done + + log_operation_session_start "optimize" + + trap cleanup_all EXIT + trap handle_interrupt INT TERM + + if [[ -t 1 ]]; then + clear_screen + fi + print_header + + # Dry-run indicator. + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + echo -e "${YELLOW}${ICON_DRY_RUN} DRY RUN MODE${NC}, No files will be modified\n" + fi + + if ! command -v jq > /dev/null 2>&1; then + echo -e "${YELLOW}${ICON_ERROR}${NC} Missing dependency: jq" + echo -e "${GRAY}Install with: ${GREEN}brew install jq${NC}" + exit 1 + fi + + if ! command -v bc > /dev/null 2>&1; then + echo -e "${YELLOW}${ICON_ERROR}${NC} Missing dependency: bc" + echo -e "${GRAY}Install with: ${GREEN}brew install bc${NC}" + exit 1 + fi + + if [[ -t 1 ]]; then + start_inline_spinner "Collecting system info..." + fi + + if ! health_json=$(generate_health_json 2> /dev/null); then + if [[ -t 1 ]]; then + stop_inline_spinner + fi + echo "" + log_error "Failed to collect system health data" + exit 1 + fi + + if ! echo "$health_json" | jq empty 2> /dev/null; then + if [[ -t 1 ]]; then + stop_inline_spinner + fi + echo "" + log_error "Invalid system health data format" + echo -e "${GRAY}${ICON_REVIEW}${NC} Check if jq, awk, sysctl, and df commands are available" + exit 1 + fi + + if [[ -t 1 ]]; then + stop_inline_spinner + fi + + show_system_health "$health_json" + + load_whitelist "optimize" + if [[ ${#CURRENT_WHITELIST_PATTERNS[@]} -gt 0 ]]; then + local count=${#CURRENT_WHITELIST_PATTERNS[@]} + if [[ $count -le 3 ]]; then + local patterns_list=$( + IFS=', ' + echo "${CURRENT_WHITELIST_PATTERNS[*]}" + ) + echo -e "${ICON_ADMIN} Active Whitelist: ${patterns_list}" + fi + fi + + local -a safe_items=() + local -a confirm_items=() + local opts_file + opts_file=$(mktemp_file) + parse_optimizations "$health_json" > "$opts_file" + + while IFS= read -r opt_json; do + [[ -z "$opt_json" ]] && continue + + local name=$(echo "$opt_json" | jq -r '.name') + local desc=$(echo "$opt_json" | jq -r '.description') + local action=$(echo "$opt_json" | jq -r '.action') + local path=$(echo "$opt_json" | jq -r '.path // ""') + local safe=$(echo "$opt_json" | jq -r '.safe') + + local item="${name}|${desc}|${action}|${path}" + + if [[ "$safe" == "true" ]]; then + safe_items+=("$item") + else + confirm_items+=("$item") + fi + done < "$opts_file" + + echo "" + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + ensure_sudo_session "System optimization requires admin access" || true + fi + + export FIRST_ACTION=true + if [[ ${#safe_items[@]} -gt 0 ]]; then + for item in "${safe_items[@]}"; do + IFS='|' read -r name desc action path <<< "$item" + announce_action "$name" "$desc" "safe" + execute_optimization "$action" "$path" + done + fi + + if [[ ${#confirm_items[@]} -gt 0 ]]; then + for item in "${confirm_items[@]}"; do + IFS='|' read -r name desc action path <<< "$item" + announce_action "$name" "$desc" "confirm" + execute_optimization "$action" "$path" + done + fi + + local safe_count=${#safe_items[@]} + local confirm_count=${#confirm_items[@]} + + run_system_checks + + export OPTIMIZE_SAFE_COUNT=$safe_count + export OPTIMIZE_CONFIRM_COUNT=$confirm_count + + show_optimization_summary + + printf '\n' +} + +main "$@" diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/purge.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/purge.sh new file mode 100755 index 0000000..cd373bd --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/purge.sh @@ -0,0 +1,294 @@ +#!/bin/bash +# Mole - Purge command. +# Cleans heavy project build artifacts. +# Interactive selection by project. + +set -euo pipefail + +# Fix locale issues (avoid Perl warnings on non-English systems) +export LC_ALL=C +export LANG=C + +# Get script directory and source common functions +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/../lib/core/common.sh" + +# Set up cleanup trap for temporary files +trap cleanup_temp_files EXIT INT TERM +source "$SCRIPT_DIR/../lib/core/log.sh" +source "$SCRIPT_DIR/../lib/clean/project.sh" + +# Configuration +CURRENT_SECTION="" + +# Section management +start_section() { + local section_name="$1" + CURRENT_SECTION="$section_name" + printf '\n' + echo -e "${BLUE}━━━ ${section_name} ━━━${NC}" +} + +end_section() { + CURRENT_SECTION="" +} + +# Note activity for export list +note_activity() { + if [[ -n "$CURRENT_SECTION" ]]; then + printf '%s\n' "$CURRENT_SECTION" >> "$EXPORT_LIST_FILE" + fi +} + +# Main purge function +start_purge() { + # Set current command for operation logging + export MOLE_CURRENT_COMMAND="purge" + log_operation_session_start "purge" + + # Clear screen for better UX + if [[ -t 1 ]]; then + printf '\033[2J\033[H' + fi + + # Initialize stats file in user cache directory + local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole" + ensure_user_dir "$stats_dir" + ensure_user_file "$stats_dir/purge_stats" + ensure_user_file "$stats_dir/purge_count" + ensure_user_file "$stats_dir/purge_scanning" + echo "0" > "$stats_dir/purge_stats" + echo "0" > "$stats_dir/purge_count" + echo "" > "$stats_dir/purge_scanning" +} + +# Perform the purge +perform_purge() { + local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole" + local monitor_pid="" + + # Cleanup function - use flag to prevent duplicate execution + _cleanup_done=false + cleanup_monitor() { + # Prevent multiple cleanup executions from trap conflicts + [[ "$_cleanup_done" == "true" ]] && return + _cleanup_done=true + + # Remove scanning file to stop monitor + rm -f "$stats_dir/purge_scanning" 2> /dev/null || true + + if [[ -n "$monitor_pid" ]]; then + kill "$monitor_pid" 2> /dev/null || true + wait "$monitor_pid" 2> /dev/null || true + fi + if [[ -t 1 ]]; then + printf '\r\033[2K\n' > /dev/tty 2> /dev/null || true + fi + } + + # Ensure Ctrl-C/TERM always stops spinner(s) and exits immediately. + handle_interrupt() { + cleanup_monitor + stop_inline_spinner 2> /dev/null || true + show_cursor 2> /dev/null || true + printf '\n' >&2 + exit 130 + } + + # Set up trap for cleanup + abort + trap handle_interrupt INT TERM + + # Show scanning with spinner below the title line + if [[ -t 1 ]]; then + # Print title ONCE with newline; spinner occupies the line below + printf '%s\n' "${PURPLE_BOLD}Purge Project Artifacts${NC}" + + # Capture terminal width in parent (most reliable before forking) + local _parent_cols=80 + local _stty_out + if _stty_out=$(stty size < /dev/tty 2> /dev/null); then + _parent_cols="${_stty_out##* }" # "rows cols" -> take cols + else + _parent_cols=$(tput cols 2> /dev/null || echo 80) + fi + [[ "$_parent_cols" =~ ^[0-9]+$ && $_parent_cols -gt 0 ]] || _parent_cols=80 + + # Start background monitor: writes directly to /dev/tty to avoid stdout state issues + ( + local spinner_chars="|/-\\" + local spinner_idx=0 + local last_path="" + # Use parent-captured width; never refresh inside the loop (avoids unreliable tput in bg) + local term_cols="$_parent_cols" + # Visible prefix "| Scanning " = 11 chars; reserve 25 total for safety margin + local max_path_len=$((term_cols - 25)) + ((max_path_len < 5)) && max_path_len=5 + + # Set up trap to exit cleanly (erase the spinner line via /dev/tty) + trap 'printf "\r\033[2K" >/dev/tty 2>/dev/null; exit 0' INT TERM + + # Truncate path to guaranteed fit + truncate_path() { + local path="$1" + if [[ ${#path} -le $max_path_len ]]; then + echo "$path" + return + fi + local side_len=$(((max_path_len - 3) / 2)) + echo "${path:0:$side_len}...${path: -$side_len}" + } + + while [[ -f "$stats_dir/purge_scanning" ]]; do + local current_path + current_path=$(cat "$stats_dir/purge_scanning" 2> /dev/null || echo "") + + if [[ -n "$current_path" ]]; then + local display_path="${current_path/#$HOME/~}" + display_path=$(truncate_path "$display_path") + last_path="$display_path" + fi + + local spin_char="${spinner_chars:$spinner_idx:1}" + spinner_idx=$(((spinner_idx + 1) % ${#spinner_chars})) + + # Write directly to /dev/tty: \033[2K clears entire current line, \r goes to start + if [[ -n "$last_path" ]]; then + printf '\r\033[2K%s %sScanning %s%s' \ + "${BLUE}${spin_char}${NC}" \ + "${GRAY}" "$last_path" "${NC}" > /dev/tty 2> /dev/null + else + printf '\r\033[2K%s %sScanning...%s' \ + "${BLUE}${spin_char}${NC}" \ + "${GRAY}" "${NC}" > /dev/tty 2> /dev/null + fi + + sleep 0.05 + done + printf '\r\033[2K' > /dev/tty 2> /dev/null + exit 0 + ) & + monitor_pid=$! + else + echo -e "${PURPLE_BOLD}Purge Project Artifacts${NC}" + fi + + clean_project_artifacts + local exit_code=$? + + # Clean up + trap - INT TERM + cleanup_monitor + + # Exit codes: + # 0 = success, show summary + # 1 = user cancelled + # 2 = nothing to clean + if [[ $exit_code -ne 0 ]]; then + return 0 + fi + + # Final summary (matching clean.sh format) + echo "" + + local summary_heading="Purge complete" + local -a summary_details=() + local total_size_cleaned=0 + local total_items_cleaned=0 + + if [[ -f "$stats_dir/purge_stats" ]]; then + total_size_cleaned=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0") + rm -f "$stats_dir/purge_stats" + fi + + if [[ -f "$stats_dir/purge_count" ]]; then + total_items_cleaned=$(cat "$stats_dir/purge_count" 2> /dev/null || echo "0") + rm -f "$stats_dir/purge_count" + fi + + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + summary_heading="Dry run complete - no changes made" + fi + + if [[ $total_size_cleaned -gt 0 ]]; then + local freed_size_human + freed_size_human=$(bytes_to_human_kb "$total_size_cleaned") + + local summary_line="Space freed: ${GREEN}${freed_size_human}${NC}" + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + summary_line="Would free: ${GREEN}${freed_size_human}${NC}" + fi + [[ $total_items_cleaned -gt 0 ]] && summary_line+=" | Items: $total_items_cleaned" + summary_line+=" | Free: $(get_free_space)" + summary_details+=("$summary_line") + else + summary_details+=("No old project artifacts to clean.") + summary_details+=("Free space: $(get_free_space)") + fi + + # Log session end + log_operation_session_end "purge" "${total_items_cleaned:-0}" "${total_size_cleaned:-0}" + + print_summary_block "$summary_heading" "${summary_details[@]}" + printf '\n' +} + +# Show help message +show_help() { + echo -e "${PURPLE_BOLD}Mole Purge${NC}, Clean old project build artifacts" + echo "" + echo -e "${YELLOW}Usage:${NC} mo purge [options]" + echo "" + echo -e "${YELLOW}Options:${NC}" + echo " --paths Edit custom scan directories" + echo " --dry-run Preview purge actions without making changes" + echo " --debug Enable debug logging" + echo " --help Show this help message" + echo "" + echo -e "${YELLOW}Default Paths:${NC}" + for path in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do + echo " * $path" + done +} + +# Main entry point +main() { + # Set up signal handling + trap 'show_cursor; exit 130' INT TERM + + # Parse arguments + for arg in "$@"; do + case "$arg" in + "--paths") + source "$SCRIPT_DIR/../lib/manage/purge_paths.sh" + manage_purge_paths + exit 0 + ;; + "--help") + show_help + exit 0 + ;; + "--debug") + export MO_DEBUG=1 + ;; + "--dry-run" | "-n") + export MOLE_DRY_RUN=1 + ;; + *) + echo "Unknown option: $arg" + echo "Use 'mo purge --help' for usage information" + exit 1 + ;; + esac + done + + start_purge + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + echo -e "${YELLOW}${ICON_DRY_RUN} DRY RUN MODE${NC}, No project artifacts will be removed" + printf '\n' + fi + hide_cursor + perform_purge + show_cursor +} + +main "$@" diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/status.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/status.sh new file mode 100755 index 0000000..afe6d13 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/status.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# Mole - Status command. +# Runs the Go system status panel. +# Shows live system metrics. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +GO_BIN="$SCRIPT_DIR/status-go" +if [[ -x "$GO_BIN" ]]; then + exec "$GO_BIN" "$@" +fi + +echo "Bundled status binary not found. Please reinstall Mole or run mo update to restore it." >&2 +exit 1 diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/touchid.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/touchid.sh new file mode 100755 index 0000000..76b5cc2 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/touchid.sh @@ -0,0 +1,382 @@ +#!/bin/bash +# Mole - Touch ID command. +# Configures sudo with Touch ID. +# Guided toggle with safety checks. + +set -euo pipefail + +# Determine script location and source common functions +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +LIB_DIR="$(cd "$SCRIPT_DIR/../lib" && pwd)" + +# Source common functions +# shellcheck source=../lib/core/common.sh +source "$LIB_DIR/core/common.sh" + +# Set up global cleanup trap +trap cleanup_temp_files EXIT INT TERM + +PAM_SUDO_FILE="${MOLE_PAM_SUDO_FILE:-/etc/pam.d/sudo}" +PAM_SUDO_LOCAL_FILE="${MOLE_PAM_SUDO_LOCAL_FILE:-$(dirname "$PAM_SUDO_FILE")/sudo_local}" +readonly PAM_SUDO_FILE +readonly PAM_SUDO_LOCAL_FILE +readonly PAM_TID_LINE="auth sufficient pam_tid.so" + +# Check if Touch ID is already configured +is_touchid_configured() { + # Check sudo_local first + if [[ -f "$PAM_SUDO_LOCAL_FILE" ]]; then + grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE" 2> /dev/null && return 0 + fi + + # Fallback to standard sudo file + if [[ ! -f "$PAM_SUDO_FILE" ]]; then + return 1 + fi + grep -q "pam_tid.so" "$PAM_SUDO_FILE" 2> /dev/null +} + +# Check if system supports Touch ID +supports_touchid() { + # Check if bioutil exists and has Touch ID capability + if command -v bioutil &> /dev/null; then + bioutil -r 2> /dev/null | grep -q "Touch ID" && return 0 + fi + + # Fallback: check if running on Apple Silicon or modern Intel Mac + local arch + arch=$(uname -m) + if [[ "$arch" == "arm64" ]]; then + return 0 + fi + + # For Intel Macs, check if it's 2018 or later (approximation) + local model_year + model_year=$(system_profiler SPHardwareDataType 2> /dev/null | grep "Model Identifier" | grep -o "[0-9]\{4\}" | head -1) + if [[ -n "$model_year" ]] && [[ "$model_year" -ge 2018 ]]; then + return 0 + fi + + return 1 +} + +touchid_dry_run_enabled() { + [[ "${MOLE_DRY_RUN:-0}" == "1" ]] +} + +# Show current Touch ID status +show_status() { + if is_touchid_configured; then + echo -e "${GREEN}${ICON_SUCCESS}${NC} Touch ID is enabled for sudo" + else + echo -e "${YELLOW}☻${NC} Touch ID is not configured for sudo" + fi +} + +# Enable Touch ID for sudo +enable_touchid() { + # Cleanup trap handled by global EXIT trap + local temp_file="" + + if touchid_dry_run_enabled; then + if is_touchid_configured; then + echo -e "${GREEN}${ICON_SUCCESS} Touch ID is already enabled, no changes needed${NC}" + else + echo -e "${GREEN}${ICON_SUCCESS} [DRY RUN] Would enable Touch ID for sudo${NC}" + echo -e "${GRAY}${ICON_REVIEW} Target files: ${PAM_SUDO_FILE} and/or ${PAM_SUDO_LOCAL_FILE}${NC}" + fi + return 0 + fi + + # First check if system supports Touch ID + if ! supports_touchid; then + log_warning "This Mac may not support Touch ID" + read -rp "Continue anyway? [y/N] " confirm + if [[ ! "$confirm" =~ ^[Yy]$ ]]; then + echo -e "${YELLOW}Cancelled${NC}" + return 1 + fi + echo "" + fi + + # Check if we should use sudo_local (Sonoma+) + if grep -q "sudo_local" "$PAM_SUDO_FILE"; then + # Check if already correctly configured in sudo_local + if [[ -f "$PAM_SUDO_LOCAL_FILE" ]] && grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then + # It is in sudo_local, but let's check if it's ALSO in sudo (incomplete migration) + if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then + # Clean up legacy config + temp_file=$(create_temp_file) + grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file" + if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then + echo -e "${GREEN}${ICON_SUCCESS} Cleanup legacy configuration${NC}" + fi + fi + echo -e "${GREEN}${ICON_SUCCESS} Touch ID is already enabled${NC}" + return 0 + fi + + # Not configured in sudo_local yet. + # Check if configured in sudo (Legacy) + local is_legacy_configured=false + if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then + is_legacy_configured=true + fi + + # Function to write to sudo_local + local write_success=false + if [[ ! -f "$PAM_SUDO_LOCAL_FILE" ]]; then + # Create the file + echo "# sudo_local: local customizations for sudo" | sudo tee "$PAM_SUDO_LOCAL_FILE" > /dev/null + echo "$PAM_TID_LINE" | sudo tee -a "$PAM_SUDO_LOCAL_FILE" > /dev/null + sudo chmod 444 "$PAM_SUDO_LOCAL_FILE" + sudo chown root:wheel "$PAM_SUDO_LOCAL_FILE" + write_success=true + else + # Append if not present + if ! grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then + temp_file=$(create_temp_file) + cp "$PAM_SUDO_LOCAL_FILE" "$temp_file" + echo "$PAM_TID_LINE" >> "$temp_file" + sudo mv "$temp_file" "$PAM_SUDO_LOCAL_FILE" + sudo chmod 444 "$PAM_SUDO_LOCAL_FILE" + sudo chown root:wheel "$PAM_SUDO_LOCAL_FILE" + write_success=true + else + write_success=true # Already there (should be caught by first check, but safe fallback) + fi + fi + + if $write_success; then + # If we migrated from legacy, clean it up now + if $is_legacy_configured; then + temp_file=$(create_temp_file) + grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file" + sudo mv "$temp_file" "$PAM_SUDO_FILE" + log_success "Touch ID migrated to sudo_local" + else + log_success "Touch ID enabled, via sudo_local, try: sudo ls" + fi + return 0 + else + log_error "Failed to write to sudo_local" + return 1 + fi + fi + + # Legacy method: Modify sudo file directly + + # Check if already configured (Legacy) + if is_touchid_configured; then + echo -e "${GREEN}${ICON_SUCCESS} Touch ID is already enabled${NC}" + return 0 + fi + + # Create backup only if it doesn't exist to preserve original state + if [[ ! -f "${PAM_SUDO_FILE}.mole-backup" ]]; then + if ! sudo cp "$PAM_SUDO_FILE" "${PAM_SUDO_FILE}.mole-backup" 2> /dev/null; then + log_error "Failed to create backup" + return 1 + fi + fi + + # Create temp file + temp_file=$(create_temp_file) + + # Insert pam_tid.so after the first comment block + awk ' + BEGIN { inserted = 0 } + /^#/ { print; next } + !inserted && /^[^#]/ { + print "'"$PAM_TID_LINE"'" + inserted = 1 + } + { print } + ' "$PAM_SUDO_FILE" > "$temp_file" + + # Verify content change + if cmp -s "$PAM_SUDO_FILE" "$temp_file"; then + log_error "Failed to modify configuration" + return 1 + fi + + # Apply the changes + if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then + log_success "Touch ID enabled, try: sudo ls" + return 0 + else + log_error "Failed to enable Touch ID" + return 1 + fi +} + +# Disable Touch ID for sudo +disable_touchid() { + # Cleanup trap handled by global EXIT trap + local temp_file="" + + if touchid_dry_run_enabled; then + if ! is_touchid_configured; then + echo -e "${YELLOW}Touch ID is not currently enabled${NC}" + else + echo -e "${GREEN}${ICON_SUCCESS} [DRY RUN] Would disable Touch ID for sudo${NC}" + echo -e "${GRAY}${ICON_REVIEW} Target files: ${PAM_SUDO_FILE} and/or ${PAM_SUDO_LOCAL_FILE}${NC}" + fi + return 0 + fi + + if ! is_touchid_configured; then + echo -e "${YELLOW}Touch ID is not currently enabled${NC}" + return 0 + fi + + # Check sudo_local first + if [[ -f "$PAM_SUDO_LOCAL_FILE" ]] && grep -q "pam_tid.so" "$PAM_SUDO_LOCAL_FILE"; then + # Remove from sudo_local + temp_file=$(create_temp_file) + grep -v "pam_tid.so" "$PAM_SUDO_LOCAL_FILE" > "$temp_file" + + if sudo mv "$temp_file" "$PAM_SUDO_LOCAL_FILE" 2> /dev/null; then + # Since we modified sudo_local, we should also check if it's in sudo file (legacy cleanup) + if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then + temp_file=$(create_temp_file) + grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file" + sudo mv "$temp_file" "$PAM_SUDO_FILE" + fi + echo -e "${GREEN}${ICON_SUCCESS} Touch ID disabled, removed from sudo_local${NC}" + echo "" + return 0 + else + log_error "Failed to disable Touch ID from sudo_local" + return 1 + fi + fi + + # Fallback to sudo file (legacy) + if grep -q "pam_tid.so" "$PAM_SUDO_FILE"; then + # Create backup only if it doesn't exist + if [[ ! -f "${PAM_SUDO_FILE}.mole-backup" ]]; then + if ! sudo cp "$PAM_SUDO_FILE" "${PAM_SUDO_FILE}.mole-backup" 2> /dev/null; then + log_error "Failed to create backup" + return 1 + fi + fi + + # Remove pam_tid.so line + temp_file=$(create_temp_file) + grep -v "pam_tid.so" "$PAM_SUDO_FILE" > "$temp_file" + + if sudo mv "$temp_file" "$PAM_SUDO_FILE" 2> /dev/null; then + echo -e "${GREEN}${ICON_SUCCESS} Touch ID disabled${NC}" + echo "" + return 0 + else + log_error "Failed to disable Touch ID" + return 1 + fi + fi + + # Should not reach here if is_touchid_configured was true + log_error "Could not find Touch ID configuration to disable" + return 1 +} + +# Interactive menu +show_menu() { + echo "" + show_status + if is_touchid_configured; then + echo -ne "${PURPLE}☛${NC} Press ${GREEN}Enter${NC} to disable, ${GRAY}Q${NC} to quit: " + IFS= read -r -s -n1 key || key="" + drain_pending_input # Clean up any escape sequence remnants + echo "" + + case "$key" in + $'\e') # ESC + return 0 + ;; + "" | $'\n' | $'\r') # Enter + printf "\r\033[K" # Clear the prompt line + disable_touchid + ;; + *) + echo "" + log_error "Invalid key" + ;; + esac + else + echo -ne "${PURPLE}☛${NC} Press ${GREEN}Enter${NC} to enable, ${GRAY}Q${NC} to quit: " + IFS= read -r -s -n1 key || key="" + drain_pending_input # Clean up any escape sequence remnants + + case "$key" in + $'\e') # ESC + return 0 + ;; + "" | $'\n' | $'\r') # Enter + printf "\r\033[K" # Clear the prompt line + enable_touchid + ;; + *) + echo "" + log_error "Invalid key" + ;; + esac + fi +} + +# Main +main() { + local command="" + local arg + + for arg in "$@"; do + case "$arg" in + "--dry-run" | "-n") + export MOLE_DRY_RUN=1 + ;; + "--help" | "-h") + show_touchid_help + return 0 + ;; + enable | disable | status) + if [[ -z "$command" ]]; then + command="$arg" + else + log_error "Only one touchid command is supported per run" + return 1 + fi + ;; + *) + log_error "Unknown command: $arg" + return 1 + ;; + esac + done + + if touchid_dry_run_enabled; then + echo -e "${YELLOW}${ICON_DRY_RUN} DRY RUN MODE${NC}, No sudo authentication files will be modified" + echo "" + fi + + case "$command" in + enable) + enable_touchid + ;; + disable) + disable_touchid + ;; + status) + show_status + ;; + "") + show_menu + ;; + *) + log_error "Unknown command: $command" + exit 1 + ;; + esac +} + +main "$@" diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/uninstall.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/uninstall.sh new file mode 100755 index 0000000..5c96661 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/bin/uninstall.sh @@ -0,0 +1,1002 @@ +#!/bin/bash +# Mole - Uninstall command. +# Interactive app uninstaller. +# Removes app files and leftovers. + +set -euo pipefail + +# Preserve user's locale for app display name lookup. +readonly MOLE_UNINSTALL_USER_LC_ALL="${LC_ALL:-}" +readonly MOLE_UNINSTALL_USER_LANG="${LANG:-}" + +# Fix locale issues on non-English systems. +export LC_ALL=C +export LANG=C + +# Load shared helpers. +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$SCRIPT_DIR/../lib/core/common.sh" + +# Clean temp files on exit. +trap cleanup_temp_files EXIT INT TERM +source "$SCRIPT_DIR/../lib/ui/menu_paginated.sh" +source "$SCRIPT_DIR/../lib/ui/app_selector.sh" +source "$SCRIPT_DIR/../lib/uninstall/batch.sh" + +# State +selected_apps=() +declare -a apps_data=() +declare -a selection_state=() +total_items=0 +files_cleaned=0 +total_size_cleaned=0 + +readonly MOLE_UNINSTALL_META_CACHE_DIR="$HOME/.cache/mole" +readonly MOLE_UNINSTALL_META_CACHE_FILE="$MOLE_UNINSTALL_META_CACHE_DIR/uninstall_app_metadata_v1" +readonly MOLE_UNINSTALL_META_CACHE_LOCK="${MOLE_UNINSTALL_META_CACHE_FILE}.lock" +readonly MOLE_UNINSTALL_META_REFRESH_TTL=604800 # 7 days +readonly MOLE_UNINSTALL_SCAN_SPINNER_DELAY_SEC="0.25" +readonly MOLE_UNINSTALL_INLINE_METADATA_LIMIT=8 +readonly MOLE_UNINSTALL_INLINE_MDLS_TIMEOUT_SEC="0.08" + +uninstall_relative_time_from_epoch() { + local value_epoch="${1:-0}" + local now_epoch="${2:-0}" + + if [[ ! "$value_epoch" =~ ^[0-9]+$ || $value_epoch -le 0 ]]; then + echo "Unknown" + return 0 + fi + + local days_ago=$(((now_epoch - value_epoch) / 86400)) + if [[ $days_ago -lt 0 ]]; then + days_ago=0 + fi + + if [[ $days_ago -eq 0 ]]; then + echo "Today" + elif [[ $days_ago -eq 1 ]]; then + echo "Yesterday" + elif [[ $days_ago -lt 7 ]]; then + echo "${days_ago} days ago" + elif [[ $days_ago -lt 30 ]]; then + local weeks_ago=$((days_ago / 7)) + [[ $weeks_ago -eq 1 ]] && echo "1 week ago" || echo "${weeks_ago} weeks ago" + elif [[ $days_ago -lt 365 ]]; then + local months_ago=$((days_ago / 30)) + [[ $months_ago -eq 1 ]] && echo "1 month ago" || echo "${months_ago} months ago" + else + local years_ago=$((days_ago / 365)) + [[ $years_ago -eq 1 ]] && echo "1 year ago" || echo "${years_ago} years ago" + fi +} + +uninstall_normalize_size_display() { + local size="${1:-}" + if [[ -z "$size" || "$size" == "0" || "$size" == "Unknown" ]]; then + echo "N/A" + return 0 + fi + echo "$size" +} + +uninstall_normalize_last_used_display() { + local last_used="${1:-}" + local display + display=$(format_last_used_summary "$last_used") + if [[ -z "$display" || "$display" == "Never" ]]; then + echo "Unknown" + return 0 + fi + echo "$display" +} + +uninstall_resolve_display_name() { + local app_path="$1" + local app_name="$2" + local display_name="$app_name" + + if [[ -f "$app_path/Contents/Info.plist" ]]; then + local md_display_name + if [[ -n "$MOLE_UNINSTALL_USER_LC_ALL" ]]; then + md_display_name=$(run_with_timeout 0.04 env LC_ALL="$MOLE_UNINSTALL_USER_LC_ALL" LANG="$MOLE_UNINSTALL_USER_LANG" mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "") + elif [[ -n "$MOLE_UNINSTALL_USER_LANG" ]]; then + md_display_name=$(run_with_timeout 0.04 env LANG="$MOLE_UNINSTALL_USER_LANG" mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "") + else + md_display_name=$(run_with_timeout 0.04 mdls -name kMDItemDisplayName -raw "$app_path" 2> /dev/null || echo "") + fi + + local bundle_display_name + bundle_display_name=$(plutil -extract CFBundleDisplayName raw "$app_path/Contents/Info.plist" 2> /dev/null || echo "") + local bundle_name + bundle_name=$(plutil -extract CFBundleName raw "$app_path/Contents/Info.plist" 2> /dev/null || echo "") + + if [[ "$md_display_name" == /* ]]; then + md_display_name="" + fi + md_display_name="${md_display_name//|/-}" + md_display_name="${md_display_name//[$'\t\r\n']/}" + + bundle_display_name="${bundle_display_name//|/-}" + bundle_display_name="${bundle_display_name//[$'\t\r\n']/}" + + bundle_name="${bundle_name//|/-}" + bundle_name="${bundle_name//[$'\t\r\n']/}" + + if [[ -n "$md_display_name" && "$md_display_name" != "(null)" && "$md_display_name" != "$app_name" ]]; then + display_name="$md_display_name" + elif [[ -n "$bundle_display_name" && "$bundle_display_name" != "(null)" ]]; then + display_name="$bundle_display_name" + elif [[ -n "$bundle_name" && "$bundle_name" != "(null)" ]]; then + display_name="$bundle_name" + fi + fi + + if [[ "$display_name" == /* ]]; then + display_name="$app_name" + fi + display_name="${display_name%.app}" + display_name="${display_name//|/-}" + display_name="${display_name//[$'\t\r\n']/}" + echo "$display_name" +} + +uninstall_acquire_metadata_lock() { + local lock_dir="$1" + local attempts=0 + + while ! mkdir "$lock_dir" 2> /dev/null; do + ((attempts++)) + if [[ $attempts -ge 40 ]]; then + return 1 + fi + + # Clean stale lock if older than 5 minutes. + if [[ -d "$lock_dir" ]]; then + local lock_mtime + lock_mtime=$(get_file_mtime "$lock_dir") + # Skip stale detection if mtime lookup failed (returns 0). + if [[ "$lock_mtime" =~ ^[0-9]+$ && $lock_mtime -gt 0 ]]; then + local lock_age + lock_age=$(($(get_epoch_seconds) - lock_mtime)) + if [[ "$lock_age" =~ ^-?[0-9]+$ && $lock_age -gt 300 ]]; then + rmdir "$lock_dir" 2> /dev/null || true + fi + fi + fi + + sleep 0.1 2> /dev/null || sleep 1 + done + + return 0 +} + +uninstall_release_metadata_lock() { + local lock_dir="$1" + [[ -d "$lock_dir" ]] && rmdir "$lock_dir" 2> /dev/null || true +} + +uninstall_collect_inline_metadata() { + local app_path="$1" + local app_mtime="${2:-0}" + local now_epoch="${3:-0}" + + local size_kb + size_kb=$(get_path_size_kb "$app_path") + [[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0 + + local last_used_epoch=0 + local metadata_date + metadata_date=$(run_with_timeout "$MOLE_UNINSTALL_INLINE_MDLS_TIMEOUT_SEC" mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "") + if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then + last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0") + fi + + # Fallback to app mtime so first scan does not show "...". + if [[ ! "$last_used_epoch" =~ ^[0-9]+$ || $last_used_epoch -le 0 ]]; then + if [[ "$app_mtime" =~ ^[0-9]+$ && $app_mtime -gt 0 ]]; then + last_used_epoch="$app_mtime" + else + last_used_epoch=0 + fi + fi + + printf "%s|%s|%s\n" "$size_kb" "$last_used_epoch" "$now_epoch" +} + +start_uninstall_metadata_refresh() { + local refresh_file="$1" + [[ ! -s "$refresh_file" ]] && { + rm -f "$refresh_file" 2> /dev/null || true + return 0 + } + + ( + _refresh_debug() { + if [[ "${MO_DEBUG:-}" == "1" ]]; then + local ts + ts=$(date "+%Y-%m-%d %H:%M:%S" 2> /dev/null || echo "?") + echo "[$ts] DEBUG: [metadata-refresh] $*" >> "${HOME}/.config/mole/mole_debug_session.log" 2> /dev/null || true + fi + } + + ensure_user_dir "$MOLE_UNINSTALL_META_CACHE_DIR" + ensure_user_file "$MOLE_UNINSTALL_META_CACHE_FILE" + if [[ ! -r "$MOLE_UNINSTALL_META_CACHE_FILE" ]]; then + if ! : > "$MOLE_UNINSTALL_META_CACHE_FILE" 2> /dev/null; then + _refresh_debug "Cannot create cache file, aborting" + exit 0 + fi + fi + if [[ ! -w "$MOLE_UNINSTALL_META_CACHE_FILE" ]]; then + _refresh_debug "Cache file not writable, aborting" + exit 0 + fi + + local updates_file + updates_file=$(mktemp 2> /dev/null) || { + _refresh_debug "mktemp failed, aborting" + exit 0 + } + local now_epoch + now_epoch=$(get_epoch_seconds) + local max_parallel + max_parallel=$(get_optimal_parallel_jobs "io") + if [[ ! "$max_parallel" =~ ^[0-9]+$ || $max_parallel -lt 1 ]]; then + max_parallel=1 + elif [[ $max_parallel -gt 4 ]]; then + max_parallel=4 + fi + local -a worker_pids=() + local worker_idx=0 + + while IFS='|' read -r app_path app_mtime bundle_id display_name; do + [[ -n "$app_path" && -d "$app_path" ]] || continue + ((worker_idx++)) + local worker_output="${updates_file}.${worker_idx}" + + ( + local last_used_epoch=0 + local metadata_date + metadata_date=$(run_with_timeout 0.2 mdls -name kMDItemLastUsedDate -raw "$app_path" 2> /dev/null || echo "") + if [[ "$metadata_date" != "(null)" && -n "$metadata_date" ]]; then + last_used_epoch=$(date -j -f "%Y-%m-%d %H:%M:%S %z" "$metadata_date" "+%s" 2> /dev/null || echo "0") + fi + + if [[ ! "$last_used_epoch" =~ ^[0-9]+$ || $last_used_epoch -le 0 ]]; then + last_used_epoch=0 + fi + + local size_kb + size_kb=$(get_path_size_kb "$app_path") + [[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0 + + printf "%s|%s|%s|%s|%s|%s|%s\n" "$app_path" "${app_mtime:-0}" "$size_kb" "${last_used_epoch:-0}" "$now_epoch" "$bundle_id" "$display_name" > "$worker_output" + ) & + worker_pids+=($!) + + if ((${#worker_pids[@]} >= max_parallel)); then + wait "${worker_pids[0]}" 2> /dev/null || true + worker_pids=("${worker_pids[@]:1}") + fi + done < "$refresh_file" + + local worker_pid + for worker_pid in "${worker_pids[@]}"; do + wait "$worker_pid" 2> /dev/null || true + done + + local worker_output + for worker_output in "${updates_file}".*; do + [[ -f "$worker_output" ]] || continue + cat "$worker_output" >> "$updates_file" + rm -f "$worker_output" + done + + if [[ ! -s "$updates_file" ]]; then + rm -f "$updates_file" + exit 0 + fi + + if ! uninstall_acquire_metadata_lock "$MOLE_UNINSTALL_META_CACHE_LOCK"; then + _refresh_debug "Failed to acquire lock, aborting merge" + rm -f "$updates_file" + exit 0 + fi + + local merged_file + merged_file=$(mktemp 2> /dev/null) || { + _refresh_debug "mktemp for merge failed, aborting" + uninstall_release_metadata_lock "$MOLE_UNINSTALL_META_CACHE_LOCK" + rm -f "$updates_file" + exit 0 + } + + awk -F'|' ' + NR == FNR { updates[$1] = $0; next } + !($1 in updates) { print } + END { + for (path in updates) { + print updates[path] + } + } + ' "$updates_file" "$MOLE_UNINSTALL_META_CACHE_FILE" > "$merged_file" + + mv "$merged_file" "$MOLE_UNINSTALL_META_CACHE_FILE" 2> /dev/null || { + cp "$merged_file" "$MOLE_UNINSTALL_META_CACHE_FILE" 2> /dev/null || true + rm -f "$merged_file" + } + + uninstall_release_metadata_lock "$MOLE_UNINSTALL_META_CACHE_LOCK" + rm -f "$updates_file" + rm -f "$refresh_file" 2> /dev/null || true + ) > /dev/null 2>&1 & + +} + +# Scan applications and collect information. +scan_applications() { + local temp_file scan_raw_file merged_file refresh_file cache_snapshot_file + temp_file=$(create_temp_file) + scan_raw_file="${temp_file}.scan" + merged_file="${temp_file}.merged" + refresh_file="${temp_file}.refresh" + cache_snapshot_file="${temp_file}.cache" + local scan_status_file="${temp_file}.scan_status" + : > "$scan_raw_file" + : > "$refresh_file" + : > "$cache_snapshot_file" + : > "$scan_status_file" + + ensure_user_dir "$MOLE_UNINSTALL_META_CACHE_DIR" + ensure_user_file "$MOLE_UNINSTALL_META_CACHE_FILE" + local cache_source="$MOLE_UNINSTALL_META_CACHE_FILE" + local cache_source_is_temp=false + if [[ ! -r "$cache_source" ]]; then + cache_source=$(create_temp_file) + : > "$cache_source" + cache_source_is_temp=true + fi + + # Fast lookup cache for unchanged apps: path+mtime -> bundle_id/display_name. + local -a cache_paths=() + local -a cache_mtimes=() + local -a cache_bundle_ids=() + local -a cache_display_names=() + local cache_path cache_mtime _cache_size _cache_epoch _cache_updated cache_bundle cache_display + while IFS='|' read -r cache_path cache_mtime _cache_size _cache_epoch _cache_updated cache_bundle cache_display; do + [[ -n "$cache_path" ]] || continue + cache_paths+=("$cache_path") + cache_mtimes+=("${cache_mtime:-0}") + cache_bundle_ids+=("${cache_bundle:-}") + cache_display_names+=("${cache_display:-}") + done < "$cache_source" + + lookup_cached_identity() { + local target_path="$1" + local target_mtime="$2" + local idx + for ((idx = 0; idx < ${#cache_paths[@]}; idx++)); do + if [[ "${cache_paths[idx]}" == "$target_path" ]]; then + if [[ "${cache_mtimes[idx]:-0}" == "${target_mtime:-0}" ]]; then + echo "${cache_bundle_ids[idx]:-}|${cache_display_names[idx]:-}" + else + echo "|" + fi + return 0 + fi + done + echo "|" + } + + # Local spinner_pid for cleanup + local spinner_pid="" + local spinner_shown_file="${temp_file}.spinner_shown" + local previous_int_trap="" + previous_int_trap=$(trap -p INT || true) + + restore_scan_int_trap() { + if [[ -n "$previous_int_trap" ]]; then + eval "$previous_int_trap" + else + trap - INT + fi + } + + # Trap to handle Ctrl+C during scan + # shellcheck disable=SC2329 # Function invoked indirectly via trap + trap_scan_cleanup() { + if [[ -n "$spinner_pid" ]]; then + kill -TERM "$spinner_pid" 2> /dev/null || true + wait "$spinner_pid" 2> /dev/null || true + fi + if [[ -f "$spinner_shown_file" ]]; then + printf "\r\033[K" >&2 + fi + rm -f "$temp_file" "$scan_raw_file" "$merged_file" "$refresh_file" "$cache_snapshot_file" "$scan_status_file" "${temp_file}.sorted" "$spinner_shown_file" 2> /dev/null || true + exit 130 + } + trap trap_scan_cleanup INT + + update_scan_status() { + local message="$1" + local completed="${2:-0}" + local total="${3:-0}" + printf "%s|%s|%s\n" "$message" "$completed" "$total" > "$scan_status_file" + } + + stop_scan_spinner() { + if [[ -n "$spinner_pid" ]]; then + kill -TERM "$spinner_pid" 2> /dev/null || true + wait "$spinner_pid" 2> /dev/null || true + spinner_pid="" + fi + if [[ -f "$spinner_shown_file" ]]; then + printf "\r\033[K" >&2 + fi + rm -f "$spinner_shown_file" "$scan_status_file" 2> /dev/null || true + } + + # Pass 1: collect app paths and bundle IDs (no mdls). + local -a app_data_tuples=() + local -a app_dirs=( + "/Applications" + "$HOME/Applications" + "/Library/Input Methods" + "$HOME/Library/Input Methods" + ) + local vol_app_dir + local nullglob_was_set=0 + shopt -q nullglob && nullglob_was_set=1 + shopt -s nullglob + for vol_app_dir in /Volumes/*/Applications; do + [[ -d "$vol_app_dir" && -r "$vol_app_dir" ]] || continue + if [[ -d "/Applications" && "$vol_app_dir" -ef "/Applications" ]]; then + continue + fi + if [[ -d "$HOME/Applications" && "$vol_app_dir" -ef "$HOME/Applications" ]]; then + continue + fi + app_dirs+=("$vol_app_dir") + done + if [[ $nullglob_was_set -eq 0 ]]; then + shopt -u nullglob + fi + + for app_dir in "${app_dirs[@]}"; do + if [[ ! -d "$app_dir" ]]; then continue; fi + + while IFS= read -r -d '' app_path; do + if [[ ! -e "$app_path" ]]; then continue; fi + + local app_name + app_name=$(basename "$app_path" .app) + + # Skip nested apps inside another .app bundle. + local parent_dir + parent_dir=$(dirname "$app_path") + if [[ "$parent_dir" == *".app" || "$parent_dir" == *".app/"* ]]; then + continue + fi + + if [[ -L "$app_path" ]]; then + local link_target + link_target=$(readlink "$app_path" 2> /dev/null) + if [[ -n "$link_target" ]]; then + local resolved_target="$link_target" + if [[ "$link_target" != /* ]]; then + local link_dir + link_dir=$(dirname "$app_path") + resolved_target=$(cd "$link_dir" 2> /dev/null && cd "$(dirname "$link_target")" 2> /dev/null && pwd)/$(basename "$link_target") 2> /dev/null || echo "" + fi + case "$resolved_target" in + /System/* | /usr/bin/* | /usr/lib/* | /bin/* | /sbin/* | /private/etc/*) + continue + ;; + esac + fi + fi + + local app_mtime + app_mtime=$(get_file_mtime "$app_path") + + local cached_identity cached_bundle_id cached_display_name + cached_identity=$(lookup_cached_identity "$app_path" "$app_mtime") + IFS='|' read -r cached_bundle_id cached_display_name <<< "$cached_identity" + + # Store tuple for pass 2 (bundle + display resolution, then cache merge). + app_data_tuples+=("${app_path}|${app_name}|${app_mtime}|${cached_bundle_id}|${cached_display_name}") + done < <(command find "$app_dir" -name "*.app" -maxdepth 3 -print0 2> /dev/null) + done + + if [[ ${#app_data_tuples[@]} -eq 0 ]]; then + rm -f "$temp_file" "$scan_raw_file" "$merged_file" "$refresh_file" "$cache_snapshot_file" "$scan_status_file" "${temp_file}.sorted" "$spinner_shown_file" 2> /dev/null || true + [[ $cache_source_is_temp == true ]] && rm -f "$cache_source" 2> /dev/null || true + restore_scan_int_trap + printf "\r\033[K" >&2 + echo "No applications found to uninstall." >&2 + return 1 + fi + # Pass 2: resolve display names in parallel. + local app_count=0 + local total_apps=${#app_data_tuples[@]} + local max_parallel + max_parallel=$(get_optimal_parallel_jobs "io") + if [[ $max_parallel -lt 8 ]]; then + max_parallel=8 # At least 8 for good performance + elif [[ $max_parallel -gt 32 ]]; then + max_parallel=32 # Cap at 32 to avoid too many processes + fi + local pids=() + + process_app_metadata() { + local app_data_tuple="$1" + local output_file="$2" + + IFS='|' read -r app_path app_name app_mtime cached_bundle_id cached_display_name <<< "$app_data_tuple" + + local bundle_id="${cached_bundle_id:-}" + if [[ -z "$bundle_id" ]]; then + bundle_id="unknown" + if [[ -f "$app_path/Contents/Info.plist" ]]; then + bundle_id=$(defaults read "$app_path/Contents/Info.plist" CFBundleIdentifier 2> /dev/null || echo "unknown") + fi + fi + + if should_protect_from_uninstall "$bundle_id"; then + return 0 + fi + + local display_name="${cached_display_name:-}" + if [[ -z "$display_name" ]]; then + display_name=$(uninstall_resolve_display_name "$app_path" "$app_name") + fi + + display_name="${display_name%.app}" + display_name="${display_name//|/-}" + display_name="${display_name//[$'\t\r\n']/}" + + echo "${app_path}|${display_name}|${bundle_id}|${app_mtime}" >> "$output_file" + } + + update_scan_status "Scanning applications..." "0" "$total_apps" + + ( + # shellcheck disable=SC2329 # Function invoked indirectly via trap + cleanup_spinner() { exit 0; } + trap cleanup_spinner TERM INT EXIT + sleep "$MOLE_UNINSTALL_SCAN_SPINNER_DELAY_SEC" 2> /dev/null || sleep 1 + [[ -f "$scan_status_file" ]] || exit 0 + local spinner_chars="|/-\\" + local i=0 + : > "$spinner_shown_file" + while true; do + local status_line status_message status_completed status_total + status_line=$(cat "$scan_status_file" 2> /dev/null || echo "") + IFS='|' read -r status_message status_completed status_total <<< "$status_line" + [[ -z "$status_message" ]] && status_message="Scanning applications..." + local c="${spinner_chars:$((i % 4)):1}" + if [[ "$status_completed" =~ ^[0-9]+$ && "$status_total" =~ ^[0-9]+$ && $status_total -gt 0 ]]; then + printf "\r\033[K%s %s %d/%d" "$c" "$status_message" "$status_completed" "$status_total" >&2 + else + printf "\r\033[K%s %s" "$c" "$status_message" >&2 + fi + ((i++)) + sleep 0.1 2> /dev/null || sleep 1 + done + ) & + spinner_pid=$! + + for app_data_tuple in "${app_data_tuples[@]}"; do + ((app_count++)) + process_app_metadata "$app_data_tuple" "$scan_raw_file" & + pids+=($!) + update_scan_status "Scanning applications..." "$app_count" "$total_apps" + + if ((${#pids[@]} >= max_parallel)); then + wait "${pids[0]}" 2> /dev/null + pids=("${pids[@]:1}") + fi + done + + for pid in "${pids[@]}"; do + wait "$pid" 2> /dev/null + done + + update_scan_status "Building uninstall index..." "0" "0" + + if [[ ! -s "$scan_raw_file" ]]; then + stop_scan_spinner + echo "No applications found to uninstall" >&2 + rm -f "$temp_file" "$scan_raw_file" "$merged_file" "$refresh_file" "$cache_snapshot_file" "${temp_file}.sorted" "$spinner_shown_file" 2> /dev/null || true + [[ $cache_source_is_temp == true ]] && rm -f "$cache_source" 2> /dev/null || true + restore_scan_int_trap + return 1 + fi + + update_scan_status "Merging cache data..." "0" "0" + awk -F'|' ' + NR == FNR { + cache_mtime[$1] = $2 + cache_size[$1] = $3 + cache_epoch[$1] = $4 + cache_updated[$1] = $5 + cache_bundle[$1] = $6 + cache_display[$1] = $7 + next + } + { + print $0 "|" cache_mtime[$1] "|" cache_size[$1] "|" cache_epoch[$1] "|" cache_updated[$1] "|" cache_bundle[$1] "|" cache_display[$1] + } + ' "$cache_source" "$scan_raw_file" > "$merged_file" + if [[ ! -s "$merged_file" && -s "$scan_raw_file" ]]; then + awk '{print $0 "||||||"}' "$scan_raw_file" > "$merged_file" + fi + + local current_epoch + current_epoch=$(get_epoch_seconds) + local inline_metadata_count=0 + local metadata_total=0 + metadata_total=$(wc -l < "$merged_file" 2> /dev/null || echo "0") + [[ "$metadata_total" =~ ^[0-9]+$ ]] || metadata_total=0 + local metadata_processed=0 + update_scan_status "Collecting metadata..." "0" "$metadata_total" + + while IFS='|' read -r app_path display_name bundle_id app_mtime cached_mtime cached_size_kb cached_epoch cached_updated_epoch cached_bundle_id cached_display_name; do + ((metadata_processed++)) + if ((metadata_processed % 5 == 0 || metadata_processed == metadata_total)); then + update_scan_status "Collecting metadata..." "$metadata_processed" "$metadata_total" + fi + + [[ -n "$app_path" && -e "$app_path" ]] || continue + + local cache_match=false + if [[ -n "$cached_mtime" && -n "$app_mtime" && "$cached_mtime" == "$app_mtime" ]]; then + cache_match=true + fi + + local final_epoch=0 + if [[ "$cached_epoch" =~ ^[0-9]+$ && $cached_epoch -gt 0 ]]; then + final_epoch="$cached_epoch" + fi + + local final_size_kb=0 + local final_size="N/A" + if [[ "$cached_size_kb" =~ ^[0-9]+$ && $cached_size_kb -gt 0 ]]; then + final_size_kb="$cached_size_kb" + final_size=$(bytes_to_human "$((cached_size_kb * 1024))") + fi + + # Fallback to app mtime to avoid unknown "last used" on first scan. + if [[ ! "$final_epoch" =~ ^[0-9]+$ || $final_epoch -le 0 ]]; then + if [[ "$app_mtime" =~ ^[0-9]+$ && $app_mtime -gt 0 ]]; then + final_epoch="$app_mtime" + fi + fi + + local final_last_used + final_last_used=$(uninstall_relative_time_from_epoch "$final_epoch" "$current_epoch") + + local needs_refresh=false + if [[ $cache_match == false ]]; then + needs_refresh=true + elif [[ ! "$cached_size_kb" =~ ^[0-9]+$ || $cached_size_kb -le 0 ]]; then + needs_refresh=true + elif [[ ! "$cached_epoch" =~ ^[0-9]+$ || $cached_epoch -le 0 ]]; then + needs_refresh=true + elif [[ ! "$cached_updated_epoch" =~ ^[0-9]+$ ]]; then + needs_refresh=true + elif [[ -z "$cached_bundle_id" || -z "$cached_display_name" ]]; then + needs_refresh=true + else + local cache_age=$((current_epoch - cached_updated_epoch)) + if [[ $cache_age -gt $MOLE_UNINSTALL_META_REFRESH_TTL ]]; then + needs_refresh=true + fi + fi + + if [[ $needs_refresh == true ]]; then + if [[ $inline_metadata_count -lt $MOLE_UNINSTALL_INLINE_METADATA_LIMIT ]]; then + local inline_metadata inline_size_kb inline_epoch inline_updated_epoch + inline_metadata=$(uninstall_collect_inline_metadata "$app_path" "${app_mtime:-0}" "$current_epoch") + IFS='|' read -r inline_size_kb inline_epoch inline_updated_epoch <<< "$inline_metadata" + ((inline_metadata_count++)) + + if [[ "$inline_size_kb" =~ ^[0-9]+$ && $inline_size_kb -gt 0 ]]; then + final_size_kb="$inline_size_kb" + final_size=$(bytes_to_human "$((inline_size_kb * 1024))") + fi + if [[ "$inline_epoch" =~ ^[0-9]+$ && $inline_epoch -gt 0 ]]; then + final_epoch="$inline_epoch" + final_last_used=$(uninstall_relative_time_from_epoch "$final_epoch" "$current_epoch") + fi + if [[ "$inline_updated_epoch" =~ ^[0-9]+$ && $inline_updated_epoch -gt 0 ]]; then + cached_updated_epoch="$inline_updated_epoch" + fi + fi + printf "%s|%s|%s|%s\n" "$app_path" "${app_mtime:-0}" "$bundle_id" "$display_name" >> "$refresh_file" + fi + + local persist_updated_epoch=0 + if [[ "$cached_updated_epoch" =~ ^[0-9]+$ && $cached_updated_epoch -gt 0 ]]; then + persist_updated_epoch="$cached_updated_epoch" + fi + printf "%s|%s|%s|%s|%s|%s|%s\n" "$app_path" "${app_mtime:-0}" "${final_size_kb:-0}" "${final_epoch:-0}" "${persist_updated_epoch:-0}" "$bundle_id" "$display_name" >> "$cache_snapshot_file" + + echo "${final_epoch}|${app_path}|${display_name}|${bundle_id}|${final_size}|${final_last_used}|${final_size_kb}" >> "$temp_file" + done < "$merged_file" + + update_scan_status "Updating cache..." "0" "0" + if [[ -s "$cache_snapshot_file" ]]; then + if uninstall_acquire_metadata_lock "$MOLE_UNINSTALL_META_CACHE_LOCK"; then + mv "$cache_snapshot_file" "$MOLE_UNINSTALL_META_CACHE_FILE" 2> /dev/null || { + cp "$cache_snapshot_file" "$MOLE_UNINSTALL_META_CACHE_FILE" 2> /dev/null || true + rm -f "$cache_snapshot_file" + } + uninstall_release_metadata_lock "$MOLE_UNINSTALL_META_CACHE_LOCK" + fi + fi + + update_scan_status "Sorting application list..." "0" "0" + sort -t'|' -k1,1n "$temp_file" > "${temp_file}.sorted" || { + stop_scan_spinner + rm -f "$temp_file" "$scan_raw_file" "$merged_file" "$refresh_file" "$cache_snapshot_file" + [[ $cache_source_is_temp == true ]] && rm -f "$cache_source" 2> /dev/null || true + restore_scan_int_trap + return 1 + } + rm -f "$temp_file" "$scan_raw_file" "$merged_file" "$cache_snapshot_file" + [[ $cache_source_is_temp == true ]] && rm -f "$cache_source" 2> /dev/null || true + + update_scan_status "Finalizing list..." "0" "0" + start_uninstall_metadata_refresh "$refresh_file" + stop_scan_spinner + + if [[ -f "${temp_file}.sorted" ]]; then + restore_scan_int_trap + echo "${temp_file}.sorted" + return 0 + else + restore_scan_int_trap + return 1 + fi +} + +load_applications() { + local apps_file="$1" + + if [[ ! -f "$apps_file" || ! -s "$apps_file" ]]; then + log_warning "No applications found for uninstallation" + return 1 + fi + + apps_data=() + selection_state=() + + while IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb; do + [[ ! -e "$app_path" ]] && continue + + apps_data+=("$epoch|$app_path|$app_name|$bundle_id|$size|$last_used|${size_kb:-0}") + selection_state+=(false) + done < "$apps_file" + + if [[ ${#apps_data[@]} -eq 0 ]]; then + log_warning "No applications available for uninstallation" + return 1 + fi + + return 0 +} + +# Cleanup: restore cursor and kill keepalive. +cleanup() { + local exit_code="${1:-$?}" + if [[ "${MOLE_ALT_SCREEN_ACTIVE:-}" == "1" ]]; then + leave_alt_screen + unset MOLE_ALT_SCREEN_ACTIVE + fi + if [[ -n "${sudo_keepalive_pid:-}" ]]; then + kill "$sudo_keepalive_pid" 2> /dev/null || true + wait "$sudo_keepalive_pid" 2> /dev/null || true + sudo_keepalive_pid="" + fi + # Log session end + log_operation_session_end "uninstall" "${files_cleaned:-0}" "${total_size_cleaned:-0}" + show_cursor + exit "$exit_code" +} + +trap cleanup EXIT INT TERM + +main() { + # Set current command for operation logging + export MOLE_CURRENT_COMMAND="uninstall" + log_operation_session_start "uninstall" + + # Global flags + for arg in "$@"; do + case "$arg" in + "--help" | "-h") + show_uninstall_help + exit 0 + ;; + "--debug") + export MO_DEBUG=1 + ;; + "--dry-run" | "-n") + export MOLE_DRY_RUN=1 + ;; + "--whitelist") + echo "Unknown uninstall option: $arg" + echo "Whitelist management is currently supported by: mo clean --whitelist / mo optimize --whitelist" + echo "Use 'mo uninstall --help' for supported options." + exit 1 + ;; + -*) + echo "Unknown uninstall option: $arg" + echo "Use 'mo uninstall --help' for supported options." + exit 1 + ;; + *) + echo "Unknown uninstall argument: $arg" + echo "Use 'mo uninstall --help' for supported options." + exit 1 + ;; + esac + done + + hide_cursor + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + echo -e "${YELLOW}${ICON_DRY_RUN} DRY RUN MODE${NC}, No app files or settings will be modified" + printf '\n' + fi + + local first_scan=true + while true; do + unset MOLE_INLINE_LOADING MOLE_MANAGED_ALT_SCREEN + + if [[ $first_scan == false ]]; then + echo -e "${GRAY}Refreshing application list...${NC}" >&2 + fi + first_scan=false + + local apps_file="" + if ! apps_file=$(scan_applications); then + return 1 + fi + + if [[ ! -f "$apps_file" ]]; then + return 1 + fi + + if ! load_applications "$apps_file"; then + rm -f "$apps_file" + return 1 + fi + + set +e + select_apps_for_uninstall + local exit_code=$? + set -e + + if [[ $exit_code -ne 0 ]]; then + show_cursor + clear_screen + printf '\033[2J\033[H' >&2 + rm -f "$apps_file" + + return 0 + fi + + show_cursor + clear_screen + printf '\033[2J\033[H' >&2 + local selection_count=${#selected_apps[@]} + if [[ $selection_count -eq 0 ]]; then + echo "No apps selected" + rm -f "$apps_file" + continue + fi + echo -e "${BLUE}${ICON_CONFIRM}${NC} Selected ${selection_count} apps:" + local -a summary_rows=() + local max_name_display_width=0 + local max_size_width=0 + local max_last_width=0 + for selected_app in "${selected_apps[@]}"; do + IFS='|' read -r _ _ app_name _ size last_used _ <<< "$selected_app" + local name_width=$(get_display_width "$app_name") + [[ $name_width -gt $max_name_display_width ]] && max_name_display_width=$name_width + local size_display + size_display=$(uninstall_normalize_size_display "$size") + [[ ${#size_display} -gt $max_size_width ]] && max_size_width=${#size_display} + local last_display + last_display=$(uninstall_normalize_last_used_display "$last_used") + [[ ${#last_display} -gt $max_last_width ]] && max_last_width=${#last_display} + done + ((max_size_width < 5)) && max_size_width=5 + ((max_last_width < 5)) && max_last_width=5 + ((max_name_display_width < 16)) && max_name_display_width=16 + + local term_width=$(tput cols 2> /dev/null || echo 100) + local available_for_name=$((term_width - 17 - max_size_width - max_last_width)) + + local min_name_width=24 + if [[ $term_width -ge 120 ]]; then + min_name_width=50 + elif [[ $term_width -ge 100 ]]; then + min_name_width=42 + elif [[ $term_width -ge 80 ]]; then + min_name_width=30 + fi + + local name_trunc_limit=$max_name_display_width + [[ $name_trunc_limit -lt $min_name_width ]] && name_trunc_limit=$min_name_width + [[ $name_trunc_limit -gt $available_for_name ]] && name_trunc_limit=$available_for_name + [[ $name_trunc_limit -gt 60 ]] && name_trunc_limit=60 + + max_name_display_width=0 + + for selected_app in "${selected_apps[@]}"; do + IFS='|' read -r epoch app_path app_name bundle_id size last_used size_kb <<< "$selected_app" + + local display_name + display_name=$(truncate_by_display_width "$app_name" "$name_trunc_limit") + + local current_width + current_width=$(get_display_width "$display_name") + [[ $current_width -gt $max_name_display_width ]] && max_name_display_width=$current_width + + local size_display + size_display=$(uninstall_normalize_size_display "$size") + + local last_display + last_display=$(uninstall_normalize_last_used_display "$last_used") + + summary_rows+=("$display_name|$size_display|$last_display") + done + + ((max_name_display_width < 16)) && max_name_display_width=16 + + local index=1 + for row in "${summary_rows[@]}"; do + IFS='|' read -r name_cell size_cell last_cell <<< "$row" + local name_display_width + name_display_width=$(get_display_width "$name_cell") + local name_char_count=${#name_cell} + local padding_needed=$((max_name_display_width - name_display_width)) + local printf_name_width=$((name_char_count + padding_needed)) + + printf "%d. %-*s %*s | Last: %s\n" "$index" "$printf_name_width" "$name_cell" "$max_size_width" "$size_cell" "$last_cell" + ((index++)) + done + + batch_uninstall_applications + + rm -f "$apps_file" + + local prompt_timeout="${MOLE_UNINSTALL_RETURN_PROMPT_TIMEOUT_SEC:-3}" + if [[ ! "$prompt_timeout" =~ ^[0-9]+$ ]] || [[ "$prompt_timeout" -lt 1 ]]; then + prompt_timeout=3 + fi + + echo -e "${GRAY}Press Enter to return to the app list, press any other key or wait ${prompt_timeout}s to exit.${NC}" + local key + local read_ok=false + if IFS= read -r -s -n1 -t "$prompt_timeout" key; then + read_ok=true + else + key="" + fi + drain_pending_input + + if [[ "$read_ok" == "true" && -z "$key" ]]; then + : + else + show_cursor + return 0 + fi + + done +} + +main "$@" diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/check/all.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/check/all.sh new file mode 100644 index 0000000..4a6960a --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/check/all.sh @@ -0,0 +1,714 @@ +#!/bin/bash +# System Checks Module +# Combines configuration, security, updates, and health checks + +set -euo pipefail + +# ============================================================================ +# Helper Functions +# ============================================================================ + +list_login_items() { + if ! command -v osascript > /dev/null 2>&1; then + return + fi + + local raw_items + raw_items=$(osascript -e 'tell application "System Events" to get the name of every login item' 2> /dev/null || echo "") + [[ -z "$raw_items" || "$raw_items" == "missing value" ]] && return + + IFS=',' read -ra login_items_array <<< "$raw_items" + for entry in "${login_items_array[@]}"; do + local trimmed + trimmed=$(echo "$entry" | sed 's/^[[:space:]]*//; s/[[:space:]]*$//') + [[ -n "$trimmed" ]] && printf "%s\n" "$trimmed" + done +} + +# ============================================================================ +# Configuration Checks +# ============================================================================ + +check_touchid_sudo() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "check_touchid"; then return; fi + # Check if Touch ID is configured for sudo + local pam_file="/etc/pam.d/sudo" + if [[ -f "$pam_file" ]] && grep -q "pam_tid.so" "$pam_file" 2> /dev/null; then + echo -e " ${GREEN}✓${NC} Touch ID Biometric authentication enabled" + else + # Check if Touch ID is supported + local is_supported=false + if command -v bioutil > /dev/null 2>&1; then + if bioutil -r 2> /dev/null | grep -q "Touch ID"; then + is_supported=true + fi + elif [[ "$(uname -m)" == "arm64" ]]; then + is_supported=true + fi + + if [[ "$is_supported" == "true" ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Touch ID ${YELLOW}Not configured for sudo${NC}" + export TOUCHID_NOT_CONFIGURED=true + fi + fi +} + +check_rosetta() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "check_rosetta"; then return; fi + # Check Rosetta 2 (for Apple Silicon Macs) - informational only, not auto-fixed + if [[ "$(uname -m)" == "arm64" ]]; then + if [[ -f "/Library/Apple/usr/share/rosetta/rosetta" ]]; then + echo -e " ${GREEN}✓${NC} Rosetta 2 Intel app translation ready" + else + echo -e " ${GRAY}${ICON_EMPTY}${NC} Rosetta 2 ${GRAY}Not installed${NC}" + fi + fi +} + +check_git_config() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "check_git_config"; then return; fi + # Check basic Git configuration + if command -v git > /dev/null 2>&1; then + local git_name=$(git config --global user.name 2> /dev/null || echo "") + local git_email=$(git config --global user.email 2> /dev/null || echo "") + + if [[ -n "$git_name" && -n "$git_email" ]]; then + echo -e " ${GREEN}✓${NC} Git Global identity configured" + else + echo -e " ${GRAY}${ICON_WARNING}${NC} Git ${YELLOW}User identity not set${NC}" + fi + fi +} + +check_all_config() { + echo -e "${BLUE}${ICON_ARROW}${NC} System Configuration" + check_touchid_sudo + check_rosetta + check_git_config +} + +# ============================================================================ +# Security Checks +# ============================================================================ + +check_filevault() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "check_filevault"; then return; fi + # Check FileVault encryption status + if command -v fdesetup > /dev/null 2>&1; then + local fv_status=$(fdesetup status 2> /dev/null || echo "") + if echo "$fv_status" | grep -q "FileVault is On"; then + echo -e " ${GREEN}✓${NC} FileVault Disk encryption active" + else + echo -e " ${RED}✗${NC} FileVault ${RED}Disk encryption disabled${NC}" + export FILEVAULT_DISABLED=true + fi + fi +} + +check_firewall() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "firewall"; then return; fi + + unset FIREWALL_DISABLED + + # Check third-party firewalls first (lightweight path-based detection, no sudo required) + local third_party_firewall="" + if [[ -d "/Applications/Little Snitch.app" ]] || [[ -d "/Library/Little Snitch" ]]; then + third_party_firewall="Little Snitch" + elif [[ -d "/Applications/LuLu.app" ]]; then + third_party_firewall="LuLu" + elif [[ -d "/Applications/Radio Silence.app" ]]; then + third_party_firewall="Radio Silence" + elif [[ -d "/Applications/Hands Off!.app" ]]; then + third_party_firewall="Hands Off!" + elif [[ -d "/Applications/Murus.app" ]]; then + third_party_firewall="Murus" + elif [[ -d "/Applications/Vallum.app" ]]; then + third_party_firewall="Vallum" + fi + + if [[ -n "$third_party_firewall" ]]; then + echo -e " ${GREEN}✓${NC} Firewall ${third_party_firewall} active" + return + fi + + # Fall back to macOS built-in firewall check + local firewall_output=$(sudo /usr/libexec/ApplicationFirewall/socketfilterfw --getglobalstate 2> /dev/null || echo "") + if [[ "$firewall_output" == *"State = 1"* ]] || [[ "$firewall_output" == *"State = 2"* ]]; then + echo -e " ${GREEN}✓${NC} Firewall Network protection enabled" + else + echo -e " ${GRAY}${ICON_WARNING}${NC} Firewall ${YELLOW}Network protection disabled${NC}" + export FIREWALL_DISABLED=true + fi +} + +check_gatekeeper() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "gatekeeper"; then return; fi + # Check Gatekeeper status + if command -v spctl > /dev/null 2>&1; then + local gk_status=$(spctl --status 2> /dev/null || echo "") + if echo "$gk_status" | grep -q "enabled"; then + echo -e " ${GREEN}✓${NC} Gatekeeper App download protection active" + unset GATEKEEPER_DISABLED + else + echo -e " ${GRAY}${ICON_WARNING}${NC} Gatekeeper ${YELLOW}App security disabled${NC}" + export GATEKEEPER_DISABLED=true + fi + fi +} + +check_sip() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "check_sip"; then return; fi + # Check System Integrity Protection + if command -v csrutil > /dev/null 2>&1; then + local sip_status=$(csrutil status 2> /dev/null || echo "") + if echo "$sip_status" | grep -q "enabled"; then + echo -e " ${GREEN}✓${NC} SIP System integrity protected" + else + echo -e " ${GRAY}${ICON_WARNING}${NC} SIP ${YELLOW}System protection disabled${NC}" + fi + fi +} + +check_all_security() { + echo -e "${BLUE}${ICON_ARROW}${NC} Security Status" + check_filevault + check_firewall + check_gatekeeper + check_sip +} + +# ============================================================================ +# Software Update Checks +# ============================================================================ + +# Cache configuration +CACHE_DIR="${HOME}/.cache/mole" +CACHE_TTL=600 # 10 minutes in seconds + +# Ensure cache directory exists +ensure_user_dir "$CACHE_DIR" + +clear_cache_file() { + local file="$1" + rm -f "$file" 2> /dev/null || true +} + +reset_brew_cache() { + clear_cache_file "$CACHE_DIR/brew_updates" +} + +reset_softwareupdate_cache() { + clear_cache_file "$CACHE_DIR/softwareupdate_list" + SOFTWARE_UPDATE_LIST="" +} + +reset_mole_cache() { + clear_cache_file "$CACHE_DIR/mole_version" +} + +# Check if cache is still valid +is_cache_valid() { + local cache_file="$1" + local ttl="${2:-$CACHE_TTL}" + + if [[ ! -f "$cache_file" ]]; then + return 1 + fi + + local cache_age=$(($(get_epoch_seconds) - $(get_file_mtime "$cache_file"))) + [[ $cache_age -lt $ttl ]] +} + +# Cache software update list to avoid calling softwareupdate twice +SOFTWARE_UPDATE_LIST="" + +get_software_updates() { + local cache_file="$CACHE_DIR/softwareupdate_list" + + # Optimized: Use defaults to check if updates are pending (much faster) + local pending_updates + pending_updates=$(defaults read /Library/Preferences/com.apple.SoftwareUpdate LastRecommendedUpdatesAvailable 2> /dev/null || echo "0") + + if [[ "$pending_updates" -gt 0 ]]; then + echo "Updates Available" + else + echo "" + fi +} + +check_homebrew_updates() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "check_homebrew_updates"; then return; fi + + export BREW_OUTDATED_COUNT=0 + export BREW_FORMULA_OUTDATED_COUNT=0 + export BREW_CASK_OUTDATED_COUNT=0 + + if ! command -v brew > /dev/null 2>&1; then + printf " ${GRAY}${ICON_EMPTY}${NC} %-12s %s\n" "Homebrew" "Not installed" + return + fi + + local cache_file="$CACHE_DIR/brew_updates" + local formula_count=0 + local cask_count=0 + local total_count=0 + local use_cache=false + + if is_cache_valid "$cache_file"; then + local cached_formula="" + local cached_cask="" + IFS=' ' read -r cached_formula cached_cask < "$cache_file" || true + if [[ "$cached_formula" =~ ^[0-9]+$ && "$cached_cask" =~ ^[0-9]+$ ]]; then + formula_count="$cached_formula" + cask_count="$cached_cask" + use_cache=true + fi + fi + + if [[ "$use_cache" == "false" ]]; then + local formula_outdated="" + local cask_outdated="" + local formula_status=0 + local cask_status=0 + local spinner_started=false + + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking Homebrew updates..." + spinner_started=true + fi + + if formula_outdated=$(run_with_timeout 8 brew outdated --formula --quiet 2> /dev/null); then + : + else + formula_status=$? + fi + + if cask_outdated=$(run_with_timeout 8 brew outdated --cask --quiet 2> /dev/null); then + : + else + cask_status=$? + fi + + if [[ "$spinner_started" == "true" ]]; then + stop_inline_spinner + fi + + if [[ $formula_status -eq 0 || $cask_status -eq 0 ]]; then + formula_count=$(printf '%s\n' "$formula_outdated" | awk 'NF {count++} END {print count + 0}') + cask_count=$(printf '%s\n' "$cask_outdated" | awk 'NF {count++} END {print count + 0}') + ensure_user_file "$cache_file" + printf '%s %s\n' "$formula_count" "$cask_count" > "$cache_file" 2> /dev/null || true + elif [[ $formula_status -eq 124 || $cask_status -eq 124 ]]; then + printf " ${GRAY}${ICON_WARNING}${NC} %-12s ${YELLOW}%s${NC}\n" "Homebrew" "Check timed out" + return + else + printf " ${GRAY}${ICON_WARNING}${NC} %-12s ${YELLOW}%s${NC}\n" "Homebrew" "Check failed" + return + fi + fi + + total_count=$((formula_count + cask_count)) + export BREW_FORMULA_OUTDATED_COUNT="$formula_count" + export BREW_CASK_OUTDATED_COUNT="$cask_count" + export BREW_OUTDATED_COUNT="$total_count" + + if [[ $total_count -gt 0 ]]; then + local detail="" + if [[ $formula_count -gt 0 ]]; then + detail="${formula_count} formula" + fi + if [[ $cask_count -gt 0 ]]; then + [[ -n "$detail" ]] && detail="${detail}, " + detail="${detail}${cask_count} cask" + fi + [[ -z "$detail" ]] && detail="${total_count} updates" + printf " ${GRAY}%s${NC} %-12s ${YELLOW}%s${NC}\n" "$ICON_WARNING" "Homebrew" "${detail} available" + else + printf " ${GREEN}✓${NC} %-12s %s\n" "Homebrew" "Up to date" + fi +} + +check_appstore_updates() { + # Skipped for speed optimization - consolidated into check_macos_update + # We can't easily distinguish app store vs macos updates without the slow softwareupdate -l call + export APPSTORE_UPDATE_COUNT=0 +} + +check_macos_update() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "check_macos_updates"; then return; fi + + # Fast check using system preferences + local updates_available="false" + if [[ $(get_software_updates) == "Updates Available" ]]; then + updates_available="true" + + # Verify with softwareupdate using --no-scan to avoid triggering a fresh scan + # which can timeout. We prioritize avoiding false negatives (missing actual updates) + # over false positives, so we only clear the update flag when softwareupdate + # explicitly reports "No new software available" + local sw_output="" + local sw_status=0 + local spinner_started=false + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking macOS updates..." + spinner_started=true + fi + + local softwareupdate_timeout=10 + if sw_output=$(run_with_timeout "$softwareupdate_timeout" softwareupdate -l --no-scan 2> /dev/null); then + : + else + sw_status=$? + fi + + if [[ "$spinner_started" == "true" ]]; then + stop_inline_spinner + fi + + # Debug logging for troubleshooting + if [[ -n "${MO_DEBUG:-}" ]]; then + echo "[DEBUG] softwareupdate exit status: $sw_status, output lines: $(echo "$sw_output" | wc -l | tr -d ' ')" >&2 + fi + + # Prefer avoiding false negatives: if the system indicates updates are pending, + # only clear the flag when softwareupdate returns a list without any update entries. + if [[ $sw_status -eq 0 && -n "$sw_output" ]]; then + if ! echo "$sw_output" | grep -qE '^[[:space:]]*\*'; then + updates_available="false" + fi + fi + fi + + export MACOS_UPDATE_AVAILABLE="$updates_available" + + if [[ "$updates_available" == "true" ]]; then + printf " ${GRAY}%s${NC} %-12s ${YELLOW}%s${NC}\n" "$ICON_WARNING" "macOS" "Update available" + else + printf " ${GREEN}✓${NC} %-12s %s\n" "macOS" "System up to date" + fi +} + +check_mole_update() { + if command -v is_whitelisted > /dev/null && is_whitelisted "check_mole_update"; then return; fi + + # Check if Mole has updates + # Auto-detect version from mole main script + local current_version + if [[ -f "${SCRIPT_DIR:-/usr/local/bin}/mole" ]]; then + current_version=$(grep '^VERSION=' "${SCRIPT_DIR:-/usr/local/bin}/mole" 2> /dev/null | head -1 | sed 's/VERSION="\(.*\)"/\1/' || echo "unknown") + else + current_version="${VERSION:-unknown}" + fi + + local latest_version="" + local cache_file="$CACHE_DIR/mole_version" + + export MOLE_UPDATE_AVAILABLE="false" + + # Check cache first + if is_cache_valid "$cache_file"; then + latest_version=$(cat "$cache_file" 2> /dev/null || echo "") + else + # Show spinner while checking + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking Mole version..." + fi + + # Try to get latest version from GitHub + if command -v curl > /dev/null 2>&1; then + # Run in background to allow Ctrl+C to interrupt + local temp_version + temp_version=$(mktemp_file "mole_version_check") + curl -fsSL --connect-timeout 3 --max-time 5 https://api.github.com/repos/tw93/mole/releases/latest 2> /dev/null | grep '"tag_name"' | sed -E 's/.*"v?([^"]+)".*/\1/' > "$temp_version" & + local curl_pid=$! + + # Wait for curl to complete (allows Ctrl+C to interrupt) + if wait "$curl_pid" 2> /dev/null; then + latest_version=$(cat "$temp_version" 2> /dev/null || echo "") + # Save to cache + if [[ -n "$latest_version" ]]; then + ensure_user_file "$cache_file" + echo "$latest_version" > "$cache_file" 2> /dev/null || true + fi + fi + rm -f "$temp_version" 2> /dev/null || true + fi + + # Stop spinner + if [[ -t 1 ]]; then + stop_inline_spinner + fi + fi + + # Normalize version strings (remove leading 'v' or 'V') + current_version="${current_version#v}" + current_version="${current_version#V}" + latest_version="${latest_version#v}" + latest_version="${latest_version#V}" + + if [[ -n "$latest_version" && "$current_version" != "$latest_version" ]]; then + # Compare versions + if [[ "$(printf '%s\n' "$current_version" "$latest_version" | sort -V | head -1)" == "$current_version" ]]; then + export MOLE_UPDATE_AVAILABLE="true" + printf " ${GRAY}%s${NC} %-12s ${YELLOW}%s${NC}, running %s\n" "$ICON_WARNING" "Mole" "${latest_version} available" "${current_version}" + else + printf " ${GREEN}✓${NC} %-12s %s\n" "Mole" "Latest version ${current_version}" + fi + else + printf " ${GREEN}✓${NC} %-12s %s\n" "Mole" "Latest version ${current_version}" + fi +} + +check_all_updates() { + # Reset spinner flag for softwareupdate + unset SOFTWAREUPDATE_SPINNER_SHOWN + + # Preload software update data to avoid delays between subsequent checks + # Only redirect stdout, keep stderr for spinner display + get_software_updates > /dev/null + + echo -e "${BLUE}${ICON_ARROW}${NC} System Updates" + check_homebrew_updates + check_appstore_updates + check_macos_update + check_mole_update +} + +get_appstore_update_labels() { + get_software_updates | awk ' + /^\*/ { + label=$0 + sub(/^[[:space:]]*\* Label: */, "", label) + sub(/,.*/, "", label) + lower=tolower(label) + if (index(lower, "macos") == 0) { + print label + } + } + ' +} + +get_macos_update_labels() { + get_software_updates | awk ' + /^\*/ { + label=$0 + sub(/^[[:space:]]*\* Label: */, "", label) + sub(/,.*/, "", label) + lower=tolower(label) + if (index(lower, "macos") != 0) { + print label + } + } + ' +} + +# ============================================================================ +# System Health Checks +# ============================================================================ + +check_disk_space() { + # Use df -k to get KB values (always numeric), then calculate GB via math + # This avoids unit suffix parsing issues (df -H can return MB or GB) + local free_kb=$(command df -k / | awk 'NR==2 {print $4}') + local free_gb=$(awk "BEGIN {printf \"%.1f\", $free_kb / 1048576}") + local free_num=$(awk "BEGIN {printf \"%d\", $free_kb / 1048576}") + + export DISK_FREE_GB=$free_num + + if [[ $free_num -lt 20 ]]; then + echo -e " ${RED}✗${NC} Disk Space ${RED}${free_gb}GB free${NC}, Critical" + elif [[ $free_num -lt 50 ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Disk Space ${YELLOW}${free_gb}GB free${NC}, Low" + else + echo -e " ${GREEN}✓${NC} Disk Space ${free_gb}GB free" + fi +} + +check_memory_usage() { + local mem_total + mem_total=$(sysctl -n hw.memsize 2> /dev/null || echo "0") + if [[ -z "$mem_total" || "$mem_total" -le 0 ]]; then + echo -e " ${GRAY}-${NC} Memory Unable to determine" + return + fi + + local vm_output + vm_output=$(vm_stat 2> /dev/null || echo "") + + local page_size + page_size=$(echo "$vm_output" | awk '/page size of/ {print $8}') + [[ -z "$page_size" ]] && page_size=4096 + + local free_pages inactive_pages spec_pages + free_pages=$(echo "$vm_output" | awk '/Pages free/ {gsub(/\./,"",$3); print $3}') + inactive_pages=$(echo "$vm_output" | awk '/Pages inactive/ {gsub(/\./,"",$3); print $3}') + spec_pages=$(echo "$vm_output" | awk '/Pages speculative/ {gsub(/\./,"",$3); print $3}') + + free_pages=${free_pages:-0} + inactive_pages=${inactive_pages:-0} + spec_pages=${spec_pages:-0} + + # Estimate used percent: (total - free - inactive - speculative) / total + local total_pages=$((mem_total / page_size)) + local free_total=$((free_pages + inactive_pages + spec_pages)) + local used_pages=$((total_pages - free_total)) + if ((used_pages < 0)); then + used_pages=0 + fi + + local used_percent + used_percent=$(awk "BEGIN {printf \"%.0f\", ($used_pages / $total_pages) * 100}") + ((used_percent > 100)) && used_percent=100 + ((used_percent < 0)) && used_percent=0 + + if [[ $used_percent -gt 90 ]]; then + echo -e " ${RED}✗${NC} Memory ${RED}${used_percent}% used${NC}, Critical" + elif [[ $used_percent -gt 80 ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Memory ${YELLOW}${used_percent}% used${NC}, High" + else + echo -e " ${GREEN}✓${NC} Memory ${used_percent}% used" + fi +} + +check_login_items() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "check_login_items"; then return; fi + local login_items_count=0 + local -a login_items_list=() + + if [[ -t 0 ]]; then + # Show spinner while getting login items + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking login items..." + fi + + while IFS= read -r login_item; do + [[ -n "$login_item" ]] && login_items_list+=("$login_item") + done < <(list_login_items || true) + login_items_count=${#login_items_list[@]} + + # Stop spinner before output + if [[ -t 1 ]]; then + stop_inline_spinner + fi + fi + + if [[ $login_items_count -gt 15 ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Login Items ${YELLOW}${login_items_count} apps${NC}" + elif [[ $login_items_count -gt 0 ]]; then + echo -e " ${GREEN}✓${NC} Login Items ${login_items_count} apps" + else + echo -e " ${GREEN}✓${NC} Login Items None" + return + fi + + # Show items in a single line (compact) + local preview_limit=3 + ((preview_limit > login_items_count)) && preview_limit=$login_items_count + + local items_display="" + for ((i = 0; i < preview_limit; i++)); do + if [[ $i -eq 0 ]]; then + items_display="${login_items_list[$i]}" + else + items_display="${items_display}, ${login_items_list[$i]}" + fi + done + + if ((login_items_count > preview_limit)); then + local remaining=$((login_items_count - preview_limit)) + items_display="${items_display} +${remaining}" + fi + + echo -e " ${GRAY}${items_display}${NC}" +} + +check_cache_size() { + local cache_size_kb=0 + + # Check common cache locations + local -a cache_paths=( + "$HOME/Library/Caches" + "$HOME/Library/Logs" + ) + + # Show spinner while calculating cache size + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning cache..." + fi + + for cache_path in "${cache_paths[@]}"; do + if [[ -d "$cache_path" ]]; then + local size_output + size_output=$(get_path_size_kb "$cache_path") + [[ "$size_output" =~ ^[0-9]+$ ]] || size_output=0 + cache_size_kb=$((cache_size_kb + size_output)) + fi + done + + local cache_size_gb=$(echo "scale=1; $cache_size_kb / 1024 / 1024" | bc) + export CACHE_SIZE_GB=$cache_size_gb + + # Stop spinner before output + if [[ -t 1 ]]; then + stop_inline_spinner + fi + + # Convert to integer for comparison + local cache_size_int=$(echo "$cache_size_gb" | cut -d'.' -f1) + + if [[ $cache_size_int -gt 10 ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Cache Size ${YELLOW}${cache_size_gb}GB${NC} cleanable" + elif [[ $cache_size_int -gt 5 ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Cache Size ${YELLOW}${cache_size_gb}GB${NC} cleanable" + else + echo -e " ${GREEN}✓${NC} Cache Size ${cache_size_gb}GB" + fi +} + +check_swap_usage() { + # Check swap usage + if command -v sysctl > /dev/null 2>&1; then + local swap_info=$(sysctl vm.swapusage 2> /dev/null || echo "") + if [[ -n "$swap_info" ]]; then + local swap_used=$(echo "$swap_info" | grep -o "used = [0-9.]*[GM]" | awk 'NR==1{print $3}') + swap_used=${swap_used:-0M} + local swap_num="${swap_used//[GM]/}" + + if [[ "$swap_used" == *"G"* ]]; then + local swap_gb=${swap_num%.*} + if [[ $swap_gb -gt 2 ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Swap Usage ${YELLOW}${swap_used}${NC}, High" + else + echo -e " ${GREEN}✓${NC} Swap Usage ${swap_used}" + fi + else + echo -e " ${GREEN}✓${NC} Swap Usage ${swap_used}" + fi + fi + fi +} + +check_brew_health() { + # Check whitelist + if command -v is_whitelisted > /dev/null && is_whitelisted "check_brew_health"; then return; fi +} + +check_system_health() { + echo -e "${BLUE}${ICON_ARROW}${NC} System Health" + check_disk_space + check_memory_usage + check_swap_usage + check_login_items + check_cache_size + # Time Machine check is optional; skip by default to avoid noise on systems without backups +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/check/health_json.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/check/health_json.sh new file mode 100644 index 0000000..cdda7fa --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/check/health_json.sh @@ -0,0 +1,184 @@ +#!/bin/bash +# System Health Check - JSON Generator +# Extracted from tasks.sh + +set -euo pipefail + +# Ensure dependencies are loaded (only if running standalone) +if [[ -z "${MOLE_FILE_OPS_LOADED:-}" ]]; then + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" + source "$SCRIPT_DIR/lib/core/file_ops.sh" +fi + +# Get memory info in GB +get_memory_info() { + local total_bytes used_gb total_gb + + # Total memory + total_bytes=$(sysctl -n hw.memsize 2> /dev/null || echo "0") + total_gb=$(LC_ALL=C awk "BEGIN {printf \"%.2f\", $total_bytes / (1024*1024*1024)}" 2> /dev/null || echo "0") + [[ -z "$total_gb" || "$total_gb" == "" ]] && total_gb="0" + + # Used memory from vm_stat + local vm_output active wired compressed page_size + vm_output=$(vm_stat 2> /dev/null || echo "") + page_size=4096 + + active=$(echo "$vm_output" | LC_ALL=C awk '/Pages active:/ {print $NF}' | tr -d '.\n' 2> /dev/null) + wired=$(echo "$vm_output" | LC_ALL=C awk '/Pages wired down:/ {print $NF}' | tr -d '.\n' 2> /dev/null) + compressed=$(echo "$vm_output" | LC_ALL=C awk '/Pages occupied by compressor:/ {print $NF}' | tr -d '.\n' 2> /dev/null) + + active=${active:-0} + wired=${wired:-0} + compressed=${compressed:-0} + + local used_bytes=$(((active + wired + compressed) * page_size)) + used_gb=$(LC_ALL=C awk "BEGIN {printf \"%.2f\", $used_bytes / (1024*1024*1024)}" 2> /dev/null || echo "0") + [[ -z "$used_gb" || "$used_gb" == "" ]] && used_gb="0" + + echo "$used_gb $total_gb" +} + +# Get disk info +get_disk_info() { + local home="${HOME:-/}" + local df_output total_gb used_gb used_percent + + df_output=$(command df -k "$home" 2> /dev/null | tail -1) + + local total_kb used_kb + total_kb=$(echo "$df_output" | LC_ALL=C awk 'NR==1{print $2}' 2> /dev/null) + used_kb=$(echo "$df_output" | LC_ALL=C awk 'NR==1{print $3}' 2> /dev/null) + + total_kb=${total_kb:-0} + used_kb=${used_kb:-0} + [[ "$total_kb" == "0" ]] && total_kb=1 # Avoid division by zero + + total_gb=$(LC_ALL=C awk "BEGIN {printf \"%.2f\", $total_kb / (1024*1024)}" 2> /dev/null || echo "0") + used_gb=$(LC_ALL=C awk "BEGIN {printf \"%.2f\", $used_kb / (1024*1024)}" 2> /dev/null || echo "0") + used_percent=$(LC_ALL=C awk "BEGIN {printf \"%.1f\", ($used_kb / $total_kb) * 100}" 2> /dev/null || echo "0") + + [[ -z "$total_gb" || "$total_gb" == "" ]] && total_gb="0" + [[ -z "$used_gb" || "$used_gb" == "" ]] && used_gb="0" + [[ -z "$used_percent" || "$used_percent" == "" ]] && used_percent="0" + + echo "$used_gb $total_gb $used_percent" +} + +# Get uptime in days +get_uptime_days() { + local boot_output boot_time uptime_days + + boot_output=$(sysctl -n kern.boottime 2> /dev/null || echo "") + boot_time=$(echo "$boot_output" | awk -F 'sec = |, usec' '{print $2}' 2> /dev/null || echo "") + + if [[ -n "$boot_time" && "$boot_time" =~ ^[0-9]+$ ]]; then + local now + now=$(get_epoch_seconds) + local uptime_sec=$((now - boot_time)) + uptime_days=$(LC_ALL=C awk "BEGIN {printf \"%.1f\", $uptime_sec / 86400}" 2> /dev/null || echo "0") + else + uptime_days="0" + fi + + [[ -z "$uptime_days" || "$uptime_days" == "" ]] && uptime_days="0" + echo "$uptime_days" +} + +# JSON escape helper +json_escape() { + # Escape backslash, double quote, tab, and newline + local escaped + escaped=$(echo -n "$1" | sed 's/\\/\\\\/g; s/"/\\"/g; s/ /\\t/g' | tr '\n' ' ') + echo -n "${escaped% }" +} + +# Generate JSON output +generate_health_json() { + # System info + read -r mem_used mem_total <<< "$(get_memory_info)" + read -r disk_used disk_total disk_percent <<< "$(get_disk_info)" + local uptime=$(get_uptime_days) + + # Ensure all values are valid numbers (fallback to 0) + mem_used=${mem_used:-0} + mem_total=${mem_total:-0} + disk_used=${disk_used:-0} + disk_total=${disk_total:-0} + disk_percent=${disk_percent:-0} + uptime=${uptime:-0} + + # Start JSON + cat << EOF +{ + "memory_used_gb": $mem_used, + "memory_total_gb": $mem_total, + "disk_used_gb": $disk_used, + "disk_total_gb": $disk_total, + "disk_used_percent": $disk_percent, + "uptime_days": $uptime, + "optimizations": [ +EOF + + # Collect all optimization items + local -a items=() + + # Core optimizations (safe and valuable) + items+=('system_maintenance|DNS & Spotlight Check|Refresh DNS cache & verify Spotlight status|true') + items+=('cache_refresh|Finder Cache Refresh|Refresh QuickLook thumbnails & icon services cache|true') + items+=('saved_state_cleanup|App State Cleanup|Remove old saved application states (30+ days)|true') + items+=('fix_broken_configs|Broken Config Repair|Fix corrupted preferences files|true') + items+=('network_optimization|Network Cache Refresh|Optimize DNS cache & restart mDNSResponder|true') + + # Advanced optimizations (high value, auto-run with safety checks) + items+=('sqlite_vacuum|Database Optimization|Compress SQLite databases for Mail, Safari & Messages (skips if apps are running)|true') + items+=('launch_services_rebuild|LaunchServices Repair|Repair "Open with" menu & file associations|true') + items+=('font_cache_rebuild|Font Cache Rebuild|Rebuild font database to fix rendering issues (skips if browsers are running)|true') + items+=('dock_refresh|Dock Refresh|Fix broken icons and visual glitches in the Dock|true') + + # System performance optimizations (new) + items+=('memory_pressure_relief|Memory Optimization|Release inactive memory to improve system responsiveness|true') + items+=('network_stack_optimize|Network Stack Refresh|Flush routing table and ARP cache to resolve network issues|true') + items+=('disk_permissions_repair|Permission Repair|Fix user directory permission issues|true') + items+=('bluetooth_reset|Bluetooth Refresh|Restart Bluetooth module to fix connectivity (skips if in use)|true') + items+=('spotlight_index_optimize|Spotlight Optimization|Rebuild index if search is slow (smart detection)|true') + + # Removed high-risk optimizations: + # - startup_items_cleanup: Risk of deleting legitimate app helpers + # - system_services_refresh: Risk of data loss when killing system services + # - dyld_cache_update: Low benefit, time-consuming, auto-managed by macOS + + # Output items as JSON + local first=true + for item in "${items[@]}"; do + IFS='|' read -r action name desc safe <<< "$item" + + # Escape strings + action=$(json_escape "$action") + name=$(json_escape "$name") + desc=$(json_escape "$desc") + + [[ "$first" == "true" ]] && first=false || echo "," + + cat << EOF + { + "category": "system", + "name": "$name", + "description": "$desc", + "action": "$action", + "safe": $safe + } +EOF + done + + # Close JSON + cat << 'EOF' + ] +} +EOF +} + +# Main execution (for testing) +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + generate_health_json +fi diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/app_caches.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/app_caches.sh new file mode 100644 index 0000000..d99ebea --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/app_caches.sh @@ -0,0 +1,265 @@ +#!/bin/bash +# User GUI Applications Cleanup Module (desktop apps, media, utilities). +set -euo pipefail +# Xcode and iOS tooling. +clean_xcode_tools() { + # Skip DerivedData/Archives while Xcode is running. + local xcode_running=false + if pgrep -x "Xcode" > /dev/null 2>&1; then + xcode_running=true + fi + safe_clean ~/Library/Developer/CoreSimulator/Caches/* "Simulator cache" + safe_clean ~/Library/Developer/CoreSimulator/Devices/*/data/tmp/* "Simulator temp files" + safe_clean ~/Library/Caches/com.apple.dt.Xcode/* "Xcode cache" + safe_clean ~/Library/Developer/Xcode/iOS\ Device\ Logs/* "iOS device logs" + safe_clean ~/Library/Developer/Xcode/watchOS\ Device\ Logs/* "watchOS device logs" + safe_clean ~/Library/Logs/CoreSimulator/* "CoreSimulator logs" + safe_clean ~/Library/Developer/Xcode/Products/* "Xcode build products" + if [[ "$xcode_running" == "false" ]]; then + safe_clean ~/Library/Developer/Xcode/DerivedData/* "Xcode derived data" + safe_clean ~/Library/Developer/Xcode/Archives/* "Xcode archives" + safe_clean ~/Library/Developer/Xcode/DocumentationCache/* "Xcode documentation cache" + safe_clean ~/Library/Developer/Xcode/DocumentationIndex/* "Xcode documentation index" + else + echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode is running, skipping DerivedData/Archives/Documentation cleanup" + fi +} +# Code editors. +clean_code_editors() { + safe_clean ~/Library/Application\ Support/Code/logs/* "VS Code logs" + safe_clean ~/Library/Application\ Support/Code/Cache/* "VS Code cache" + safe_clean ~/Library/Application\ Support/Code/CachedExtensions/* "VS Code extension cache" + safe_clean ~/Library/Application\ Support/Code/CachedData/* "VS Code data cache" + safe_clean ~/Library/Caches/com.sublimetext.*/* "Sublime Text cache" +} +# Communication apps. +clean_communication_apps() { + safe_clean ~/Library/Application\ Support/discord/Cache/* "Discord cache" + safe_clean ~/Library/Application\ Support/legcord/Cache/* "Legcord cache" + safe_clean ~/Library/Application\ Support/Slack/Cache/* "Slack cache" + safe_clean ~/Library/Caches/us.zoom.xos/* "Zoom cache" + safe_clean ~/Library/Caches/com.tencent.xinWeChat/* "WeChat cache" + safe_clean ~/Library/Caches/ru.keepcoder.Telegram/* "Telegram cache" + safe_clean ~/Library/Caches/com.microsoft.teams2/* "Microsoft Teams cache" + safe_clean ~/Library/Caches/net.whatsapp.WhatsApp/* "WhatsApp cache" + safe_clean ~/Library/Caches/com.skype.skype/* "Skype cache" + safe_clean ~/Library/Caches/com.tencent.meeting/* "Tencent Meeting cache" + safe_clean ~/Library/Caches/com.tencent.WeWorkMac/* "WeCom cache" + safe_clean ~/Library/Caches/com.feishu.*/* "Feishu cache" + safe_clean ~/Library/Application\ Support/Microsoft/Teams/Cache/* "Microsoft Teams legacy cache" + safe_clean ~/Library/Application\ Support/Microsoft/Teams/Application\ Cache/* "Microsoft Teams legacy application cache" + safe_clean ~/Library/Application\ Support/Microsoft/Teams/Code\ Cache/* "Microsoft Teams legacy code cache" + safe_clean ~/Library/Application\ Support/Microsoft/Teams/GPUCache/* "Microsoft Teams legacy GPU cache" + safe_clean ~/Library/Application\ Support/Microsoft/Teams/logs/* "Microsoft Teams legacy logs" + safe_clean ~/Library/Application\ Support/Microsoft/Teams/tmp/* "Microsoft Teams legacy temp files" +} +# DingTalk. +clean_dingtalk() { + safe_clean ~/Library/Caches/dd.work.exclusive4aliding/* "DingTalk iDingTalk cache" + safe_clean ~/Library/Caches/com.alibaba.AliLang.osx/* "AliLang security component" + safe_clean ~/Library/Application\ Support/iDingTalk/log/* "DingTalk logs" + safe_clean ~/Library/Application\ Support/iDingTalk/holmeslogs/* "DingTalk holmes logs" +} +# AI assistants. +clean_ai_apps() { + safe_clean ~/Library/Caches/com.openai.chat/* "ChatGPT cache" + safe_clean ~/Library/Caches/com.anthropic.claudefordesktop/* "Claude desktop cache" + safe_clean ~/Library/Logs/Claude/* "Claude logs" +} +# Design and creative tools. +clean_design_tools() { + safe_clean ~/Library/Caches/com.bohemiancoding.sketch3/* "Sketch cache" + safe_clean ~/Library/Application\ Support/com.bohemiancoding.sketch3/cache/* "Sketch app cache" + safe_clean ~/Library/Caches/Adobe/* "Adobe cache" + safe_clean ~/Library/Caches/com.adobe.*/* "Adobe app caches" + safe_clean ~/Library/Caches/com.figma.Desktop/* "Figma cache" + safe_clean ~/Library/Application\ Support/Adobe/Common/Media\ Cache\ Files/* "Adobe media cache files" + # Raycast cache is protected (clipboard history, images). +} +# Video editing tools. +clean_video_tools() { + safe_clean ~/Library/Caches/net.telestream.screenflow10/* "ScreenFlow cache" + safe_clean ~/Library/Caches/com.apple.FinalCut/* "Final Cut Pro cache" + safe_clean ~/Library/Caches/com.blackmagic-design.DaVinciResolve/* "DaVinci Resolve cache" + safe_clean ~/Library/Caches/com.adobe.PremierePro.*/* "Premiere Pro cache" +} +# 3D and CAD tools. +clean_3d_tools() { + safe_clean ~/Library/Caches/org.blenderfoundation.blender/* "Blender cache" + safe_clean ~/Library/Caches/com.maxon.cinema4d/* "Cinema 4D cache" + safe_clean ~/Library/Caches/com.autodesk.*/* "Autodesk cache" + safe_clean ~/Library/Caches/com.sketchup.*/* "SketchUp cache" +} +# Productivity apps. +clean_productivity_apps() { + safe_clean ~/Library/Caches/com.tw93.MiaoYan/* "MiaoYan cache" + safe_clean ~/Library/Caches/com.klee.desktop/* "Klee cache" + safe_clean ~/Library/Caches/klee_desktop/* "Klee desktop cache" + safe_clean ~/Library/Caches/com.orabrowser.app/* "Ora browser cache" + safe_clean ~/Library/Caches/com.filo.client/* "Filo cache" + safe_clean ~/Library/Caches/com.flomoapp.mac/* "Flomo cache" + safe_clean ~/Library/Application\ Support/Quark/Cache/videoCache/* "Quark video cache" +} +# Music/media players (protect Spotify offline music). +clean_media_players() { + local spotify_cache="$HOME/Library/Caches/com.spotify.client" + local spotify_data="$HOME/Library/Application Support/Spotify" + local has_offline_music=false + # Heuristics: offline DB or large cache. + if [[ -f "$spotify_data/PersistentCache/Storage/offline.bnk" ]] || + [[ -d "$spotify_data/PersistentCache/Storage" && -n "$(find "$spotify_data/PersistentCache/Storage" -type f -name "*.file" 2> /dev/null | head -1)" ]]; then + has_offline_music=true + elif [[ -d "$spotify_cache" ]]; then + local cache_size_kb + cache_size_kb=$(get_path_size_kb "$spotify_cache") + if [[ $cache_size_kb -ge 512000 ]]; then + has_offline_music=true + fi + fi + if [[ "$has_offline_music" == "true" ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Spotify cache protected · offline music detected" + note_activity + else + safe_clean ~/Library/Caches/com.spotify.client/* "Spotify cache" + fi + safe_clean ~/Library/Caches/com.apple.Music "Apple Music cache" + safe_clean ~/Library/Caches/com.apple.podcasts "Apple Podcasts cache" + # Apple Podcasts sandbox container: zombie sparse files and stale artwork cache (#387) + safe_clean ~/Library/Containers/com.apple.podcasts/Data/tmp/StreamedMedia "Podcasts streamed media" + safe_clean ~/Library/Containers/com.apple.podcasts/Data/tmp/*.heic "Podcasts artwork cache" + safe_clean ~/Library/Containers/com.apple.podcasts/Data/tmp/*.img "Podcasts image cache" + safe_clean ~/Library/Containers/com.apple.podcasts/Data/tmp/*CFNetworkDownload*.tmp "Podcasts download temp" + safe_clean ~/Library/Caches/com.apple.TV/* "Apple TV cache" + safe_clean ~/Library/Caches/tv.plex.player.desktop "Plex cache" + safe_clean ~/Library/Caches/com.netease.163music "NetEase Music cache" + safe_clean ~/Library/Caches/com.tencent.QQMusic/* "QQ Music cache" + safe_clean ~/Library/Caches/com.kugou.mac/* "Kugou Music cache" + safe_clean ~/Library/Caches/com.kuwo.mac/* "Kuwo Music cache" +} +# Video players. +clean_video_players() { + safe_clean ~/Library/Caches/com.colliderli.iina "IINA cache" + safe_clean ~/Library/Caches/org.videolan.vlc "VLC cache" + safe_clean ~/Library/Caches/io.mpv "MPV cache" + safe_clean ~/Library/Caches/com.iqiyi.player "iQIYI cache" + safe_clean ~/Library/Caches/com.tencent.tenvideo "Tencent Video cache" + safe_clean ~/Library/Caches/tv.danmaku.bili/* "Bilibili cache" + safe_clean ~/Library/Caches/com.douyu.*/* "Douyu cache" + safe_clean ~/Library/Caches/com.huya.*/* "Huya cache" +} +# Download managers. +clean_download_managers() { + safe_clean ~/Library/Caches/net.xmac.aria2gui "Aria2 cache" + safe_clean ~/Library/Caches/org.m0k.transmission "Transmission cache" + safe_clean ~/Library/Caches/com.qbittorrent.qBittorrent "qBittorrent cache" + safe_clean ~/Library/Caches/com.downie.Downie-* "Downie cache" + safe_clean ~/Library/Caches/com.folx.*/* "Folx cache" + safe_clean ~/Library/Caches/com.charlessoft.pacifist/* "Pacifist cache" +} +# Gaming platforms. +clean_gaming_platforms() { + safe_clean ~/Library/Caches/com.valvesoftware.steam/* "Steam cache" + safe_clean ~/Library/Application\ Support/Steam/htmlcache/* "Steam web cache" + safe_clean ~/Library/Application\ Support/Steam/appcache/* "Steam app cache" + safe_clean ~/Library/Application\ Support/Steam/depotcache/* "Steam depot cache" + safe_clean ~/Library/Application\ Support/Steam/steamapps/shadercache/* "Steam shader cache" + safe_clean ~/Library/Application\ Support/Steam/logs/* "Steam logs" + safe_clean ~/Library/Caches/com.epicgames.EpicGamesLauncher/* "Epic Games cache" + safe_clean ~/Library/Caches/com.blizzard.Battle.net/* "Battle.net cache" + safe_clean ~/Library/Application\ Support/Battle.net/Cache/* "Battle.net app cache" + safe_clean ~/Library/Caches/com.ea.*/* "EA Origin cache" + safe_clean ~/Library/Caches/com.gog.galaxy/* "GOG Galaxy cache" + safe_clean ~/Library/Caches/com.riotgames.*/* "Riot Games cache" + safe_clean ~/Library/Application\ Support/minecraft/logs/* "Minecraft logs" + safe_clean ~/Library/Application\ Support/minecraft/crash-reports/* "Minecraft crash reports" + safe_clean ~/Library/Application\ Support/minecraft/webcache/* "Minecraft web cache" + safe_clean ~/Library/Application\ Support/minecraft/webcache2/* "Minecraft web cache 2" + safe_clean ~/.lunarclient/game-cache/* "Lunar Client game cache" + safe_clean ~/.lunarclient/launcher-cache/* "Lunar Client launcher cache" + safe_clean ~/.lunarclient/logs/* "Lunar Client logs" + safe_clean ~/.lunarclient/offline/*/logs/* "Lunar Client offline logs" + safe_clean ~/.lunarclient/offline/files/*/logs/* "Lunar Client offline file logs" +} +# Translation/dictionary apps. +clean_translation_apps() { + safe_clean ~/Library/Caches/com.youdao.YoudaoDict "Youdao Dictionary cache" + safe_clean ~/Library/Caches/com.eudic.* "Eudict cache" + safe_clean ~/Library/Caches/com.bob-build.Bob "Bob Translation cache" +} +# Screenshot/recording tools. +clean_screenshot_tools() { + safe_clean ~/Library/Caches/com.cleanshot.* "CleanShot cache" + safe_clean ~/Library/Caches/com.reincubate.camo "Camo cache" + safe_clean ~/Library/Caches/com.xnipapp.xnip "Xnip cache" +} +# Email clients. +clean_email_clients() { + safe_clean ~/Library/Caches/com.readdle.smartemail-Mac "Spark cache" + safe_clean ~/Library/Caches/com.airmail.* "Airmail cache" +} +# Task management apps. +clean_task_apps() { + safe_clean ~/Library/Caches/com.todoist.mac.Todoist "Todoist cache" + safe_clean ~/Library/Caches/com.any.do.* "Any.do cache" +} +# Shell/terminal utilities. +clean_shell_utils() { + safe_clean ~/.zcompdump* "Zsh completion cache" + safe_clean ~/.lesshst "less history" + safe_clean ~/.viminfo.tmp "Vim temporary files" + safe_clean ~/.wget-hsts "wget HSTS cache" + safe_clean ~/.cacher/logs/* "Cacher logs" + safe_clean ~/.kite/logs/* "Kite logs" +} +# Input methods and system utilities. +clean_system_utils() { + safe_clean ~/Library/Caches/com.runjuu.Input-Source-Pro/* "Input Source Pro cache" + safe_clean ~/Library/Caches/macos-wakatime.WakaTime/* "WakaTime cache" +} +# Note-taking apps. +clean_note_apps() { + safe_clean ~/Library/Caches/notion.id/* "Notion cache" + safe_clean ~/Library/Caches/md.obsidian/* "Obsidian cache" + safe_clean ~/Library/Caches/com.logseq.*/* "Logseq cache" + safe_clean ~/Library/Caches/com.bear-writer.*/* "Bear cache" + safe_clean ~/Library/Caches/com.evernote.*/* "Evernote cache" + safe_clean ~/Library/Caches/com.yinxiang.*/* "Yinxiang Note cache" +} +# Launchers and automation tools. +clean_launcher_apps() { + safe_clean ~/Library/Caches/com.runningwithcrayons.Alfred/* "Alfred cache" + safe_clean ~/Library/Caches/cx.c3.theunarchiver/* "The Unarchiver cache" +} +# Remote desktop tools. +clean_remote_desktop() { + safe_clean ~/Library/Caches/com.teamviewer.*/* "TeamViewer cache" + safe_clean ~/Library/Caches/com.anydesk.*/* "AnyDesk cache" + safe_clean ~/Library/Caches/com.todesk.*/* "ToDesk cache" + safe_clean ~/Library/Caches/com.sunlogin.*/* "Sunlogin cache" +} +# Main entry for GUI app cleanup. +clean_user_gui_applications() { + stop_section_spinner + clean_xcode_tools + clean_code_editors + clean_communication_apps + clean_dingtalk + clean_ai_apps + clean_design_tools + clean_video_tools + clean_3d_tools + clean_productivity_apps + clean_media_players + clean_video_players + clean_download_managers + clean_gaming_platforms + clean_translation_apps + clean_screenshot_tools + clean_email_clients + clean_task_apps + clean_shell_utils + clean_system_utils + clean_note_apps + clean_launcher_apps + clean_remote_desktop +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/apps.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/apps.sh new file mode 100644 index 0000000..f1e454d --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/apps.sh @@ -0,0 +1,779 @@ +#!/bin/bash +# Application Data Cleanup Module +set -euo pipefail + +readonly ORPHAN_AGE_THRESHOLD=${ORPHAN_AGE_THRESHOLD:-${MOLE_ORPHAN_AGE_DAYS:-60}} +# Args: $1=target_dir, $2=label +clean_ds_store_tree() { + local target="$1" + local label="$2" + [[ -d "$target" ]] || return 0 + local file_count=0 + local total_bytes=0 + local spinner_active="false" + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " + start_inline_spinner "Cleaning Finder metadata..." + spinner_active="true" + fi + local -a exclude_paths=( + -path "*/Library/Application Support/MobileSync" -prune -o + -path "*/Library/Developer" -prune -o + -path "*/.Trash" -prune -o + -path "*/node_modules" -prune -o + -path "*/.git" -prune -o + -path "*/Library/Caches" -prune -o + ) + local -a find_cmd=("command" "find" "$target") + if [[ "$target" == "$HOME" ]]; then + find_cmd+=("-maxdepth" "5") + fi + find_cmd+=("${exclude_paths[@]}" "-type" "f" "-name" ".DS_Store" "-print0") + while IFS= read -r -d '' ds_file; do + local size + size=$(get_file_size "$ds_file") + total_bytes=$((total_bytes + size)) + file_count=$((file_count + 1)) + if [[ "$DRY_RUN" != "true" ]]; then + rm -f "$ds_file" 2> /dev/null || true + fi + if [[ $file_count -ge $MOLE_MAX_DS_STORE_FILES ]]; then + break + fi + done < <("${find_cmd[@]}" 2> /dev/null || true) + if [[ "$spinner_active" == "true" ]]; then + stop_section_spinner + fi + if [[ $file_count -gt 0 ]]; then + local size_human + size_human=$(bytes_to_human "$total_bytes") + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $label${NC}, ${YELLOW}$file_count files, $size_human dry${NC}" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} $label${NC}, ${GREEN}$file_count files, $size_human${NC}" + fi + local size_kb=$(((total_bytes + 1023) / 1024)) + files_cleaned=$((files_cleaned + file_count)) + total_size_cleaned=$((total_size_cleaned + size_kb)) + total_items=$((total_items + 1)) + note_activity + fi +} +# Orphaned app data (60+ days inactive). Env: ORPHAN_AGE_THRESHOLD, DRY_RUN +# Usage: scan_installed_apps "output_file" +scan_installed_apps() { + local installed_bundles="$1" + # Cache installed app scan briefly to speed repeated runs. + local cache_file="$HOME/.cache/mole/installed_apps_cache" + local cache_age_seconds=300 # 5 minutes + if [[ -f "$cache_file" ]]; then + local cache_mtime=$(get_file_mtime "$cache_file") + local current_time + current_time=$(get_epoch_seconds) + local age=$((current_time - cache_mtime)) + if [[ $age -lt $cache_age_seconds ]]; then + debug_log "Using cached app list, age: ${age}s" + if [[ -r "$cache_file" ]] && [[ -s "$cache_file" ]]; then + if cat "$cache_file" > "$installed_bundles" 2> /dev/null; then + return 0 + else + debug_log "Warning: Failed to read cache, rebuilding" + fi + else + debug_log "Warning: Cache file empty or unreadable, rebuilding" + fi + fi + fi + debug_log "Scanning installed applications, cache expired or missing" + local -a app_dirs=( + "/Applications" + "/System/Applications" + "$HOME/Applications" + # Homebrew Cask locations + "/opt/homebrew/Caskroom" + "/usr/local/Caskroom" + # Setapp applications + "$HOME/Library/Application Support/Setapp/Applications" + ) + # Temp dir avoids write contention across parallel scans. + local scan_tmp_dir=$(create_temp_dir) + local pids=() + local dir_idx=0 + for app_dir in "${app_dirs[@]}"; do + [[ -d "$app_dir" ]] || continue + ( + local -a app_paths=() + while IFS= read -r app_path; do + [[ -n "$app_path" ]] && app_paths+=("$app_path") + done < <(find "$app_dir" -name '*.app' -maxdepth 3 -type d 2> /dev/null) + local count=0 + for app_path in "${app_paths[@]:-}"; do + local plist_path="$app_path/Contents/Info.plist" + [[ ! -f "$plist_path" ]] && continue + local bundle_id=$(/usr/libexec/PlistBuddy -c "Print :CFBundleIdentifier" "$plist_path" 2> /dev/null || echo "") + if [[ -n "$bundle_id" ]]; then + echo "$bundle_id" + count=$((count + 1)) + fi + done + ) > "$scan_tmp_dir/apps_${dir_idx}.txt" & + pids+=($!) + dir_idx=$((dir_idx + 1)) + done + # Collect running apps and LaunchAgents to avoid false orphan cleanup. + ( + local running_apps=$(run_with_timeout 5 osascript -e 'tell application "System Events" to get bundle identifier of every application process' 2> /dev/null || echo "") + echo "$running_apps" | tr ',' '\n' | sed -e 's/^ *//;s/ *$//' -e '/^$/d' > "$scan_tmp_dir/running.txt" + # Fallback: lsappinfo is more reliable than osascript + if command -v lsappinfo > /dev/null 2>&1; then + run_with_timeout 3 lsappinfo list 2> /dev/null | grep -o '"CFBundleIdentifier"="[^"]*"' | cut -d'"' -f4 >> "$scan_tmp_dir/running.txt" 2> /dev/null || true + fi + ) & + pids+=($!) + ( + run_with_timeout 5 find ~/Library/LaunchAgents /Library/LaunchAgents \ + -name "*.plist" -type f 2> /dev/null | + xargs -I {} basename {} .plist > "$scan_tmp_dir/agents.txt" 2> /dev/null || true + ) & + pids+=($!) + debug_log "Waiting for ${#pids[@]} background processes: ${pids[*]}" + if [[ ${#pids[@]} -gt 0 ]]; then + for pid in "${pids[@]}"; do + wait "$pid" 2> /dev/null || true + done + fi + debug_log "All background processes completed" + cat "$scan_tmp_dir"/*.txt >> "$installed_bundles" 2> /dev/null || true + safe_remove "$scan_tmp_dir" true + sort -u "$installed_bundles" -o "$installed_bundles" + ensure_user_dir "$(dirname "$cache_file")" + cp "$installed_bundles" "$cache_file" 2> /dev/null || true + local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ') + debug_log "Scanned $app_count unique applications" +} +# Sensitive data patterns that should never be treated as orphaned +# These patterns protect security-critical application data +readonly ORPHAN_NEVER_DELETE_PATTERNS=( + "*1password*" "*1Password*" + "*keychain*" "*Keychain*" + "*bitwarden*" "*Bitwarden*" + "*lastpass*" "*LastPass*" + "*keepass*" "*KeePass*" + "*dashlane*" "*Dashlane*" + "*enpass*" "*Enpass*" + "*ssh*" "*gpg*" "*gnupg*" + "com.apple.keychain*" +) + +# Cache file for mdfind results (Bash 3.2 compatible, no associative arrays) +ORPHAN_MDFIND_CACHE_FILE="" + +# Usage: is_bundle_orphaned "bundle_id" "directory_path" "installed_bundles_file" +is_bundle_orphaned() { + local bundle_id="$1" + local directory_path="$2" + local installed_bundles="$3" + + # 1. Fast path: check protection list (in-memory, instant) + if should_protect_data "$bundle_id"; then + return 1 + fi + + # 2. Fast path: check sensitive data patterns (in-memory, instant) + local bundle_lower + bundle_lower=$(echo "$bundle_id" | LC_ALL=C tr '[:upper:]' '[:lower:]') + for pattern in "${ORPHAN_NEVER_DELETE_PATTERNS[@]}"; do + # shellcheck disable=SC2053 + if [[ "$bundle_lower" == $pattern ]]; then + return 1 + fi + done + + # 3. Fast path: check installed bundles file (file read, fast) + if grep -Fxq "$bundle_id" "$installed_bundles" 2> /dev/null; then + return 1 + fi + + # 4. Fast path: hardcoded system components + case "$bundle_id" in + loginwindow | dock | systempreferences | systemsettings | settings | controlcenter | finder | safari) + return 1 + ;; + esac + + # 5. Fast path: 60-day modification check (stat call, fast) + if [[ -e "$directory_path" ]]; then + local last_modified_epoch=$(get_file_mtime "$directory_path") + local current_epoch + current_epoch=$(get_epoch_seconds) + local days_since_modified=$(((current_epoch - last_modified_epoch) / 86400)) + if [[ $days_since_modified -lt ${ORPHAN_AGE_THRESHOLD:-60} ]]; then + return 1 + fi + fi + + # 6. Slow path: mdfind fallback with file-based caching (Bash 3.2 compatible) + # This catches apps installed in non-standard locations + if [[ -n "$bundle_id" ]] && [[ "$bundle_id" =~ ^[a-zA-Z0-9._-]+$ ]] && [[ ${#bundle_id} -ge 5 ]]; then + # Initialize cache file if needed + if [[ -z "$ORPHAN_MDFIND_CACHE_FILE" ]]; then + ORPHAN_MDFIND_CACHE_FILE=$(mktemp "${TMPDIR:-/tmp}/mole_mdfind_cache.XXXXXX") + register_temp_file "$ORPHAN_MDFIND_CACHE_FILE" + fi + + # Check cache first (grep is fast for small files) + if grep -Fxq "FOUND:$bundle_id" "$ORPHAN_MDFIND_CACHE_FILE" 2> /dev/null; then + return 1 + fi + if grep -Fxq "NOTFOUND:$bundle_id" "$ORPHAN_MDFIND_CACHE_FILE" 2> /dev/null; then + # Already checked, not found - continue to return 0 + : + else + # Query mdfind with strict timeout (2 seconds max) + local app_exists + app_exists=$(run_with_timeout 2 mdfind "kMDItemCFBundleIdentifier == '$bundle_id'" 2> /dev/null | head -1 || echo "") + if [[ -n "$app_exists" ]]; then + echo "FOUND:$bundle_id" >> "$ORPHAN_MDFIND_CACHE_FILE" + return 1 + else + echo "NOTFOUND:$bundle_id" >> "$ORPHAN_MDFIND_CACHE_FILE" + fi + fi + fi + + # All checks passed - this is an orphan + return 0 +} + +is_claude_vm_bundle_orphaned() { + local vm_bundle_path="$1" + local installed_bundles="$2" + local claude_bundle_id="com.anthropic.claudefordesktop" + + [[ -d "$vm_bundle_path" ]] || return 1 + + # Extra guard in case the running-app scan missed Claude Desktop. + if pgrep -x "Claude" > /dev/null 2>&1; then + return 1 + fi + + if grep -Fxq "$claude_bundle_id" "$installed_bundles" 2> /dev/null; then + return 1 + fi + + if [[ -z "$ORPHAN_MDFIND_CACHE_FILE" ]]; then + ORPHAN_MDFIND_CACHE_FILE=$(mktemp "${TMPDIR:-/tmp}/mole_mdfind_cache.XXXXXX") + register_temp_file "$ORPHAN_MDFIND_CACHE_FILE" + fi + + if grep -Fxq "FOUND:$claude_bundle_id" "$ORPHAN_MDFIND_CACHE_FILE" 2> /dev/null; then + return 1 + fi + if ! grep -Fxq "NOTFOUND:$claude_bundle_id" "$ORPHAN_MDFIND_CACHE_FILE" 2> /dev/null; then + local app_exists + app_exists=$(run_with_timeout 2 mdfind "kMDItemCFBundleIdentifier == '$claude_bundle_id'" 2> /dev/null | head -1 || echo "") + if [[ -n "$app_exists" ]]; then + echo "FOUND:$claude_bundle_id" >> "$ORPHAN_MDFIND_CACHE_FILE" + return 1 + fi + echo "NOTFOUND:$claude_bundle_id" >> "$ORPHAN_MDFIND_CACHE_FILE" + fi + + return 0 +} + +# Orphaned app data sweep. +clean_orphaned_app_data() { + if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then + stop_section_spinner + echo -e " ${GRAY}${ICON_WARNING}${NC} Skipped: No permission to access Library folders" + return 0 + fi + start_section_spinner "Scanning installed apps..." + local installed_bundles=$(create_temp_file) + scan_installed_apps "$installed_bundles" + stop_section_spinner + local app_count=$(wc -l < "$installed_bundles" 2> /dev/null | tr -d ' ') + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Found $app_count active/installed apps" + local orphaned_count=0 + local total_orphaned_kb=0 + start_section_spinner "Scanning orphaned app resources..." + + local claude_vm_bundle="$HOME/Library/Application Support/Claude/vm_bundles/claudevm.bundle" + if is_claude_vm_bundle_orphaned "$claude_vm_bundle" "$installed_bundles"; then + local claude_vm_size_kb + claude_vm_size_kb=$(get_path_size_kb "$claude_vm_bundle") + if [[ -n "$claude_vm_size_kb" && "$claude_vm_size_kb" != "0" ]]; then + if safe_clean "$claude_vm_bundle" "Orphaned Claude workspace VM"; then + orphaned_count=$((orphaned_count + 1)) + total_orphaned_kb=$((total_orphaned_kb + claude_vm_size_kb)) + fi + fi + fi + + # CRITICAL: NEVER add LaunchAgents or LaunchDaemons (breaks login items/startup apps). + local -a resource_types=( + "$HOME/Library/Caches|Caches|com.*:org.*:net.*:io.*" + "$HOME/Library/Logs|Logs|com.*:org.*:net.*:io.*" + "$HOME/Library/Saved Application State|States|*.savedState" + "$HOME/Library/WebKit|WebKit|com.*:org.*:net.*:io.*" + "$HOME/Library/HTTPStorages|HTTP|com.*:org.*:net.*:io.*" + "$HOME/Library/Cookies|Cookies|*.binarycookies" + ) + for resource_type in "${resource_types[@]}"; do + IFS='|' read -r base_path label patterns <<< "$resource_type" + if [[ ! -d "$base_path" ]]; then + continue + fi + if ! ls "$base_path" > /dev/null 2>&1; then + continue + fi + local -a file_patterns=() + IFS=':' read -ra pattern_arr <<< "$patterns" + for pat in "${pattern_arr[@]}"; do + file_patterns+=("$base_path/$pat") + done + if [[ ${#file_patterns[@]} -gt 0 ]]; then + local _nullglob_state + _nullglob_state=$(shopt -p nullglob || true) + shopt -s nullglob + for item_path in "${file_patterns[@]}"; do + local iteration_count=0 + local old_ifs=$IFS + IFS=$'\n' + local -a matches=() + # shellcheck disable=SC2206 + matches=($item_path) + IFS=$old_ifs + if [[ ${#matches[@]} -eq 0 ]]; then + continue + fi + for match in "${matches[@]}"; do + [[ -e "$match" ]] || continue + iteration_count=$((iteration_count + 1)) + if [[ $iteration_count -gt $MOLE_MAX_ORPHAN_ITERATIONS ]]; then + break + fi + local bundle_id=$(basename "$match") + bundle_id="${bundle_id%.savedState}" + bundle_id="${bundle_id%.binarycookies}" + if is_bundle_orphaned "$bundle_id" "$match" "$installed_bundles"; then + local size_kb + size_kb=$(get_path_size_kb "$match") + if [[ -z "$size_kb" || "$size_kb" == "0" ]]; then + continue + fi + if safe_clean "$match" "Orphaned $label: $bundle_id"; then + orphaned_count=$((orphaned_count + 1)) + total_orphaned_kb=$((total_orphaned_kb + size_kb)) + fi + fi + done + done + eval "$_nullglob_state" + fi + done + stop_section_spinner + if [[ $orphaned_count -gt 0 ]]; then + local orphaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}') + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count items, about ${orphaned_mb}MB" + note_activity + fi + rm -f "$installed_bundles" +} + +# Clean orphaned system-level services (LaunchDaemons, LaunchAgents, PrivilegedHelperTools) +# These are left behind when apps are uninstalled but their system services remain +clean_orphaned_system_services() { + # Requires sudo + if ! sudo -n true 2> /dev/null; then + return 0 + fi + + start_section_spinner "Scanning orphaned system services..." + + local orphaned_count=0 + local total_orphaned_kb=0 + local -a orphaned_files=() + + # Known bundle ID patterns for common apps that leave system services behind + # Format: "file_pattern:app_check_command" + local -a known_orphan_patterns=( + # Sogou Input Method + "com.sogou.*:/Library/Input Methods/SogouInput.app" + # ClashX + "com.west2online.ClashX.*:/Applications/ClashX.app" + # ClashMac + "com.clashmac.*:/Applications/ClashMac.app" + # Nektony App Cleaner + "com.nektony.AC*:/Applications/App Cleaner & Uninstaller.app" + # i4tools (爱思助手) + "cn.i4tools.*:/Applications/i4Tools.app" + ) + + local mdfind_cache_file="" + _system_service_app_exists() { + local bundle_id="$1" + local app_path="$2" + + [[ -n "$app_path" && -d "$app_path" ]] && return 0 + + if [[ -n "$app_path" ]]; then + local app_name + app_name=$(basename "$app_path") + case "$app_path" in + /Applications/*) + [[ -d "$HOME/Applications/$app_name" ]] && return 0 + [[ -d "/Applications/Setapp/$app_name" ]] && return 0 + ;; + /Library/Input\ Methods/*) + [[ -d "$HOME/Library/Input Methods/$app_name" ]] && return 0 + ;; + esac + fi + + if [[ -n "$bundle_id" ]] && [[ "$bundle_id" =~ ^[a-zA-Z0-9._-]+$ ]] && [[ ${#bundle_id} -ge 5 ]]; then + if [[ -z "$mdfind_cache_file" ]]; then + mdfind_cache_file=$(mktemp "${TMPDIR:-/tmp}/mole_mdfind_cache.XXXXXX") + register_temp_file "$mdfind_cache_file" + fi + + if grep -Fxq "FOUND:$bundle_id" "$mdfind_cache_file" 2> /dev/null; then + return 0 + fi + if ! grep -Fxq "NOTFOUND:$bundle_id" "$mdfind_cache_file" 2> /dev/null; then + local app_found + app_found=$(run_with_timeout 2 mdfind "kMDItemCFBundleIdentifier == '$bundle_id'" 2> /dev/null | head -1 || echo "") + if [[ -n "$app_found" ]]; then + echo "FOUND:$bundle_id" >> "$mdfind_cache_file" + return 0 + fi + echo "NOTFOUND:$bundle_id" >> "$mdfind_cache_file" + fi + fi + + return 1 + } + + # Scan system LaunchDaemons + if [[ -d /Library/LaunchDaemons ]]; then + while IFS= read -r -d '' plist; do + local filename + filename=$(basename "$plist") + + # Skip Apple system files + [[ "$filename" == com.apple.* ]] && continue + + # Extract bundle ID from filename (remove .plist extension) + local bundle_id="${filename%.plist}" + + # Check against known orphan patterns + for pattern_entry in "${known_orphan_patterns[@]}"; do + local file_pattern="${pattern_entry%%:*}" + local app_path="${pattern_entry#*:}" + + # shellcheck disable=SC2053 + if [[ "$bundle_id" == $file_pattern ]] && [[ ! -d "$app_path" ]]; then + if _system_service_app_exists "$bundle_id" "$app_path"; then + continue + fi + orphaned_files+=("$plist") + local size_kb + size_kb=$(sudo du -skP "$plist" 2> /dev/null | awk '{print $1}' || echo "0") + total_orphaned_kb=$((total_orphaned_kb + size_kb)) + orphaned_count=$((orphaned_count + 1)) + break + fi + done + done < <(sudo find /Library/LaunchDaemons -maxdepth 1 -name "*.plist" -print0 2> /dev/null) + fi + + # Scan system LaunchAgents + if [[ -d /Library/LaunchAgents ]]; then + while IFS= read -r -d '' plist; do + local filename + filename=$(basename "$plist") + + # Skip Apple system files + [[ "$filename" == com.apple.* ]] && continue + + local bundle_id="${filename%.plist}" + + for pattern_entry in "${known_orphan_patterns[@]}"; do + local file_pattern="${pattern_entry%%:*}" + local app_path="${pattern_entry#*:}" + + # shellcheck disable=SC2053 + if [[ "$bundle_id" == $file_pattern ]] && [[ ! -d "$app_path" ]]; then + if _system_service_app_exists "$bundle_id" "$app_path"; then + continue + fi + orphaned_files+=("$plist") + local size_kb + size_kb=$(sudo du -skP "$plist" 2> /dev/null | awk '{print $1}' || echo "0") + total_orphaned_kb=$((total_orphaned_kb + size_kb)) + orphaned_count=$((orphaned_count + 1)) + break + fi + done + done < <(sudo find /Library/LaunchAgents -maxdepth 1 -name "*.plist" -print0 2> /dev/null) + fi + + # Scan PrivilegedHelperTools + if [[ -d /Library/PrivilegedHelperTools ]]; then + while IFS= read -r -d '' helper; do + local filename + filename=$(basename "$helper") + local bundle_id="$filename" + + # Skip Apple system files + [[ "$filename" == com.apple.* ]] && continue + + for pattern_entry in "${known_orphan_patterns[@]}"; do + local file_pattern="${pattern_entry%%:*}" + local app_path="${pattern_entry#*:}" + + # shellcheck disable=SC2053 + if [[ "$filename" == $file_pattern ]] && [[ ! -d "$app_path" ]]; then + if _system_service_app_exists "$bundle_id" "$app_path"; then + continue + fi + orphaned_files+=("$helper") + local size_kb + size_kb=$(sudo du -skP "$helper" 2> /dev/null | awk '{print $1}' || echo "0") + total_orphaned_kb=$((total_orphaned_kb + size_kb)) + orphaned_count=$((orphaned_count + 1)) + break + fi + done + done < <(sudo find /Library/PrivilegedHelperTools -maxdepth 1 -type f -print0 2> /dev/null) + fi + + stop_section_spinner + + # Report and clean + if [[ $orphaned_count -gt 0 ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Found $orphaned_count orphaned system services" + + for orphan_file in "${orphaned_files[@]}"; do + local filename + filename=$(basename "$orphan_file") + + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + debug_log "[DRY RUN] Would remove orphaned service: $orphan_file" + else + # Unload if it's a LaunchDaemon/LaunchAgent + if [[ "$orphan_file" == *.plist ]]; then + sudo launchctl unload "$orphan_file" 2> /dev/null || true + fi + if safe_sudo_remove "$orphan_file"; then + debug_log "Removed orphaned service: $orphan_file" + fi + fi + done + + local orphaned_kb_display + if [[ $total_orphaned_kb -gt 1024 ]]; then + orphaned_kb_display=$(echo "$total_orphaned_kb" | awk '{printf "%.1fMB", $1/1024}') + else + orphaned_kb_display="${total_orphaned_kb}KB" + fi + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $orphaned_count orphaned services, about $orphaned_kb_display" + note_activity + fi + +} + +# ============================================================================ +# Orphaned LaunchAgent/LaunchDaemon Cleanup (Generic Detection) +# ============================================================================ + +# Extract program path from plist (supports both ProgramArguments and Program) +_extract_program_path() { + local plist="$1" + local program="" + + program=$(plutil -extract ProgramArguments.0 raw "$plist" 2> /dev/null) + if [[ -z "$program" ]]; then + program=$(plutil -extract Program raw "$plist" 2> /dev/null) + fi + + echo "$program" +} + +# Extract associated bundle identifier from plist +_extract_associated_bundle() { + local plist="$1" + local associated="" + + # Try array format first + associated=$(plutil -extract AssociatedBundleIdentifiers.0 raw "$plist" 2> /dev/null) + if [[ -z "$associated" ]] || [[ "$associated" == "1" ]]; then + # Try string format + associated=$(plutil -extract AssociatedBundleIdentifiers raw "$plist" 2> /dev/null) + # Filter out dict/array markers + if [[ "$associated" == "{"* ]] || [[ "$associated" == "["* ]]; then + associated="" + fi + fi + + echo "$associated" +} + +# Check if a LaunchAgent/LaunchDaemon is orphaned using multi-layer verification +# Returns 0 if orphaned, 1 if not orphaned +is_launch_item_orphaned() { + local plist="$1" + + # Layer 1: Check if program path exists + local program=$(_extract_program_path "$plist") + + # No program path - skip (not a standard launch item) + [[ -z "$program" ]] && return 1 + + # Program exists -> not orphaned + [[ -e "$program" ]] && return 1 + + # Layer 2: Check AssociatedBundleIdentifiers + local associated=$(_extract_associated_bundle "$plist") + if [[ -n "$associated" ]]; then + # Check if associated app exists via mdfind + if run_with_timeout 2 mdfind "kMDItemCFBundleIdentifier == '$associated'" 2> /dev/null | head -1 | grep -q .; then + return 1 # Associated app found -> not orphaned + fi + + # Extract vendor name from bundle ID (com.vendor.app -> vendor) + local vendor=$(echo "$associated" | cut -d'.' -f2) + if [[ -n "$vendor" ]] && [[ ${#vendor} -ge 3 ]]; then + # Check if any app from this vendor exists + if find /Applications ~/Applications -maxdepth 2 -iname "*${vendor}*" -type d 2> /dev/null | grep -iq "\.app"; then + return 1 # Vendor app exists -> not orphaned + fi + fi + fi + + # Layer 3: Check Application Support directory activity + if [[ "$program" =~ /Library/Application\ Support/([^/]+)/ ]]; then + local app_support_name="${BASH_REMATCH[1]}" + + # Check both user and system Application Support + for base in "$HOME/Library/Application Support" "/Library/Application Support"; do + local support_path="$base/$app_support_name" + if [[ -d "$support_path" ]]; then + # Check if there are files modified in last 7 days (active usage) + local recent_file=$(find "$support_path" -type f -mtime -7 2> /dev/null | head -1) + if [[ -n "$recent_file" ]]; then + return 1 # Active Application Support -> not orphaned + fi + fi + done + fi + + # Layer 4: Check if app name from program path exists + if [[ "$program" =~ /Applications/([^/]+)\.app/ ]]; then + local app_name="${BASH_REMATCH[1]}" + # Look for apps with similar names (case-insensitive) + if find /Applications ~/Applications -maxdepth 2 -iname "*${app_name}*" -type d 2> /dev/null | grep -iq "\.app"; then + return 1 # Similar app exists -> not orphaned + fi + fi + + # Layer 5: PrivilegedHelper special handling + if [[ "$program" =~ ^/Library/PrivilegedHelperTools/ ]]; then + local filename=$(basename "$plist") + local bundle_id="${filename%.plist}" + + # Extract app hint from bundle ID (com.vendor.app.helper -> vendor) + local app_hint=$(echo "$bundle_id" | sed 's/com\.//; s/\..*helper.*//') + + if [[ -n "$app_hint" ]] && [[ ${#app_hint} -ge 3 ]]; then + # Look for main app + if find /Applications ~/Applications -maxdepth 2 -iname "*${app_hint}*" -type d 2> /dev/null | grep -iq "\.app"; then + return 1 # Helper's main app exists -> not orphaned + fi + fi + fi + + # All checks failed -> likely orphaned + return 0 +} + +# Clean orphaned user-level LaunchAgents +# Only processes ~/Library/LaunchAgents (safer than system-level) +clean_orphaned_launch_agents() { + local launch_agents_dir="$HOME/Library/LaunchAgents" + + [[ ! -d "$launch_agents_dir" ]] && return 0 + + start_section_spinner "Scanning orphaned launch agents..." + + local -a orphaned_items=() + local total_orphaned_kb=0 + + # Scan user LaunchAgents + while IFS= read -r -d '' plist; do + local filename=$(basename "$plist") + + # Skip Apple's LaunchAgents + [[ "$filename" == com.apple.* ]] && continue + + local bundle_id="${filename%.plist}" + + # Check if orphaned using multi-layer verification + if is_launch_item_orphaned "$plist"; then + local size_kb=$(get_path_size_kb "$plist") + orphaned_items+=("$bundle_id|$plist") + total_orphaned_kb=$((total_orphaned_kb + size_kb)) + fi + done < <(find "$launch_agents_dir" -maxdepth 1 -name "*.plist" -print0 2> /dev/null) + + stop_section_spinner + + local orphaned_count=${#orphaned_items[@]} + + if [[ $orphaned_count -eq 0 ]]; then + return 0 + fi + + # Clean the orphaned items automatically + local removed_count=0 + local dry_run_count=0 + local is_dry_run=false + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + is_dry_run=true + fi + for item in "${orphaned_items[@]}"; do + IFS='|' read -r bundle_id plist_path <<< "$item" + + if [[ "$is_dry_run" == "true" ]]; then + dry_run_count=$((dry_run_count + 1)) + log_operation "clean" "DRY_RUN" "$plist_path" "orphaned launch agent" + continue + fi + + # Try to unload first (if currently loaded) + launchctl unload "$plist_path" 2> /dev/null || true + + # Remove the plist file + if safe_remove "$plist_path" false; then + removed_count=$((removed_count + 1)) + log_operation "clean" "REMOVED" "$plist_path" "orphaned launch agent" + else + log_operation "clean" "FAILED" "$plist_path" "permission denied" + fi + done + + if [[ "$is_dry_run" == "true" ]]; then + if [[ $dry_run_count -gt 0 ]]; then + local cleaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}') + echo " ${YELLOW}${ICON_DRY_RUN}${NC} Would remove $dry_run_count orphaned launch agent(s), ${cleaned_mb}MB" + note_activity + fi + else + if [[ $removed_count -gt 0 ]]; then + local cleaned_mb=$(echo "$total_orphaned_kb" | awk '{printf "%.1f", $1/1024}') + echo " ${GREEN}${ICON_SUCCESS}${NC} Removed $removed_count orphaned launch agent(s), ${cleaned_mb}MB" + note_activity + fi + fi +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/brew.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/brew.sh new file mode 100644 index 0000000..202c45a --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/brew.sh @@ -0,0 +1,127 @@ +#!/bin/bash +# Clean Homebrew caches and remove orphaned dependencies +# Env: DRY_RUN +# Skips if run within 7 days, runs cleanup/autoremove in parallel with 120s timeout +clean_homebrew() { + command -v brew > /dev/null 2>&1 || return 0 + if [[ "${DRY_RUN:-false}" == "true" ]]; then + # Check if Homebrew cache is whitelisted + if is_path_whitelisted "$HOME/Library/Caches/Homebrew"; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew · skipped whitelist" + else + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Homebrew · would cleanup and autoremove" + fi + return 0 + fi + # Keep behavior consistent with dry-run preview. + if is_path_whitelisted "$HOME/Library/Caches/Homebrew"; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew · skipped whitelist" + return 0 + fi + # Skip if cleaned recently to avoid repeated heavy operations. + local brew_cache_file="${HOME}/.cache/mole/brew_last_cleanup" + local cache_valid_days=7 + local should_skip=false + if [[ -f "$brew_cache_file" ]]; then + local last_cleanup + last_cleanup=$(cat "$brew_cache_file" 2> /dev/null || echo "0") + local current_time + current_time=$(get_epoch_seconds) + local time_diff=$((current_time - last_cleanup)) + local days_diff=$((time_diff / 86400)) + if [[ $days_diff -lt $cache_valid_days ]]; then + should_skip=true + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew · cleaned ${days_diff}d ago, skipped" + fi + fi + [[ "$should_skip" == "true" ]] && return 0 + # Skip cleanup if cache is small; still run autoremove. + local skip_cleanup=false + local brew_cache_size=0 + if [[ -d ~/Library/Caches/Homebrew ]]; then + brew_cache_size=$(run_with_timeout 3 du -skP ~/Library/Caches/Homebrew 2> /dev/null | awk '{print $1}') + local du_exit=$? + if [[ $du_exit -eq 0 && -n "$brew_cache_size" && "$brew_cache_size" -lt 51200 ]]; then + skip_cleanup=true + fi + fi + # Spinner reflects whether cleanup is skipped. + if [[ -t 1 ]]; then + if [[ "$skip_cleanup" == "true" ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Homebrew autoremove (cleanup skipped)..." + else + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Homebrew cleanup and autoremove..." + fi + fi + # Run cleanup/autoremove in parallel with timeout guard per command. + local timeout_seconds=120 + local brew_tmp_file autoremove_tmp_file + local brew_pid autoremove_pid + local brew_exit=0 + local autoremove_exit=0 + if [[ "$skip_cleanup" == "false" ]]; then + brew_tmp_file=$(create_temp_file) + run_with_timeout "$timeout_seconds" brew cleanup > "$brew_tmp_file" 2>&1 & + brew_pid=$! + fi + autoremove_tmp_file=$(create_temp_file) + run_with_timeout "$timeout_seconds" brew autoremove > "$autoremove_tmp_file" 2>&1 & + autoremove_pid=$! + + if [[ -n "$brew_pid" ]]; then + wait "$brew_pid" 2> /dev/null || brew_exit=$? + fi + wait "$autoremove_pid" 2> /dev/null || autoremove_exit=$? + + local brew_success=false + if [[ "$skip_cleanup" == "false" && $brew_exit -eq 0 ]]; then + brew_success=true + fi + local autoremove_success=false + if [[ $autoremove_exit -eq 0 ]]; then + autoremove_success=true + fi + if [[ -t 1 ]]; then stop_inline_spinner; fi + # Process cleanup output and extract metrics + # Summarize cleanup results. + if [[ "$skip_cleanup" == "true" ]]; then + # Cleanup was skipped due to small cache size + local size_mb=$((brew_cache_size / 1024)) + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup · cache ${size_mb}MB, skipped" + elif [[ "$brew_success" == "true" && -f "$brew_tmp_file" ]]; then + local brew_output + brew_output=$(cat "$brew_tmp_file" 2> /dev/null || echo "") + local removed_count freed_space + removed_count=$(printf '%s\n' "$brew_output" | grep -c "Removing:" 2> /dev/null || true) + freed_space=$(printf '%s\n' "$brew_output" | grep -o "[0-9.]*[KMGT]B freed" 2> /dev/null | tail -1 || true) + if [[ $removed_count -gt 0 ]] || [[ -n "$freed_space" ]]; then + if [[ -n "$freed_space" ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup${NC}, ${GREEN}$freed_space${NC}" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Homebrew cleanup, ${removed_count} items" + fi + fi + elif [[ $brew_exit -eq 124 ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Homebrew cleanup timed out · run ${GRAY}brew cleanup${NC} manually" + fi + # Process autoremove output - only show if packages were removed + # Only surface autoremove output when packages were removed. + if [[ "$autoremove_success" == "true" && -f "$autoremove_tmp_file" ]]; then + local autoremove_output + autoremove_output=$(cat "$autoremove_tmp_file" 2> /dev/null || echo "") + local removed_packages + removed_packages=$(printf '%s\n' "$autoremove_output" | grep -c "^Uninstalling" 2> /dev/null || true) + if [[ $removed_packages -gt 0 ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Removed orphaned dependencies, ${removed_packages} packages" + fi + elif [[ $autoremove_exit -eq 124 ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Autoremove timed out · run ${GRAY}brew autoremove${NC} manually" + fi + # Update cache timestamp on successful completion or when cleanup was intelligently skipped + # This prevents repeated cache size checks within the 7-day window + # Update cache timestamp when any work succeeded or was intentionally skipped. + if [[ "$skip_cleanup" == "true" ]] || [[ "$brew_success" == "true" ]] || [[ "$autoremove_success" == "true" ]]; then + ensure_user_file "$brew_cache_file" + get_epoch_seconds > "$brew_cache_file" + fi +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/caches.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/caches.sh new file mode 100644 index 0000000..72892ce --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/caches.sh @@ -0,0 +1,228 @@ +#!/bin/bash +# Cache Cleanup Module +set -euo pipefail + +# shellcheck disable=SC1091 +source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/purge_shared.sh" +# Preflight TCC prompts once to avoid mid-run interruptions. +check_tcc_permissions() { + [[ -t 1 ]] || return 0 + local permission_flag="$HOME/.cache/mole/permissions_granted" + [[ -f "$permission_flag" ]] && return 0 + local -a tcc_dirs=( + "$HOME/Library/Caches" + "$HOME/Library/Logs" + "$HOME/Library/Application Support" + "$HOME/Library/Containers" + "$HOME/.cache" + ) + # Quick permission probe (avoid deep scans). + local needs_permission_check=false + if ! ls "$HOME/Library/Caches" > /dev/null 2>&1; then + needs_permission_check=true + fi + if [[ "$needs_permission_check" == "true" ]]; then + echo "" + echo -e "${BLUE}First-time setup${NC}" + echo -e "${GRAY}macOS will request permissions to access Library folders.${NC}" + echo -e "${GRAY}You may see ${GREEN}${#tcc_dirs[@]} permission dialogs${NC}${GRAY}, please approve them all.${NC}" + echo "" + echo -ne "${PURPLE}${ICON_ARROW}${NC} Press ${GREEN}Enter${NC} to continue: " + read -r + MOLE_SPINNER_PREFIX="" start_inline_spinner "Requesting permissions..." + # Touch each directory to trigger prompts without deep scanning. + for dir in "${tcc_dirs[@]}"; do + [[ -d "$dir" ]] && command find "$dir" -maxdepth 1 -type d > /dev/null 2>&1 + done + stop_inline_spinner + echo "" + fi + # Mark as granted to avoid repeat prompts. + ensure_user_file "$permission_flag" + return 0 +} +# Args: $1=browser_name, $2=cache_path +# Clean Service Worker cache while protecting critical web editors. +clean_service_worker_cache() { + local browser_name="$1" + local cache_path="$2" + [[ ! -d "$cache_path" ]] && return 0 + local cleaned_size=0 + local protected_count=0 + while IFS= read -r cache_dir; do + [[ ! -d "$cache_dir" ]] && continue + # Extract a best-effort domain name from cache folder. + local domain=$(basename "$cache_dir" | grep -oE '[a-zA-Z0-9][-a-zA-Z0-9]*\.[a-zA-Z]{2,}' | head -1 || echo "") + local size=$(run_with_timeout 5 get_path_size_kb "$cache_dir") + local is_protected=false + for protected_domain in "${PROTECTED_SW_DOMAINS[@]}"; do + if [[ "$domain" == *"$protected_domain"* ]]; then + is_protected=true + protected_count=$((protected_count + 1)) + break + fi + done + if [[ "$is_protected" == "false" ]]; then + if [[ "$DRY_RUN" != "true" ]]; then + safe_remove "$cache_dir" true || true + fi + cleaned_size=$((cleaned_size + size)) + fi + done < <(run_with_timeout 10 sh -c "find '$cache_path' -type d -depth 2 2> /dev/null || true") + if [[ $cleaned_size -gt 0 ]]; then + local spinner_was_running=false + if [[ -t 1 && -n "${INLINE_SPINNER_PID:-}" ]]; then + stop_inline_spinner + spinner_was_running=true + fi + local cleaned_mb=$((cleaned_size / 1024)) + if [[ "$DRY_RUN" != "true" ]]; then + if [[ $protected_count -gt 0 ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} $browser_name Service Worker, ${cleaned_mb}MB, ${protected_count} protected" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} $browser_name Service Worker, ${cleaned_mb}MB" + fi + else + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $browser_name Service Worker, would clean ${cleaned_mb}MB, ${protected_count} protected" + fi + note_activity + if [[ "$spinner_was_running" == "true" ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Scanning browser Service Worker caches..." + fi + fi +} +# Check whether a directory looks like a project container. +project_cache_has_indicators() { + local dir="$1" + local max_depth="${2:-5}" + local indicator_timeout="${MOLE_PROJECT_CACHE_DISCOVERY_TIMEOUT:-2}" + [[ -d "$dir" ]] || return 1 + + local -a find_args=("$dir" "-maxdepth" "$max_depth" "(") + local first=true + local indicator + for indicator in "${MOLE_PURGE_PROJECT_INDICATORS[@]}"; do + if [[ "$first" == "true" ]]; then + first=false + else + find_args+=("-o") + fi + find_args+=("-name" "$indicator") + done + find_args+=(")" "-print" "-quit") + + run_with_timeout "$indicator_timeout" find "${find_args[@]}" 2> /dev/null | grep -q . +} + +# Discover candidate project roots without scanning the whole home directory. +discover_project_cache_roots() { + local -a roots=() + local root + + for root in "${MOLE_PURGE_DEFAULT_SEARCH_PATHS[@]}"; do + [[ -d "$root" ]] && roots+=("$root") + done + + while IFS= read -r root; do + [[ -d "$root" ]] && roots+=("$root") + done < <(mole_purge_read_paths_config "$HOME/.config/mole/purge_paths") + + local dir + local base + for dir in "$HOME"/*/; do + [[ -d "$dir" ]] || continue + dir="${dir%/}" + base=$(basename "$dir") + + case "$base" in + .* | Library | Applications | Movies | Music | Pictures | Public) + continue + ;; + esac + + if project_cache_has_indicators "$dir" 5; then + roots+=("$dir") + fi + done + + [[ ${#roots[@]} -eq 0 ]] && return 0 + + printf '%s\n' "${roots[@]}" | LC_ALL=C sort -u +} + +# Scan a project root for supported build caches while pruning heavy subtrees. +scan_project_cache_root() { + local root="$1" + local output_file="$2" + local scan_timeout="${MOLE_PROJECT_CACHE_SCAN_TIMEOUT:-6}" + [[ -d "$root" ]] || return 0 + + local -a find_args=( + find -P "$root" -maxdepth 9 -mount + "(" -name "Library" -o -name ".Trash" -o -name "node_modules" -o -name ".git" -o -name ".svn" -o -name ".hg" -o -name ".venv" -o -name "venv" -o -name ".pnpm-store" -o -name ".fvm" -o -name "DerivedData" -o -name "Pods" ")" + -prune -o + -type d + "(" -name ".next" -o -name "__pycache__" -o -name ".dart_tool" ")" + -print + ) + + local status=0 + run_with_timeout "$scan_timeout" "${find_args[@]}" >> "$output_file" 2> /dev/null || status=$? + + if [[ $status -eq 124 ]]; then + debug_log "Project cache scan timed out: $root" + elif [[ $status -ne 0 ]]; then + debug_log "Project cache scan failed (${status}): $root" + fi + + return 0 +} + +# Next.js/Python/Flutter project caches scoped to discovered project roots. +clean_project_caches() { + stop_inline_spinner 2> /dev/null || true + + local matches_tmp_file + matches_tmp_file=$(create_temp_file) + + local -a scan_roots=() + local root + while IFS= read -r root; do + [[ -n "$root" ]] && scan_roots+=("$root") + done < <(discover_project_cache_roots) + + [[ ${#scan_roots[@]} -eq 0 ]] && return 0 + + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " + start_inline_spinner "Searching project caches..." + fi + + for root in "${scan_roots[@]}"; do + scan_project_cache_root "$root" "$matches_tmp_file" + done + + if [[ -t 1 ]]; then + stop_inline_spinner + fi + + while IFS= read -r cache_dir; do + case "$(basename "$cache_dir")" in + ".next") + [[ -d "$cache_dir/cache" ]] && safe_clean "$cache_dir/cache"/* "Next.js build cache" || true + ;; + "__pycache__") + [[ -d "$cache_dir" ]] && safe_clean "$cache_dir"/* "Python bytecode cache" || true + ;; + ".dart_tool") + if [[ -d "$cache_dir" ]]; then + safe_clean "$cache_dir" "Flutter build cache (.dart_tool)" || true + local build_dir="$(dirname "$cache_dir")/build" + if [[ -d "$build_dir" ]]; then + safe_clean "$build_dir" "Flutter build cache (build/)" || true + fi + fi + ;; + esac + done < <(LC_ALL=C sort -u "$matches_tmp_file" 2> /dev/null) +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/dev.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/dev.sh new file mode 100644 index 0000000..342ea0f --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/dev.sh @@ -0,0 +1,1077 @@ +#!/bin/bash +# Developer Tools Cleanup Module +set -euo pipefail + +# Tool cache helper (respects DRY_RUN). +clean_tool_cache() { + local description="$1" + shift + if [[ "$DRY_RUN" != "true" ]]; then + local command_succeeded=false + if [[ -t 1 ]]; then + start_section_spinner "Cleaning $description..." + fi + if "$@" > /dev/null 2>&1; then + command_succeeded=true + fi + if [[ -t 1 ]]; then + stop_section_spinner + fi + if [[ "$command_succeeded" == "true" ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} $description" + fi + else + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $description · would clean" + fi + return 0 +} +# npm/pnpm/yarn/bun caches. +clean_dev_npm() { + local npm_default_cache="$HOME/.npm" + local npm_cache_path="$npm_default_cache" + + if command -v npm > /dev/null 2>&1; then + clean_tool_cache "npm cache" npm cache clean --force + + start_section_spinner "Checking npm cache path..." + npm_cache_path=$(run_with_timeout 2 npm config get cache 2> /dev/null) || npm_cache_path="" + stop_section_spinner + + if [[ -z "$npm_cache_path" || "$npm_cache_path" != /* ]]; then + npm_cache_path="$npm_default_cache" + fi + + note_activity + fi + + # These residual directories are not removed by `npm cache clean --force` + local -a npm_residual_dirs=("_cacache" "_npx" "_logs" "_prebuilds") + local -a npm_descriptions=("npm cache directory" "npm npx cache" "npm logs" "npm prebuilds") + + # Clean default npm cache path + local i + for i in "${!npm_residual_dirs[@]}"; do + safe_clean "$npm_default_cache/${npm_residual_dirs[$i]}"/* "${npm_descriptions[$i]}" + done + + # Normalize paths for comparison (remove trailing slash + resolve symlinked dirs) + local npm_cache_path_normalized="${npm_cache_path%/}" + local npm_default_cache_normalized="${npm_default_cache%/}" + if [[ -d "$npm_cache_path_normalized" ]]; then + npm_cache_path_normalized=$(cd "$npm_cache_path_normalized" 2> /dev/null && pwd -P) || npm_cache_path_normalized="${npm_cache_path%/}" + fi + if [[ -d "$npm_default_cache_normalized" ]]; then + npm_default_cache_normalized=$(cd "$npm_default_cache_normalized" 2> /dev/null && pwd -P) || npm_default_cache_normalized="${npm_default_cache%/}" + fi + + # Clean custom npm cache path (if different from default) + if [[ "$npm_cache_path_normalized" != "$npm_default_cache_normalized" ]]; then + for i in "${!npm_residual_dirs[@]}"; do + safe_clean "$npm_cache_path/${npm_residual_dirs[$i]}"/* "${npm_descriptions[$i]} (custom path)" + done + fi + + # Clean pnpm store cache + local pnpm_default_store=~/Library/pnpm/store + # Check if pnpm is actually usable (not just Corepack shim) + if command -v pnpm > /dev/null 2>&1 && COREPACK_ENABLE_DOWNLOAD_PROMPT=0 pnpm --version > /dev/null 2>&1; then + COREPACK_ENABLE_DOWNLOAD_PROMPT=0 clean_tool_cache "pnpm cache" pnpm store prune + local pnpm_store_path + start_section_spinner "Checking store path..." + pnpm_store_path=$(COREPACK_ENABLE_DOWNLOAD_PROMPT=0 run_with_timeout 2 pnpm store path 2> /dev/null) || pnpm_store_path="" + stop_section_spinner + if [[ -n "$pnpm_store_path" && "$pnpm_store_path" != "$pnpm_default_store" ]]; then + safe_clean "$pnpm_default_store"/* "Orphaned pnpm store" + fi + else + # pnpm not installed or not usable, just clean the default store directory + safe_clean "$pnpm_default_store"/* "pnpm store" + fi + note_activity + safe_clean ~/.tnpm/_cacache/* "tnpm cache directory" + safe_clean ~/.tnpm/_logs/* "tnpm logs" + safe_clean ~/.yarn/cache/* "Yarn cache" + safe_clean ~/.bun/install/cache/* "Bun cache" +} +# Python/pip ecosystem caches. +clean_dev_python() { + # Check pip3 is functional (not just macOS stub that triggers CLT install dialog) + if command -v pip3 > /dev/null 2>&1 && pip3 --version > /dev/null 2>&1; then + clean_tool_cache "pip cache" bash -c 'pip3 cache purge > /dev/null 2>&1 || true' + note_activity + fi + safe_clean ~/.pyenv/cache/* "pyenv cache" + safe_clean ~/.cache/poetry/* "Poetry cache" + safe_clean ~/.cache/uv/* "uv cache" + safe_clean ~/.cache/ruff/* "Ruff cache" + safe_clean ~/.cache/mypy/* "MyPy cache" + safe_clean ~/.pytest_cache/* "Pytest cache" + safe_clean ~/.jupyter/runtime/* "Jupyter runtime cache" + safe_clean ~/.cache/huggingface/* "Hugging Face cache" + safe_clean ~/.cache/torch/* "PyTorch cache" + safe_clean ~/.cache/tensorflow/* "TensorFlow cache" + safe_clean ~/.conda/pkgs/* "Conda packages cache" + safe_clean ~/anaconda3/pkgs/* "Anaconda packages cache" + safe_clean ~/.cache/wandb/* "Weights & Biases cache" +} +# Go build/module caches. +clean_dev_go() { + command -v go > /dev/null 2>&1 || return 0 + + local go_build_cache go_mod_cache + go_build_cache=$(go env GOCACHE 2> /dev/null || echo "$HOME/Library/Caches/go-build") + go_mod_cache=$(go env GOMODCACHE 2> /dev/null || echo "$HOME/go/pkg/mod") + + local build_protected=false mod_protected=false + is_path_whitelisted "$go_build_cache" && build_protected=true + is_path_whitelisted "$go_mod_cache" && mod_protected=true + + if [[ "$build_protected" == "true" && "$mod_protected" == "true" ]]; then + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Go cache · would skip (whitelist)" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Go cache · skipped (whitelist)" + fi + return 0 + fi + + if [[ "$build_protected" != "true" && "$mod_protected" != "true" ]]; then + clean_tool_cache "Go cache" bash -c 'go clean -modcache > /dev/null 2>&1 || true; go clean -cache > /dev/null 2>&1 || true' + elif [[ "$build_protected" == "true" ]]; then + clean_tool_cache "Go module cache" bash -c 'go clean -modcache > /dev/null 2>&1 || true' + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Go build cache · skipped (whitelist)" + else + clean_tool_cache "Go build cache" bash -c 'go clean -cache > /dev/null 2>&1 || true' + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Go module cache · skipped (whitelist)" + fi + note_activity +} +# Rust/cargo caches. +clean_dev_rust() { + safe_clean ~/.cargo/registry/cache/* "Rust cargo cache" + safe_clean ~/.cargo/git/* "Cargo git cache" + safe_clean ~/.rustup/downloads/* "Rust downloads cache" +} + +# Helper: Check for multiple versions in a directory. +# Args: $1=directory, $2=tool_name, $3=list_command, $4=remove_command +check_multiple_versions() { + local dir="$1" + local tool_name="$2" + local list_cmd="${3:-}" + local remove_cmd="${4:-}" + + if [[ ! -d "$dir" ]]; then + return 0 + fi + + local count + count=$(find "$dir" -mindepth 1 -maxdepth 1 -type d 2> /dev/null | wc -l | tr -d ' ') + + if [[ "$count" -gt 1 ]]; then + note_activity + local hint="" + if [[ -n "$list_cmd" ]]; then + hint=" · ${GRAY}${list_cmd}${NC}" + fi + echo -e " ${GREEN}${ICON_SUCCESS}${NC} ${tool_name}: ${count} found${hint}" + fi +} + +# Check for multiple Rust toolchains. +check_rust_toolchains() { + command -v rustup > /dev/null 2>&1 || return 0 + + check_multiple_versions \ + "$HOME/.rustup/toolchains" \ + "Rust toolchains" \ + "rustup toolchain list" +} +# Docker caches (guarded by daemon check). +clean_dev_docker() { + if command -v docker > /dev/null 2>&1; then + if [[ "$DRY_RUN" != "true" ]]; then + start_section_spinner "Checking Docker daemon..." + local docker_running=false + if run_with_timeout 3 docker info > /dev/null 2>&1; then + docker_running=true + fi + stop_section_spinner + if [[ "$docker_running" == "true" ]]; then + clean_tool_cache "Docker build cache" docker builder prune -af + else + debug_log "Docker daemon not running, skipping Docker cache cleanup" + fi + else + note_activity + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Docker build cache · would clean" + fi + fi + safe_clean ~/.docker/buildx/cache/* "Docker BuildX cache" +} +# Nix garbage collection. +clean_dev_nix() { + if command -v nix-collect-garbage > /dev/null 2>&1; then + if [[ "$DRY_RUN" != "true" ]]; then + clean_tool_cache "Nix garbage collection" nix-collect-garbage --delete-older-than 30d + else + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Nix garbage collection · would clean" + fi + note_activity + fi +} +# Cloud CLI caches. +clean_dev_cloud() { + safe_clean ~/.kube/cache/* "Kubernetes cache" + safe_clean ~/.local/share/containers/storage/tmp/* "Container storage temp" + safe_clean ~/.aws/cli/cache/* "AWS CLI cache" + safe_clean ~/.config/gcloud/logs/* "Google Cloud logs" + safe_clean ~/.azure/logs/* "Azure CLI logs" +} +# Frontend build caches. +clean_dev_frontend() { + safe_clean ~/.cache/typescript/* "TypeScript cache" + safe_clean ~/.cache/electron/* "Electron cache" + safe_clean ~/.cache/node-gyp/* "node-gyp cache" + safe_clean ~/.node-gyp/* "node-gyp build cache" + safe_clean ~/.turbo/cache/* "Turbo cache" + safe_clean ~/.vite/cache/* "Vite cache" + safe_clean ~/.cache/vite/* "Vite global cache" + safe_clean ~/.cache/webpack/* "Webpack cache" + safe_clean ~/.parcel-cache/* "Parcel cache" + safe_clean ~/.cache/eslint/* "ESLint cache" + safe_clean ~/.cache/prettier/* "Prettier cache" +} +# Check for multiple Android NDK versions. +check_android_ndk() { + check_multiple_versions \ + "$HOME/Library/Android/sdk/ndk" \ + "Android NDK versions" \ + "Android Studio → SDK Manager" +} + +clean_xcode_documentation_cache() { + local doc_cache_root="${MOLE_XCODE_DOCUMENTATION_CACHE_DIR:-/Library/Developer/Xcode/DocumentationCache}" + [[ -d "$doc_cache_root" ]] || return 0 + + if pgrep -x "Xcode" > /dev/null 2>&1; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode is running, skipping documentation cache cleanup" + note_activity + return 0 + fi + + local -a index_entries=() + while IFS= read -r -d '' entry; do + index_entries+=("$entry") + done < <(command find "$doc_cache_root" -mindepth 1 -maxdepth 1 \( -name "DeveloperDocumentation.index" -o -name "DeveloperDocumentation*.index" \) -print0 2> /dev/null) + + if [[ ${#index_entries[@]} -le 1 ]]; then + return 0 + fi + + local -a sorted_entries=() + while IFS= read -r line; do + sorted_entries+=("${line#* }") + done < <( + for entry in "${index_entries[@]}"; do + local mtime + mtime=$(stat -f%m "$entry" 2> /dev/null || echo "0") + printf '%s %s\n' "$mtime" "$entry" + done | sort -rn + ) + + local -a stale_entries=() + local idx=0 + local entry + for entry in "${sorted_entries[@]}"; do + if [[ $idx -eq 0 ]]; then + idx=$((idx + 1)) + continue + fi + stale_entries+=("$entry") + idx=$((idx + 1)) + done + + if [[ ${#stale_entries[@]} -eq 0 ]]; then + return 0 + fi + + if [[ "${DRY_RUN:-false}" == "true" ]]; then + safe_clean "${stale_entries[@]}" "Xcode documentation cache (old indexes)" + note_activity + return 0 + fi + + if ! has_sudo_session; then + if ! ensure_sudo_session "Cleaning Xcode documentation cache requires admin access"; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode documentation cache cleanup skipped (sudo denied)" + note_activity + return 0 + fi + fi + + local removed_count=0 + local skipped_count=0 + local stale_entry + for stale_entry in "${stale_entries[@]}"; do + if should_protect_path "$stale_entry" || is_path_whitelisted "$stale_entry"; then + skipped_count=$((skipped_count + 1)) + continue + fi + if safe_sudo_remove "$stale_entry"; then + removed_count=$((removed_count + 1)) + fi + done + + if [[ $removed_count -gt 0 ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode documentation cache · removed ${removed_count} old indexes" + if [[ $skipped_count -gt 0 ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode documentation cache · skipped ${skipped_count} protected items" + fi + note_activity + elif [[ $skipped_count -gt 0 ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode documentation cache · nothing to clean" + echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode documentation cache · skipped ${skipped_count} protected items" + note_activity + else + echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode documentation cache · no items removed" + note_activity + fi +} + +# Clean old Xcode DeviceSupport versions, keeping the most recent ones. +# Each version holds debug symbols (1-3 GB) for a specific iOS/watchOS/tvOS version. +# Symbols regenerate automatically when a device running that version is connected. +# Args: $1=directory path, $2=display name (e.g. "iOS DeviceSupport") +clean_xcode_device_support() { + local ds_dir="$1" + local display_name="$2" + local keep_count="${MOLE_XCODE_DEVICE_SUPPORT_KEEP:-2}" + [[ "$keep_count" =~ ^[0-9]+$ ]] || keep_count=2 + + [[ -d "$ds_dir" ]] || return 0 + + # Collect version directories (each is a platform version like "17.5 (21F79)") + local -a version_dirs=() + while IFS= read -r -d '' entry; do + # Skip non-directories (e.g. .log files at the top level) + [[ -d "$entry" ]] || continue + version_dirs+=("$entry") + done < <(command find "$ds_dir" -mindepth 1 -maxdepth 1 -print0 2> /dev/null) + + # Sort by modification time (most recent first) + local -a sorted_dirs=() + while IFS= read -r line; do + sorted_dirs+=("${line#* }") + done < <( + for entry in "${version_dirs[@]}"; do + printf '%s %s\n' "$(stat -f%m "$entry" 2> /dev/null || echo 0)" "$entry" + done | sort -rn + ) + + # Get stale versions (everything after keep_count) + local -a stale_dirs=("${sorted_dirs[@]:$keep_count}") + + if [[ ${#stale_dirs[@]} -gt 0 ]]; then + # Calculate total size of stale versions + local stale_size_kb=0 entry_size_kb + for stale_entry in "${stale_dirs[@]}"; do + entry_size_kb=$(get_path_size_kb "$stale_entry" 2> /dev/null || echo 0) + stale_size_kb=$((stale_size_kb + entry_size_kb)) + done + local stale_size_human + stale_size_human=$(bytes_to_human "$((stale_size_kb * 1024))") + + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} ${display_name} · would remove ${#stale_dirs[@]} old versions (${stale_size_human}), keeping ${keep_count} most recent" + note_activity + else + # Remove old versions + local removed_count=0 + for stale_entry in "${stale_dirs[@]}"; do + if should_protect_path "$stale_entry" || is_path_whitelisted "$stale_entry"; then + continue + fi + if safe_remove "$stale_entry"; then + removed_count=$((removed_count + 1)) + fi + done + + if [[ $removed_count -gt 0 ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} ${display_name} · removed ${removed_count} old versions, ${stale_size_human}" + note_activity + fi + fi + fi + + # Clean caches/logs inside kept versions + safe_clean "$ds_dir"/*/Symbols/System/Library/Caches/* "$display_name symbol cache" + safe_clean "$ds_dir"/*.log "$display_name logs" +} + +_sim_runtime_mount_points() { + if [[ -n "${MOLE_XCODE_SIM_RUNTIME_MOUNT_POINTS:-}" ]]; then + printf '%s\n' "$MOLE_XCODE_SIM_RUNTIME_MOUNT_POINTS" + return 0 + fi + mount 2> /dev/null | command awk '{print $3}' || true +} + +_sim_runtime_is_path_in_use() { + local target_path="$1" + shift || true + local mount_path + for mount_path in "$@"; do + [[ -z "$mount_path" ]] && continue + if [[ "$mount_path" == "$target_path" || "$mount_path" == "$target_path"/* ]]; then + return 0 + fi + done + return 1 +} + +_sim_runtime_size_kb() { + local target_path="$1" + local size_kb=0 + if has_sudo_session; then + size_kb=$(sudo du -skP "$target_path" 2> /dev/null | command awk 'NR==1 {print $1; exit}' || echo "0") + else + size_kb=$(du -skP "$target_path" 2> /dev/null | command awk 'NR==1 {print $1; exit}' || echo "0") + fi + + [[ "$size_kb" =~ ^[0-9]+$ ]] || size_kb=0 + echo "$size_kb" +} + +clean_xcode_simulator_runtime_volumes() { + local volumes_root="${MOLE_XCODE_SIM_RUNTIME_VOLUMES_ROOT:-/Library/Developer/CoreSimulator/Volumes}" + local cryptex_root="${MOLE_XCODE_SIM_RUNTIME_CRYPTEX_ROOT:-/Library/Developer/CoreSimulator/Cryptex}" + + local -a candidates=() + local candidate + for candidate in "$volumes_root" "$cryptex_root"; do + [[ -d "$candidate" ]] || continue + while IFS= read -r -d '' entry; do + candidates+=("$entry") + done < <(command find "$candidate" -mindepth 1 -maxdepth 1 -type d -print0 2> /dev/null) + done + + if [[ ${#candidates[@]} -eq 0 ]]; then + return 0 + fi + + local -a mount_points=() + while IFS= read -r line; do + [[ -n "$line" ]] && mount_points+=("$line") + done < <(_sim_runtime_mount_points) + + local -a entry_statuses=() + local -a sorted_candidates=() + local sorted + while IFS= read -r sorted; do + [[ -n "$sorted" ]] && sorted_candidates+=("$sorted") + done < <(printf '%s\n' "${candidates[@]}" | LC_ALL=C sort) + + # Only show scanning message in debug mode; spinner provides visual feedback otherwise + if [[ "${MO_DEBUG:-0}" == "1" ]]; then + echo -e " ${GRAY}${ICON_LIST}${NC} Xcode runtime volumes · scanning ${#sorted_candidates[@]} entries" + fi + local runtime_scan_spinner=false + if [[ -t 1 ]]; then + start_section_spinner "Scanning Xcode runtime volumes..." + runtime_scan_spinner=true + fi + + local in_use_count=0 + local unused_count=0 + for candidate in "${sorted_candidates[@]}"; do + local status="UNUSED" + if [[ ${#mount_points[@]} -gt 0 ]] && _sim_runtime_is_path_in_use "$candidate" "${mount_points[@]}"; then + status="IN_USE" + in_use_count=$((in_use_count + 1)) + else + unused_count=$((unused_count + 1)) + fi + entry_statuses+=("$status") + done + + if [[ "$DRY_RUN" == "true" ]]; then + local -a size_values=() + local in_use_kb=0 + local unused_kb=0 + local i=0 + for candidate in "${sorted_candidates[@]}"; do + local size_kb + size_kb=$(_sim_runtime_size_kb "$candidate") + size_values+=("$size_kb") + local status="${entry_statuses[$i]:-UNUSED}" + if [[ "$status" == "IN_USE" ]]; then + in_use_kb=$((in_use_kb + size_kb)) + else + unused_kb=$((unused_kb + size_kb)) + fi + i=$((i + 1)) + done + if [[ "$runtime_scan_spinner" == "true" ]]; then + stop_section_spinner + runtime_scan_spinner=false + fi + + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Xcode runtime volumes · ${unused_count} unused, ${in_use_count} in use" + local dryrun_total_kb=$((unused_kb + in_use_kb)) + local dryrun_total_human + dryrun_total_human=$(bytes_to_human "$((dryrun_total_kb * 1024))") + local dryrun_unused_human + dryrun_unused_human=$(bytes_to_human "$((unused_kb * 1024))") + local dryrun_in_use_human + dryrun_in_use_human=$(bytes_to_human "$((in_use_kb * 1024))") + echo -e " ${GRAY}${ICON_LIST}${NC} Runtime volumes total: ${dryrun_total_human} (unused ${dryrun_unused_human}, in-use ${dryrun_in_use_human})" + + local dryrun_max_items="${MOLE_SIM_RUNTIME_DRYRUN_MAX_ITEMS:-20}" + [[ "$dryrun_max_items" =~ ^[0-9]+$ ]] || dryrun_max_items=20 + if [[ "$dryrun_max_items" -le 0 ]]; then + dryrun_max_items=20 + fi + + local shown=0 + local line_size_kb line_status line_path + while IFS=$'\t' read -r line_size_kb line_status line_path; do + [[ -z "${line_path:-}" ]] && continue + local line_human + line_human=$(bytes_to_human "$((line_size_kb * 1024))") + echo -e " ${GRAY}${line_status}${NC} ${line_human} · ${line_path}" + shown=$((shown + 1)) + if [[ "$shown" -ge "$dryrun_max_items" ]]; then + break + fi + done < <( + local j=0 + while [[ $j -lt ${#sorted_candidates[@]} ]]; do + printf '%s\t%s\t%s\n' "${size_values[$j]:-0}" "${entry_statuses[$j]:-UNUSED}" "${sorted_candidates[$j]}" + j=$((j + 1)) + done | LC_ALL=C sort -nr -k1,1 + ) + + local total_entries="${#sorted_candidates[@]}" + if [[ "$total_entries" -gt "$shown" ]]; then + local remaining=$((total_entries - shown)) + echo -e " ${GRAY}${ICON_LIST}${NC} ... and ${remaining} more runtime volume entries" + fi + note_activity + return 0 + fi + + # Auto-clean all UNUSED runtime volumes (no user selection) + local -a selected_paths=() + local skipped_protected=0 + local i=0 + for ((i = 0; i < ${#sorted_candidates[@]}; i++)); do + local status="${entry_statuses[$i]:-UNUSED}" + [[ "$status" == "IN_USE" ]] && continue + + local candidate_path="${sorted_candidates[$i]}" + if should_protect_path "$candidate_path" || is_path_whitelisted "$candidate_path"; then + skipped_protected=$((skipped_protected + 1)) + continue + fi + selected_paths+=("$candidate_path") + done + + if [[ "$runtime_scan_spinner" == "true" ]]; then + stop_section_spinner + runtime_scan_spinner=false + fi + + if [[ ${#selected_paths[@]} -eq 0 ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode runtime volumes · already clean" + note_activity + return 0 + fi + + if ! has_sudo_session; then + if ! ensure_sudo_session "Cleaning Xcode runtime volumes requires admin access"; then + echo -e " ${YELLOW}${ICON_WARNING}${NC} Xcode runtime volumes · skipped (sudo denied)" + note_activity + return 0 + fi + fi + + # Perform cleanup and report final result in one line + local removed_count=0 + local removed_size_kb=0 + local selected_path + for selected_path in "${selected_paths[@]}"; do + local selected_size_kb=0 + selected_size_kb=$(_sim_runtime_size_kb "$selected_path") + if safe_sudo_remove "$selected_path"; then + removed_count=$((removed_count + 1)) + removed_size_kb=$((removed_size_kb + selected_size_kb)) + fi + done + + # Unified output: report result, not intermediate steps + if [[ $removed_count -gt 0 ]]; then + local removed_human + removed_human=$(bytes_to_human "$((removed_size_kb * 1024))") + if [[ $skipped_protected -gt 0 ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode runtime volumes · removed ${removed_count} (${removed_human}), skipped ${skipped_protected} protected" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode runtime volumes · removed ${removed_count} (${removed_human})" + fi + note_activity + else + if [[ $skipped_protected -gt 0 ]]; then + echo -e " ${YELLOW}${ICON_WARNING}${NC} Xcode runtime volumes · skipped ${skipped_protected} protected, none removed" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode runtime volumes · already clean" + fi + note_activity + fi +} + +clean_dev_mobile() { + check_android_ndk + clean_xcode_documentation_cache + clean_xcode_simulator_runtime_volumes + + if command -v xcrun > /dev/null 2>&1; then + debug_log "Checking for unavailable Xcode simulators" + local unavailable_before=0 + local unavailable_after=0 + local removed_unavailable=0 + local unavailable_size_kb=0 + local unavailable_size_human="0B" + local -a unavailable_udids=() + local unavailable_udid="" + + # Check if simctl is accessible and working + local simctl_available=true + if ! xcrun simctl list devices > /dev/null 2>&1; then + debug_log "simctl not accessible or CoreSimulator service not running" + echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode unavailable simulators · simctl not available" + note_activity + simctl_available=false + fi + + if [[ "$simctl_available" == "true" ]]; then + unavailable_before=$(xcrun simctl list devices unavailable 2> /dev/null | command awk '/\(unavailable/ { count++ } END { print count+0 }' || echo "0") + [[ "$unavailable_before" =~ ^[0-9]+$ ]] || unavailable_before=0 + while IFS= read -r unavailable_udid; do + [[ -n "$unavailable_udid" ]] && unavailable_udids+=("$unavailable_udid") + done < <( + xcrun simctl list devices unavailable 2> /dev/null | + command sed -nE 's/.*\(([0-9A-Fa-f-]{36})\).*\(unavailable.*/\1/p' || true + ) + if [[ ${#unavailable_udids[@]} -gt 0 ]]; then + local udid + for udid in "${unavailable_udids[@]}"; do + local simulator_device_path="$HOME/Library/Developer/CoreSimulator/Devices/$udid" + if [[ -d "$simulator_device_path" ]]; then + unavailable_size_kb=$((unavailable_size_kb + $(get_path_size_kb "$simulator_device_path"))) + fi + done + fi + unavailable_size_human=$(bytes_to_human "$((unavailable_size_kb * 1024))") + + if [[ "$DRY_RUN" == "true" ]]; then + if ((unavailable_before > 0)); then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Xcode unavailable simulators · would clean ${unavailable_before}, ${unavailable_size_human}" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators · already clean" + fi + else + # Skip if no unavailable simulators + if ((unavailable_before == 0)); then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators · already clean" + note_activity + else + start_section_spinner "Checking unavailable simulators..." + + # Capture error output for diagnostics + local delete_output + local delete_exit_code=0 + delete_output=$(xcrun simctl delete unavailable 2>&1) || delete_exit_code=$? + + if [[ $delete_exit_code -eq 0 ]]; then + stop_section_spinner + unavailable_after=$(xcrun simctl list devices unavailable 2> /dev/null | command awk '/\(unavailable/ { count++ } END { print count+0 }' || echo "0") + [[ "$unavailable_after" =~ ^[0-9]+$ ]] || unavailable_after=0 + + removed_unavailable=$((unavailable_before - unavailable_after)) + if ((removed_unavailable < 0)); then + removed_unavailable=0 + fi + + if ((removed_unavailable > 0)); then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators · removed ${removed_unavailable}, ${unavailable_size_human}" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators · cleanup completed, ${unavailable_size_human}" + fi + else + stop_section_spinner + + # Analyze error and provide helpful message + local error_hint="" + if echo "$delete_output" | grep -qi "permission denied"; then + error_hint=" (permission denied)" + elif echo "$delete_output" | grep -qi "in use\|busy"; then + error_hint=" (device in use)" + elif echo "$delete_output" | grep -qi "unable to boot\|failed to boot"; then + error_hint=" (boot failure)" + elif echo "$delete_output" | grep -qi "service"; then + error_hint=" (CoreSimulator service issue)" + fi + + # Try fallback: manual deletion of unavailable device directories + if [[ ${#unavailable_udids[@]} -gt 0 ]]; then + debug_log "Attempting fallback: manual deletion of unavailable simulators" + local manually_removed=0 + local manual_failed=0 + + for udid in "${unavailable_udids[@]}"; do + # Validate UUID format (36 chars: 8-4-4-4-12 hex pattern) + if [[ ! "$udid" =~ ^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}$ ]]; then + debug_log "Invalid UUID format, skipping: $udid" + ((manual_failed++)) || true + continue + fi + + local device_path="$HOME/Library/Developer/CoreSimulator/Devices/$udid" + if [[ -d "$device_path" ]]; then + # Use safe_remove for validated simulator device directory + if safe_remove "$device_path" true; then + ((manually_removed++)) || true + debug_log "Manually removed simulator: $udid" + else + ((manual_failed++)) || true + debug_log "Failed to manually remove simulator: $udid" + fi + fi + done + + if ((manually_removed > 0)); then + if ((manual_failed == 0)); then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Xcode unavailable simulators · removed ${manually_removed} (fallback), ${unavailable_size_human}" + else + echo -e " ${YELLOW}${ICON_WARNING}${NC} Xcode unavailable simulators · partially cleaned ${manually_removed}/${#unavailable_udids[@]}, ${unavailable_size_human}" + fi + else + echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode unavailable simulators cleanup failed${error_hint}" + debug_log "simctl delete error: $delete_output" + fi + else + echo -e " ${GRAY}${ICON_WARNING}${NC} Xcode unavailable simulators cleanup failed${error_hint}" + debug_log "simctl delete error: $delete_output" + fi + fi + fi + fi # Close if ((unavailable_before == 0)) + note_activity + fi # End of simctl_available check + fi + # Old iOS/watchOS/tvOS DeviceSupport versions (debug symbols for connected devices). + # Each iOS version creates a 1-3 GB folder of debug symbols. Only the versions + # matching currently used devices are needed; older ones regenerate on device connect. + clean_xcode_device_support ~/Library/Developer/Xcode/iOS\ DeviceSupport "iOS DeviceSupport" + clean_xcode_device_support ~/Library/Developer/Xcode/watchOS\ DeviceSupport "watchOS DeviceSupport" + clean_xcode_device_support ~/Library/Developer/Xcode/tvOS\ DeviceSupport "tvOS DeviceSupport" + # Simulator runtime caches. + safe_clean ~/Library/Developer/CoreSimulator/Profiles/Runtimes/*/Contents/Resources/RuntimeRoot/System/Library/Caches/* "Simulator runtime cache" + safe_clean ~/Library/Caches/Google/AndroidStudio*/* "Android Studio cache" + # safe_clean ~/Library/Caches/CocoaPods/* "CocoaPods cache" + # safe_clean ~/.cache/flutter/* "Flutter cache" + safe_clean ~/.android/build-cache/* "Android build cache" + safe_clean ~/.android/cache/* "Android SDK cache" + safe_clean ~/Library/Developer/Xcode/UserData/IB\ Support/* "Xcode Interface Builder cache" + safe_clean ~/.cache/swift-package-manager/* "Swift package manager cache" + # Expo/React Native caches (preserve state.json which contains auth tokens). + safe_clean ~/.expo/expo-go/* "Expo Go cache" + safe_clean ~/.expo/android-apk-cache/* "Expo Android APK cache" + safe_clean ~/.expo/ios-simulator-app-cache/* "Expo iOS simulator app cache" + safe_clean ~/.expo/native-modules-cache/* "Expo native modules cache" + safe_clean ~/.expo/schema-cache/* "Expo schema cache" + safe_clean ~/.expo/template-cache/* "Expo template cache" + safe_clean ~/.expo/versions-cache/* "Expo versions cache" +} +# JVM ecosystem caches. +# Gradle: Respects whitelist, cleaned when not protected via: mo clean --whitelist +clean_dev_jvm() { + # Source Maven cleanup module (requires bash for BASH_SOURCE) + # shellcheck disable=SC1091 + source "$(dirname "${BASH_SOURCE[0]}")/maven.sh" 2> /dev/null || true + if declare -f clean_maven_repository > /dev/null 2>&1; then + clean_maven_repository + fi + safe_clean ~/.sbt/* "SBT cache" + safe_clean ~/.ivy2/cache/* "Ivy cache" + safe_clean ~/.gradle/caches/* "Gradle cache" + safe_clean ~/.gradle/daemon/* "Gradle daemon" +} +# JetBrains Toolbox old IDE versions (keep current + recent backup). +clean_dev_jetbrains_toolbox() { + local toolbox_root="$HOME/Library/Application Support/JetBrains/Toolbox/apps" + [[ -d "$toolbox_root" ]] || return 0 + + local keep_previous="${MOLE_JETBRAINS_TOOLBOX_KEEP:-1}" + [[ "$keep_previous" =~ ^[0-9]+$ ]] || keep_previous=1 + + # Save and filter whitelist patterns for toolbox path + local whitelist_overridden="false" + local -a original_whitelist=() + if [[ ${#WHITELIST_PATTERNS[@]} -gt 0 ]]; then + original_whitelist=("${WHITELIST_PATTERNS[@]}") + local -a filtered_whitelist=() + local pattern + for pattern in "${WHITELIST_PATTERNS[@]}"; do + [[ "$toolbox_root" == "$pattern" || "$pattern" == "$toolbox_root"* ]] && continue + filtered_whitelist+=("$pattern") + done + WHITELIST_PATTERNS=("${filtered_whitelist[@]+${filtered_whitelist[@]}}") + whitelist_overridden="true" + fi + + # Helper to restore whitelist on exit + _restore_whitelist() { + [[ "$whitelist_overridden" == "true" ]] && WHITELIST_PATTERNS=("${original_whitelist[@]}") + return 0 + } + + local -a product_dirs=() + while IFS= read -r -d '' product_dir; do + product_dirs+=("$product_dir") + done < <(command find "$toolbox_root" -mindepth 1 -maxdepth 1 -type d -print0 2> /dev/null) + + if [[ ${#product_dirs[@]} -eq 0 ]]; then + _restore_whitelist + return 0 + fi + + local product_dir + for product_dir in "${product_dirs[@]}"; do + while IFS= read -r -d '' channel_dir; do + local current_link="" + local current_real="" + if [[ -L "$channel_dir/current" ]]; then + current_link=$(readlink "$channel_dir/current" 2> /dev/null || true) + if [[ -n "$current_link" ]]; then + if [[ "$current_link" == /* ]]; then + current_real="$current_link" + else + current_real="$channel_dir/$current_link" + fi + fi + elif [[ -d "$channel_dir/current" ]]; then + current_real="$channel_dir/current" + fi + + local -a version_dirs=() + while IFS= read -r -d '' version_dir; do + local name + name=$(basename "$version_dir") + + [[ "$name" == "current" ]] && continue + [[ "$name" == .* ]] && continue + [[ "$name" == "plugins" || "$name" == "plugins-lib" || "$name" == "plugins-libs" ]] && continue + [[ -n "$current_real" && "$version_dir" == "$current_real" ]] && continue + [[ ! "$name" =~ ^[0-9] ]] && continue + + version_dirs+=("$version_dir") + done < <(command find "$channel_dir" -mindepth 1 -maxdepth 1 -type d -print0 2> /dev/null) + + [[ ${#version_dirs[@]} -eq 0 ]] && continue + + local -a sorted_dirs=() + while IFS= read -r line; do + local dir_path="${line#* }" + sorted_dirs+=("$dir_path") + done < <( + for version_dir in "${version_dirs[@]}"; do + local mtime + mtime=$(stat -f%m "$version_dir" 2> /dev/null || echo "0") + printf '%s %s\n' "$mtime" "$version_dir" + done | sort -rn + ) + + if [[ ${#sorted_dirs[@]} -le "$keep_previous" ]]; then + continue + fi + + local idx=0 + local dir_path + for dir_path in "${sorted_dirs[@]}"; do + if [[ $idx -lt $keep_previous ]]; then + idx=$((idx + 1)) + continue + fi + safe_clean "$dir_path" "JetBrains Toolbox old IDE version" + note_activity + idx=$((idx + 1)) + done + done < <(command find "$product_dir" -mindepth 1 -maxdepth 1 -type d -name "ch-*" -print0 2> /dev/null) + done + + _restore_whitelist +} +# Other language tool caches. +clean_dev_other_langs() { + safe_clean ~/.bundle/cache/* "Ruby Bundler cache" + safe_clean ~/.composer/cache/* "PHP Composer cache" + safe_clean ~/.nuget/packages/* "NuGet packages cache" + # safe_clean ~/.pub-cache/* "Dart Pub cache" + safe_clean ~/.cache/bazel/* "Bazel cache" + safe_clean ~/.cache/zig/* "Zig cache" + safe_clean ~/Library/Caches/deno/* "Deno cache" +} +# CI/CD and DevOps caches. +clean_dev_cicd() { + safe_clean ~/.cache/terraform/* "Terraform cache" + safe_clean ~/.grafana/cache/* "Grafana cache" + safe_clean ~/.prometheus/data/wal/* "Prometheus WAL cache" + safe_clean ~/.jenkins/workspace/*/target/* "Jenkins workspace cache" + safe_clean ~/.cache/gitlab-runner/* "GitLab Runner cache" + safe_clean ~/.github/cache/* "GitHub Actions cache" + safe_clean ~/.circleci/cache/* "CircleCI cache" + safe_clean ~/.sonar/* "SonarQube cache" +} +# Database tool caches. +clean_dev_database() { + safe_clean ~/Library/Caches/com.sequel-ace.sequel-ace/* "Sequel Ace cache" + safe_clean ~/Library/Caches/com.eggerapps.Sequel-Pro/* "Sequel Pro cache" + safe_clean ~/Library/Caches/redis-desktop-manager/* "Redis Desktop Manager cache" + safe_clean ~/Library/Caches/com.navicat.* "Navicat cache" + safe_clean ~/Library/Caches/com.dbeaver.* "DBeaver cache" + safe_clean ~/Library/Caches/com.redis.RedisInsight "Redis Insight cache" +} +# API/debugging tool caches. +clean_dev_api_tools() { + safe_clean ~/Library/Caches/com.postmanlabs.mac/* "Postman cache" + safe_clean ~/Library/Caches/com.konghq.insomnia/* "Insomnia cache" + safe_clean ~/Library/Caches/com.tinyapp.TablePlus/* "TablePlus cache" + safe_clean ~/Library/Caches/com.getpaw.Paw/* "Paw API cache" + safe_clean ~/Library/Caches/com.charlesproxy.charles/* "Charles Proxy cache" + safe_clean ~/Library/Caches/com.proxyman.NSProxy/* "Proxyman cache" +} +# Misc dev tool caches. +clean_dev_misc() { + safe_clean ~/Library/Caches/com.unity3d.*/* "Unity cache" + safe_clean ~/Library/Caches/com.mongodb.compass/* "MongoDB Compass cache" + safe_clean ~/Library/Caches/com.figma.Desktop/* "Figma cache" + safe_clean ~/Library/Caches/com.github.GitHubDesktop/* "GitHub Desktop cache" + safe_clean ~/Library/Caches/SentryCrash/* "Sentry crash reports" + safe_clean ~/Library/Caches/KSCrash/* "KSCrash reports" + safe_clean ~/Library/Caches/com.crashlytics.data/* "Crashlytics data" + safe_clean ~/Library/Application\ Support/Antigravity/Cache/* "Antigravity cache" + safe_clean ~/Library/Application\ Support/Antigravity/Code\ Cache/* "Antigravity code cache" + safe_clean ~/Library/Application\ Support/Antigravity/GPUCache/* "Antigravity GPU cache" + safe_clean ~/Library/Application\ Support/Antigravity/DawnGraphiteCache/* "Antigravity Dawn cache" + safe_clean ~/Library/Application\ Support/Antigravity/DawnWebGPUCache/* "Antigravity WebGPU cache" + # Filo (Electron) + safe_clean ~/Library/Application\ Support/Filo/production/Cache/* "Filo cache" + safe_clean ~/Library/Application\ Support/Filo/production/Code\ Cache/* "Filo code cache" + safe_clean ~/Library/Application\ Support/Filo/production/GPUCache/* "Filo GPU cache" + safe_clean ~/Library/Application\ Support/Filo/production/DawnGraphiteCache/* "Filo Dawn cache" + safe_clean ~/Library/Application\ Support/Filo/production/DawnWebGPUCache/* "Filo WebGPU cache" + # Claude (Electron) + safe_clean ~/Library/Application\ Support/Claude/Cache/* "Claude cache" + safe_clean ~/Library/Application\ Support/Claude/Code\ Cache/* "Claude code cache" + safe_clean ~/Library/Application\ Support/Claude/GPUCache/* "Claude GPU cache" + safe_clean ~/Library/Application\ Support/Claude/DawnGraphiteCache/* "Claude Dawn cache" + safe_clean ~/Library/Application\ Support/Claude/DawnWebGPUCache/* "Claude WebGPU cache" +} +# Shell and VCS leftovers. +clean_dev_shell() { + safe_clean ~/.gitconfig.lock "Git config lock" + safe_clean ~/.gitconfig.bak* "Git config backup" + safe_clean ~/.oh-my-zsh/cache/* "Oh My Zsh cache" + safe_clean ~/.config/fish/fish_history.bak* "Fish shell backup" + safe_clean ~/.bash_history.bak* "Bash history backup" + safe_clean ~/.zsh_history.bak* "Zsh history backup" + safe_clean ~/.cache/pre-commit/* "pre-commit cache" +} +# Network tool caches. +clean_dev_network() { + safe_clean ~/.cache/curl/* "curl cache" + safe_clean ~/.cache/wget/* "wget cache" + safe_clean ~/Library/Caches/curl/* "macOS curl cache" + safe_clean ~/Library/Caches/wget/* "macOS wget cache" +} +# Orphaned SQLite temp files (-shm/-wal). Disabled due to low ROI. +clean_sqlite_temp_files() { + return 0 +} +# Elixir/Erlang ecosystem. +# Note: ~/.mix/archives contains installed Mix tools - excluded from cleanup +clean_dev_elixir() { + safe_clean ~/.hex/cache/* "Hex cache" +} +# Haskell ecosystem. +# Note: ~/.stack/programs contains Stack-installed GHC compilers - excluded from cleanup +clean_dev_haskell() { + safe_clean ~/.cabal/packages/* "Cabal install cache" +} +# OCaml ecosystem. +clean_dev_ocaml() { + safe_clean ~/.opam/download-cache/* "Opam cache" +} +# Editor caches. +# Note: ~/Library/Application Support/Code/User/workspaceStorage contains workspace settings - excluded from cleanup +clean_dev_editors() { + safe_clean ~/Library/Caches/com.microsoft.VSCode/Cache/* "VS Code cached data" + safe_clean ~/Library/Application\ Support/Code/CachedData/* "VS Code cached data" + safe_clean ~/Library/Application\ Support/Code/DawnGraphiteCache/* "VS Code Dawn cache" + safe_clean ~/Library/Application\ Support/Code/DawnWebGPUCache/* "VS Code WebGPU cache" + safe_clean ~/Library/Application\ Support/Code/GPUCache/* "VS Code GPU cache" + safe_clean ~/Library/Application\ Support/Code/CachedExtensionVSIXs/* "VS Code extension cache" + safe_clean ~/Library/Caches/Zed/* "Zed cache" +} +# Main developer tools cleanup sequence. +clean_developer_tools() { + stop_section_spinner + + # CLI tools and languages + clean_sqlite_temp_files + clean_dev_npm + clean_dev_python + clean_dev_go + clean_dev_rust + check_rust_toolchains + clean_dev_docker + clean_dev_cloud + clean_dev_nix + clean_dev_shell + clean_dev_frontend + clean_project_caches + clean_dev_mobile + clean_dev_jvm + clean_dev_jetbrains_toolbox + clean_dev_other_langs + clean_dev_cicd + clean_dev_database + clean_dev_api_tools + clean_dev_network + clean_dev_misc + clean_dev_elixir + clean_dev_haskell + clean_dev_ocaml + + # GUI developer applications + clean_xcode_tools + clean_code_editors + + # Homebrew + safe_clean ~/Library/Caches/Homebrew/* "Homebrew cache" + local brew_lock_dirs=( + "/opt/homebrew/var/homebrew/locks" + "/usr/local/var/homebrew/locks" + ) + for lock_dir in "${brew_lock_dirs[@]}"; do + if [[ -d "$lock_dir" && -w "$lock_dir" ]]; then + safe_clean "$lock_dir"/* "Homebrew lock files" + elif [[ -d "$lock_dir" ]]; then + if find "$lock_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then + debug_log "Skipping read-only Homebrew locks in $lock_dir" + fi + fi + done + clean_homebrew +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/hints.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/hints.sh new file mode 100644 index 0000000..f6538bf --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/hints.sh @@ -0,0 +1,353 @@ +#!/bin/bash +# Hint notices used by `mo clean` (non-destructive guidance only). + +set -euo pipefail + +mole_hints_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +# shellcheck disable=SC1090 +source "$mole_hints_dir/purge_shared.sh" + +# Quick reminder probe for project build artifacts handled by `mo purge`. +# Designed to be very fast: shallow directory checks only, no deep find scans. +# shellcheck disable=SC2329 +load_quick_purge_hint_paths() { + local config_file="$HOME/.config/mole/purge_paths" + local -a paths=() + + while IFS= read -r line; do + [[ -n "$line" ]] && paths+=("$line") + done < <(mole_purge_read_paths_config "$config_file") + + if [[ ${#paths[@]} -eq 0 ]]; then + paths=("${MOLE_PURGE_DEFAULT_SEARCH_PATHS[@]}") + fi + + if [[ ${#paths[@]} -gt 0 ]]; then + printf '%s\n' "${paths[@]}" + fi +} + +# shellcheck disable=SC2329 +hint_get_path_size_kb_with_timeout() { + local path="$1" + local timeout_seconds="${2:-0.8}" + local du_tmp + du_tmp=$(mktemp) + + local du_status=0 + if run_with_timeout "$timeout_seconds" du -skP "$path" > "$du_tmp" 2> /dev/null; then + du_status=0 + else + du_status=$? + fi + + if [[ $du_status -ne 0 ]]; then + rm -f "$du_tmp" + return 1 + fi + + local size_kb + size_kb=$(awk 'NR==1 {print $1; exit}' "$du_tmp") + rm -f "$du_tmp" + + [[ "$size_kb" =~ ^[0-9]+$ ]] || return 1 + printf '%s\n' "$size_kb" +} + +# shellcheck disable=SC2329 +record_project_artifact_hint() { + local path="$1" + + PROJECT_ARTIFACT_HINT_COUNT=$((PROJECT_ARTIFACT_HINT_COUNT + 1)) + + if [[ ${#PROJECT_ARTIFACT_HINT_EXAMPLES[@]} -lt 2 ]]; then + PROJECT_ARTIFACT_HINT_EXAMPLES+=("${path/#$HOME/~}") + fi + + local sample_max=3 + if [[ $PROJECT_ARTIFACT_HINT_ESTIMATE_SAMPLES -ge $sample_max ]]; then + PROJECT_ARTIFACT_HINT_ESTIMATE_PARTIAL=true + return 0 + fi + + local timeout_seconds="0.8" + local size_kb="" + if size_kb=$(hint_get_path_size_kb_with_timeout "$path" "$timeout_seconds"); then + if [[ "$size_kb" =~ ^[0-9]+$ ]]; then + PROJECT_ARTIFACT_HINT_ESTIMATED_KB=$((PROJECT_ARTIFACT_HINT_ESTIMATED_KB + size_kb)) + PROJECT_ARTIFACT_HINT_ESTIMATE_SAMPLES=$((PROJECT_ARTIFACT_HINT_ESTIMATE_SAMPLES + 1)) + else + PROJECT_ARTIFACT_HINT_ESTIMATE_PARTIAL=true + fi + else + PROJECT_ARTIFACT_HINT_ESTIMATE_PARTIAL=true + fi + + return 0 +} + +# shellcheck disable=SC2329 +is_quick_purge_project_root() { + mole_purge_is_project_root "$1" +} + +# shellcheck disable=SC2329 +probe_project_artifact_hints() { + PROJECT_ARTIFACT_HINT_DETECTED=false + PROJECT_ARTIFACT_HINT_COUNT=0 + PROJECT_ARTIFACT_HINT_TRUNCATED=false + PROJECT_ARTIFACT_HINT_EXAMPLES=() + PROJECT_ARTIFACT_HINT_ESTIMATED_KB=0 + PROJECT_ARTIFACT_HINT_ESTIMATE_SAMPLES=0 + PROJECT_ARTIFACT_HINT_ESTIMATE_PARTIAL=false + + local max_projects=200 + local max_projects_per_root=0 + local max_nested_per_project=120 + local max_matches=12 + + local -a target_names=() + while IFS= read -r target_name; do + [[ -n "$target_name" ]] && target_names+=("$target_name") + done < <(mole_purge_quick_hint_target_names) + + local -a scan_roots=() + while IFS= read -r path; do + [[ -n "$path" ]] && scan_roots+=("$path") + done < <(load_quick_purge_hint_paths) + + [[ ${#scan_roots[@]} -eq 0 ]] && return 0 + + # Fairness: avoid one very large root exhausting the entire scan budget. + if [[ $max_projects_per_root -le 0 ]]; then + max_projects_per_root=$(((max_projects + ${#scan_roots[@]} - 1) / ${#scan_roots[@]})) + [[ $max_projects_per_root -lt 25 ]] && max_projects_per_root=25 + fi + [[ $max_projects_per_root -gt $max_projects ]] && max_projects_per_root=$max_projects + + local nullglob_was_set=0 + if shopt -q nullglob; then + nullglob_was_set=1 + fi + shopt -s nullglob + + local scanned_projects=0 + local stop_scan=false + local root project_dir nested_dir target_name candidate + + for root in "${scan_roots[@]}"; do + [[ -d "$root" ]] || continue + local root_projects_scanned=0 + + if is_quick_purge_project_root "$root"; then + scanned_projects=$((scanned_projects + 1)) + root_projects_scanned=$((root_projects_scanned + 1)) + if [[ $scanned_projects -gt $max_projects ]]; then + PROJECT_ARTIFACT_HINT_TRUNCATED=true + stop_scan=true + break + fi + + for target_name in "${target_names[@]}"; do + candidate="$root/$target_name" + if [[ -d "$candidate" ]]; then + record_project_artifact_hint "$candidate" + fi + done + fi + [[ "$stop_scan" == "true" ]] && break + + if [[ $root_projects_scanned -ge $max_projects_per_root ]]; then + PROJECT_ARTIFACT_HINT_TRUNCATED=true + continue + fi + + for project_dir in "$root"/*/; do + [[ -d "$project_dir" ]] || continue + project_dir="${project_dir%/}" + + local project_name + project_name=$(basename "$project_dir") + [[ "$project_name" == .* ]] && continue + + if [[ $root_projects_scanned -ge $max_projects_per_root ]]; then + PROJECT_ARTIFACT_HINT_TRUNCATED=true + break + fi + + scanned_projects=$((scanned_projects + 1)) + root_projects_scanned=$((root_projects_scanned + 1)) + if [[ $scanned_projects -gt $max_projects ]]; then + PROJECT_ARTIFACT_HINT_TRUNCATED=true + stop_scan=true + break + fi + + for target_name in "${target_names[@]}"; do + candidate="$project_dir/$target_name" + if [[ -d "$candidate" ]]; then + record_project_artifact_hint "$candidate" + fi + done + [[ "$stop_scan" == "true" ]] && break + + local nested_count=0 + for nested_dir in "$project_dir"/*/; do + [[ -d "$nested_dir" ]] || continue + nested_dir="${nested_dir%/}" + + local nested_name + nested_name=$(basename "$nested_dir") + [[ "$nested_name" == .* ]] && continue + + case "$nested_name" in + node_modules | target | build | dist | DerivedData | Pods) + continue + ;; + esac + + nested_count=$((nested_count + 1)) + if [[ $nested_count -gt $max_nested_per_project ]]; then + break + fi + + for target_name in "${target_names[@]}"; do + candidate="$nested_dir/$target_name" + if [[ -d "$candidate" ]]; then + record_project_artifact_hint "$candidate" + fi + done + + [[ "$stop_scan" == "true" ]] && break + done + + [[ "$stop_scan" == "true" ]] && break + done + + [[ "$stop_scan" == "true" ]] && break + done + + if [[ $nullglob_was_set -eq 0 ]]; then + shopt -u nullglob + fi + + if [[ $PROJECT_ARTIFACT_HINT_COUNT -gt 0 ]]; then + PROJECT_ARTIFACT_HINT_DETECTED=true + fi + + # Preserve a compact display hint if candidate count is large, but do not + # stop scanning early solely because we exceeded this threshold. + if [[ $PROJECT_ARTIFACT_HINT_COUNT -gt $max_matches ]]; then + PROJECT_ARTIFACT_HINT_TRUNCATED=true + fi + + return 0 +} + +# shellcheck disable=SC2329 +show_system_data_hint_notice() { + local min_gb=2 + local timeout_seconds="0.8" + local max_hits=3 + + local threshold_kb=$((min_gb * 1024 * 1024)) + local -a clue_labels=() + local -a clue_sizes=() + local -a clue_paths=() + + local -a labels=( + "Xcode DerivedData" + "Xcode Archives" + "iPhone backups" + "Simulator data" + "Docker Desktop data" + "Mail data" + ) + local -a paths=( + "$HOME/Library/Developer/Xcode/DerivedData" + "$HOME/Library/Developer/Xcode/Archives" + "$HOME/Library/Application Support/MobileSync/Backup" + "$HOME/Library/Developer/CoreSimulator/Devices" + "$HOME/Library/Containers/com.docker.docker/Data" + "$HOME/Library/Mail" + ) + + local i + for i in "${!paths[@]}"; do + local path="${paths[$i]}" + [[ -d "$path" ]] || continue + + local size_kb="" + if size_kb=$(hint_get_path_size_kb_with_timeout "$path" "$timeout_seconds"); then + if [[ "$size_kb" -ge "$threshold_kb" ]]; then + clue_labels+=("${labels[$i]}") + clue_sizes+=("$size_kb") + clue_paths+=("${path/#$HOME/~}") + if [[ ${#clue_labels[@]} -ge $max_hits ]]; then + break + fi + fi + fi + done + + if [[ ${#clue_labels[@]} -eq 0 ]]; then + note_activity + echo -e " ${GREEN}${ICON_SUCCESS}${NC} No common System Data clues detected" + return 0 + fi + + note_activity + + for i in "${!clue_labels[@]}"; do + local human_size + human_size=$(bytes_to_human "$((clue_sizes[i] * 1024))") + echo -e " ${GREEN}${ICON_LIST}${NC} ${clue_labels[$i]}: ${human_size}" + echo -e " ${GRAY}${ICON_SUBLIST}${NC} Path: ${GRAY}${clue_paths[$i]}${NC}" + done + echo -e " ${GRAY}${ICON_REVIEW}${NC} Review: mo analyze, Device backups, docker system df" +} + +# shellcheck disable=SC2329 +show_project_artifact_hint_notice() { + probe_project_artifact_hints + + if [[ "$PROJECT_ARTIFACT_HINT_DETECTED" != "true" ]]; then + return 0 + fi + + note_activity + + local hint_count_label="$PROJECT_ARTIFACT_HINT_COUNT" + [[ "$PROJECT_ARTIFACT_HINT_TRUNCATED" == "true" ]] && hint_count_label="${hint_count_label}+" + + local example_text="" + if [[ ${#PROJECT_ARTIFACT_HINT_EXAMPLES[@]} -gt 0 ]]; then + example_text="${PROJECT_ARTIFACT_HINT_EXAMPLES[0]}" + if [[ ${#PROJECT_ARTIFACT_HINT_EXAMPLES[@]} -gt 1 ]]; then + example_text+=", ${PROJECT_ARTIFACT_HINT_EXAMPLES[1]}" + fi + fi + + if [[ $PROJECT_ARTIFACT_HINT_ESTIMATE_SAMPLES -gt 0 ]]; then + local estimate_human + estimate_human=$(bytes_to_human "$((PROJECT_ARTIFACT_HINT_ESTIMATED_KB * 1024))") + + local estimate_is_partial="$PROJECT_ARTIFACT_HINT_ESTIMATE_PARTIAL" + if [[ "$PROJECT_ARTIFACT_HINT_TRUNCATED" == "true" ]] || [[ $PROJECT_ARTIFACT_HINT_ESTIMATE_SAMPLES -lt $PROJECT_ARTIFACT_HINT_COUNT ]]; then + estimate_is_partial=true + fi + + if [[ "$estimate_is_partial" == "true" ]]; then + echo -e " ${GREEN}${ICON_LIST}${NC} ${GREEN}${hint_count_label}${NC} candidates, at least ${estimate_human} sampled from ${PROJECT_ARTIFACT_HINT_ESTIMATE_SAMPLES} items" + else + echo -e " ${GREEN}${ICON_LIST}${NC} ${GREEN}${hint_count_label}${NC} candidates, sampled ${estimate_human}" + fi + else + echo -e " ${GREEN}${ICON_LIST}${NC} ${GREEN}${hint_count_label}${NC} candidates" + fi + + if [[ -n "$example_text" ]]; then + echo -e " ${GRAY}${ICON_SUBLIST}${NC} Examples: ${GRAY}${example_text}${NC}" + fi + echo -e " ${GRAY}${ICON_REVIEW}${NC} Review: mo purge" +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/maven.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/maven.sh new file mode 100644 index 0000000..8200bd8 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/maven.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# Maven Repository Cleanup Module +set -euo pipefail + +# Maven local repository cleanup. +# Path: ~/.m2/repository +# Note: This path is in the default whitelist. Remove from whitelist to enable cleanup. +clean_maven_repository() { + local maven_repo="$HOME/.m2/repository" + + # Only clean if the directory exists + [[ -d "$maven_repo" ]] || return 0 + + safe_clean "$maven_repo"/* "Maven local repository" +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/project.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/project.sh new file mode 100644 index 0000000..d6158b4 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/project.sh @@ -0,0 +1,1416 @@ +#!/bin/bash +# Project Purge Module (mo purge). +# Removes heavy project build artifacts and dependencies. +set -euo pipefail + +PROJECT_LIB_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +CORE_LIB_DIR="$(cd "$PROJECT_LIB_DIR/../core" && pwd)" +if ! command -v ensure_user_dir > /dev/null 2>&1; then + # shellcheck disable=SC1090 + source "$CORE_LIB_DIR/common.sh" +fi +# shellcheck disable=SC1090 +source "$PROJECT_LIB_DIR/purge_shared.sh" + +readonly PURGE_TARGETS=("${MOLE_PURGE_TARGETS[@]}") +# Minimum age in days before considering for cleanup. +readonly MIN_AGE_DAYS=7 +# Scan depth defaults (relative to search root). +readonly PURGE_MIN_DEPTH_DEFAULT=1 +readonly PURGE_MAX_DEPTH_DEFAULT=6 +# Search paths (default, can be overridden via config file). +readonly DEFAULT_PURGE_SEARCH_PATHS=("${MOLE_PURGE_DEFAULT_SEARCH_PATHS[@]}") + +# Config file for custom purge paths. +readonly PURGE_CONFIG_FILE="$HOME/.config/mole/purge_paths" + +# Resolved search paths. +PURGE_SEARCH_PATHS=() + +# Project indicators for container detection. +# Monorepo indicators (higher priority) +readonly MONOREPO_INDICATORS=("${MOLE_PURGE_MONOREPO_INDICATORS[@]}") +readonly PROJECT_INDICATORS=("${MOLE_PURGE_PROJECT_INDICATORS[@]}") + +# Check if a directory contains projects (directly or in subdirectories). +is_project_container() { + local dir="$1" + local max_depth="${2:-2}" + + # Skip hidden/system directories. + local basename + basename=$(basename "$dir") + [[ "$basename" == .* ]] && return 1 + [[ "$basename" == "Library" ]] && return 1 + [[ "$basename" == "Applications" ]] && return 1 + [[ "$basename" == "Movies" ]] && return 1 + [[ "$basename" == "Music" ]] && return 1 + [[ "$basename" == "Pictures" ]] && return 1 + [[ "$basename" == "Public" ]] && return 1 + + # Single find expression for indicators. + local -a find_args=("$dir" "-maxdepth" "$max_depth" "(") + local first=true + for indicator in "${PROJECT_INDICATORS[@]}"; do + if [[ "$first" == "true" ]]; then + first=false + else + find_args+=("-o") + fi + find_args+=("-name" "$indicator") + done + find_args+=(")" "-print" "-quit") + + if find "${find_args[@]}" 2> /dev/null | grep -q .; then + return 0 + fi + + return 1 +} + +# Discover project directories in $HOME. +discover_project_dirs() { + local -a discovered=() + + for path in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do + if [[ -d "$path" ]]; then + discovered+=("$path") + fi + done + + # Scan $HOME for other containers (depth 1). + local dir + for dir in "$HOME"/*/; do + [[ ! -d "$dir" ]] && continue + dir="${dir%/}" # Remove trailing slash + + local already_found=false + for existing in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do + if [[ "$dir" == "$existing" ]]; then + already_found=true + break + fi + done + [[ "$already_found" == "true" ]] && continue + + if is_project_container "$dir" 2; then + discovered+=("$dir") + fi + done + + printf '%s\n' "${discovered[@]}" | sort -u +} + +# Save discovered paths to config. +save_discovered_paths() { + local -a paths=("$@") + + ensure_user_dir "$(dirname "$PURGE_CONFIG_FILE")" + + cat > "$PURGE_CONFIG_FILE" << 'EOF' +# Mole Purge Paths - Auto-discovered project directories +# Edit this file to customize, or run: mo purge --paths +# Add one path per line (supports ~ for home directory) +EOF + + printf '\n' >> "$PURGE_CONFIG_FILE" + for path in "${paths[@]}"; do + # Convert $HOME to ~ for portability + path="${path/#$HOME/~}" + echo "$path" >> "$PURGE_CONFIG_FILE" + done +} + +# Load purge paths from config or auto-discover +load_purge_config() { + PURGE_SEARCH_PATHS=() + + while IFS= read -r line; do + [[ -n "$line" ]] && PURGE_SEARCH_PATHS+=("$line") + done < <(mole_purge_read_paths_config "$PURGE_CONFIG_FILE") + + if [[ ${#PURGE_SEARCH_PATHS[@]} -eq 0 ]]; then + if [[ -t 1 ]] && [[ -z "${_PURGE_DISCOVERY_SILENT:-}" ]]; then + echo -e "${GRAY}First run: discovering project directories...${NC}" >&2 + fi + + local -a discovered=() + while IFS= read -r path; do + [[ -n "$path" ]] && discovered+=("$path") + done < <(discover_project_dirs) + + if [[ ${#discovered[@]} -gt 0 ]]; then + PURGE_SEARCH_PATHS=("${discovered[@]}") + save_discovered_paths "${discovered[@]}" + + if [[ -t 1 ]] && [[ -z "${_PURGE_DISCOVERY_SILENT:-}" ]]; then + echo -e "${GRAY}Found ${#discovered[@]} project directories, saved to config${NC}" >&2 + fi + else + PURGE_SEARCH_PATHS=("${DEFAULT_PURGE_SEARCH_PATHS[@]}") + fi + fi +} + +# Initialize paths on script load. +load_purge_config + +# Args: $1 - directory path +# Determine whether a directory is a project root. +# This is used to safely allow cleaning direct-child artifacts when +# users configure a single project directory as a purge search path. +is_purge_project_root() { + mole_purge_is_project_root "$1" +} + +# Args: $1 - path to check +# Safe cleanup requires the path be inside a project directory. +is_safe_project_artifact() { + local path="$1" + local search_path="$2" + + # Normalize search path to tolerate user config entries with trailing slash. + if [[ "$search_path" != "/" ]]; then + search_path="${search_path%/}" + fi + + if [[ "$path" != /* ]]; then + return 1 + fi + + if [[ "$path" != "$search_path/"* ]]; then + # fd may emit physical/canonical paths (for example /private/var) + # while configured search roots use symlink aliases (for example /var). + # Compare physical paths as a fallback to avoid false negatives. + local physical_path="" + local physical_search_path="" + if [[ -d "$path" && -d "$search_path" ]]; then + physical_path=$(cd "$path" 2> /dev/null && pwd -P || echo "") + physical_search_path=$(cd "$search_path" 2> /dev/null && pwd -P || echo "") + fi + + if [[ -z "$physical_path" || -z "$physical_search_path" || "$physical_path" != "$physical_search_path/"* ]]; then + return 1 + fi + + path="$physical_path" + search_path="$physical_search_path" + fi + + # Must not be a direct child of the search root. + local relative_path="${path#"$search_path"/}" + local depth=$(echo "$relative_path" | LC_ALL=C tr -cd '/' | wc -c) + if [[ $depth -lt 1 ]]; then + # Allow direct-child artifacts only when the search path is itself + # a project root (single-project mode). + if is_purge_project_root "$search_path"; then + return 0 + fi + return 1 + fi + return 0 +} + +# Detect if directory is a Rails project root +is_rails_project_root() { + local dir="$1" + [[ -f "$dir/config/application.rb" ]] || return 1 + [[ -f "$dir/Gemfile" ]] || return 1 + [[ -f "$dir/bin/rails" || -f "$dir/config/environment.rb" ]] +} + +# Detect if directory is a Go project root +is_go_project_root() { + local dir="$1" + [[ -f "$dir/go.mod" ]] +} + +# Detect if directory is a PHP Composer project root +is_php_project_root() { + local dir="$1" + [[ -f "$dir/composer.json" ]] +} + +# Decide whether a "bin" directory is a .NET directory +is_dotnet_bin_dir() { + local path="$1" + [[ "$(basename "$path")" == "bin" ]] || return 1 + + # Check if parent directory has a .csproj/.fsproj/.vbproj file + local parent_dir + parent_dir="$(dirname "$path")" + find "$parent_dir" -maxdepth 1 \( -name "*.csproj" -o -name "*.fsproj" -o -name "*.vbproj" \) 2> /dev/null | grep -q . || return 1 + + # Check if bin directory contains Debug/ or Release/ subdirectories + [[ -d "$path/Debug" || -d "$path/Release" ]] || return 1 + + return 0 +} + +# Check if a vendor directory should be protected from purge +# Expects path to be a vendor directory (basename == vendor) +# Strategy: Only clean PHP Composer vendor, protect all others +is_protected_vendor_dir() { + local path="$1" + local base + base=$(basename "$path") + [[ "$base" == "vendor" ]] || return 1 + local parent_dir + parent_dir=$(dirname "$path") + + # PHP Composer vendor can be safely regenerated with 'composer install' + # Do NOT protect it (return 1 = not protected = can be cleaned) + if is_php_project_root "$parent_dir"; then + return 1 + fi + + # Rails vendor (importmap dependencies) - should be protected + if is_rails_project_root "$parent_dir"; then + return 0 + fi + + # Go vendor (optional vendoring) - protect to avoid accidental deletion + if is_go_project_root "$parent_dir"; then + return 0 + fi + + # Unknown vendor type - protect by default (conservative approach) + return 0 +} + +# Check if an artifact should be protected from purge +is_protected_purge_artifact() { + local path="$1" + local base + base=$(basename "$path") + + case "$base" in + bin) + # Only allow purging bin/ when we can detect .NET context. + if is_dotnet_bin_dir "$path"; then + return 1 + fi + return 0 + ;; + vendor) + is_protected_vendor_dir "$path" + return $? + ;; + DerivedData) + # Protect Xcode global DerivedData in ~/Library/Developer/Xcode/ + # Only allow purging DerivedData within project directories + [[ "$path" == *"/Library/Developer/Xcode/DerivedData"* ]] && return 0 + return 1 + ;; + esac + + return 1 +} + +# Scan purge targets using fd (fast) or pruned find. +scan_purge_targets() { + local search_path="$1" + local output_file="$2" + local min_depth="$PURGE_MIN_DEPTH_DEFAULT" + local max_depth="$PURGE_MAX_DEPTH_DEFAULT" + if [[ ! "$min_depth" =~ ^[0-9]+$ ]]; then + min_depth="$PURGE_MIN_DEPTH_DEFAULT" + fi + if [[ ! "$max_depth" =~ ^[0-9]+$ ]]; then + max_depth="$PURGE_MAX_DEPTH_DEFAULT" + fi + if [[ "$max_depth" -lt "$min_depth" ]]; then + max_depth="$min_depth" + fi + if [[ ! -d "$search_path" ]]; then + return + fi + + # Update current scanning path + local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole" + echo "$search_path" > "$stats_dir/purge_scanning" 2> /dev/null || true + + # Helper to process raw results + process_scan_results() { + local input_file="$1" + if [[ -f "$input_file" ]]; then + while IFS= read -r item; do + # Check if we should abort (scanning file removed by Ctrl+C) + if [[ ! -f "$stats_dir/purge_scanning" ]]; then + return + fi + + if [[ -n "$item" ]] && is_safe_project_artifact "$item" "$search_path"; then + echo "$item" + # Update scanning path to show current project directory + local project_dir=$(dirname "$item") + echo "$project_dir" > "$stats_dir/purge_scanning" 2> /dev/null || true + fi + done < "$input_file" | filter_nested_artifacts | filter_protected_artifacts > "$output_file" + rm -f "$input_file" + else + touch "$output_file" + fi + } + + local use_find=true + + # Allow forcing find via MO_USE_FIND environment variable + if [[ "${MO_USE_FIND:-0}" == "1" ]]; then + debug_log "MO_USE_FIND=1: Forcing find instead of fd" + use_find=true + elif command -v fd > /dev/null 2>&1; then + # Escape regex special characters in target names for fd patterns + local escaped_targets=() + for target in "${PURGE_TARGETS[@]}"; do + escaped_targets+=("^$(printf '%s' "$target" | sed -e 's/[][(){}.^$*+?|\\]/\\&/g')\$") + done + local pattern="($( + IFS='|' + echo "${escaped_targets[*]}" + ))" + local fd_args=( + "--absolute-path" + "--hidden" + "--no-ignore" + "--type" "d" + "--min-depth" "$min_depth" + "--max-depth" "$max_depth" + "--threads" "8" + "--exclude" ".git" + "--exclude" "Library" + "--exclude" ".Trash" + "--exclude" "Applications" + ) + + # Try running fd. If it succeeds (exit code 0), use it. + # If it fails (e.g. bad flag, permissions, binary issue), fallback to find. + if fd "${fd_args[@]}" "$pattern" "$search_path" 2> /dev/null > "$output_file.raw"; then + # Check if fd actually found anything - if empty, fallback to find + if [[ -s "$output_file.raw" ]]; then + debug_log "Using fd for scanning (found results)" + use_find=false + process_scan_results "$output_file.raw" + else + debug_log "fd returned empty results, falling back to find" + rm -f "$output_file.raw" + fi + else + debug_log "fd command failed, falling back to find" + fi + fi + + if [[ "$use_find" == "true" ]]; then + debug_log "Using find for scanning" + # Pruned find avoids descending into heavy directories. + local prune_dirs=(".git" "Library" ".Trash" "Applications") + local purge_targets=("${PURGE_TARGETS[@]}") + + local prune_expr=() + for i in "${!prune_dirs[@]}"; do + prune_expr+=(-name "${prune_dirs[$i]}") + [[ $i -lt $((${#prune_dirs[@]} - 1)) ]] && prune_expr+=(-o) + done + + local target_expr=() + for i in "${!purge_targets[@]}"; do + target_expr+=(-name "${purge_targets[$i]}") + [[ $i -lt $((${#purge_targets[@]} - 1)) ]] && target_expr+=(-o) + done + + # Use plain `find` here for compatibility with environments where + # `command find` behaves inconsistently in this complex expression. + find "$search_path" -mindepth "$min_depth" -maxdepth "$max_depth" -type d \ + \( "${prune_expr[@]}" \) -prune -o \ + \( "${target_expr[@]}" \) -print -prune \ + 2> /dev/null > "$output_file.raw" || true + + process_scan_results "$output_file.raw" + fi +} +# Filter out nested artifacts (e.g. node_modules inside node_modules, .build inside build). +# Optimized: Sort paths to put parents before children, then filter in single pass. +filter_nested_artifacts() { + # 1. Append trailing slash to each path (to ensure /foo/bar starts with /foo/) + # 2. Sort to group parents and children (LC_COLLATE=C ensures standard sorting) + # 3. Use awk to filter out paths that start with the previous kept path + # 4. Remove trailing slash + sed 's|[^/]$|&/|' | LC_COLLATE=C sort | awk ' + BEGIN { last_kept = "" } + { + current = $0 + # If current path starts with last_kept, it is nested + # Only check if last_kept is not empty + if (last_kept == "" || index(current, last_kept) != 1) { + print current + last_kept = current + } + } + ' | sed 's|/$||' +} + +filter_protected_artifacts() { + while IFS= read -r item; do + if ! is_protected_purge_artifact "$item"; then + echo "$item" + fi + done +} +# Args: $1 - path +# Check if a path was modified recently (safety check). +is_recently_modified() { + local path="$1" + local age_days=$MIN_AGE_DAYS + if [[ ! -e "$path" ]]; then + return 1 + fi + local mod_time + mod_time=$(get_file_mtime "$path") + local current_time + current_time=$(get_epoch_seconds) + local age_seconds=$((current_time - mod_time)) + local age_in_days=$((age_seconds / 86400)) + if [[ $age_in_days -lt $age_days ]]; then + return 0 # Recently modified + else + return 1 # Old enough to clean + fi +} +# Args: $1 - path +# Get directory size in KB. +get_dir_size_kb() { + local path="$1" + if [[ ! -d "$path" ]]; then + echo "0" + return + fi + + local timeout_seconds="${MO_PURGE_SIZE_TIMEOUT_SEC:-15}" + if [[ ! "$timeout_seconds" =~ ^[0-9]+([.][0-9]+)?$ ]]; then + timeout_seconds=15 + fi + + local du_output="" + local du_exit=0 + local du_tmp + du_tmp=$(mktemp) + if run_with_timeout "$timeout_seconds" du -skP "$path" > "$du_tmp" 2> /dev/null; then + du_output=$(cat "$du_tmp") + else + du_exit=$? + fi + rm -f "$du_tmp" + + if [[ $du_exit -eq 124 ]]; then + debug_log "Size calculation timed out (${timeout_seconds}s): $path" + echo "TIMEOUT" + return + fi + + if [[ $du_exit -ne 0 ]]; then + echo "0" + return + fi + + local size_kb + size_kb=$(printf '%s\n' "$du_output" | awk 'NR==1 {print $1; exit}') + if [[ "$size_kb" =~ ^[0-9]+$ ]]; then + echo "$size_kb" + else + echo "0" + fi +} +# Purge category selector. +select_purge_categories() { + local -a categories=("$@") + local total_items=${#categories[@]} + local clear_line=$'\r\033[2K' + if [[ $total_items -eq 0 ]]; then + return 1 + fi + + # Calculate items per page based on terminal height. + _get_items_per_page() { + local term_height=24 + if [[ -t 0 ]] || [[ -t 2 ]]; then + term_height=$(stty size < /dev/tty 2> /dev/null | awk '{print $1}') + fi + if [[ -z "$term_height" || $term_height -le 0 ]]; then + if command -v tput > /dev/null 2>&1; then + term_height=$(tput lines 2> /dev/null || echo "24") + else + term_height=24 + fi + fi + local reserved=6 + local available=$((term_height - reserved)) + if [[ $available -lt 3 ]]; then + echo 3 + elif [[ $available -gt 50 ]]; then + echo 50 + else + echo "$available" + fi + } + + local items_per_page=$(_get_items_per_page) + local cursor_pos=0 + local top_index=0 + + # Initialize selection (all selected by default, except recent ones) + local -a selected=() + IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}" + for ((i = 0; i < total_items; i++)); do + # Default unselected if category has recent items + if [[ ${recent_flags[i]:-false} == "true" ]]; then + selected[i]=false + else + selected[i]=true + fi + done + local original_stty="" + local previous_exit_trap="" + local previous_int_trap="" + local previous_term_trap="" + local terminal_restored=false + if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then + original_stty=$(stty -g 2> /dev/null || echo "") + fi + previous_exit_trap=$(trap -p EXIT || true) + previous_int_trap=$(trap -p INT || true) + previous_term_trap=$(trap -p TERM || true) + # Terminal control functions + restore_terminal() { + # Avoid trap churn when restore is called repeatedly via RETURN/EXIT paths. + if [[ "${terminal_restored:-false}" == "true" ]]; then + return + fi + terminal_restored=true + + trap - EXIT INT TERM + show_cursor + if [[ -n "${original_stty:-}" ]]; then + stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || true + fi + if [[ -n "$previous_exit_trap" ]]; then + eval "$previous_exit_trap" + fi + if [[ -n "$previous_int_trap" ]]; then + eval "$previous_int_trap" + fi + if [[ -n "$previous_term_trap" ]]; then + eval "$previous_term_trap" + fi + } + # shellcheck disable=SC2329 + handle_interrupt() { + restore_terminal + exit 130 + } + draw_menu() { + # Recalculate items_per_page dynamically to handle window resize + items_per_page=$(_get_items_per_page) + + # Clamp pagination state to avoid cursor drifting out of view + local max_top_index=0 + if [[ $total_items -gt $items_per_page ]]; then + max_top_index=$((total_items - items_per_page)) + fi + if [[ $top_index -gt $max_top_index ]]; then + top_index=$max_top_index + fi + if [[ $top_index -lt 0 ]]; then + top_index=0 + fi + + local visible_count=$((total_items - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + if [[ $cursor_pos -gt $((visible_count - 1)) ]]; then + cursor_pos=$((visible_count - 1)) + fi + if [[ $cursor_pos -lt 0 ]]; then + cursor_pos=0 + fi + + printf "\033[H" + # Calculate total size of selected items for header + local selected_size=0 + local selected_count=0 + IFS=',' read -r -a sizes <<< "${PURGE_CATEGORY_SIZES:-}" + for ((i = 0; i < total_items; i++)); do + if [[ ${selected[i]} == true ]]; then + selected_size=$((selected_size + ${sizes[i]:-0})) + selected_count=$((selected_count + 1)) + fi + done + + # Format selected size (stored in KB) using shared display rules. + local selected_size_human + selected_size_human=$(bytes_to_human_kb "$selected_size") + + # Show position indicator if scrolling is needed + local scroll_indicator="" + if [[ $total_items -gt $items_per_page ]]; then + local current_pos=$((top_index + cursor_pos + 1)) + scroll_indicator=" ${GRAY}[${current_pos}/${total_items}]${NC}" + fi + + printf "%s${PURPLE_BOLD}Select Categories to Clean${NC}%s${GRAY}, ${selected_size_human}, ${selected_count} selected${NC}\n" "$clear_line" "$scroll_indicator" + printf "%s\n" "$clear_line" + + IFS=',' read -r -a recent_flags <<< "${PURGE_RECENT_CATEGORIES:-}" + + # Calculate visible range + local end_index=$((top_index + visible_count)) + + # Draw only visible items + for ((i = top_index; i < end_index; i++)); do + local checkbox="$ICON_EMPTY" + [[ ${selected[i]} == true ]] && checkbox="$ICON_SOLID" + local recent_marker="" + [[ ${recent_flags[i]:-false} == "true" ]] && recent_marker=" ${GRAY}| Recent${NC}" + local rel_pos=$((i - top_index)) + if [[ $rel_pos -eq $cursor_pos ]]; then + printf "%s${CYAN}${ICON_ARROW} %s %s%s${NC}\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker" + else + printf "%s %s %s%s\n" "$clear_line" "$checkbox" "${categories[i]}" "$recent_marker" + fi + done + + # Keep one blank line between the list and footer tips. + printf "%s\n" "$clear_line" + + # Adaptive footer hints — mirrors menu_paginated.sh pattern + local _term_w + _term_w=$(tput cols 2> /dev/null || echo 80) + [[ "$_term_w" =~ ^[0-9]+$ ]] || _term_w=80 + + local _sep=" ${GRAY}|${NC} " + local _nav="${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN}${NC}" + local _space="${GRAY}Space Select${NC}" + local _enter="${GRAY}Enter Confirm${NC}" + local _all="${GRAY}A All${NC}" + local _invert="${GRAY}I Invert${NC}" + local _quit="${GRAY}Q Quit${NC}" + + # Strip ANSI to measure real length + _ph_len() { printf "%s" "$1" | LC_ALL=C awk '{gsub(/\033\[[0-9;]*[A-Za-z]/,""); printf "%d", length}'; } + + # Level 0 (full): ↑↓ | Space Select | Enter Confirm | A All | I Invert | Q Quit + local _full="${_nav}${_sep}${_space}${_sep}${_enter}${_sep}${_all}${_sep}${_invert}${_sep}${_quit}" + if (($(_ph_len "$_full") <= _term_w)); then + printf "%s${_full}${NC}\n" "$clear_line" + else + # Level 1: ↑↓ | Enter Confirm | A All | I Invert | Q Quit + local _l1="${_nav}${_sep}${_enter}${_sep}${_all}${_sep}${_invert}${_sep}${_quit}" + if (($(_ph_len "$_l1") <= _term_w)); then + printf "%s${_l1}${NC}\n" "$clear_line" + else + # Level 2 (minimal): ↑↓ | Enter | Q Quit + printf "%s${_nav}${_sep}${_enter}${_sep}${_quit}${NC}\n" "$clear_line" + fi + fi + + # Clear stale content below the footer when list height shrinks. + printf '\033[J' + } + move_cursor_up() { + if [[ $cursor_pos -gt 0 ]]; then + ((cursor_pos--)) + elif [[ $top_index -gt 0 ]]; then + ((top_index--)) + fi + } + move_cursor_down() { + local absolute_index=$((top_index + cursor_pos)) + local last_index=$((total_items - 1)) + if [[ $absolute_index -lt $last_index ]]; then + local visible_count=$((total_items - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then + cursor_pos=$((cursor_pos + 1)) + elif [[ $((top_index + visible_count)) -lt $total_items ]]; then + top_index=$((top_index + 1)) + fi + fi + } + trap restore_terminal EXIT + trap handle_interrupt INT TERM + # Preserve interrupt character for Ctrl-C + stty -echo -icanon intr ^C 2> /dev/null || true + hide_cursor + if [[ -t 1 ]]; then + clear_screen + fi + # Main loop + while true; do + draw_menu + # Read key + IFS= read -r -s -n1 key || key="" + case "$key" in + $'\x1b') + # Arrow keys or ESC + # Read next 2 chars with timeout (bash 3.2 needs integer) + IFS= read -r -s -n1 -t 1 key2 || key2="" + if [[ "$key2" == "[" ]]; then + IFS= read -r -s -n1 -t 1 key3 || key3="" + case "$key3" in + A) # Up arrow + move_cursor_up + ;; + B) # Down arrow + move_cursor_down + ;; + esac + else + # ESC alone (no following chars) + restore_terminal + return 1 + fi + ;; + "j" | "J") # Vim down + move_cursor_down + ;; + "k" | "K") # Vim up + move_cursor_up + ;; + " ") # Space - toggle current item + local idx=$((top_index + cursor_pos)) + if [[ ${selected[idx]} == true ]]; then + selected[idx]=false + else + selected[idx]=true + fi + ;; + "a" | "A") # Select all + for ((i = 0; i < total_items; i++)); do + selected[i]=true + done + ;; + "i" | "I") # Invert selection + for ((i = 0; i < total_items; i++)); do + if [[ ${selected[i]} == true ]]; then + selected[i]=false + else + selected[i]=true + fi + done + ;; + "q" | "Q" | $'\x03') # Quit or Ctrl-C + restore_terminal + return 1 + ;; + "" | $'\n' | $'\r') # Enter - confirm + # Build result + PURGE_SELECTION_RESULT="" + for ((i = 0; i < total_items; i++)); do + if [[ ${selected[i]} == true ]]; then + [[ -n "$PURGE_SELECTION_RESULT" ]] && PURGE_SELECTION_RESULT+="," + PURGE_SELECTION_RESULT+="$i" + fi + done + restore_terminal + return 0 + ;; + esac + done +} + +# Final confirmation before deleting selected purge artifacts. +confirm_purge_cleanup() { + local item_count="${1:-0}" + local total_size_kb="${2:-0}" + local unknown_count="${3:-0}" + + [[ "$item_count" =~ ^[0-9]+$ ]] || item_count=0 + [[ "$total_size_kb" =~ ^[0-9]+$ ]] || total_size_kb=0 + [[ "$unknown_count" =~ ^[0-9]+$ ]] || unknown_count=0 + + local item_text="artifact" + [[ $item_count -ne 1 ]] && item_text="artifacts" + + local size_display + size_display=$(bytes_to_human "$((total_size_kb * 1024))") + + local unknown_hint="" + if [[ $unknown_count -gt 0 ]]; then + local unknown_text="unknown size" + [[ $unknown_count -gt 1 ]] && unknown_text="unknown sizes" + unknown_hint=", ${unknown_count} ${unknown_text}" + fi + + echo -ne "${PURPLE}${ICON_ARROW}${NC} Remove ${item_count} ${item_text}, ${size_display}${unknown_hint} ${GREEN}Enter${NC} confirm, ${GRAY}ESC${NC} cancel: " + drain_pending_input + local key="" + IFS= read -r -s -n1 key || key="" + drain_pending_input + + case "$key" in + "" | $'\n' | $'\r' | y | Y) + echo "" + return 0 + ;; + *) + echo "" + return 1 + ;; + esac +} + +# Main cleanup function - scans and prompts user to select artifacts to clean +clean_project_artifacts() { + local -a all_found_items=() + local -a safe_to_clean=() + local -a recently_modified=() + local previous_int_trap="" + local previous_term_trap="" + local trap_installed_by_this_call=false + # Set up cleanup on interrupt + # Note: Declared without 'local' so cleanup_scan trap can access them + scan_pids=() + scan_temps=() + # shellcheck disable=SC2329 + cleanup_scan() { + # Kill all background scans + for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do + kill "$pid" 2> /dev/null || true + done + # Clean up temp files + for temp in "${scan_temps[@]+"${scan_temps[@]}"}"; do + rm -f "$temp" 2> /dev/null || true + done + # Clean up purge scanning file + local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole" + rm -f "$stats_dir/purge_scanning" 2> /dev/null || true + echo "" + exit 130 + } + # Save caller traps and install local cleanup trap for this function call. + previous_int_trap=$(trap -p INT || true) + previous_term_trap=$(trap -p TERM || true) + trap cleanup_scan INT TERM + trap_installed_by_this_call=true + # Scanning is started from purge.sh with start_inline_spinner + # Launch all scans in parallel + for path in "${PURGE_SEARCH_PATHS[@]}"; do + if [[ -d "$path" ]]; then + local scan_output + scan_output=$(mktemp) + scan_temps+=("$scan_output") + # Launch scan in background for true parallelism + scan_purge_targets "$path" "$scan_output" & + local scan_pid=$! + scan_pids+=("$scan_pid") + fi + done + # Wait for all scans to complete + for pid in "${scan_pids[@]+"${scan_pids[@]}"}"; do + wait "$pid" 2> /dev/null || true + done + + # Stop the scanning monitor (removes purge_scanning file to signal completion) + local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole" + rm -f "$stats_dir/purge_scanning" 2> /dev/null || true + + # Give monitor process time to exit and clear its output + if [[ -t 1 ]]; then + sleep 0.2 + fi + + # Collect all results + for scan_output in "${scan_temps[@]+"${scan_temps[@]}"}"; do + if [[ -f "$scan_output" ]]; then + while IFS= read -r item; do + if [[ -n "$item" ]]; then + all_found_items+=("$item") + fi + done < "$scan_output" + rm -f "$scan_output" + fi + done + # Restore caller traps after this function completes. + if [[ "$trap_installed_by_this_call" == "true" ]]; then + trap - INT TERM + [[ -n "$previous_int_trap" ]] && eval "$previous_int_trap" + [[ -n "$previous_term_trap" ]] && eval "$previous_term_trap" + fi + if [[ ${#all_found_items[@]} -eq 0 ]]; then + echo "" + echo -e "${GREEN}${ICON_SUCCESS}${NC} Great! No old project artifacts to clean" + printf '\n' + return 2 # Special code: nothing to clean + fi + # Mark recently modified items (for default selection state) + for item in "${all_found_items[@]}"; do + if is_recently_modified "$item"; then + recently_modified+=("$item") + fi + # Add all items to safe_to_clean, let user choose + safe_to_clean+=("$item") + done + # Build menu options - one per artifact + if [[ -t 1 ]]; then + start_inline_spinner "Calculating sizes..." + fi + local -a menu_options=() + local -a item_paths=() + local -a item_sizes=() + local -a item_size_unknown_flags=() + local -a item_recent_flags=() + # Helper to get project name from path + # For ~/www/pake/src-tauri/target -> returns "pake" + # For ~/work/code/MyProject/node_modules -> returns "MyProject" + # Strategy: Find the nearest ancestor directory containing a project indicator file + get_project_name() { + local path="$1" + + local current_dir + current_dir=$(dirname "$path") + local monorepo_root="" + local project_root="" + + # Single pass: check both monorepo and project indicators + while [[ "$current_dir" != "/" && "$current_dir" != "$HOME" && -n "$current_dir" ]]; do + # First check for monorepo indicators (higher priority) + if [[ -z "$monorepo_root" ]]; then + for indicator in "${MONOREPO_INDICATORS[@]}"; do + if [[ -e "$current_dir/$indicator" ]]; then + monorepo_root="$current_dir" + break + fi + done + fi + + # Then check for project indicators (save first match) + if [[ -z "$project_root" ]]; then + for indicator in "${PROJECT_INDICATORS[@]}"; do + if [[ -e "$current_dir/$indicator" ]]; then + project_root="$current_dir" + break + fi + done + fi + + # If we found monorepo, we can stop (monorepo always wins) + if [[ -n "$monorepo_root" ]]; then + break + fi + + # If we found project but still checking for monorepo above + # (only stop if we're beyond reasonable depth) + local depth=$(echo "${current_dir#"$HOME"}" | LC_ALL=C tr -cd '/' | wc -c | tr -d ' ') + if [[ -n "$project_root" && $depth -lt 2 ]]; then + break + fi + + current_dir=$(dirname "$current_dir") + done + + # Determine result: monorepo > project > fallback + local result="" + if [[ -n "$monorepo_root" ]]; then + result=$(basename "$monorepo_root") + elif [[ -n "$project_root" ]]; then + result=$(basename "$project_root") + else + # Fallback: first directory under search root + local search_roots=() + if [[ ${#PURGE_SEARCH_PATHS[@]} -gt 0 ]]; then + search_roots=("${PURGE_SEARCH_PATHS[@]}") + else + search_roots=("$HOME/www" "$HOME/dev" "$HOME/Projects") + fi + for root in "${search_roots[@]}"; do + root="${root%/}" + if [[ -n "$root" && "$path" == "$root/"* ]]; then + local relative_path="${path#"$root"/}" + result=$(echo "$relative_path" | cut -d'/' -f1) + break + fi + done + + # Final fallback: use grandparent directory + if [[ -z "$result" ]]; then + result=$(dirname "$(dirname "$path")" | xargs basename) + fi + fi + + echo "$result" + } + + # Helper to get project path (more complete than just project name) + # For ~/www/pake/src-tauri/target -> returns "~/www/pake" + # For ~/work/code/MyProject/node_modules -> returns "~/work/code/MyProject" + # Shows the full path relative to HOME with ~ prefix for better clarity + get_project_path() { + local path="$1" + + local current_dir + current_dir=$(dirname "$path") + local monorepo_root="" + local project_root="" + + # Single pass: check both monorepo and project indicators + while [[ "$current_dir" != "/" && "$current_dir" != "$HOME" && -n "$current_dir" ]]; do + # First check for monorepo indicators (higher priority) + if [[ -z "$monorepo_root" ]]; then + for indicator in "${MONOREPO_INDICATORS[@]}"; do + if [[ -e "$current_dir/$indicator" ]]; then + monorepo_root="$current_dir" + break + fi + done + fi + + # Then check for project indicators (save first match) + if [[ -z "$project_root" ]]; then + for indicator in "${PROJECT_INDICATORS[@]}"; do + if [[ -e "$current_dir/$indicator" ]]; then + project_root="$current_dir" + break + fi + done + fi + + # If we found monorepo, we can stop (monorepo always wins) + if [[ -n "$monorepo_root" ]]; then + break + fi + + # If we found project but still checking for monorepo above + local depth=$(echo "${current_dir#"$HOME"}" | LC_ALL=C tr -cd '/' | wc -c | tr -d ' ') + if [[ -n "$project_root" && $depth -lt 2 ]]; then + break + fi + + current_dir=$(dirname "$current_dir") + done + + # Determine result: monorepo > project > fallback + local result="" + if [[ -n "$monorepo_root" ]]; then + result="$monorepo_root" + elif [[ -n "$project_root" ]]; then + result="$project_root" + else + # Fallback: use parent directory of artifact + result=$(dirname "$path") + fi + + # Convert to ~ format for cleaner display + result="${result/#$HOME/~}" + echo "$result" + } + + # Helper to get artifact display name + # For duplicate artifact names within same project, include parent directory for context + get_artifact_display_name() { + local path="$1" + local artifact_name=$(basename "$path") + local project_name=$(get_project_name "$path") + local parent_name=$(basename "$(dirname "$path")") + + # Check if there are other items with same artifact name AND same project + local has_duplicate=false + for other_item in "${safe_to_clean[@]}"; do + if [[ "$other_item" != "$path" && "$(basename "$other_item")" == "$artifact_name" ]]; then + # Same artifact name, check if same project + if [[ "$(get_project_name "$other_item")" == "$project_name" ]]; then + has_duplicate=true + break + fi + fi + done + + # If duplicate exists in same project and parent is not the project itself, show parent/artifact + if [[ "$has_duplicate" == "true" && "$parent_name" != "$project_name" && "$parent_name" != "." && "$parent_name" != "/" ]]; then + echo "$parent_name/$artifact_name" + else + echo "$artifact_name" + fi + } + # Format display with alignment (mirrors app_selector.sh approach) + # Args: $1=project_path $2=artifact_type $3=size_str $4=terminal_width $5=max_path_width $6=artifact_col_width + format_purge_display() { + local project_path="$1" + local artifact_type="$2" + local size_str="$3" + local terminal_width="${4:-$(tput cols 2> /dev/null || echo 80)}" + local max_path_width="${5:-}" + local artifact_col="${6:-12}" + local available_width + + if [[ -n "$max_path_width" ]]; then + available_width="$max_path_width" + else + # Standalone fallback: overhead = prefix(4)+space(1)+size(9)+sep(3)+artifact_col+recent(9) = artifact_col+26 + local fixed_width=$((artifact_col + 26)) + available_width=$((terminal_width - fixed_width)) + + local min_width=10 + if [[ $terminal_width -ge 120 ]]; then + min_width=48 + elif [[ $terminal_width -ge 100 ]]; then + min_width=38 + elif [[ $terminal_width -ge 80 ]]; then + min_width=25 + fi + + [[ $available_width -lt $min_width ]] && available_width=$min_width + [[ $available_width -gt 60 ]] && available_width=60 + fi + + # Truncate project path if needed + local truncated_path + truncated_path=$(truncate_by_display_width "$project_path" "$available_width") + local current_width + current_width=$(get_display_width "$truncated_path") + local char_count=${#truncated_path} + local padding=$((available_width - current_width)) + local printf_width=$((char_count + padding)) + # Format: "project_path size | artifact_type" + printf "%-*s %9s | %-*s" "$printf_width" "$truncated_path" "$size_str" "$artifact_col" "$artifact_type" + } + # Build menu options - one line per artifact + # Pass 1: collect data into parallel arrays (needed for pre-scan of widths) + local -a raw_project_paths=() + local -a raw_artifact_types=() + for item in "${safe_to_clean[@]}"; do + local project_path + project_path=$(get_project_path "$item") + local artifact_type + artifact_type=$(get_artifact_display_name "$item") + local size_raw + size_raw=$(get_dir_size_kb "$item") + local size_kb=0 + local size_human="" + local size_unknown=false + + if [[ "$size_raw" == "TIMEOUT" ]]; then + size_unknown=true + size_human="unknown" + elif [[ "$size_raw" =~ ^[0-9]+$ ]]; then + size_kb="$size_raw" + # Skip empty directories (0 bytes) + if [[ $size_kb -eq 0 ]]; then + continue + fi + size_human=$(bytes_to_human "$((size_kb * 1024))") + else + continue + fi + + # Check if recent + local is_recent=false + for recent_item in "${recently_modified[@]+"${recently_modified[@]}"}"; do + if [[ "$item" == "$recent_item" ]]; then + is_recent=true + break + fi + done + raw_project_paths+=("$project_path") + raw_artifact_types+=("$artifact_type") + item_paths+=("$item") + item_sizes+=("$size_kb") + item_size_unknown_flags+=("$size_unknown") + item_recent_flags+=("$is_recent") + done + + # Pre-scan: find max path and artifact display widths (mirrors app_selector.sh approach) + local terminal_width + terminal_width=$(tput cols 2> /dev/null || echo 80) + [[ "$terminal_width" =~ ^[0-9]+$ ]] || terminal_width=80 + + local max_path_display_width=0 + local max_artifact_width=0 + for pp in "${raw_project_paths[@]+"${raw_project_paths[@]}"}"; do + local w + w=$(get_display_width "$pp") + [[ $w -gt $max_path_display_width ]] && max_path_display_width=$w + done + for at in "${raw_artifact_types[@]+"${raw_artifact_types[@]}"}"; do + [[ ${#at} -gt $max_artifact_width ]] && max_artifact_width=${#at} + done + + # Artifact column: cap at 17, floor at 6 (shortest typical names like "dist") + [[ $max_artifact_width -lt 6 ]] && max_artifact_width=6 + [[ $max_artifact_width -gt 17 ]] && max_artifact_width=17 + + # Exact overhead: prefix(4) + space(1) + size(9) + " | "(3) + artifact_col + " | Recent"(9) = artifact_col + 26 + local fixed_overhead=$((max_artifact_width + 26)) + local available_for_path=$((terminal_width - fixed_overhead)) + + local min_path_width=10 + if [[ $terminal_width -ge 120 ]]; then + min_path_width=48 + elif [[ $terminal_width -ge 100 ]]; then + min_path_width=38 + elif [[ $terminal_width -ge 80 ]]; then + min_path_width=25 + fi + + [[ $max_path_display_width -lt $min_path_width ]] && max_path_display_width=$min_path_width + [[ $available_for_path -lt $max_path_display_width ]] && max_path_display_width=$available_for_path + [[ $max_path_display_width -gt 60 ]] && max_path_display_width=60 + # Ensure path width is at least 5 on very narrow terminals + [[ $max_path_display_width -lt 5 ]] && max_path_display_width=5 + + # Pass 2: build menu_options using pre-computed widths + for ((idx = 0; idx < ${#raw_project_paths[@]}; idx++)); do + local size_kb_val="${item_sizes[idx]}" + local size_unknown_val="${item_size_unknown_flags[idx]}" + local size_human_val="" + if [[ "$size_unknown_val" == "true" ]]; then + size_human_val="unknown" + else + size_human_val=$(bytes_to_human "$((size_kb_val * 1024))") + fi + menu_options+=("$(format_purge_display "${raw_project_paths[idx]}" "${raw_artifact_types[idx]}" "$size_human_val" "$terminal_width" "$max_path_display_width" "$max_artifact_width")") + done + + # Sort by size descending (largest first) - requested in issue #311 + # Use external sort for better performance with many items + if [[ ${#item_sizes[@]} -gt 0 ]]; then + # Create temporary file with index|size pairs + local sort_temp + sort_temp=$(mktemp) + for ((i = 0; i < ${#item_sizes[@]}; i++)); do + printf '%d|%d\n' "$i" "${item_sizes[i]}" + done > "$sort_temp" + + # Sort by size (field 2) descending, extract indices + local -a sorted_indices=() + while IFS='|' read -r idx size; do + sorted_indices+=("$idx") + done < <(sort -t'|' -k2,2nr "$sort_temp") + rm -f "$sort_temp" + + # Rebuild arrays in sorted order + local -a sorted_menu_options=() + local -a sorted_item_paths=() + local -a sorted_item_sizes=() + local -a sorted_item_size_unknown_flags=() + local -a sorted_item_recent_flags=() + + for idx in "${sorted_indices[@]}"; do + sorted_menu_options+=("${menu_options[idx]}") + sorted_item_paths+=("${item_paths[idx]}") + sorted_item_sizes+=("${item_sizes[idx]}") + sorted_item_size_unknown_flags+=("${item_size_unknown_flags[idx]}") + sorted_item_recent_flags+=("${item_recent_flags[idx]}") + done + + # Replace original arrays with sorted versions + menu_options=("${sorted_menu_options[@]}") + item_paths=("${sorted_item_paths[@]}") + item_sizes=("${sorted_item_sizes[@]}") + item_size_unknown_flags=("${sorted_item_size_unknown_flags[@]}") + item_recent_flags=("${sorted_item_recent_flags[@]}") + fi + if [[ -t 1 ]]; then + stop_inline_spinner + fi + # Set global vars for selector + export PURGE_CATEGORY_SIZES=$( + IFS=, + echo "${item_sizes[*]-}" + ) + export PURGE_RECENT_CATEGORIES=$( + IFS=, + echo "${item_recent_flags[*]-}" + ) + # Interactive selection (only if terminal is available) + PURGE_SELECTION_RESULT="" + if [[ -t 0 ]]; then + if ! select_purge_categories "${menu_options[@]}"; then + unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT + return 1 + fi + else + # Non-interactive: select all non-recent items + for ((i = 0; i < ${#menu_options[@]}; i++)); do + if [[ ${item_recent_flags[i]} != "true" ]]; then + [[ -n "$PURGE_SELECTION_RESULT" ]] && PURGE_SELECTION_RESULT+="," + PURGE_SELECTION_RESULT+="$i" + fi + done + fi + if [[ -z "$PURGE_SELECTION_RESULT" ]]; then + echo "" + echo -e "${GRAY}No items selected${NC}" + printf '\n' + unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT + return 0 + fi + IFS=',' read -r -a selected_indices <<< "$PURGE_SELECTION_RESULT" + local selected_total_kb=0 + local selected_unknown_count=0 + for idx in "${selected_indices[@]}"; do + local selected_size_kb="${item_sizes[idx]:-0}" + [[ "$selected_size_kb" =~ ^[0-9]+$ ]] || selected_size_kb=0 + selected_total_kb=$((selected_total_kb + selected_size_kb)) + if [[ "${item_size_unknown_flags[idx]:-false}" == "true" ]]; then + selected_unknown_count=$((selected_unknown_count + 1)) + fi + done + + if [[ -t 0 ]]; then + if ! confirm_purge_cleanup "${#selected_indices[@]}" "$selected_total_kb" "$selected_unknown_count"; then + echo -e "${GRAY}Purge cancelled${NC}" + printf '\n' + unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT + return 1 + fi + fi + + # Clean selected items + echo "" + local stats_dir="${XDG_CACHE_HOME:-$HOME/.cache}/mole" + local cleaned_count=0 + local dry_run_mode="${MOLE_DRY_RUN:-0}" + for idx in "${selected_indices[@]}"; do + local item_path="${item_paths[idx]}" + local artifact_type=$(basename "$item_path") + local project_path=$(get_project_path "$item_path") + local size_kb="${item_sizes[idx]}" + local size_unknown="${item_size_unknown_flags[idx]:-false}" + local size_human + if [[ "$size_unknown" == "true" ]]; then + size_human="unknown" + else + size_human=$(bytes_to_human "$((size_kb * 1024))") + fi + # Safety checks + if [[ -z "$item_path" || "$item_path" == "/" || "$item_path" == "$HOME" || "$item_path" != "$HOME/"* ]]; then + continue + fi + if [[ -t 1 ]]; then + start_inline_spinner "Cleaning $project_path/$artifact_type..." + fi + local removal_recorded=false + if [[ -e "$item_path" ]]; then + if safe_remove "$item_path" true; then + if [[ "$dry_run_mode" == "1" || ! -e "$item_path" ]]; then + local current_total + current_total=$(cat "$stats_dir/purge_stats" 2> /dev/null || echo "0") + echo "$((current_total + size_kb))" > "$stats_dir/purge_stats" + cleaned_count=$((cleaned_count + 1)) + removal_recorded=true + fi + fi + fi + if [[ -t 1 ]]; then + stop_inline_spinner + if [[ "$removal_recorded" == "true" ]]; then + if [[ "$dry_run_mode" == "1" ]]; then + echo -e "${GREEN}${ICON_SUCCESS}${NC} [DRY RUN] $project_path, $artifact_type${NC}, ${GREEN}$size_human${NC}" + else + echo -e "${GREEN}${ICON_SUCCESS}${NC} $project_path, $artifact_type${NC}, ${GREEN}$size_human${NC}" + fi + fi + fi + done + # Update count + echo "$cleaned_count" > "$stats_dir/purge_count" + unset PURGE_CATEGORY_SIZES PURGE_RECENT_CATEGORIES PURGE_SELECTION_RESULT +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/purge_shared.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/purge_shared.sh new file mode 100644 index 0000000..91ad19f --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/purge_shared.sh @@ -0,0 +1,137 @@ +#!/bin/bash +# Shared purge configuration and helpers (side-effect free). + +set -euo pipefail + +if [[ -n "${MOLE_PURGE_SHARED_LOADED:-}" ]]; then + return 0 +fi +readonly MOLE_PURGE_SHARED_LOADED=1 + +# Canonical purge targets (heavy project build artifacts). +readonly MOLE_PURGE_TARGETS=( + "node_modules" + "target" # Rust, Maven + "build" # Gradle, various + "dist" # JS builds + "venv" # Python + ".venv" # Python + ".pytest_cache" # Python (pytest) + ".mypy_cache" # Python (mypy) + ".tox" # Python (tox virtualenvs) + ".nox" # Python (nox virtualenvs) + ".ruff_cache" # Python (ruff) + ".gradle" # Gradle local + "__pycache__" # Python + ".next" # Next.js + ".nuxt" # Nuxt.js + ".output" # Nuxt.js + "vendor" # PHP Composer + "bin" # .NET build output (guarded; see is_protected_purge_artifact) + "obj" # C# / Unity + ".turbo" # Turborepo cache + ".parcel-cache" # Parcel bundler + ".dart_tool" # Flutter/Dart build cache + ".zig-cache" # Zig + "zig-out" # Zig + ".angular" # Angular + ".svelte-kit" # SvelteKit + ".astro" # Astro + "coverage" # Code coverage reports + "DerivedData" # Xcode + "Pods" # CocoaPods + ".cxx" # React Native Android NDK build cache + ".expo" # Expo +) + +readonly MOLE_PURGE_DEFAULT_SEARCH_PATHS=( + "$HOME/www" + "$HOME/dev" + "$HOME/Projects" + "$HOME/GitHub" + "$HOME/Code" + "$HOME/Workspace" + "$HOME/Repos" + "$HOME/Development" +) + +readonly MOLE_PURGE_MONOREPO_INDICATORS=( + "lerna.json" + "pnpm-workspace.yaml" + "nx.json" + "rush.json" +) + +readonly MOLE_PURGE_PROJECT_INDICATORS=( + "package.json" + "Cargo.toml" + "go.mod" + "pyproject.toml" + "requirements.txt" + "pom.xml" + "build.gradle" + "Gemfile" + "composer.json" + "pubspec.yaml" + "Makefile" + "build.zig" + "build.zig.zon" + ".git" +) + +# High-noise targets intentionally excluded from quick hint scans in mo clean. +readonly MOLE_PURGE_QUICK_HINT_EXCLUDED_TARGETS=( + "bin" + "vendor" +) + +mole_purge_is_project_root() { + local dir="$1" + local indicator + + for indicator in "${MOLE_PURGE_MONOREPO_INDICATORS[@]}"; do + if [[ -e "$dir/$indicator" ]]; then + return 0 + fi + done + + for indicator in "${MOLE_PURGE_PROJECT_INDICATORS[@]}"; do + if [[ -e "$dir/$indicator" ]]; then + return 0 + fi + done + + return 1 +} + +mole_purge_quick_hint_target_names() { + local target + local excluded + local is_excluded + + for target in "${MOLE_PURGE_TARGETS[@]}"; do + is_excluded=false + for excluded in "${MOLE_PURGE_QUICK_HINT_EXCLUDED_TARGETS[@]}"; do + if [[ "$target" == "$excluded" ]]; then + is_excluded=true + break + fi + done + [[ "$is_excluded" == "true" ]] && continue + printf '%s\n' "$target" + done +} + +mole_purge_read_paths_config() { + local config_file="${1:-$HOME/.config/mole/purge_paths}" + [[ -f "$config_file" ]] || return 0 + + local line + while IFS= read -r line; do + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" || "$line" =~ ^# ]] && continue + line="${line/#\~/$HOME}" + printf '%s\n' "$line" + done < "$config_file" +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/system.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/system.sh new file mode 100644 index 0000000..817964e --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/system.sh @@ -0,0 +1,438 @@ +#!/bin/bash +# System-Level Cleanup Module (requires sudo). +set -euo pipefail +# System caches, logs, and temp files. +clean_deep_system() { + stop_section_spinner + local cache_cleaned=0 + start_section_spinner "Cleaning system caches..." + # Optimized: Single pass for /Library/Caches (3 patterns in 1 scan) + if sudo test -d "/Library/Caches" 2> /dev/null; then + while IFS= read -r -d '' file; do + if should_protect_path "$file"; then + continue + fi + if safe_sudo_remove "$file"; then + cache_cleaned=1 + fi + done < <(sudo find "/Library/Caches" -maxdepth 5 -type f \( \ + \( -name "*.cache" -mtime "+$MOLE_TEMP_FILE_AGE_DAYS" \) -o \ + \( -name "*.tmp" -mtime "+$MOLE_TEMP_FILE_AGE_DAYS" \) -o \ + \( -name "*.log" -mtime "+$MOLE_LOG_AGE_DAYS" \) \ + \) -print0 2> /dev/null || true) + fi + stop_section_spinner + [[ $cache_cleaned -eq 1 ]] && log_success "System caches" + start_section_spinner "Cleaning system temporary files..." + local tmp_cleaned=0 + local -a sys_temp_dirs=("/private/tmp" "/private/var/tmp") + for tmp_dir in "${sys_temp_dirs[@]}"; do + if sudo find "$tmp_dir" -maxdepth 1 -type f -mtime "+${MOLE_TEMP_FILE_AGE_DAYS}" -print -quit 2> /dev/null | grep -q .; then + if safe_sudo_find_delete "$tmp_dir" "*" "${MOLE_TEMP_FILE_AGE_DAYS}" "f"; then + tmp_cleaned=1 + fi + fi + done + stop_section_spinner + [[ $tmp_cleaned -eq 1 ]] && log_success "System temp files" + start_section_spinner "Cleaning system crash reports..." + if sudo find "/Library/Logs/DiagnosticReports" -maxdepth 1 -type f -mtime "+$MOLE_CRASH_REPORT_AGE_DAYS" -print -quit 2> /dev/null | grep -q .; then + safe_sudo_find_delete "/Library/Logs/DiagnosticReports" "*" "$MOLE_CRASH_REPORT_AGE_DAYS" "f" || true + fi + stop_section_spinner + log_success "System crash reports" + start_section_spinner "Cleaning system logs..." + if sudo find "/private/var/log" -maxdepth 3 -type f \( -name "*.log" -o -name "*.gz" -o -name "*.asl" \) -mtime "+$MOLE_LOG_AGE_DAYS" -print -quit 2> /dev/null | grep -q .; then + safe_sudo_find_delete "/private/var/log" "*.log" "$MOLE_LOG_AGE_DAYS" "f" || true + safe_sudo_find_delete "/private/var/log" "*.gz" "$MOLE_LOG_AGE_DAYS" "f" || true + safe_sudo_find_delete "/private/var/log" "*.asl" "$MOLE_LOG_AGE_DAYS" "f" || true + fi + stop_section_spinner + log_success "System logs" + start_section_spinner "Cleaning third-party system logs..." + local -a third_party_log_dirs=( + "/Library/Logs/Adobe" + "/Library/Logs/CreativeCloud" + ) + local third_party_logs_cleaned=0 + local third_party_log_dir="" + for third_party_log_dir in "${third_party_log_dirs[@]}"; do + if sudo test -d "$third_party_log_dir" 2> /dev/null; then + if sudo find "$third_party_log_dir" -maxdepth 5 -type f -mtime "+$MOLE_LOG_AGE_DAYS" -print -quit 2> /dev/null | grep -q .; then + if safe_sudo_find_delete "$third_party_log_dir" "*" "$MOLE_LOG_AGE_DAYS" "f"; then + third_party_logs_cleaned=1 + fi + fi + fi + done + if sudo find "/Library/Logs" -maxdepth 1 -type f -name "adobegc.log" -mtime "+$MOLE_LOG_AGE_DAYS" -print -quit 2> /dev/null | grep -q .; then + if safe_sudo_remove "/Library/Logs/adobegc.log"; then + third_party_logs_cleaned=1 + fi + fi + stop_section_spinner + [[ $third_party_logs_cleaned -eq 1 ]] && log_success "Third-party system logs" + start_section_spinner "Scanning system library updates..." + if [[ -d "/Library/Updates" && ! -L "/Library/Updates" ]]; then + local updates_cleaned=0 + while IFS= read -r -d '' item; do + if [[ -z "$item" ]] || [[ ! "$item" =~ ^/Library/Updates/[^/]+$ ]]; then + debug_log "Skipping malformed path: $item" + continue + fi + local item_flags + item_flags=$($STAT_BSD -f%Sf "$item" 2> /dev/null || echo "") + if [[ "$item_flags" == *"restricted"* ]]; then + continue + fi + if safe_sudo_remove "$item"; then + updates_cleaned=$((updates_cleaned + 1)) + fi + done < <(find /Library/Updates -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true) + stop_section_spinner + [[ $updates_cleaned -gt 0 ]] && log_success "System library updates" + else + stop_section_spinner + fi + start_section_spinner "Scanning macOS installer files..." + if [[ -d "/macOS Install Data" ]]; then + local mtime + mtime=$(get_file_mtime "/macOS Install Data") + local age_days=$((($(get_epoch_seconds) - mtime) / 86400)) + debug_log "Found macOS Install Data, age ${age_days} days" + if [[ $age_days -ge 14 ]]; then + local size_kb + size_kb=$(get_path_size_kb "/macOS Install Data") + if [[ -n "$size_kb" && "$size_kb" -gt 0 ]]; then + local size_human + size_human=$(bytes_to_human "$((size_kb * 1024))") + debug_log "Cleaning macOS Install Data: $size_human, ${age_days} days old" + if safe_sudo_remove "/macOS Install Data"; then + log_success "macOS Install Data, $size_human" + fi + fi + else + debug_log "Keeping macOS Install Data, only ${age_days} days old, needs 14+" + fi + fi + # Clean macOS installer apps (e.g., "Install macOS Sequoia.app") + # Only remove installers older than 14 days and not currently running + local installer_cleaned=0 + for installer_app in /Applications/Install\ macOS*.app; do + [[ -d "$installer_app" ]] || continue + local app_name + app_name=$(basename "$installer_app") + # Skip if installer is currently running + if pgrep -f "$installer_app" > /dev/null 2>&1; then + debug_log "Skipping $app_name: currently running" + continue + fi + # Check age (same 14-day threshold as /macOS Install Data) + local mtime + mtime=$(get_file_mtime "$installer_app") + local age_days=$((($(get_epoch_seconds) - mtime) / 86400)) + if [[ $age_days -lt 14 ]]; then + debug_log "Keeping $app_name: only ${age_days} days old, needs 14+" + continue + fi + local size_kb + size_kb=$(get_path_size_kb "$installer_app") + if [[ -n "$size_kb" && "$size_kb" -gt 0 ]]; then + local size_human + size_human=$(bytes_to_human "$((size_kb * 1024))") + debug_log "Cleaning macOS installer: $app_name, $size_human, ${age_days} days old" + if safe_sudo_remove "$installer_app"; then + log_success "$app_name, $size_human" + installer_cleaned=$((installer_cleaned + 1)) + fi + fi + done + stop_section_spinner + [[ $installer_cleaned -gt 0 ]] && debug_log "Cleaned $installer_cleaned macOS installer(s)" + start_section_spinner "Scanning browser code signature caches..." + local code_sign_cleaned=0 + while IFS= read -r -d '' cache_dir; do + if safe_sudo_remove "$cache_dir"; then + code_sign_cleaned=$((code_sign_cleaned + 1)) + fi + done < <(run_with_timeout 5 command find /private/var/folders -type d -name "*.code_sign_clone" -path "*/X/*" -print0 2> /dev/null || true) + stop_section_spinner + [[ $code_sign_cleaned -gt 0 ]] && log_success "Browser code signature caches, $code_sign_cleaned items" + + local diag_base="/private/var/db/diagnostics" + start_section_spinner "Cleaning system diagnostic logs..." + safe_sudo_find_delete "$diag_base" "*" "$MOLE_LOG_AGE_DAYS" "f" || true + safe_sudo_find_delete "$diag_base" "*.tracev3" "30" "f" || true + safe_sudo_find_delete "/private/var/db/DiagnosticPipeline" "*" "$MOLE_LOG_AGE_DAYS" "f" || true + stop_section_spinner + log_success "System diagnostic logs" + + start_section_spinner "Cleaning power logs..." + safe_sudo_find_delete "/private/var/db/powerlog" "*" "$MOLE_LOG_AGE_DAYS" "f" || true + stop_section_spinner + log_success "Power logs" + start_section_spinner "Cleaning memory exception reports..." + local mem_reports_dir="/private/var/db/reportmemoryexception/MemoryLimitViolations" + local mem_cleaned=0 + if sudo test -d "$mem_reports_dir" 2> /dev/null; then + # Count and size old files before deletion + local file_count=0 + local total_size_kb=0 + local total_bytes=0 + local stats_out + stats_out=$(sudo find "$mem_reports_dir" -type f -mtime +30 -exec stat -f "%z" {} + 2> /dev/null | awk '{c++; s+=$1} END {print c+0, s+0}' || true) + if [[ -n "$stats_out" ]]; then + read -r file_count total_bytes <<< "$stats_out" + total_size_kb=$((total_bytes / 1024)) + fi + + if [[ "$file_count" -gt 0 ]]; then + if [[ "${DRY_RUN:-}" != "true" ]]; then + if safe_sudo_find_delete "$mem_reports_dir" "*" "30" "f"; then + mem_cleaned=1 + fi + # Log summary to operations.log + if [[ $mem_cleaned -eq 1 ]] && oplog_enabled && [[ "$total_size_kb" -gt 0 ]]; then + local size_human + size_human=$(bytes_to_human "$((total_size_kb * 1024))") + log_operation "clean" "REMOVED" "$mem_reports_dir" "$file_count files, $size_human" + fi + else + log_info "[DRY-RUN] Would remove $file_count old memory exception reports ($total_size_kb KB)" + fi + fi + fi + stop_section_spinner + if [[ $mem_cleaned -eq 1 ]]; then + log_success "Memory exception reports" + fi + return 0 +} +# Incomplete Time Machine backups. +clean_time_machine_failed_backups() { + local tm_cleaned=0 + if ! command -v tmutil > /dev/null 2>&1; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" + return 0 + fi + # Fast pre-check: skip entirely if Time Machine is not configured (no tmutil needed) + if ! defaults read /Library/Preferences/com.apple.TimeMachine AutoBackup 2> /dev/null | grep -qE '^[01]$'; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" + return 0 + fi + start_section_spinner "Checking Time Machine configuration..." + local spinner_active=true + local tm_info + tm_info=$(run_with_timeout 2 tmutil destinationinfo 2>&1 || echo "failed") + if [[ "$tm_info" == *"No destinations configured"* || "$tm_info" == "failed" ]]; then + if [[ "$spinner_active" == "true" ]]; then + stop_section_spinner + fi + echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" + return 0 + fi + if [[ ! -d "/Volumes" ]]; then + if [[ "$spinner_active" == "true" ]]; then + stop_section_spinner + fi + echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" + return 0 + fi + if tm_is_running; then + if [[ "$spinner_active" == "true" ]]; then + stop_section_spinner + fi + echo -e " ${YELLOW}!${NC} Time Machine backup in progress, skipping cleanup" + return 0 + fi + if [[ "$spinner_active" == "true" ]]; then + start_section_spinner "Checking backup volumes..." + fi + # Fast pre-scan for backup volumes to avoid slow tmutil checks. + local -a backup_volumes=() + for volume in /Volumes/*; do + [[ -d "$volume" ]] || continue + [[ "$volume" == "/Volumes/MacintoshHD" || "$volume" == "/" ]] && continue + [[ -L "$volume" ]] && continue + if [[ -d "$volume/Backups.backupdb" ]] || [[ -d "$volume/.MobileBackups" ]]; then + backup_volumes+=("$volume") + fi + done + if [[ ${#backup_volumes[@]} -eq 0 ]]; then + if [[ "$spinner_active" == "true" ]]; then + stop_section_spinner + fi + echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" + return 0 + fi + if [[ "$spinner_active" == "true" ]]; then + start_section_spinner "Scanning backup volumes..." + fi + for volume in "${backup_volumes[@]}"; do + local fs_type + fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "unknown") + case "$fs_type" in + nfs | smbfs | afpfs | cifs | webdav | unknown) continue ;; + esac + local backupdb_dir="$volume/Backups.backupdb" + if [[ -d "$backupdb_dir" ]]; then + while IFS= read -r inprogress_file; do + [[ -d "$inprogress_file" ]] || continue + # Only delete old incomplete backups (safety window). + local file_mtime + file_mtime=$(get_file_mtime "$inprogress_file") + local current_time + current_time=$(get_epoch_seconds) + local hours_old=$(((current_time - file_mtime) / 3600)) + if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then + continue + fi + local size_kb + size_kb=$(get_path_size_kb "$inprogress_file") + [[ "$size_kb" -le 0 ]] && continue + if [[ "$spinner_active" == "true" ]]; then + stop_section_spinner + spinner_active=false + fi + local backup_name + backup_name=$(basename "$inprogress_file") + local size_human + size_human=$(bytes_to_human "$((size_kb * 1024))") + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete backup: $backup_name${NC}, ${YELLOW}$size_human dry${NC}" + tm_cleaned=$((tm_cleaned + 1)) + note_activity + continue + fi + if ! command -v tmutil > /dev/null 2>&1; then + echo -e " ${YELLOW}!${NC} tmutil not available, skipping: $backup_name" + continue + fi + if tmutil delete "$inprogress_file" 2> /dev/null; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete backup: $backup_name${NC}, ${GREEN}$size_human${NC}" + tm_cleaned=$((tm_cleaned + 1)) + files_cleaned=$((files_cleaned + 1)) + total_size_cleaned=$((total_size_cleaned + size_kb)) + total_items=$((total_items + 1)) + note_activity + else + echo -e " ${YELLOW}!${NC} Could not delete: $backup_name · try manually with sudo" + fi + done < <(run_with_timeout 15 find "$backupdb_dir" -maxdepth 3 -type d \( -name "*.inProgress" -o -name "*.inprogress" \) 2> /dev/null || true) + fi + # APFS bundles. + for bundle in "$volume"/*.backupbundle "$volume"/*.sparsebundle; do + [[ -e "$bundle" ]] || continue + [[ -d "$bundle" ]] || continue + local bundle_name + bundle_name=$(basename "$bundle") + local mounted_path + mounted_path=$(hdiutil info 2> /dev/null | grep -A 5 "image-path.*$bundle_name" | grep "/Volumes/" | awk '{print $1}' | head -1 || echo "") + if [[ -n "$mounted_path" && -d "$mounted_path" ]]; then + while IFS= read -r inprogress_file; do + [[ -d "$inprogress_file" ]] || continue + local file_mtime + file_mtime=$(get_file_mtime "$inprogress_file") + local current_time + current_time=$(get_epoch_seconds) + local hours_old=$(((current_time - file_mtime) / 3600)) + if [[ $hours_old -lt $MOLE_TM_BACKUP_SAFE_HOURS ]]; then + continue + fi + local size_kb + size_kb=$(get_path_size_kb "$inprogress_file") + [[ "$size_kb" -le 0 ]] && continue + if [[ "$spinner_active" == "true" ]]; then + stop_section_spinner + spinner_active=false + fi + local backup_name + backup_name=$(basename "$inprogress_file") + local size_human + size_human=$(bytes_to_human "$((size_kb * 1024))") + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Incomplete APFS backup in $bundle_name: $backup_name${NC}, ${YELLOW}$size_human dry${NC}" + tm_cleaned=$((tm_cleaned + 1)) + note_activity + continue + fi + if ! command -v tmutil > /dev/null 2>&1; then + continue + fi + if tmutil delete "$inprogress_file" 2> /dev/null; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Incomplete APFS backup in $bundle_name: $backup_name${NC}, ${GREEN}$size_human${NC}" + tm_cleaned=$((tm_cleaned + 1)) + files_cleaned=$((files_cleaned + 1)) + total_size_cleaned=$((total_size_cleaned + size_kb)) + total_items=$((total_items + 1)) + note_activity + else + echo -e " ${YELLOW}!${NC} Could not delete from bundle: $backup_name" + fi + done < <(run_with_timeout 15 find "$mounted_path" -maxdepth 3 -type d \( -name "*.inProgress" -o -name "*.inprogress" \) 2> /dev/null || true) + fi + done + done + if [[ "$spinner_active" == "true" ]]; then + stop_section_spinner + fi + if [[ $tm_cleaned -eq 0 ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} No incomplete backups found" + fi +} +# Returns 0 if a backup is actively running. +# Returns 1 if not running. +# Returns 2 if status cannot be determined +tm_is_running() { + local st + st="$(tmutil status 2> /dev/null)" || return 2 + + # If we can't find a Running field at all, treat as unknown. + if ! grep -qE '(^|[[:space:]])("Running"|Running)[[:space:]]*=' <<< "$st"; then + return 2 + fi + + # Match: Running = 1; OR "Running" = 1 (with or without trailing ;) + grep -qE '(^|[[:space:]])("Running"|Running)[[:space:]]*=[[:space:]]*1([[:space:]]*;|$)' <<< "$st" +} + +# Local APFS snapshots (report only). +clean_local_snapshots() { + if ! command -v tmutil > /dev/null 2>&1; then + return 0 + fi + # Fast pre-check: skip entirely if Time Machine is not configured (no tmutil needed) + if ! defaults read /Library/Preferences/com.apple.TimeMachine AutoBackup 2> /dev/null | grep -qE '^[01]$'; then + return 0 + fi + + start_section_spinner "Checking Time Machine status..." + local rc_running=0 + tm_is_running || rc_running=$? + + if [[ $rc_running -eq 2 ]]; then + stop_section_spinner + echo -e " ${YELLOW}!${NC} Could not determine Time Machine status; skipping snapshot check" + return 0 + fi + + if [[ $rc_running -eq 0 ]]; then + stop_section_spinner + echo -e " ${YELLOW}!${NC} Time Machine is active; skipping snapshot check" + return 0 + fi + + start_section_spinner "Checking local snapshots..." + local snapshot_list + snapshot_list=$(run_with_timeout 3 tmutil listlocalsnapshots / 2> /dev/null || true) + stop_section_spinner + [[ -z "$snapshot_list" ]] && return 0 + + local snapshot_count + snapshot_count=$(echo "$snapshot_list" | { grep -Eo 'com\.apple\.TimeMachine\.[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{6}' || true; } | wc -l | awk '{print $1}') + if [[ "$snapshot_count" =~ ^[0-9]+$ && "$snapshot_count" -gt 0 ]]; then + echo -e " ${YELLOW}${ICON_WARNING}${NC} Time Machine local snapshots: ${GREEN}${snapshot_count}${NC}" + echo -e " ${GRAY}${ICON_REVIEW}${NC} ${GRAY}Review: tmutil listlocalsnapshots /${NC}" + note_activity + fi +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/user.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/user.sh new file mode 100644 index 0000000..a5a1ecb --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/clean/user.sh @@ -0,0 +1,1156 @@ +#!/bin/bash +# User Data Cleanup Module +set -euo pipefail +clean_user_essentials() { + start_section_spinner "Scanning caches..." + safe_clean ~/Library/Caches/* "User app cache" + stop_section_spinner + + safe_clean ~/Library/Logs/* "User app logs" + + if ! is_path_whitelisted "$HOME/.Trash"; then + local trash_count + local trash_count_status=0 + trash_count=$(run_with_timeout 3 osascript -e 'tell application "Finder" to count items in trash' 2> /dev/null) || trash_count_status=$? + if [[ $trash_count_status -eq 124 ]]; then + debug_log "Finder trash count timed out, using direct .Trash scan" + trash_count=$(command find "$HOME/.Trash" -mindepth 1 -maxdepth 1 -exec printf '.' ';' 2> /dev/null | + wc -c | awk '{print $1}' || echo "0") + fi + [[ "$trash_count" =~ ^[0-9]+$ ]] || trash_count="0" + + if [[ "$DRY_RUN" == "true" ]]; then + [[ $trash_count -gt 0 ]] && echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Trash · would empty, $trash_count items" || echo -e " ${GREEN}${ICON_SUCCESS}${NC} Trash · already empty" + elif [[ $trash_count -gt 0 ]]; then + if run_with_timeout 5 osascript -e 'tell application "Finder" to empty trash' > /dev/null 2>&1; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Trash · emptied, $trash_count items" + note_activity + else + debug_log "Finder trash empty failed or timed out, falling back to direct deletion" + local cleaned_count=0 + while IFS= read -r -d '' item; do + if safe_remove "$item" true; then + cleaned_count=$((cleaned_count + 1)) + fi + done < <(command find "$HOME/.Trash" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true) + if [[ $cleaned_count -gt 0 ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Trash · emptied, $cleaned_count items" + note_activity + fi + fi + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Trash · already empty" + fi + fi + + # Recent items + _clean_recent_items + + # Mail downloads + _clean_mail_downloads +} + +# Internal: Remove recent items lists. +_clean_recent_items() { + local shared_dir="$HOME/Library/Application Support/com.apple.sharedfilelist" + local -a recent_lists=( + "$shared_dir/com.apple.LSSharedFileList.RecentApplications.sfl2" + "$shared_dir/com.apple.LSSharedFileList.RecentDocuments.sfl2" + "$shared_dir/com.apple.LSSharedFileList.RecentServers.sfl2" + "$shared_dir/com.apple.LSSharedFileList.RecentHosts.sfl2" + "$shared_dir/com.apple.LSSharedFileList.RecentApplications.sfl" + "$shared_dir/com.apple.LSSharedFileList.RecentDocuments.sfl" + "$shared_dir/com.apple.LSSharedFileList.RecentServers.sfl" + "$shared_dir/com.apple.LSSharedFileList.RecentHosts.sfl" + ) + if [[ -d "$shared_dir" ]]; then + for sfl_file in "${recent_lists[@]}"; do + [[ -e "$sfl_file" ]] && safe_clean "$sfl_file" "Recent items list" || true + done + fi + safe_clean ~/Library/Preferences/com.apple.recentitems.plist "Recent items preferences" || true +} + +# Internal: Clean old mail downloads. +_clean_mail_downloads() { + local mail_age_days=${MOLE_MAIL_AGE_DAYS:-} + if ! [[ "$mail_age_days" =~ ^[0-9]+$ ]]; then + mail_age_days=30 + fi + local -a mail_dirs=( + "$HOME/Library/Mail Downloads" + "$HOME/Library/Containers/com.apple.mail/Data/Library/Mail Downloads" + ) + local count=0 + local cleaned_kb=0 + local spinner_active=false + for target_path in "${mail_dirs[@]}"; do + if [[ -d "$target_path" ]]; then + if [[ "$spinner_active" == "false" && -t 1 ]]; then + start_section_spinner "Cleaning old Mail attachments..." + spinner_active=true + fi + local dir_size_kb=0 + dir_size_kb=$(get_path_size_kb "$target_path") + if ! [[ "$dir_size_kb" =~ ^[0-9]+$ ]]; then + dir_size_kb=0 + fi + local min_kb="${MOLE_MAIL_DOWNLOADS_MIN_KB:-}" + if ! [[ "$min_kb" =~ ^[0-9]+$ ]]; then + min_kb=5120 + fi + if [[ "$dir_size_kb" -lt "$min_kb" ]]; then + continue + fi + while IFS= read -r -d '' file_path; do + if [[ -f "$file_path" ]]; then + local file_size_kb + file_size_kb=$(get_path_size_kb "$file_path") + if safe_remove "$file_path" true; then + count=$((count + 1)) + cleaned_kb=$((cleaned_kb + file_size_kb)) + fi + fi + done < <(command find "$target_path" -type f -mtime +"$mail_age_days" -print0 2> /dev/null || true) + fi + done + if [[ "$spinner_active" == "true" ]]; then + stop_section_spinner + fi + if [[ $count -gt 0 ]]; then + local cleaned_mb + cleaned_mb=$(echo "$cleaned_kb" | awk '{printf "%.1f", $1/1024}' || echo "0.0") + echo " ${GREEN}${ICON_SUCCESS}${NC} Cleaned $count mail attachments, about ${cleaned_mb}MB" + note_activity + fi +} + +# Remove old Google Chrome versions while keeping Current. +clean_chrome_old_versions() { + local -a app_paths=( + "/Applications/Google Chrome.app" + "$HOME/Applications/Google Chrome.app" + ) + + # Match the exact Chrome process name to avoid false positives + if pgrep -x "Google Chrome" > /dev/null 2>&1; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Google Chrome running · old versions cleanup skipped" + return 0 + fi + + local cleaned_count=0 + local total_size=0 + local cleaned_any=false + + for app_path in "${app_paths[@]}"; do + [[ -d "$app_path" ]] || continue + + local versions_dir="$app_path/Contents/Frameworks/Google Chrome Framework.framework/Versions" + [[ -d "$versions_dir" ]] || continue + + local current_link="$versions_dir/Current" + [[ -L "$current_link" ]] || continue + + local current_version + current_version=$(readlink "$current_link" 2> /dev/null || true) + current_version="${current_version##*/}" + [[ -n "$current_version" ]] || continue + + local -a old_versions=() + local dir name + for dir in "$versions_dir"/*; do + [[ -d "$dir" ]] || continue + name=$(basename "$dir") + [[ "$name" == "Current" ]] && continue + [[ "$name" == "$current_version" ]] && continue + if is_path_whitelisted "$dir"; then + continue + fi + old_versions+=("$dir") + done + + if [[ ${#old_versions[@]} -eq 0 ]]; then + continue + fi + + for dir in "${old_versions[@]}"; do + local size_kb + size_kb=$(get_path_size_kb "$dir" || echo 0) + size_kb="${size_kb:-0}" + total_size=$((total_size + size_kb)) + cleaned_count=$((cleaned_count + 1)) + cleaned_any=true + if [[ "$DRY_RUN" != "true" ]]; then + if has_sudo_session; then + safe_sudo_remove "$dir" > /dev/null 2>&1 || true + else + safe_remove "$dir" true > /dev/null 2>&1 || true + fi + fi + done + done + + if [[ "$cleaned_any" == "true" ]]; then + local size_human + size_human=$(bytes_to_human "$((total_size * 1024))") + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Chrome old versions${NC}, ${YELLOW}${cleaned_count} dirs, $size_human dry${NC}" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Chrome old versions${NC}, ${GREEN}${cleaned_count} dirs, $size_human${NC}" + fi + files_cleaned=$((files_cleaned + cleaned_count)) + total_size_cleaned=$((total_size_cleaned + total_size)) + total_items=$((total_items + 1)) + note_activity + fi +} + +# Remove old Microsoft Edge versions while keeping Current. +clean_edge_old_versions() { + # Allow override for testing + local -a app_paths + if [[ -n "${MOLE_EDGE_APP_PATHS:-}" ]]; then + IFS=':' read -ra app_paths <<< "$MOLE_EDGE_APP_PATHS" + else + app_paths=( + "/Applications/Microsoft Edge.app" + "$HOME/Applications/Microsoft Edge.app" + ) + fi + + # Match the exact Edge process name to avoid false positives (e.g., Microsoft Teams) + if pgrep -x "Microsoft Edge" > /dev/null 2>&1; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Microsoft Edge running · old versions cleanup skipped" + return 0 + fi + + local cleaned_count=0 + local total_size=0 + local cleaned_any=false + + for app_path in "${app_paths[@]}"; do + [[ -d "$app_path" ]] || continue + + local versions_dir="$app_path/Contents/Frameworks/Microsoft Edge Framework.framework/Versions" + [[ -d "$versions_dir" ]] || continue + + local current_link="$versions_dir/Current" + [[ -L "$current_link" ]] || continue + + local current_version + current_version=$(readlink "$current_link" 2> /dev/null || true) + current_version="${current_version##*/}" + [[ -n "$current_version" ]] || continue + + local -a old_versions=() + local dir name + for dir in "$versions_dir"/*; do + [[ -d "$dir" ]] || continue + name=$(basename "$dir") + [[ "$name" == "Current" ]] && continue + [[ "$name" == "$current_version" ]] && continue + if is_path_whitelisted "$dir"; then + continue + fi + old_versions+=("$dir") + done + + if [[ ${#old_versions[@]} -eq 0 ]]; then + continue + fi + + for dir in "${old_versions[@]}"; do + local size_kb + size_kb=$(get_path_size_kb "$dir" || echo 0) + size_kb="${size_kb:-0}" + total_size=$((total_size + size_kb)) + cleaned_count=$((cleaned_count + 1)) + cleaned_any=true + if [[ "$DRY_RUN" != "true" ]]; then + if has_sudo_session; then + safe_sudo_remove "$dir" > /dev/null 2>&1 || true + else + safe_remove "$dir" true > /dev/null 2>&1 || true + fi + fi + done + done + + if [[ "$cleaned_any" == "true" ]]; then + local size_human + size_human=$(bytes_to_human "$((total_size * 1024))") + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Edge old versions${NC}, ${YELLOW}${cleaned_count} dirs, $size_human dry${NC}" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Edge old versions${NC}, ${GREEN}${cleaned_count} dirs, $size_human${NC}" + fi + files_cleaned=$((files_cleaned + cleaned_count)) + total_size_cleaned=$((total_size_cleaned + total_size)) + total_items=$((total_items + 1)) + note_activity + fi +} + +# Remove old Microsoft EdgeUpdater versions while keeping latest. +clean_edge_updater_old_versions() { + local updater_dir="$HOME/Library/Application Support/Microsoft/EdgeUpdater/apps/msedge-stable" + [[ -d "$updater_dir" ]] || return 0 + + if pgrep -x "Microsoft Edge" > /dev/null 2>&1; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Microsoft Edge running · updater cleanup skipped" + return 0 + fi + + local -a version_dirs=() + local dir + for dir in "$updater_dir"/*; do + [[ -d "$dir" ]] || continue + version_dirs+=("$dir") + done + + if [[ ${#version_dirs[@]} -lt 2 ]]; then + return 0 + fi + + local latest_version + latest_version=$(printf '%s\n' "${version_dirs[@]##*/}" | sort -V | tail -n 1) + [[ -n "$latest_version" ]] || return 0 + + local cleaned_count=0 + local total_size=0 + local cleaned_any=false + + for dir in "${version_dirs[@]}"; do + local name + name=$(basename "$dir") + [[ "$name" == "$latest_version" ]] && continue + if is_path_whitelisted "$dir"; then + continue + fi + local size_kb + size_kb=$(get_path_size_kb "$dir" || echo 0) + size_kb="${size_kb:-0}" + total_size=$((total_size + size_kb)) + cleaned_count=$((cleaned_count + 1)) + cleaned_any=true + if [[ "$DRY_RUN" != "true" ]]; then + safe_remove "$dir" true > /dev/null 2>&1 || true + fi + done + + if [[ "$cleaned_any" == "true" ]]; then + local size_human + size_human=$(bytes_to_human "$((total_size * 1024))") + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Edge updater old versions${NC}, ${YELLOW}${cleaned_count} dirs, $size_human dry${NC}" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Edge updater old versions${NC}, ${GREEN}${cleaned_count} dirs, $size_human${NC}" + fi + files_cleaned=$((files_cleaned + cleaned_count)) + total_size_cleaned=$((total_size_cleaned + total_size)) + total_items=$((total_items + 1)) + note_activity + fi +} + +scan_external_volumes() { + [[ -d "/Volumes" ]] || return 0 + local -a candidate_volumes=() + local -a network_volumes=() + for volume in /Volumes/*; do + [[ -d "$volume" && -w "$volume" && ! -L "$volume" ]] || continue + [[ "$volume" == "/" || "$volume" == "/Volumes/Macintosh HD" ]] && continue + local protocol="" + protocol=$(run_with_timeout 1 command diskutil info "$volume" 2> /dev/null | grep -i "Protocol:" | awk '{print $2}' || echo "") + case "$protocol" in + SMB | NFS | AFP | CIFS | WebDAV) + network_volumes+=("$volume") + continue + ;; + esac + local fs_type="" + fs_type=$(run_with_timeout 1 command df -T "$volume" 2> /dev/null | tail -1 | awk '{print $2}' || echo "") + case "$fs_type" in + nfs | smbfs | afpfs | cifs | webdav) + network_volumes+=("$volume") + continue + ;; + esac + candidate_volumes+=("$volume") + done + local volume_count=${#candidate_volumes[@]} + local network_count=${#network_volumes[@]} + if [[ $volume_count -eq 0 ]]; then + if [[ $network_count -gt 0 ]]; then + echo -e " ${GRAY}${ICON_LIST}${NC} External volumes, ${network_count} network volumes skipped" + note_activity + fi + return 0 + fi + start_section_spinner "Scanning $volume_count external volumes..." + for volume in "${candidate_volumes[@]}"; do + [[ -d "$volume" && -r "$volume" ]] || continue + local volume_trash="$volume/.Trashes" + if [[ -d "$volume_trash" && "$DRY_RUN" != "true" ]] && ! is_path_whitelisted "$volume_trash"; then + while IFS= read -r -d '' item; do + safe_remove "$item" true || true + done < <(command find "$volume_trash" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true) + fi + if [[ "$PROTECT_FINDER_METADATA" != "true" ]]; then + clean_ds_store_tree "$volume" "$(basename "$volume") volume, .DS_Store" + fi + done + stop_section_spinner +} + +# Finder metadata (.DS_Store). +clean_finder_metadata() { + if [[ "$PROTECT_FINDER_METADATA" == "true" ]]; then + return + fi + clean_ds_store_tree "$HOME" "Home directory, .DS_Store" +} + +# Conservative cleanup for support caches not covered by generic rules. +clean_support_app_data() { + local support_age_days="${MOLE_SUPPORT_CACHE_AGE_DAYS:-30}" + [[ "$support_age_days" =~ ^[0-9]+$ ]] || support_age_days=30 + + local crash_reporter_dir="$HOME/Library/Application Support/CrashReporter" + if [[ -d "$crash_reporter_dir" && ! -L "$crash_reporter_dir" ]]; then + safe_find_delete "$crash_reporter_dir" "*" "$support_age_days" "f" || true + fi + + # Keep recent wallpaper assets to avoid large re-downloads. + local idle_assets_dir="$HOME/Library/Application Support/com.apple.idleassetsd" + if [[ -d "$idle_assets_dir" && ! -L "$idle_assets_dir" ]]; then + safe_find_delete "$idle_assets_dir" "*" "$support_age_days" "f" || true + fi + + # Clean old aerial wallpaper videos (can be large, safe to remove). + safe_clean ~/Library/Application\ Support/com.apple.wallpaper/aerials/videos/* "Aerial wallpaper videos" + + # Do not touch Messages attachments, only preview/sticker caches. + if pgrep -x "Messages" > /dev/null 2>&1; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Messages is running · preview cache cleanup skipped" + else + safe_clean ~/Library/Messages/StickerCache/* "Messages sticker cache" + safe_clean ~/Library/Messages/Caches/Previews/Attachments/* "Messages preview attachment cache" + safe_clean ~/Library/Messages/Caches/Previews/StickerCache/* "Messages preview sticker cache" + fi +} + +# App caches (merged: macOS system caches + Sandboxed apps). +clean_app_caches() { + start_section_spinner "Scanning app caches..." + + # macOS system caches (merged from clean_macos_system_caches) + safe_clean ~/Library/Saved\ Application\ State/* "Saved application states" || true + safe_clean ~/Library/Caches/com.apple.photoanalysisd "Photo analysis cache" || true + safe_clean ~/Library/Caches/com.apple.akd "Apple ID cache" || true + safe_clean ~/Library/Caches/com.apple.WebKit.Networking/* "WebKit network cache" || true + safe_clean ~/Library/DiagnosticReports/* "Diagnostic reports" || true + safe_clean ~/Library/Caches/com.apple.QuickLook.thumbnailcache "QuickLook thumbnails" || true + safe_clean ~/Library/Caches/Quick\ Look/* "QuickLook cache" || true + safe_clean ~/Library/Caches/com.apple.iconservices* "Icon services cache" || true + safe_clean ~/Downloads/*.download "Safari incomplete downloads" || true + safe_clean ~/Downloads/*.crdownload "Chrome incomplete downloads" || true + safe_clean ~/Downloads/*.part "Partial incomplete downloads" || true + safe_clean ~/Library/Autosave\ Information/* "Autosave information" || true + safe_clean ~/Library/IdentityCaches/* "Identity caches" || true + safe_clean ~/Library/Suggestions/* "Siri suggestions cache" || true + safe_clean ~/Library/Calendars/Calendar\ Cache "Calendar cache" || true + safe_clean ~/Library/Application\ Support/AddressBook/Sources/*/Photos.cache "Address Book photo cache" || true + clean_support_app_data + + # Stop initial scan indicator before entering per-group scans. + stop_section_spinner + + # Sandboxed app caches + safe_clean ~/Library/Containers/com.apple.wallpaper.agent/Data/Library/Caches/* "Wallpaper agent cache" + safe_clean ~/Library/Containers/com.apple.mediaanalysisd/Data/Library/Caches/* "Media analysis cache" + safe_clean ~/Library/Containers/com.apple.AppStore/Data/Library/Caches/* "App Store cache" + safe_clean ~/Library/Containers/com.apple.configurator.xpc.InternetService/Data/tmp/* "Apple Configurator temp files" + local containers_dir="$HOME/Library/Containers" + [[ ! -d "$containers_dir" ]] && return 0 + start_section_spinner "Scanning sandboxed apps..." + local total_size=0 + local cleaned_count=0 + local found_any=false + + local _ng_state + _ng_state=$(shopt -p nullglob || true) + shopt -s nullglob + for container_dir in "$containers_dir"/*; do + process_container_cache "$container_dir" + done + eval "$_ng_state" + stop_section_spinner + + if [[ "$found_any" == "true" ]]; then + local size_human + size_human=$(bytes_to_human "$((total_size * 1024))") + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Sandboxed app caches${NC}, ${YELLOW}$size_human dry${NC}" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Sandboxed app caches${NC}, ${GREEN}$size_human${NC}" + fi + files_cleaned=$((files_cleaned + cleaned_count)) + total_size_cleaned=$((total_size_cleaned + total_size)) + total_items=$((total_items + 1)) + note_activity + fi + + clean_group_container_caches +} + +# Process a single container cache directory. +process_container_cache() { + local container_dir="$1" + [[ -d "$container_dir" ]] || return 0 + [[ -L "$container_dir" ]] && return 0 + local bundle_id + bundle_id=$(basename "$container_dir") + if is_critical_system_component "$bundle_id"; then + return 0 + fi + if should_protect_data "$bundle_id" || should_protect_data "$(echo "$bundle_id" | LC_ALL=C tr '[:upper:]' '[:lower:]')"; then + return 0 + fi + local cache_dir="$container_dir/Data/Library/Caches" + [[ -d "$cache_dir" ]] || return 0 + [[ -L "$cache_dir" ]] && return 0 + # Fast non-empty check. + if find "$cache_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then + local size + size=$(get_path_size_kb "$cache_dir") + total_size=$((total_size + size)) + found_any=true + cleaned_count=$((cleaned_count + 1)) + if [[ "$DRY_RUN" != "true" ]]; then + local item + while IFS= read -r -d '' item; do + [[ -e "$item" ]] || continue + safe_remove "$item" true || true + done < <(command find "$cache_dir" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true) + fi + fi +} + +# Group Containers safe cleanup (logs for protected apps, caches/tmp for non-protected apps). +clean_group_container_caches() { + local group_containers_dir="$HOME/Library/Group Containers" + [[ -d "$group_containers_dir" ]] || return 0 + if ! find "$group_containers_dir" -mindepth 1 -maxdepth 1 -print -quit 2> /dev/null | grep -q .; then + return 0 + fi + + start_section_spinner "Scanning Group Containers..." + local total_size=0 + local cleaned_count=0 + local found_any=false + + # Collect all non-Apple container directories first + local -a containers=() + local container_dir + for container_dir in "$group_containers_dir"/*; do + [[ -d "$container_dir" ]] || continue + [[ -L "$container_dir" ]] && continue + local container_id + container_id=$(basename "$container_dir") + + # Skip Apple-owned shared containers entirely. + case "$container_id" in + com.apple.* | group.com.apple.* | systemgroup.com.apple.*) + continue + ;; + esac + containers+=("$container_dir") + done + + # Process each container's candidate directories + for container_dir in "${containers[@]}"; do + local container_id + container_id=$(basename "$container_dir") + local normalized_id="$container_id" + [[ "$normalized_id" == group.* ]] && normalized_id="${normalized_id#group.}" + + local protected_container=false + if should_protect_data "$container_id" 2> /dev/null || should_protect_data "$normalized_id" 2> /dev/null; then + protected_container=true + fi + + local -a candidates=( + "$container_dir/Logs" + "$container_dir/Library/Logs" + ) + if [[ "$protected_container" != "true" ]]; then + candidates+=( + "$container_dir/tmp" + "$container_dir/Library/tmp" + "$container_dir/Caches" + "$container_dir/Library/Caches" + ) + fi + + local candidate + for candidate in "${candidates[@]}"; do + [[ -d "$candidate" ]] || continue + [[ -L "$candidate" ]] && continue + if is_path_whitelisted "$candidate" 2> /dev/null; then + continue + fi + + # Build non-protected candidate items for cleanup. + local -a items_to_clean=() + local item + while IFS= read -r -d '' item; do + [[ -e "$item" ]] || continue + [[ -L "$item" ]] && continue + if should_protect_path "$item" 2> /dev/null || is_path_whitelisted "$item" 2> /dev/null; then + continue + else + items_to_clean+=("$item") + fi + done < <(command find "$candidate" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true) + + [[ ${#items_to_clean[@]} -gt 0 ]] || continue + + local candidate_size_kb=0 + local candidate_changed=false + if [[ "$DRY_RUN" == "true" ]]; then + for item in "${items_to_clean[@]}"; do + local item_size + item_size=$(get_path_size_kb "$item" 2> /dev/null) || item_size=0 + [[ "$item_size" =~ ^[0-9]+$ ]] || item_size=0 + candidate_changed=true + candidate_size_kb=$((candidate_size_kb + item_size)) + done + else + for item in "${items_to_clean[@]}"; do + local item_size + item_size=$(get_path_size_kb "$item" 2> /dev/null) || item_size=0 + [[ "$item_size" =~ ^[0-9]+$ ]] || item_size=0 + if safe_remove "$item" true 2> /dev/null; then + candidate_changed=true + candidate_size_kb=$((candidate_size_kb + item_size)) + fi + done + fi + + if [[ "$candidate_changed" == "true" ]]; then + total_size=$((total_size + candidate_size_kb)) + cleaned_count=$((cleaned_count + 1)) + found_any=true + fi + done + done + + stop_section_spinner + + if [[ "$found_any" == "true" ]]; then + local size_human + size_human=$(bytes_to_human "$((total_size * 1024))") + if [[ "$DRY_RUN" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Group Containers logs/caches${NC}, ${YELLOW}$size_human dry${NC}" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Group Containers logs/caches${NC}, ${GREEN}$size_human${NC}" + fi + files_cleaned=$((files_cleaned + cleaned_count)) + total_size_cleaned=$((total_size_cleaned + total_size)) + total_items=$((total_items + 1)) + note_activity + fi +} + +# Browser caches (Safari/Chrome/Edge/Firefox). +clean_browsers() { + safe_clean ~/Library/Caches/com.apple.Safari/* "Safari cache" + # Chrome/Chromium. + safe_clean ~/Library/Caches/Google/Chrome/* "Chrome cache" + safe_clean ~/Library/Application\ Support/Google/Chrome/*/Application\ Cache/* "Chrome app cache" + safe_clean ~/Library/Application\ Support/Google/Chrome/*/GPUCache/* "Chrome GPU cache" + safe_clean ~/Library/Application\ Support/Google/Chrome/component_crx_cache/* "Chrome component CRX cache" + safe_clean ~/Library/Application\ Support/Google/GoogleUpdater/crx_cache/* "GoogleUpdater CRX cache" + safe_clean ~/Library/Application\ Support/Google/GoogleUpdater/*.old "GoogleUpdater old files" + safe_clean ~/Library/Caches/Chromium/* "Chromium cache" + safe_clean ~/.cache/puppeteer/* "Puppeteer browser cache" + safe_clean ~/Library/Caches/com.microsoft.edgemac/* "Edge cache" + safe_clean ~/Library/Caches/company.thebrowser.Browser/* "Arc cache" + safe_clean ~/Library/Caches/company.thebrowser.dia/* "Dia cache" + safe_clean ~/Library/Caches/BraveSoftware/Brave-Browser/* "Brave cache" + # Helium Browser. + safe_clean ~/Library/Caches/net.imput.helium/* "Helium cache" + safe_clean ~/Library/Application\ Support/net.imput.helium/*/GPUCache/* "Helium GPU cache" + safe_clean ~/Library/Application\ Support/net.imput.helium/component_crx_cache/* "Helium component cache" + safe_clean ~/Library/Application\ Support/net.imput.helium/extensions_crx_cache/* "Helium extensions cache" + safe_clean ~/Library/Application\ Support/net.imput.helium/GrShaderCache/* "Helium shader cache" + safe_clean ~/Library/Application\ Support/net.imput.helium/GraphiteDawnCache/* "Helium Dawn cache" + safe_clean ~/Library/Application\ Support/net.imput.helium/ShaderCache/* "Helium shader cache" + safe_clean ~/Library/Application\ Support/net.imput.helium/*/Application\ Cache/* "Helium app cache" + # Yandex Browser. + safe_clean ~/Library/Caches/Yandex/YandexBrowser/* "Yandex cache" + safe_clean ~/Library/Application\ Support/Yandex/YandexBrowser/ShaderCache/* "Yandex shader cache" + safe_clean ~/Library/Application\ Support/Yandex/YandexBrowser/GrShaderCache/* "Yandex GR shader cache" + safe_clean ~/Library/Application\ Support/Yandex/YandexBrowser/GraphiteDawnCache/* "Yandex Dawn cache" + safe_clean ~/Library/Application\ Support/Yandex/YandexBrowser/*/GPUCache/* "Yandex GPU cache" + local firefox_running=false + if pgrep -x "Firefox" > /dev/null 2>&1; then + firefox_running=true + fi + if [[ "$firefox_running" == "true" ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Firefox is running · cache cleanup skipped" + else + safe_clean ~/Library/Caches/Firefox/* "Firefox cache" + fi + safe_clean ~/Library/Caches/com.operasoftware.Opera/* "Opera cache" + safe_clean ~/Library/Caches/com.vivaldi.Vivaldi/* "Vivaldi cache" + safe_clean ~/Library/Caches/Comet/* "Comet cache" + safe_clean ~/Library/Caches/com.kagi.kagimacOS/* "Orion cache" + safe_clean ~/Library/Caches/zen/* "Zen cache" + if [[ "$firefox_running" == "true" ]]; then + echo -e " ${GRAY}${ICON_WARNING}${NC} Firefox is running · profile cache cleanup skipped" + else + safe_clean ~/Library/Application\ Support/Firefox/Profiles/*/cache2/* "Firefox profile cache" + fi + clean_chrome_old_versions + clean_edge_old_versions + clean_edge_updater_old_versions +} + +# Cloud storage caches. +clean_cloud_storage() { + safe_clean ~/Library/Caches/com.dropbox.* "Dropbox cache" + safe_clean ~/Library/Caches/com.getdropbox.dropbox "Dropbox cache" + safe_clean ~/Library/Caches/com.google.GoogleDrive "Google Drive cache" + safe_clean ~/Library/Caches/com.baidu.netdisk "Baidu Netdisk cache" + safe_clean ~/Library/Caches/com.alibaba.teambitiondisk "Alibaba Cloud cache" + safe_clean ~/Library/Caches/com.box.desktop "Box cache" + safe_clean ~/Library/Caches/com.microsoft.OneDrive "OneDrive cache" +} + +# Office app caches. +clean_office_applications() { + safe_clean ~/Library/Caches/com.microsoft.Word "Microsoft Word cache" + safe_clean ~/Library/Caches/com.microsoft.Excel "Microsoft Excel cache" + safe_clean ~/Library/Caches/com.microsoft.Powerpoint "Microsoft PowerPoint cache" + safe_clean ~/Library/Caches/com.microsoft.Outlook/* "Microsoft Outlook cache" + safe_clean ~/Library/Caches/com.apple.iWork.* "Apple iWork cache" + safe_clean ~/Library/Caches/com.kingsoft.wpsoffice.mac "WPS Office cache" + safe_clean ~/Library/Caches/org.mozilla.thunderbird/* "Thunderbird cache" + safe_clean ~/Library/Caches/com.apple.mail/* "Apple Mail cache" +} + +# Virtualization caches. +clean_virtualization_tools() { + stop_section_spinner + safe_clean ~/Library/Caches/com.vmware.fusion "VMware Fusion cache" + safe_clean ~/Library/Caches/com.parallels.* "Parallels cache" + safe_clean ~/VirtualBox\ VMs/.cache "VirtualBox cache" + safe_clean ~/.vagrant.d/tmp/* "Vagrant temporary files" +} + +# Estimate item size for Application Support cleanup. +# Files use stat; directories use du with timeout to avoid long blocking scans. +app_support_item_size_bytes() { + local item="$1" + local timeout_seconds="${2:-0.4}" + + if [[ -f "$item" && ! -L "$item" ]]; then + local file_bytes + file_bytes=$(stat -f%z "$item" 2> /dev/null || echo "0") + [[ "$file_bytes" =~ ^[0-9]+$ ]] || return 1 + printf '%s\n' "$file_bytes" + return 0 + fi + + if [[ -d "$item" && ! -L "$item" ]]; then + # Fast path: if directory has too many items, skip detailed size calculation + # to avoid hanging on deep directories (e.g., node_modules, .git) + local item_count + item_count=$(command find "$item" -maxdepth 2 -print0 2> /dev/null | tr -d '\0' | wc -c) + if [[ "$item_count" -gt 10000 ]]; then + # Return 1 to signal "too many items, size unknown" + return 1 + fi + + local du_tmp + du_tmp=$(mktemp) + local du_status=0 + # Use stricter timeout for directories + if run_with_timeout "$timeout_seconds" du -skP "$item" > "$du_tmp" 2> /dev/null; then + du_status=0 + else + du_status=$? + fi + + if [[ $du_status -ne 0 ]]; then + rm -f "$du_tmp" + return 1 + fi + + local size_kb + size_kb=$(awk 'NR==1 {print $1; exit}' "$du_tmp") + rm -f "$du_tmp" + [[ "$size_kb" =~ ^[0-9]+$ ]] || return 1 + printf '%s\n' "$((size_kb * 1024))" + return 0 + fi + + return 1 +} + +# Application Support logs/caches. +clean_application_support_logs() { + if [[ ! -d "$HOME/Library/Application Support" ]] || ! ls "$HOME/Library/Application Support" > /dev/null 2>&1; then + note_activity + echo -e " ${GRAY}${ICON_WARNING}${NC} Skipped: No permission to access Application Support" + return 0 + fi + start_section_spinner "Scanning Application Support..." + local total_size_bytes=0 + local total_size_partial=false + local cleaned_count=0 + local found_any=false + local size_timeout_seconds="${MOLE_APP_SUPPORT_ITEM_SIZE_TIMEOUT_SEC:-0.4}" + if [[ ! "$size_timeout_seconds" =~ ^[0-9]+([.][0-9]+)?$ ]]; then + size_timeout_seconds=0.4 + fi + # Enable nullglob for safe globbing. + local _ng_state + _ng_state=$(shopt -p nullglob || true) + shopt -s nullglob + local app_count=0 + local total_apps + # Temporarily disable pipefail here so that a partial find failure (e.g. TCC + # restrictions on macOS 26+) does not propagate through the pipeline and abort + # the whole scan via set -e. + local pipefail_was_set=false + if [[ -o pipefail ]]; then + pipefail_was_set=true + set +o pipefail + fi + total_apps=$(command find "$HOME/Library/Application Support" -mindepth 1 -maxdepth 1 -type d 2> /dev/null | wc -l | tr -d ' ') + [[ "$total_apps" =~ ^[0-9]+$ ]] || total_apps=0 + local last_progress_update + last_progress_update=$(get_epoch_seconds) + for app_dir in ~/Library/Application\ Support/*; do + [[ -d "$app_dir" ]] || continue + local app_name + app_name=$(basename "$app_dir") + app_count=$((app_count + 1)) + update_progress_if_needed "$app_count" "$total_apps" last_progress_update 1 || true + local app_name_lower + app_name_lower=$(echo "$app_name" | LC_ALL=C tr '[:upper:]' '[:lower:]') + local is_protected=false + if should_protect_data "$app_name"; then + is_protected=true + elif should_protect_data "$app_name_lower"; then + is_protected=true + fi + if [[ "$is_protected" == "true" ]]; then + continue + fi + if is_critical_system_component "$app_name"; then + continue + fi + local -a start_candidates=("$app_dir/log" "$app_dir/logs" "$app_dir/activitylog" "$app_dir/Cache/Cache_Data" "$app_dir/Crashpad/completed") + for candidate in "${start_candidates[@]}"; do + if [[ -d "$candidate" ]]; then + # Quick count check - skip if too many items to avoid hanging + local quick_count + quick_count=$(command find "$candidate" -mindepth 1 -maxdepth 1 -printf '1\n' 2> /dev/null | wc -l | tr -d ' ') + if [[ "$quick_count" -gt 100 ]]; then + # Too many items - use bulk removal instead of item-by-item + local app_label="$app_name" + if [[ ${#app_label} -gt 24 ]]; then + app_label="${app_label:0:21}..." + fi + stop_section_spinner + start_section_spinner "Scanning Application Support... $app_count/$total_apps [$app_label, bulk clean]" + if [[ "$DRY_RUN" != "true" ]]; then + # Remove entire candidate directory in one go + safe_remove "$candidate" true > /dev/null 2>&1 || true + fi + found_any=true + cleaned_count=$((cleaned_count + 1)) + total_size_partial=true + continue + fi + + local item_found=false + local candidate_size_bytes=0 + local candidate_size_partial=false + local candidate_item_count=0 + while IFS= read -r -d '' item; do + [[ -e "$item" ]] || continue + item_found=true + candidate_item_count=$((candidate_item_count + 1)) + if [[ ! -L "$item" && (-f "$item" || -d "$item") ]]; then + local item_size_bytes="" + if item_size_bytes=$(app_support_item_size_bytes "$item" "$size_timeout_seconds"); then + if [[ "$item_size_bytes" =~ ^[0-9]+$ ]]; then + candidate_size_bytes=$((candidate_size_bytes + item_size_bytes)) + else + candidate_size_partial=true + fi + else + candidate_size_partial=true + fi + fi + if ((candidate_item_count % 250 == 0)); then + local current_time + current_time=$(get_epoch_seconds) + if [[ "$current_time" =~ ^[0-9]+$ ]] && ((current_time - last_progress_update >= 1)); then + local app_label="$app_name" + if [[ ${#app_label} -gt 24 ]]; then + app_label="${app_label:0:21}..." + fi + stop_section_spinner + start_section_spinner "Scanning Application Support... $app_count/$total_apps [$app_label, $candidate_item_count items]" + last_progress_update=$current_time + fi + fi + if [[ "$DRY_RUN" != "true" ]]; then + safe_remove "$item" true > /dev/null 2>&1 || true + fi + done < <(command find "$candidate" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true) + if [[ "$item_found" == "true" ]]; then + total_size_bytes=$((total_size_bytes + candidate_size_bytes)) + [[ "$candidate_size_partial" == "true" ]] && total_size_partial=true + cleaned_count=$((cleaned_count + 1)) + found_any=true + fi + fi + done + done + # Group Containers logs (explicit allowlist). + local known_group_containers=( + "group.com.apple.contentdelivery" + ) + for container in "${known_group_containers[@]}"; do + local container_path="$HOME/Library/Group Containers/$container" + local -a gc_candidates=("$container_path/Logs" "$container_path/Library/Logs") + for candidate in "${gc_candidates[@]}"; do + if [[ -d "$candidate" ]]; then + # Quick count check - skip if too many items + local quick_count + quick_count=$(command find "$candidate" -mindepth 1 -maxdepth 1 -printf '1\n' 2> /dev/null | wc -l | tr -d ' ') + if [[ "$quick_count" -gt 100 ]]; then + local container_label="$container" + if [[ ${#container_label} -gt 24 ]]; then + container_label="${container_label:0:21}..." + fi + stop_section_spinner + start_section_spinner "Scanning Application Support... group [$container_label, bulk clean]" + if [[ "$DRY_RUN" != "true" ]]; then + safe_remove "$candidate" true > /dev/null 2>&1 || true + fi + found_any=true + cleaned_count=$((cleaned_count + 1)) + total_size_partial=true + continue + fi + + local item_found=false + local candidate_size_bytes=0 + local candidate_size_partial=false + local candidate_item_count=0 + while IFS= read -r -d '' item; do + [[ -e "$item" ]] || continue + item_found=true + candidate_item_count=$((candidate_item_count + 1)) + if [[ ! -L "$item" && (-f "$item" || -d "$item") ]]; then + local item_size_bytes="" + if item_size_bytes=$(app_support_item_size_bytes "$item" "$size_timeout_seconds"); then + if [[ "$item_size_bytes" =~ ^[0-9]+$ ]]; then + candidate_size_bytes=$((candidate_size_bytes + item_size_bytes)) + else + candidate_size_partial=true + fi + else + candidate_size_partial=true + fi + fi + if ((candidate_item_count % 250 == 0)); then + local current_time + current_time=$(get_epoch_seconds) + if [[ "$current_time" =~ ^[0-9]+$ ]] && ((current_time - last_progress_update >= 1)); then + local container_label="$container" + if [[ ${#container_label} -gt 24 ]]; then + container_label="${container_label:0:21}..." + fi + stop_section_spinner + start_section_spinner "Scanning Application Support... group [$container_label, $candidate_item_count items]" + last_progress_update=$current_time + fi + fi + if [[ "$DRY_RUN" != "true" ]]; then + safe_remove "$item" true > /dev/null 2>&1 || true + fi + done < <(command find "$candidate" -mindepth 1 -maxdepth 1 -print0 2> /dev/null || true) + if [[ "$item_found" == "true" ]]; then + total_size_bytes=$((total_size_bytes + candidate_size_bytes)) + [[ "$candidate_size_partial" == "true" ]] && total_size_partial=true + cleaned_count=$((cleaned_count + 1)) + found_any=true + fi + fi + done + done + # Restore pipefail if it was previously set + if [[ "$pipefail_was_set" == "true" ]]; then + set -o pipefail + fi + eval "$_ng_state" + stop_section_spinner + if [[ "$found_any" == "true" ]]; then + local size_human + size_human=$(bytes_to_human "$total_size_bytes") + local total_size_kb=$(((total_size_bytes + 1023) / 1024)) + if [[ "$DRY_RUN" == "true" ]]; then + if [[ "$total_size_partial" == "true" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Application Support logs/caches${NC}, ${YELLOW}at least $size_human dry${NC}" + else + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} Application Support logs/caches${NC}, ${YELLOW}$size_human dry${NC}" + fi + else + if [[ "$total_size_partial" == "true" ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Application Support logs/caches${NC}, ${GREEN}at least $size_human${NC}" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Application Support logs/caches${NC}, ${GREEN}$size_human${NC}" + fi + fi + files_cleaned=$((files_cleaned + cleaned_count)) + total_size_cleaned=$((total_size_cleaned + total_size_kb)) + total_items=$((total_items + 1)) + note_activity + fi +} +# iOS device backup info. +check_ios_device_backups() { + local backup_dir="$HOME/Library/Application Support/MobileSync/Backup" + # Simplified check without find to avoid hanging. + if [[ -d "$backup_dir" ]]; then + local backup_kb + backup_kb=$(get_path_size_kb "$backup_dir") + if [[ -n "${backup_kb:-}" && "$backup_kb" -gt 102400 ]]; then + local backup_human + backup_human=$(command du -shP "$backup_dir" 2> /dev/null | awk '{print $1}') + if [[ -n "$backup_human" ]]; then + note_activity + echo -e " ${YELLOW}${ICON_WARNING}${NC} iOS backups: ${GREEN}${backup_human}${NC}${GRAY}, Path: $backup_dir${NC}" + fi + fi + fi + return 0 +} + +# Large file candidates (report only, no deletion). +check_large_file_candidates() { + local threshold_kb=$((1024 * 1024)) # 1GB + local found_any=false + + local mail_dir="$HOME/Library/Mail" + if [[ -d "$mail_dir" ]]; then + local mail_kb + mail_kb=$(get_path_size_kb "$mail_dir") + if [[ "$mail_kb" -ge "$threshold_kb" ]]; then + local mail_human + mail_human=$(bytes_to_human "$((mail_kb * 1024))") + echo -e " ${YELLOW}${ICON_WARNING}${NC} Mail data: ${GREEN}${mail_human}${NC}${GRAY}, Path: $mail_dir${NC}" + found_any=true + fi + fi + + local mail_downloads="$HOME/Library/Mail Downloads" + if [[ -d "$mail_downloads" ]]; then + local downloads_kb + downloads_kb=$(get_path_size_kb "$mail_downloads") + if [[ "$downloads_kb" -ge "$threshold_kb" ]]; then + local downloads_human + downloads_human=$(bytes_to_human "$((downloads_kb * 1024))") + echo -e " ${YELLOW}${ICON_WARNING}${NC} Mail downloads: ${GREEN}${downloads_human}${NC}${GRAY}, Path: $mail_downloads${NC}" + found_any=true + fi + fi + + local installer_path + for installer_path in /Applications/Install\ macOS*.app; do + if [[ -e "$installer_path" ]]; then + local installer_kb + installer_kb=$(get_path_size_kb "$installer_path") + if [[ "$installer_kb" -gt 0 ]]; then + local installer_human + installer_human=$(bytes_to_human "$((installer_kb * 1024))") + echo -e " ${YELLOW}${ICON_WARNING}${NC} macOS installer: ${GREEN}${installer_human}${NC}${GRAY}, Path: $installer_path${NC}" + found_any=true + fi + fi + done + + local updates_dir="$HOME/Library/Updates" + if [[ -d "$updates_dir" ]]; then + local updates_kb + updates_kb=$(get_path_size_kb "$updates_dir") + if [[ "$updates_kb" -ge "$threshold_kb" ]]; then + local updates_human + updates_human=$(bytes_to_human "$((updates_kb * 1024))") + echo -e " ${YELLOW}${ICON_WARNING}${NC} macOS updates cache: ${GREEN}${updates_human}${NC}${GRAY}, Path: $updates_dir${NC}" + found_any=true + fi + fi + + if [[ "${SYSTEM_CLEAN:-false}" != "true" ]] && command -v tmutil > /dev/null 2>&1 && + defaults read /Library/Preferences/com.apple.TimeMachine AutoBackup 2> /dev/null | grep -qE '^[01]$'; then + local snapshot_list snapshot_count + snapshot_list=$(run_with_timeout 3 tmutil listlocalsnapshots / 2> /dev/null || true) + if [[ -n "$snapshot_list" ]]; then + snapshot_count=$(echo "$snapshot_list" | { grep -Eo 'com\.apple\.TimeMachine\.[0-9]{4}-[0-9]{2}-[0-9]{2}-[0-9]{6}' || true; } | wc -l | awk '{print $1}') + if [[ "$snapshot_count" =~ ^[0-9]+$ && "$snapshot_count" -gt 0 ]]; then + echo -e " ${YELLOW}${ICON_WARNING}${NC} Time Machine local snapshots: ${GREEN}${snapshot_count}${NC}" + echo -e " ${GRAY}${ICON_REVIEW}${NC} ${GRAY}Review: tmutil listlocalsnapshots /${NC}" + found_any=true + fi + fi + fi + + if command -v docker > /dev/null 2>&1; then + local docker_output + docker_output=$(run_with_timeout 3 docker system df --format '{{.Type}}\t{{.Size}}\t{{.Reclaimable}}' 2> /dev/null || true) + if [[ -n "$docker_output" ]]; then + echo -e " ${YELLOW}${ICON_WARNING}${NC} Docker storage:" + while IFS=$'\t' read -r dtype dsize dreclaim; do + [[ -z "$dtype" ]] && continue + echo -e " ${GRAY}${ICON_LIST} $dtype: $dsize, Reclaimable: $dreclaim${NC}" + done <<< "$docker_output" + found_any=true + else + docker_output=$(run_with_timeout 3 docker system df 2> /dev/null || true) + if [[ -n "$docker_output" ]]; then + echo -e " ${YELLOW}${ICON_WARNING}${NC} Docker storage:" + echo -e " ${GRAY}${ICON_REVIEW}${NC} ${GRAY}Run: docker system df${NC}" + found_any=true + fi + fi + fi + + if [[ "$found_any" == "false" ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} No large items detected in common locations" + fi + + note_activity + return 0 +} + +# Apple Silicon specific caches (IS_M_SERIES). +clean_apple_silicon_caches() { + if [[ "${IS_M_SERIES:-false}" != "true" ]]; then + return 0 + fi + start_section "Apple Silicon updates" + safe_clean /Library/Apple/usr/share/rosetta/rosetta_update_bundle "Rosetta 2 cache" + safe_clean ~/Library/Caches/com.apple.rosetta.update "Rosetta 2 user cache" + safe_clean ~/Library/Caches/com.apple.amp.mediasevicesd "Apple Silicon media service cache" + end_section +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/app_protection.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/app_protection.sh new file mode 100755 index 0000000..144aac4 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/app_protection.sh @@ -0,0 +1,1476 @@ +#!/bin/bash +# Mole - Application Protection +# System critical and data-protected application lists + +set -euo pipefail + +if [[ -n "${MOLE_APP_PROTECTION_LOADED:-}" ]]; then + return 0 +fi +readonly MOLE_APP_PROTECTION_LOADED=1 + +_MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +[[ -z "${MOLE_BASE_LOADED:-}" ]] && source "$_MOLE_CORE_DIR/base.sh" + +# Declare WHITELIST_PATTERNS if not already set (used by is_path_whitelisted) +if ! declare -p WHITELIST_PATTERNS &> /dev/null; then + declare -a WHITELIST_PATTERNS=() +fi + +# Application Management + +# ============================================================================ +# Performance Note: +# - SYSTEM_CRITICAL_BUNDLES_FAST: Fast wildcard patterns for cleanup operations +# - SYSTEM_CRITICAL_BUNDLES: Detailed list for uninstall protection (lazy-loaded) +# ============================================================================ + +# Fast patterns for cleanup operations (used by should_protect_data) +# These wildcards provide adequate protection with minimal performance impact +readonly SYSTEM_CRITICAL_BUNDLES_FAST=( + "com.apple.*" + "loginwindow" + "dock" + "systempreferences" + "finder" + "safari" + "backgroundtaskmanagement*" + "keychain*" + "security*" + "bluetooth*" + "wifi*" + "network*" + "tcc" + "notification*" + "accessibility*" + "universalaccess*" + "HIToolbox*" + "textinput*" + "TextInput*" + "keyboard*" + "Keyboard*" + "inputsource*" + "InputSource*" + "keylayout*" + "KeyLayout*" + "GlobalPreferences" + ".GlobalPreferences" + "org.pqrs.Karabiner*" +) + +# Detailed list for uninstall protection +# Critical system components protected from uninstallation +# Note: We explicitly list system components instead of using "com.apple.*" wildcard +# to allow uninstallation of user-installed Apple apps (Xcode, Final Cut Pro, etc.) +readonly SYSTEM_CRITICAL_BUNDLES=( + # Core system applications (in /System/Applications/) + "com.apple.finder" + "com.apple.dock" + "com.apple.Safari" + "com.apple.mail" + "com.apple.systempreferences" + "com.apple.SystemSettings" + "com.apple.Settings*" + "com.apple.controlcenter*" + "com.apple.Spotlight" + "com.apple.notificationcenterui" + "com.apple.loginwindow" + "com.apple.Preview" + "com.apple.TextEdit" + "com.apple.Notes" + "com.apple.reminders" + "com.apple.iCal" + "com.apple.AddressBook" + "com.apple.Photos" + "com.apple.AppStore" + "com.apple.calculator" + "com.apple.Dictionary" + "com.apple.ScreenSharing" + "com.apple.ActivityMonitor" + "com.apple.Console" + "com.apple.DiskUtility" + "com.apple.KeychainAccess" + "com.apple.DigitalColorMeter" + "com.apple.grapher" + "com.apple.Terminal" + "com.apple.ScriptEditor2" + "com.apple.VoiceOverUtility" + "com.apple.BluetoothFileExchange" + "com.apple.print.PrinterProxy" + "com.apple.systempreferences*" + "com.apple.SystemProfiler" + "com.apple.FontBook" + "com.apple.ColorSyncUtility" + "com.apple.audio.AudioMIDISetup" + "com.apple.DirectoryUtility" + "com.apple.NetworkUtility" + "com.apple.exposelauncher" + "com.apple.MigrateAssistant" + "com.apple.RAIDUtility" + "com.apple.BootCampAssistant" + + # System services and daemons + "com.apple.SecurityAgent" + "com.apple.CoreServices*" + "com.apple.SystemUIServer" + "com.apple.backgroundtaskmanagement*" + "com.apple.loginitems*" + "com.apple.sharedfilelist*" + "com.apple.sfl*" + "com.apple.coreservices*" + "com.apple.metadata*" + "com.apple.MobileSoftwareUpdate*" + "com.apple.SoftwareUpdate*" + "com.apple.installer*" + "com.apple.frameworks*" + "com.apple.security*" + "com.apple.keychain*" + "com.apple.trustd*" + "com.apple.securityd*" + "com.apple.cloudd*" + "com.apple.iCloud*" + "com.apple.WiFi*" + "com.apple.airport*" + "com.apple.Bluetooth*" + + # Input methods (system built-in) + "com.apple.inputmethod.*" + "com.apple.inputsource*" + "com.apple.TextInput*" + "com.apple.CharacterPicker*" + "com.apple.PressAndHold*" + + # Legacy pattern-based entries (non com.apple.*) + "loginwindow" + "dock" + "systempreferences" + "finder" + "safari" + "backgroundtaskmanagementagent" + "keychain*" + "security*" + "bluetooth*" + "wifi*" + "network*" + "tcc" + "notification*" + "accessibility*" + "universalaccess*" + "HIToolbox*" + "textinput*" + "TextInput*" + "keyboard*" + "Keyboard*" + "inputsource*" + "InputSource*" + "keylayout*" + "KeyLayout*" + "GlobalPreferences" + ".GlobalPreferences" + "org.pqrs.Karabiner*" +) + +# Apple apps that CAN be uninstalled (from App Store or developer.apple.com) +readonly APPLE_UNINSTALLABLE_APPS=( + "com.apple.dt.*" # Xcode, Instruments, FileMerge + "com.apple.FinalCut*" # Final Cut Pro + "com.apple.Motion" + "com.apple.Compressor" + "com.apple.logic*" # Logic Pro + "com.apple.garageband*" # GarageBand + "com.apple.iMovie" + "com.apple.iWork.*" # Pages, Numbers, Keynote + "com.apple.MainStage*" + "com.apple.server.*" # macOS Server + "com.apple.Playgrounds" # Swift Playgrounds +) + +# Applications with sensitive data; protected during cleanup but removable +readonly DATA_PROTECTED_BUNDLES=( + # Input Methods (protected during cleanup, uninstall allowed) + "com.tencent.inputmethod.QQInput" + "com.sogou.inputmethod.*" + "com.baidu.inputmethod.*" + "com.googlecode.rimeime.*" + "im.rime.*" + "*.inputmethod" + "*.InputMethod" + "*IME" + + # System Utilities & Cleanup + "com.nektony.*" + "com.macpaw.*" + "com.freemacsoft.AppCleaner" + "com.omnigroup.omnidisksweeper" + "com.daisydiskapp.*" + "com.tunabellysoftware.*" + "com.grandperspectiv.*" + "com.binaryfruit.*" + + # Password Managers + "com.1password.*" + "com.agilebits.*" + "com.lastpass.*" + "com.dashlane.*" + "com.bitwarden.*" + "com.keepassx.*" + "org.keepassx.*" + "org.keepassxc.*" + "com.authy.*" + "com.yubico.*" + + # IDEs & Editors + "com.jetbrains.*" + "JetBrains*" + "com.microsoft.VSCode" + "com.visualstudio.code.*" + "com.sublimetext.*" + "com.sublimehq.*" + "com.microsoft.VSCodeInsiders" + "com.apple.dt.Xcode" + "com.coteditor.CotEditor" + "com.macromates.TextMate" + "com.panic.Nova" + "abnerworks.Typora" + "com.uranusjr.macdown" + + # AI & LLM Tools + "com.todesktop.*" + "Cursor" + "com.anthropic.claude*" + "Claude" + "com.openai.chat*" + "ChatGPT" + "com.ollama.ollama" + "Ollama" + "com.lmstudio.lmstudio" + "LM Studio" + "co.supertool.chatbox" + "page.jan.jan" + "com.huggingface.huggingchat" + "Gemini" + "com.perplexity.Perplexity" + "com.drawthings.DrawThings" + "com.divamgupta.diffusionbee" + "com.exafunction.windsurf" + "com.quora.poe.electron" + "chat.openai.com.*" + + # Database Clients + "com.sequelpro.*" + "com.sequel-ace.*" + "com.tinyapp.*" + "com.dbeaver.*" + "com.navicat.*" + "com.mongodb.compass" + "com.redis.RedisInsight" + "com.pgadmin.pgadmin4" + "com.eggerapps.Sequel-Pro" + "com.valentina-db.Valentina-Studio" + "com.dbvis.DbVisualizer" + + # API & Network Tools + "com.postmanlabs.mac" + "com.konghq.insomnia" + "com.CharlesProxy.*" + "com.proxyman.*" + "com.getpaw.*" + "com.luckymarmot.Paw" + "com.charlesproxy.charles" + "com.telerik.Fiddler" + "com.usebruno.app" + + # Network Proxy & VPN Tools (Clash variants - use specific patterns to avoid false positives) + "com.clash.*" + "ClashX*" + "clash-*" + "Clash-*" + "*-clash" + "*-Clash" + "clash.*" + "Clash.*" + "clash_*" + "clashverge*" + "ClashVerge*" + "com.nssurge.surge-mac" + "*surge*" + "*Surge*" + "mihomo*" + "*openvpn*" + "*OpenVPN*" + "net.openvpn.*" + + # Proxy Clients + "*ShadowsocksX-NG*" + "com.qiuyuzhou.*" + "*v2ray*" + "*V2Ray*" + "*v2box*" + "*V2Box*" + "*nekoray*" + "*sing-box*" + "*OneBox*" + "*hiddify*" + "*Hiddify*" + "*loon*" + "*Loon*" + "*quantumult*" + + # Mesh & Corporate VPNs + "*tailscale*" + "io.tailscale.*" + "*zerotier*" + "com.zerotier.*" + "*1dot1dot1dot1*" # Cloudflare WARP + "*cloudflare*warp*" + + # Commercial VPNs + "*nordvpn*" + "*expressvpn*" + "*protonvpn*" + "*surfshark*" + "*windscribe*" + "*mullvad*" + "*privateinternetaccess*" + + # Screensaver & Wallpaper + "*Aerial.saver*" + "com.JohnCoates.Aerial*" + "*Fliqlo*" + "*fliqlo*" + + # Git & Version Control + "com.github.GitHubDesktop" + "com.sublimemerge" + "com.torusknot.SourceTreeNotMAS" + "com.git-tower.Tower*" + "com.gitfox.GitFox" + "com.github.Gitify" + "com.fork.Fork" + "com.axosoft.gitkraken" + + # Terminal & Shell + "com.googlecode.iterm2" + "net.kovidgoyal.kitty" + "io.alacritty" + "com.github.wez.wezterm" + "com.hyper.Hyper" + "com.mizage.divvy" + "com.fig.Fig" + "dev.warp.Warp-Stable" + "com.termius-dmg" + + # Docker & Virtualization + "com.docker.docker" + "com.getutm.UTM" + "com.vmware.fusion" + "com.parallels.desktop.*" + "org.virtualbox.app.VirtualBox" + "com.vagrant.*" + "com.orbstack.OrbStack" + + # System Monitoring + "com.bjango.istatmenus*" + "eu.exelban.Stats" + "com.monitorcontrol.*" + "com.bresink.system-toolkit.*" + "com.mediaatelier.MenuMeters" + "com.activity-indicator.app" + "net.cindori.sensei" + + # Window Management + "com.macitbetter.*" # BetterTouchTool, BetterSnapTool + "com.hegenberg.*" + "com.manytricks.*" # Moom, Witch, etc. + "com.divisiblebyzero.*" + "com.koingdev.*" + "com.if.Amphetamine" + "com.lwouis.alt-tab-macos" + "net.matthewpalmer.Vanilla" + "com.lightheadsw.Caffeine" + "com.contextual.Contexts" + "com.amethyst.Amethyst" + "com.knollsoft.Rectangle" + "com.knollsoft.Hookshot" + "com.surteesstudios.Bartender" + "com.gaosun.eul" + "com.pointum.hazeover" + + # Launcher & Automation + "com.runningwithcrayons.Alfred" + "com.raycast.macos" + "com.blacktree.Quicksilver" + "com.stairways.keyboardmaestro.*" + "com.manytricks.Butler" + "com.happenapps.Quitter" + "com.pilotmoon.scroll-reverser" + "org.pqrs.Karabiner-Elements" + "com.apple.Automator" + + # Note-Taking + "com.bear-writer.*" + "com.typora.*" + "com.ulyssesapp.*" + "com.literatureandlatte.*" + "com.dayoneapp.*" + "notion.id" + "md.obsidian" + "com.logseq.logseq" + "com.evernote.Evernote" + "com.onenote.mac" + "com.omnigroup.OmniOutliner*" + "net.shinyfrog.bear" + "com.goodnotes.GoodNotes" + "com.marginnote.MarginNote*" + "com.roamresearch.*" + "com.reflect.ReflectApp" + "com.inkdrop.*" + + # Design & Creative + "com.adobe.*" + "com.bohemiancoding.*" + "com.figma.*" + "com.framerx.*" + "com.zeplin.*" + "com.invisionapp.*" + "com.principle.*" + "com.pixelmatorteam.*" + "com.affinitydesigner.*" + "com.affinityphoto.*" + "com.affinitypublisher.*" + "com.linearity.curve" + "com.canva.CanvaDesktop" + "com.maxon.cinema4d" + "com.autodesk.*" + "com.sketchup.*" + + # Communication + "com.tencent.xinWeChat" + "com.tencent.qq" + "com.alibaba.DingTalkMac" + "com.alibaba.AliLang.osx" + "com.alibaba.alilang3.osx.ShipIt" + "com.alibaba.AlilangMgr.QueryNetworkInfo" + "us.zoom.xos" + "com.microsoft.teams*" + "com.slack.Slack" + "com.hnc.Discord" + "app.legcord.Legcord" + "org.telegram.desktop" + "ru.keepcoder.Telegram" + "net.whatsapp.WhatsApp" + "com.skype.skype" + "com.cisco.webexmeetings" + "com.ringcentral.RingCentral" + "com.readdle.smartemail-Mac" + "com.airmail.*" + "com.postbox-inc.postbox" + "com.tinyspeck.slackmacgap" + + # Task Management + "com.omnigroup.OmniFocus*" + "com.culturedcode.*" + "com.todoist.*" + "com.any.do.*" + "com.ticktick.*" + "com.microsoft.to-do" + "com.trello.trello" + "com.asana.nativeapp" + "com.clickup.*" + "com.monday.desktop" + "com.airtable.airtable" + "com.notion.id" + "com.linear.linear" + + # File Transfer & Sync + "com.panic.transmit*" + "com.binarynights.ForkLift*" + "com.noodlesoft.Hazel" + "com.cyberduck.Cyberduck" + "io.filezilla.FileZilla" + "com.apple.Xcode.CloudDocuments" + "com.synology.*" + + # Cloud Storage & Backup + "com.dropbox.*" + "com.getdropbox.*" + "*dropbox*" + "ws.agile.*" + "com.backblaze.*" + "*backblaze*" + "com.box.desktop*" + "*box.desktop*" + "com.microsoft.OneDrive*" + "com.microsoft.SyncReporter" + "*OneDrive*" + "com.google.GoogleDrive" + "com.google.keystone*" + "*GoogleDrive*" + "com.amazon.drive" + "com.apple.bird" + "com.apple.CloudDocs*" + "com.displaylink.*" + "com.fujitsu.pfu.ScanSnap*" + "com.citrix.*" + "org.xquartz.*" + "us.zoom.updater*" + "com.DigiDNA.iMazing*" + "com.shirtpocket.*" + "homebrew.mxcl.*" + + # Screenshot & Recording + "com.cleanshot.*" + "com.xnipapp.xnip" + "com.reincubate.camo" + "com.tunabellysoftware.ScreenFloat" + "net.telestream.screenflow*" + "com.techsmith.snagit*" + "com.techsmith.camtasia*" + "com.obsidianapp.screenrecorder" + "com.kap.Kap" + "com.getkap.*" + "com.linebreak.CloudApp" + "com.droplr.droplr-mac" + + # Media & Entertainment + "com.spotify.client" + "com.apple.Music" + "com.apple.podcasts" + "com.apple.BKAgentService" + "com.apple.iBooksX" + "com.apple.iBooks" + "com.blackmagic-design.*" + "com.colliderli.iina" + "org.videolan.vlc" + "io.mpv" + "tv.plex.player.desktop" + "com.netease.163music" + + # Web Browsers + "Firefox" + "org.mozilla.*" + + # License & App Stores + "com.paddle.Paddle*" + "com.setapp.DesktopClient" + "com.devmate.*" + "org.sparkle-project.Sparkle" +) + +# Centralized check for critical system components (case-insensitive) +is_critical_system_component() { + local token="$1" + [[ -z "$token" ]] && return 1 + + local lower + lower=$(echo "$token" | LC_ALL=C tr '[:upper:]' '[:lower:]') + + case "$lower" in + *backgroundtaskmanagement* | *loginitems* | *systempreferences* | *systemsettings* | *settings* | *preferences* | *controlcenter* | *biometrickit* | *sfl* | *tcc*) + return 0 + ;; + *) + return 1 + ;; + esac +} + +# Legacy function - preserved for backward compatibility +# Use should_protect_from_uninstall() or should_protect_data() instead +readonly PRESERVED_BUNDLE_PATTERNS=("${SYSTEM_CRITICAL_BUNDLES[@]}" "${DATA_PROTECTED_BUNDLES[@]}") + +# Check if bundle ID matches pattern (glob support) +bundle_matches_pattern() { + local bundle_id="$1" + local pattern="$2" + + [[ -z "$pattern" ]] && return 1 + + # Use bash [[ ]] for glob pattern matching (works with variables in bash 3.2+) + # shellcheck disable=SC2053 # allow glob pattern matching + if [[ "$bundle_id" == $pattern ]]; then + return 0 + fi + return 1 +} + +# Helper to build regex from array (Bash 3.2 compatible - no namerefs) +# $1: Variable name to store result +# $2...: Array elements (passed as expanded list) +build_regex_var() { + local var_name="$1" + shift + local regex="" + for pattern in "$@"; do + # Escape dots . -> \. + local p="${pattern//./\\.}" + # Convert * to .* + p="${p//\*/.*}" + # Start and end anchors + p="^${p}$" + + if [[ -z "$regex" ]]; then + regex="$p" + else + regex="$regex|$p" + fi + done + eval "$var_name=\"\$regex\"" +} + +# Lazy-loaded regex (only built when needed) +APPLE_UNINSTALLABLE_REGEX="" +SYSTEM_CRITICAL_REGEX="" +SYSTEM_CRITICAL_FAST_REGEX="" +DATA_PROTECTED_REGEX="" + +_ensure_uninstall_regex() { + if [[ -z "$SYSTEM_CRITICAL_REGEX" ]]; then + build_regex_var APPLE_UNINSTALLABLE_REGEX "${APPLE_UNINSTALLABLE_APPS[@]}" + build_regex_var SYSTEM_CRITICAL_REGEX "${SYSTEM_CRITICAL_BUNDLES[@]}" + fi +} + +_ensure_data_protection_regex() { + if [[ -z "$SYSTEM_CRITICAL_FAST_REGEX" ]]; then + build_regex_var SYSTEM_CRITICAL_FAST_REGEX "${SYSTEM_CRITICAL_BUNDLES_FAST[@]}" + build_regex_var DATA_PROTECTED_REGEX "${DATA_PROTECTED_BUNDLES[@]}" + fi +} + +# Check if application is a protected system component +should_protect_from_uninstall() { + local bundle_id="$1" + + _ensure_uninstall_regex + + if [[ "$bundle_id" =~ $APPLE_UNINSTALLABLE_REGEX ]]; then + return 1 + fi + + if [[ "$bundle_id" =~ $SYSTEM_CRITICAL_REGEX ]]; then + return 0 + fi + + return 1 +} + +# Check if application data should be protected during cleanup +should_protect_data() { + local bundle_id="$1" + + case "$bundle_id" in + com.apple.* | loginwindow | dock | systempreferences | finder | safari) + return 0 + ;; + backgroundtaskmanagement* | keychain* | security* | bluetooth* | wifi* | network* | tcc) + return 0 + ;; + notification* | accessibility* | universalaccess* | HIToolbox*) + return 0 + ;; + *inputmethod* | *InputMethod* | *IME | textinput* | TextInput*) + return 0 + ;; + keyboard* | Keyboard* | inputsource* | InputSource* | keylayout* | KeyLayout*) + return 0 + ;; + GlobalPreferences | .GlobalPreferences | org.pqrs.Karabiner*) + return 0 + ;; + com.1password.* | com.agilebits.* | com.lastpass.* | com.dashlane.* | com.bitwarden.*) + return 0 + ;; + com.jetbrains.* | JetBrains* | com.microsoft.* | com.visualstudio.*) + return 0 + ;; + com.sublimetext.* | com.sublimehq.* | Cursor | Claude | ChatGPT | Ollama) + return 0 + ;; + # Specific match to avoid ShellCheck redundancy warning with com.clash.* + com.clash.app) + return 0 + ;; + com.nssurge.* | com.v2ray.* | com.clash.* | ClashX* | Surge* | Shadowrocket* | Quantumult*) + return 0 + ;; + clash-* | Clash-* | *-clash | *-Clash | clash.* | Clash.* | clash_* | clashverge* | ClashVerge*) + return 0 + ;; + com.docker.* | com.getpostman.* | com.insomnia.*) + return 0 + ;; + com.tencent.* | com.sogou.* | com.baidu.* | com.googlecode.* | im.rime.*) + # These might have wildcards, check detailed list + for pattern in "${DATA_PROTECTED_BUNDLES[@]}"; do + if bundle_matches_pattern "$bundle_id" "$pattern"; then + return 0 + fi + done + return 1 + ;; + esac + + # Fallback: check against the full DATA_PROTECTED_BUNDLES list + for pattern in "${DATA_PROTECTED_BUNDLES[@]}"; do + if bundle_matches_pattern "$bundle_id" "$pattern"; then + return 0 + fi + done + + return 1 +} + +# Check if a path is protected from deletion +# Centralized logic to protect system settings, control center, and critical apps +# +# In uninstall mode (MOLE_UNINSTALL_MODE=1), only system-critical components are protected. +# Data-protected apps (VPNs, dev tools, etc.) can be uninstalled when user explicitly chooses to. +# +# Args: $1 - path to check +# Returns: 0 if protected, 1 if safe to delete +should_protect_path() { + local path="$1" + [[ -z "$path" ]] && return 1 + + local path_lower + path_lower=$(echo "$path" | LC_ALL=C tr '[:upper:]' '[:lower:]') + + # 1. Keyword-based matching for system components + # Protect System Settings, Preferences, Control Center, and related XPC services + # Also protect "Settings" (used in macOS Sequoia) and savedState files + if [[ "$path_lower" =~ systemsettings || "$path_lower" =~ systempreferences || "$path_lower" =~ controlcenter ]]; then + return 0 + fi + + # Additional check for com.apple.Settings (macOS Sequoia System Settings) + if [[ "$path_lower" =~ com\.apple\.settings ]]; then + return 0 + fi + + # Protect Notes cache (search index issues) + if [[ "$path_lower" =~ com\.apple\.notes ]]; then + return 0 + fi + + # 2. Protect caches critical for system UI rendering + # These caches are essential for modern macOS (Sonoma/Sequoia) system UI rendering + case "$path" in + # System Settings and Control Center caches (CRITICAL - prevents blank panel bug) + *com.apple.systempreferences.cache* | *com.apple.Settings.cache* | *com.apple.controlcenter.cache*) + return 0 + ;; + # Finder and Dock (system essential) + *com.apple.finder.cache* | *com.apple.dock.cache*) + return 0 + ;; + # System XPC services and sandboxed containers + */Library/Containers/com.apple.Settings* | */Library/Containers/com.apple.SystemSettings* | */Library/Containers/com.apple.controlcenter*) + return 0 + ;; + */Library/Group\ Containers/com.apple.systempreferences* | */Library/Group\ Containers/com.apple.Settings*) + return 0 + ;; + # Shared file lists for System Settings (macOS Sequoia) - Issue #136 + */com.apple.sharedfilelist/*com.apple.Settings* | */com.apple.sharedfilelist/*com.apple.SystemSettings* | */com.apple.sharedfilelist/*systempreferences*) + return 0 + ;; + esac + + # 3. Extract bundle ID from sandbox paths + # Matches: .../Library/Containers/bundle.id/... + # Matches: .../Library/Group Containers/group.id/... + if [[ "$path" =~ /Library/Containers/([^/]+) ]] || [[ "$path" =~ /Library/Group\ Containers/([^/]+) ]]; then + local bundle_id="${BASH_REMATCH[1]}" + # In uninstall mode, only system components are protected; skip data protection + if [[ "${MOLE_UNINSTALL_MODE:-0}" != "1" ]] && should_protect_data "$bundle_id"; then + return 0 + fi + fi + + # 4. Check for specific hardcoded critical patterns + case "$path" in + *com.apple.Settings* | *com.apple.SystemSettings* | *com.apple.controlcenter* | *com.apple.finder* | *com.apple.dock*) + return 0 + ;; + esac + + # 5. Protect critical preference files and user data + case "$path" in + */Library/Preferences/com.apple.dock.plist | */Library/Preferences/com.apple.finder.plist) + return 0 + ;; + # Bluetooth and WiFi configurations + */ByHost/com.apple.bluetooth.* | */ByHost/com.apple.wifi.*) + return 0 + ;; + # iCloud Drive - protect user's cloud synced data + */Library/Mobile\ Documents* | */Mobile\ Documents*) + return 0 + ;; + esac + + # 6. Match full path against protected patterns + # This catches things like /Users/tw93/Library/Caches/Claude when pattern is *Claude* + # In uninstall mode, only check system-critical bundles (user explicitly chose to uninstall) + if [[ "${MOLE_UNINSTALL_MODE:-0}" == "1" ]]; then + # Uninstall mode: first check if it's an uninstallable Apple app + for pattern in "${APPLE_UNINSTALLABLE_APPS[@]}"; do + if bundle_matches_pattern "$path" "$pattern"; then + return 1 # Can be uninstalled + fi + done + # Then check system-critical components + for pattern in "${SYSTEM_CRITICAL_BUNDLES[@]}"; do + if bundle_matches_pattern "$path" "$pattern"; then + return 0 + fi + done + else + # Normal mode (cleanup): protect both system-critical and data-protected bundles + for pattern in "${SYSTEM_CRITICAL_BUNDLES[@]}" "${DATA_PROTECTED_BUNDLES[@]}"; do + if bundle_matches_pattern "$path" "$pattern"; then + return 0 + fi + done + fi + + # 7. Check if the filename itself matches any protected patterns + # Skip in uninstall mode - user explicitly chose to remove this app + if [[ "${MOLE_UNINSTALL_MODE:-0}" != "1" ]]; then + local filename + filename=$(basename "$path") + if should_protect_data "$filename"; then + return 0 + fi + fi + + return 1 +} + +# Check if a path is protected by whitelist patterns +# Args: $1 - path to check +# Returns: 0 if whitelisted, 1 if not +is_path_whitelisted() { + local target_path="$1" + [[ -z "$target_path" ]] && return 1 + + # Normalize path (remove trailing slash) + local normalized_target="${target_path%/}" + + # Empty whitelist means nothing is protected + [[ ${#WHITELIST_PATTERNS[@]} -eq 0 ]] && return 1 + + for pattern in "${WHITELIST_PATTERNS[@]}"; do + # Pattern is already expanded/normalized in bin/clean.sh + local check_pattern="${pattern%/}" + local has_glob="false" + case "$check_pattern" in + *\** | *\?* | *\[*) + has_glob="true" + ;; + esac + + # Check for exact match or glob pattern match + # shellcheck disable=SC2053 + if [[ "$normalized_target" == "$check_pattern" ]] || + [[ "$normalized_target" == $check_pattern ]]; then + return 0 + fi + + # Check if target is a parent directory of a whitelisted path + # e.g., if pattern is /path/to/dir/subdir and target is /path/to/dir, + # the target should be protected to preserve its whitelisted children + if [[ "$check_pattern" == "$normalized_target"/* ]]; then + return 0 + fi + + # Check if target is a child of a whitelisted directory path + if [[ "$has_glob" == "false" && "$normalized_target" == "$check_pattern"/* ]]; then + return 0 + fi + done + + return 1 +} + +# Locate files associated with an application +find_app_files() { + local bundle_id="$1" + local app_name="$2" + + # Early validation: require at least one valid identifier + # Skip scanning if both bundle_id and app_name are invalid + if [[ -z "$bundle_id" || "$bundle_id" == "unknown" ]] && + [[ -z "$app_name" || ${#app_name} -lt 2 ]]; then + return 0 # Silent return to avoid invalid scanning + fi + + local -a files_to_clean=() + + # Normalize app name for matching - generate all common naming variants + # Apps use inconsistent naming: "Maestro Studio" vs "maestro-studio" vs "MaestroStudio" + # Note: Using tr for lowercase conversion (Bash 3.2 compatible, no ${var,,} support) + local nospace_name="${app_name// /}" # "Maestro Studio" -> "MaestroStudio" + local underscore_name="${app_name// /_}" # "Maestro Studio" -> "Maestro_Studio" + local hyphen_name="${app_name// /-}" # "Maestro Studio" -> "Maestro-Studio" + local lowercase_name=$(echo "$app_name" | tr '[:upper:]' '[:lower:]') # "Zed Nightly" -> "zed nightly" + local lowercase_nospace=$(echo "$nospace_name" | tr '[:upper:]' '[:lower:]') # "MaestroStudio" -> "maestrostudio" + local lowercase_hyphen=$(echo "$hyphen_name" | tr '[:upper:]' '[:lower:]') # "Maestro-Studio" -> "maestro-studio" + local lowercase_underscore=$(echo "$underscore_name" | tr '[:upper:]' '[:lower:]') # "Maestro_Studio" -> "maestro_studio" + + # Extract base name by removing common version/channel suffixes + # "Zed Nightly" -> "Zed", "Firefox Developer Edition" -> "Firefox" + local base_name="$app_name" + local version_suffixes="Nightly|Beta|Alpha|Dev|Canary|Preview|Insider|Edge|Stable|Release|RC|LTS" + version_suffixes+="|Developer Edition|Technology Preview" + if [[ "$app_name" =~ ^(.+)[[:space:]]+(${version_suffixes})$ ]]; then + base_name="${BASH_REMATCH[1]}" + fi + local base_lowercase=$(echo "$base_name" | tr '[:upper:]' '[:lower:]') # "Zed" -> "zed" + + # Standard path patterns for user-level files + local -a user_patterns=( + "$HOME/Library/Application Support/$app_name" + "$HOME/Library/Application Support/$bundle_id" + "$HOME/Library/Caches/$bundle_id" + "$HOME/Library/Caches/$app_name" + "$HOME/Library/Logs/$app_name" + "$HOME/Library/Logs/$bundle_id" + "$HOME/Library/Application Support/CrashReporter/$app_name" + "$HOME/Library/Saved Application State/$bundle_id.savedState" + "$HOME/Library/Containers/$bundle_id" + "$HOME/Library/WebKit/$bundle_id" + "$HOME/Library/WebKit/com.apple.WebKit.WebContent/$bundle_id" + "$HOME/Library/HTTPStorages/$bundle_id" + "$HOME/Library/Cookies/$bundle_id.binarycookies" + "$HOME/Library/LaunchAgents/$bundle_id.plist" + "$HOME/Library/Application Scripts/$bundle_id" + "$HOME/Library/Services/$app_name.workflow" + "$HOME/Library/QuickLook/$app_name.qlgenerator" + "$HOME/Library/Internet Plug-Ins/$app_name.plugin" + "$HOME/Library/Audio/Plug-Ins/Components/$app_name.component" + "$HOME/Library/Audio/Plug-Ins/VST/$app_name.vst" + "$HOME/Library/Audio/Plug-Ins/VST3/$app_name.vst3" + "$HOME/Library/Audio/Plug-Ins/Digidesign/$app_name.dpm" + "$HOME/Library/PreferencePanes/$app_name.prefPane" + "$HOME/Library/Input Methods/$app_name.app" + "$HOME/Library/Input Methods/$bundle_id.app" + "$HOME/Library/Screen Savers/$app_name.saver" + "$HOME/Library/Frameworks/$app_name.framework" + "$HOME/Library/Autosave Information/$bundle_id" + "$HOME/Library/Contextual Menu Items/$app_name.plugin" + "$HOME/Library/Spotlight/$app_name.mdimporter" + "$HOME/Library/ColorPickers/$app_name.colorPicker" + "$HOME/Library/Workflows/$app_name.workflow" + "$HOME/.config/$app_name" + "$HOME/.local/share/$app_name" + "$HOME/.$app_name" + "$HOME/.$app_name"rc + ) + + # Add all naming variants to cover inconsistent app directory naming + # Issue #377: Apps create directories with various naming conventions + if [[ ${#app_name} -gt 3 && "$app_name" =~ [[:space:]] ]]; then + user_patterns+=( + # Compound naming (MaestroStudio, Maestro_Studio, Maestro-Studio) + "$HOME/Library/Application Support/$nospace_name" + "$HOME/Library/Caches/$nospace_name" + "$HOME/Library/Logs/$nospace_name" + "$HOME/Library/Application Support/$underscore_name" + "$HOME/Library/Application Support/$hyphen_name" + # Lowercase variants (maestrostudio, maestro-studio, maestro_studio) + "$HOME/.config/$lowercase_nospace" + "$HOME/.config/$lowercase_hyphen" + "$HOME/.config/$lowercase_underscore" + "$HOME/.local/share/$lowercase_nospace" + "$HOME/.local/share/$lowercase_hyphen" + "$HOME/.local/share/$lowercase_underscore" + ) + fi + + # Add base name variants for versioned apps (e.g., "Zed Nightly" -> check for "zed") + if [[ "$base_name" != "$app_name" && ${#base_name} -gt 2 ]]; then + user_patterns+=( + "$HOME/Library/Application Support/$base_name" + "$HOME/Library/Caches/$base_name" + "$HOME/Library/Logs/$base_name" + "$HOME/.config/$base_lowercase" + "$HOME/.local/share/$base_lowercase" + "$HOME/.$base_lowercase" + ) + fi + + # Issue #422: Zed channel builds can leave data under another channel bundle id. + # Example: uninstalling dev.zed.Zed-Nightly should also detect dev.zed.Zed-Preview leftovers. + if [[ "$bundle_id" =~ ^dev\.zed\.Zed- ]] && [[ -d "$HOME/Library/HTTPStorages" ]]; then + while IFS= read -r -d '' zed_http_storage; do + files_to_clean+=("$zed_http_storage") + done < <(command find "$HOME/Library/HTTPStorages" -maxdepth 1 -name "dev.zed.Zed-*" -print0 2> /dev/null) + fi + + # Process standard patterns + for p in "${user_patterns[@]}"; do + local expanded_path="${p/#\~/$HOME}" + # Skip if path doesn't exist + [[ ! -e "$expanded_path" ]] && continue + + # Safety check: Skip if path ends with a common directory name (indicates empty app_name/bundle_id) + # This prevents deletion of entire Library subdirectories when bundle_id is empty + case "$expanded_path" in + */Library/Application\ Support | */Library/Application\ Support/ | \ + */Library/Caches | */Library/Caches/ | \ + */Library/Logs | */Library/Logs/ | \ + */Library/Containers | */Library/Containers/ | \ + */Library/WebKit | */Library/WebKit/ | \ + */Library/HTTPStorages | */Library/HTTPStorages/ | \ + */Library/Application\ Scripts | */Library/Application\ Scripts/ | \ + */Library/Autosave\ Information | */Library/Autosave\ Information/ | \ + */Library/Group\ Containers | */Library/Group\ Containers/) + continue + ;; + esac + + files_to_clean+=("$expanded_path") + done + + # Handle Preferences and ByHost variants (only if bundle_id is valid) + if [[ -n "$bundle_id" && "$bundle_id" != "unknown" && ${#bundle_id} -gt 3 ]]; then + [[ -f ~/Library/Preferences/"$bundle_id".plist ]] && files_to_clean+=("$HOME/Library/Preferences/$bundle_id.plist") + [[ -d ~/Library/Preferences/ByHost ]] && while IFS= read -r -d '' pref; do + files_to_clean+=("$pref") + done < <(command find ~/Library/Preferences/ByHost -maxdepth 1 \( -name "$bundle_id*.plist" \) -print0 2> /dev/null) + + # Group Containers (special handling) + if [[ -d ~/Library/Group\ Containers ]]; then + while IFS= read -r -d '' container; do + files_to_clean+=("$container") + done < <(command find ~/Library/Group\ Containers -maxdepth 1 \( -name "*$bundle_id*" \) -print0 2> /dev/null) + fi + fi + + # Launch Agents by name (special handling) + # Note: LaunchDaemons are system-level and handled in find_app_system_files() + # Minimum 5-char threshold prevents false positives (e.g., "Time" matching system agents) + # Short-name apps (e.g., Zoom, Arc) are still cleaned via bundle_id matching above + # Security: Common words are excluded to prevent matching unrelated plist files + if [[ ${#app_name} -ge 5 ]] && [[ -d ~/Library/LaunchAgents ]]; then + # Skip common words that could match many unrelated LaunchAgents + # These are either generic terms or names that overlap with system/common utilities + local common_words="Music|Notes|Photos|Finder|Safari|Preview|Calendar|Contacts|Messages|Reminders|Clock|Weather|Stocks|Books|News|Podcasts|Voice|Files|Store|System|Helper|Agent|Daemon|Service|Update|Sync|Backup|Cloud|Manager|Monitor|Server|Client|Worker|Runner|Launcher|Driver|Plugin|Extension|Widget|Utility" + if [[ "$app_name" =~ ^($common_words)$ ]]; then + debug_log "Skipping LaunchAgent name search for common word: $app_name" + else + while IFS= read -r -d '' plist; do + local plist_name=$(basename "$plist") + # Skip Apple's LaunchAgents + if [[ "$plist_name" =~ ^com\.apple\. ]]; then + continue + fi + files_to_clean+=("$plist") + done < <(command find ~/Library/LaunchAgents -maxdepth 1 -name "*$app_name*.plist" -print0 2> /dev/null) + fi + fi + + # Handle specialized toolchains and development environments + # 1. DevEco-Studio (Huawei) + if [[ "$app_name" =~ DevEco|deveco ]] || [[ "$bundle_id" =~ huawei.*deveco ]]; then + for d in ~/DevEcoStudioProjects ~/DevEco-Studio ~/Library/Application\ Support/Huawei ~/Library/Caches/Huawei ~/Library/Logs/Huawei ~/Library/Huawei ~/Huawei ~/HarmonyOS ~/.huawei ~/.ohos; do + [[ -d "$d" ]] && files_to_clean+=("$d") + done + fi + + # 2. Android Studio (Google) + if [[ "$app_name" =~ Android.*Studio|android.*studio ]] || [[ "$bundle_id" =~ google.*android.*studio|jetbrains.*android ]]; then + for d in ~/AndroidStudioProjects ~/Library/Android ~/.android; do + [[ -d "$d" ]] && files_to_clean+=("$d") + done + [[ -d ~/Library/Application\ Support/Google ]] && while IFS= read -r -d '' d; do files_to_clean+=("$d"); done < <(command find ~/Library/Application\ Support/Google -maxdepth 1 -name "AndroidStudio*" -print0 2> /dev/null) + fi + + # 3. Xcode (Apple) + if [[ "$app_name" =~ Xcode|xcode ]] || [[ "$bundle_id" =~ apple.*xcode ]]; then + [[ -d ~/Library/Developer ]] && files_to_clean+=("$HOME/Library/Developer") + [[ -d ~/.Xcode ]] && files_to_clean+=("$HOME/.Xcode") + fi + + # 4. JetBrains (IDE settings) + if [[ "$bundle_id" =~ jetbrains ]] || [[ "$app_name" =~ IntelliJ|PyCharm|WebStorm|GoLand|RubyMine|PhpStorm|CLion|DataGrip|Rider ]]; then + for base in ~/Library/Application\ Support/JetBrains ~/Library/Caches/JetBrains ~/Library/Logs/JetBrains; do + [[ -d "$base" ]] && while IFS= read -r -d '' d; do files_to_clean+=("$d"); done < <(command find "$base" -maxdepth 1 -name "${app_name}*" -print0 2> /dev/null) + done + fi + + # 5. Unity / Unreal / Godot + [[ "$app_name" =~ Unity|unity ]] && [[ -d ~/Library/Unity ]] && files_to_clean+=("$HOME/Library/Unity") + [[ "$app_name" =~ Unreal|unreal ]] && [[ -d ~/Library/Application\ Support/Epic ]] && files_to_clean+=("$HOME/Library/Application Support/Epic") + [[ "$app_name" =~ Godot|godot ]] && [[ -d ~/Library/Application\ Support/Godot ]] && files_to_clean+=("$HOME/Library/Application Support/Godot") + + # 6. Tools + if [[ "$bundle_id" =~ microsoft.*[vV][sS][cC]ode ]]; then + [[ -d "$HOME/.vscode" ]] && files_to_clean+=("$HOME/.vscode") + [[ -d "$HOME/Library/Caches/com.microsoft.VSCode.ShipIt" ]] && files_to_clean+=("$HOME/Library/Caches/com.microsoft.VSCode.ShipIt") + [[ -d "$HOME/Library/Caches/com.microsoft.VSCodeInsiders.ShipIt" ]] && files_to_clean+=("$HOME/Library/Caches/com.microsoft.VSCodeInsiders.ShipIt") + fi + [[ "$app_name" =~ Docker ]] && [[ -d ~/.docker ]] && files_to_clean+=("$HOME/.docker") + + # 6.1 Maestro Studio + if [[ "$bundle_id" == "com.maestro.studio" ]] || [[ "$lowercase_name" =~ maestro[[:space:]]*studio ]]; then + [[ -d ~/.mobiledev ]] && files_to_clean+=("$HOME/.mobiledev") + fi + + # 7. Raycast + if [[ "$bundle_id" == "com.raycast.macos" ]]; then + # Standard user directories + local raycast_dirs=( + "$HOME/Library/Application Support" + "$HOME/Library/Application Scripts" + "$HOME/Library/Containers" + ) + for dir in "${raycast_dirs[@]}"; do + [[ -d "$dir" ]] && while IFS= read -r -d '' p; do + files_to_clean+=("$p") + done < <(command find "$dir" -maxdepth 1 -type d -iname "*raycast*" -print0 2> /dev/null) + done + + # Explicit Raycast container directories (hardcoded leftovers) + [[ -d "$HOME/Library/Containers/com.raycast.macos.BrowserExtension" ]] && files_to_clean+=("$HOME/Library/Containers/com.raycast.macos.BrowserExtension") + [[ -d "$HOME/Library/Containers/com.raycast.macos.RaycastAppIntents" ]] && files_to_clean+=("$HOME/Library/Containers/com.raycast.macos.RaycastAppIntents") + + # Cache (deeper search) + [[ -d "$HOME/Library/Caches" ]] && while IFS= read -r -d '' p; do + files_to_clean+=("$p") + done < <(command find "$HOME/Library/Caches" -maxdepth 2 -type d -iname "*raycast*" -print0 2> /dev/null) + + # VSCode extension storage + local vscode_global="$HOME/Library/Application Support/Code/User/globalStorage" + [[ -d "$vscode_global" ]] && while IFS= read -r -d '' p; do + files_to_clean+=("$p") + done < <(command find "$vscode_global" -maxdepth 1 -type d -iname "*raycast*" -print0 2> /dev/null) + fi + + # Output results + if [[ ${#files_to_clean[@]} -gt 0 ]]; then + printf '%s\n' "${files_to_clean[@]}" + fi + return 0 +} + +get_diagnostic_report_paths_for_app() { + local app_path="$1" + local app_name="$2" + local directory="$3" + local prefix="" + local exec_name="" + local nospace_name="${app_name// /}" + + [[ -z "$app_path" || -z "$app_name" || -z "$directory" ]] && return 0 + [[ ! -d "$directory" ]] && return 0 + + if [[ -f "$app_path/Contents/Info.plist" ]]; then + exec_name=$(defaults read "$app_path/Contents/Info.plist" CFBundleExecutable 2> /dev/null || echo "") + if [[ -z "$exec_name" ]]; then + exec_name=$(grep -A1 "CFBundleExecutable" "$app_path/Contents/Info.plist" 2> /dev/null | grep "" | sed -n 's/.*\([^<]*\)<\/string>.*/\1/p' | head -1) + fi + fi + prefix="${exec_name:-$nospace_name}" + [[ -z "$prefix" || ${#prefix} -lt 3 ]] && return 0 + + local dir_abs + dir_abs=$(cd "$directory" 2> /dev/null && pwd -P 2> /dev/null) || return 0 + while IFS= read -r -d '' f; do + [[ -z "$f" ]] && continue + local base + base=$(basename "$f" 2> /dev/null) + case "$base" in + "$prefix".* | "$prefix"_* | "$prefix"-*) ;; + *) continue ;; + esac + case "$base" in + *.ips | *.crash | *.spin) ;; + *) continue ;; + esac + printf '%s\n' "$f" + done < <( + find "$dir_abs" -maxdepth 1 -type f \ + \( -name "${prefix}.*" -o -name "${prefix}_*" -o -name "${prefix}-*" \) \ + -print0 2> /dev/null || true + ) + return 0 +} + +# Locate system-level application files +find_app_system_files() { + local bundle_id="$1" + local app_name="$2" + local -a system_files=() + + # Generate all naming variants (same as find_app_files for consistency) + local nospace_name="${app_name// /}" + local underscore_name="${app_name// /_}" + local hyphen_name="${app_name// /-}" + local lowercase_hyphen=$(echo "$hyphen_name" | tr '[:upper:]' '[:lower:]') + + # Standard system path patterns + local -a system_patterns=( + "/Library/Application Support/$app_name" + "/Library/Application Support/$bundle_id" + "/Library/LaunchAgents/$bundle_id.plist" + "/Library/LaunchDaemons/$bundle_id.plist" + "/Library/Preferences/$bundle_id.plist" + "/Library/Receipts/$bundle_id.bom" + "/Library/Receipts/$bundle_id.plist" + "/Library/Frameworks/$app_name.framework" + "/Library/Internet Plug-Ins/$app_name.plugin" + "/Library/Input Methods/$app_name.app" + "/Library/Input Methods/$bundle_id.app" + "/Library/Audio/Plug-Ins/Components/$app_name.component" + "/Library/Audio/Plug-Ins/VST/$app_name.vst" + "/Library/Audio/Plug-Ins/VST3/$app_name.vst3" + "/Library/Audio/Plug-Ins/Digidesign/$app_name.dpm" + "/Library/QuickLook/$app_name.qlgenerator" + "/Library/PreferencePanes/$app_name.prefPane" + "/Library/Screen Savers/$app_name.saver" + "/Library/Caches/$bundle_id" + "/Library/Caches/$app_name" + ) + + # Add all naming variants for apps with spaces in name + if [[ ${#app_name} -gt 3 && "$app_name" =~ [[:space:]] ]]; then + system_patterns+=( + "/Library/Application Support/$nospace_name" + "/Library/Caches/$nospace_name" + "/Library/Logs/$nospace_name" + "/Library/Application Support/$underscore_name" + "/Library/Application Support/$hyphen_name" + "/Library/Caches/$hyphen_name" + "/Library/Caches/$lowercase_hyphen" + ) + fi + + # Process patterns + for p in "${system_patterns[@]}"; do + [[ ! -e "$p" ]] && continue + + # Safety check: Skip if path ends with a common directory name (indicates empty app_name/bundle_id) + case "$p" in + /Library/Application\ Support | /Library/Application\ Support/ | \ + /Library/Caches | /Library/Caches/ | \ + /Library/Logs | /Library/Logs/) + continue + ;; + esac + + system_files+=("$p") + done + + # System LaunchAgents/LaunchDaemons by name + if [[ ${#app_name} -gt 3 ]]; then + for base in /Library/LaunchAgents /Library/LaunchDaemons; do + [[ -d "$base" ]] && while IFS= read -r -d '' plist; do + system_files+=("$plist") + done < <(command find "$base" -maxdepth 1 \( -name "*$app_name*.plist" \) -print0 2> /dev/null) + done + fi + + # Privileged Helper Tools and Receipts (special handling) + # Only search with bundle_id if it's valid (not empty and not "unknown") + if [[ -n "$bundle_id" && "$bundle_id" != "unknown" && ${#bundle_id} -gt 3 ]]; then + [[ -d /Library/PrivilegedHelperTools ]] && while IFS= read -r -d '' helper; do + system_files+=("$helper") + done < <(command find /Library/PrivilegedHelperTools -maxdepth 1 \( -name "$bundle_id*" \) -print0 2> /dev/null) + + [[ -d /private/var/db/receipts ]] && while IFS= read -r -d '' receipt; do + system_files+=("$receipt") + done < <(command find /private/var/db/receipts -maxdepth 1 \( -name "*$bundle_id*" \) -print0 2> /dev/null) + fi + + # Raycast system-level files + if [[ "$bundle_id" == "com.raycast.macos" ]]; then + [[ -d "/Library/Application Support" ]] && while IFS= read -r -d '' p; do + system_files+=("$p") + done < <(command find "/Library/Application Support" -maxdepth 1 -type d -iname "*raycast*" -print0 2> /dev/null) + fi + + local receipt_files="" + receipt_files=$(find_app_receipt_files "$bundle_id") + + local combined_files="" + if [[ ${#system_files[@]} -gt 0 ]]; then + combined_files=$(printf '%s\n' "${system_files[@]}") + fi + + if [[ -n "$receipt_files" ]]; then + if [[ -n "$combined_files" ]]; then + combined_files+=$'\n' + fi + combined_files+="$receipt_files" + fi + + if [[ -n "$combined_files" ]]; then + printf '%s\n' "$combined_files" | sort -u + fi +} + +# Locate files using installation receipts (BOM) +find_app_receipt_files() { + local bundle_id="$1" + + # Skip if no bundle ID + [[ -z "$bundle_id" || "$bundle_id" == "unknown" ]] && return 0 + + # Validate bundle_id format to prevent wildcard injection + # Only allow alphanumeric characters, dots, hyphens, and underscores + if [[ ! "$bundle_id" =~ ^[a-zA-Z0-9._-]+$ ]]; then + debug_log "Invalid bundle_id format: $bundle_id" + return 0 + fi + + local -a receipt_files=() + local -a bom_files=() + + # Find receipts matching the bundle ID + # Usually in /var/db/receipts/ + if [[ -d /private/var/db/receipts ]]; then + while IFS= read -r -d '' bom; do + bom_files+=("$bom") + done < <(find /private/var/db/receipts -maxdepth 1 -name "${bundle_id}*.bom" -print0 2> /dev/null) + fi + + # Process bom files if any found + if [[ ${#bom_files[@]} -gt 0 ]]; then + for bom_file in "${bom_files[@]}"; do + [[ ! -f "$bom_file" ]] && continue + + # Parse bom file + # lsbom -f: file paths only + # -s: suppress output (convert to text) + local bom_content + bom_content=$(lsbom -f -s "$bom_file" 2> /dev/null) + + while IFS= read -r file_path; do + # Standardize path (remove leading dot) + local clean_path="${file_path#.}" + + # Ensure absolute path + if [[ "$clean_path" != /* ]]; then + clean_path="/$clean_path" + fi + + # Path traversal protection: reject paths containing .. + if [[ "$clean_path" =~ \.\. ]]; then + debug_log "Rejected path traversal in BOM: $clean_path" + continue + fi + + # Normalize path (remove duplicate slashes) + clean_path=$(tr -s "/" <<< "$clean_path") + + # ------------------------------------------------------------------------ + # Safety check: restrict removal to trusted paths + # ------------------------------------------------------------------------ + local is_safe=false + + # Whitelisted prefixes (exclude /Users, /usr, /opt) + case "$clean_path" in + /Applications/*) is_safe=true ;; + /Library/Application\ Support/*) is_safe=true ;; + /Library/Caches/*) is_safe=true ;; + /Library/Logs/*) is_safe=true ;; + /Library/Preferences/*) is_safe=true ;; + /Library/LaunchAgents/*) is_safe=true ;; + /Library/LaunchDaemons/*) is_safe=true ;; + /Library/PrivilegedHelperTools/*) is_safe=true ;; + /Library/Extensions/*) is_safe=false ;; + *) is_safe=false ;; + esac + + # Hard blocks + case "$clean_path" in + /System/* | /usr/bin/* | /usr/lib/* | /bin/* | /sbin/* | /private/*) is_safe=false ;; + esac + + if [[ "$is_safe" == "true" && -e "$clean_path" ]]; then + # Skip top-level directories + if [[ "$clean_path" == "/Applications" || "$clean_path" == "/Library" ]]; then + continue + fi + + if declare -f should_protect_path > /dev/null 2>&1; then + if should_protect_path "$clean_path"; then + continue + fi + fi + + receipt_files+=("$clean_path") + fi + + done <<< "$bom_content" + done + fi + if [[ ${#receipt_files[@]} -gt 0 ]]; then + printf '%s\n' "${receipt_files[@]}" + fi +} + +# Terminate a running application +force_kill_app() { + # Gracefully terminates or force-kills an application + local app_name="$1" + local app_path="${2:-""}" + + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + debug_log "[DRY RUN] Would terminate running app: $app_name" + return 0 + fi + + # Get the executable name from bundle if app_path is provided + local exec_name="" + if [[ -n "$app_path" && -e "$app_path/Contents/Info.plist" ]]; then + exec_name=$(defaults read "$app_path/Contents/Info.plist" CFBundleExecutable 2> /dev/null || echo "") + fi + + # Use executable name for precise matching, fallback to app name + local match_pattern="${exec_name:-$app_name}" + + # Check if process is running using exact match only + if ! pgrep -x "$match_pattern" > /dev/null 2>&1; then + return 0 + fi + + # Try graceful termination first + pkill -x "$match_pattern" 2> /dev/null || true + sleep 2 + + # Check again after graceful kill + if ! pgrep -x "$match_pattern" > /dev/null 2>&1; then + return 0 + fi + + # Force kill if still running + pkill -9 -x "$match_pattern" 2> /dev/null || true + sleep 2 + + # If still running and sudo is available, try with sudo + if pgrep -x "$match_pattern" > /dev/null 2>&1; then + if sudo -n true 2> /dev/null; then + sudo pkill -9 -x "$match_pattern" 2> /dev/null || true + sleep 2 + fi + fi + + # Final check with longer timeout for stubborn processes + local retries=3 + while [[ $retries -gt 0 ]]; do + if ! pgrep -x "$match_pattern" > /dev/null 2>&1; then + return 0 + fi + sleep 1 + ((retries--)) + done + + # Still running after all attempts + pgrep -x "$match_pattern" > /dev/null 2>&1 && return 1 || return 0 +} + +# Note: calculate_total_size() is defined in lib/core/file_ops.sh diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/base.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/base.sh new file mode 100644 index 0000000..14dd48d --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/base.sh @@ -0,0 +1,759 @@ +#!/bin/bash +# Mole - Base Definitions and Utilities +# Core definitions, constants, and basic utility functions used by all modules + +set -euo pipefail + +# Prevent multiple sourcing +if [[ -n "${MOLE_BASE_LOADED:-}" ]]; then + return 0 +fi +readonly MOLE_BASE_LOADED=1 + +# ============================================================================ +# Color Definitions +# ============================================================================ +readonly ESC=$'\033' +readonly GREEN="${ESC}[0;32m" +readonly BLUE="${ESC}[1;34m" +readonly CYAN="${ESC}[0;36m" +readonly YELLOW="${ESC}[0;33m" +readonly PURPLE="${ESC}[0;35m" +readonly PURPLE_BOLD="${ESC}[1;35m" +readonly RED="${ESC}[0;31m" +readonly GRAY="${ESC}[0;90m" +readonly NC="${ESC}[0m" + +# ============================================================================ +# Icon Definitions +# ============================================================================ +readonly ICON_CONFIRM="◎" +readonly ICON_ADMIN="⚙" +readonly ICON_SUCCESS="✓" +readonly ICON_ERROR="☻" +readonly ICON_WARNING="◎" +readonly ICON_EMPTY="○" +readonly ICON_SOLID="●" +readonly ICON_LIST="•" +readonly ICON_SUBLIST="↳" +readonly ICON_ARROW="➤" +readonly ICON_DRY_RUN="→" +readonly ICON_REVIEW="☞" +readonly ICON_NAV_UP="↑" +readonly ICON_NAV_DOWN="↓" +readonly ICON_INFO="ℹ" + +# ============================================================================ +# LaunchServices Utility +# ============================================================================ + +# Locate the lsregister binary (path varies across macOS versions). +get_lsregister_path() { + local -a candidates=( + "/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister" + "/System/Library/CoreServices/Frameworks/LaunchServices.framework/Support/lsregister" + ) + local candidate="" + for candidate in "${candidates[@]}"; do + if [[ -x "$candidate" ]]; then + echo "$candidate" + return 0 + fi + done + echo "" + return 0 +} + +# ============================================================================ +# Global Configuration Constants +# ============================================================================ +readonly MOLE_TEMP_FILE_AGE_DAYS=7 # Temp file retention (days) +readonly MOLE_ORPHAN_AGE_DAYS=60 # Orphaned data retention (days) +readonly MOLE_MAX_PARALLEL_JOBS=15 # Parallel job limit +readonly MOLE_MAIL_DOWNLOADS_MIN_KB=5120 # Mail attachment size threshold +readonly MOLE_MAIL_AGE_DAYS=30 # Mail attachment retention (days) +readonly MOLE_LOG_AGE_DAYS=7 # Log retention (days) +readonly MOLE_CRASH_REPORT_AGE_DAYS=7 # Crash report retention (days) +readonly MOLE_SAVED_STATE_AGE_DAYS=30 # Saved state retention (days) - increased for safety +readonly MOLE_TM_BACKUP_SAFE_HOURS=48 # TM backup safety window (hours) +readonly MOLE_MAX_DS_STORE_FILES=500 # Max .DS_Store files to clean per scan +readonly MOLE_MAX_ORPHAN_ITERATIONS=100 # Max iterations for orphaned app data scan + +# ============================================================================ +# Whitelist Configuration +# ============================================================================ +readonly FINDER_METADATA_SENTINEL="FINDER_METADATA" +declare -a DEFAULT_WHITELIST_PATTERNS=( + "$HOME/Library/Caches/ms-playwright*" + "$HOME/.cache/huggingface*" + "$HOME/.m2/repository/*" + "$HOME/.gradle/caches/*" + "$HOME/.gradle/daemon/*" + "$HOME/.ollama/models/*" + "$HOME/Library/Caches/com.nssurge.surge-mac/*" + "$HOME/Library/Application Support/com.nssurge.surge-mac/*" + "$HOME/Library/Caches/org.R-project.R/R/renv/*" + "$HOME/Library/Caches/pypoetry/virtualenvs*" + "$HOME/Library/Caches/JetBrains*" + "$HOME/Library/Caches/com.jetbrains.toolbox*" + "$HOME/Library/Application Support/JetBrains*" + "$HOME/Library/Caches/com.apple.finder" + "$HOME/Library/Mobile Documents*" + # System-critical caches that affect macOS functionality and stability + # CRITICAL: Removing these will cause system search and UI issues + "$HOME/Library/Caches/com.apple.FontRegistry*" + "$HOME/Library/Caches/com.apple.spotlight*" + "$HOME/Library/Caches/com.apple.Spotlight*" + "$HOME/Library/Caches/CloudKit*" + "$FINDER_METADATA_SENTINEL" +) + +declare -a DEFAULT_OPTIMIZE_WHITELIST_PATTERNS=( + "check_brew_health" + "check_touchid" + "check_git_config" +) + +# ============================================================================ +# BSD Stat Compatibility +# ============================================================================ +readonly STAT_BSD="/usr/bin/stat" + +# Get file size in bytes +get_file_size() { + local file="$1" + local result + result=$($STAT_BSD -f%z "$file" 2> /dev/null) + echo "${result:-0}" +} + +# Get file modification time in epoch seconds +get_file_mtime() { + local file="$1" + [[ -z "$file" ]] && { + echo "0" + return + } + local result + result=$($STAT_BSD -f%m "$file" 2> /dev/null || echo "") + if [[ "$result" =~ ^[0-9]+$ ]]; then + echo "$result" + else + echo "0" + fi +} + +# Determine date command once +if [[ -x /bin/date ]]; then + _DATE_CMD="/bin/date" +else + _DATE_CMD="date" +fi + +# Get current time in epoch seconds (defensive against locale/aliases) +get_epoch_seconds() { + local result + result=$($_DATE_CMD +%s 2> /dev/null || echo "") + if [[ "$result" =~ ^[0-9]+$ ]]; then + echo "$result" + else + echo "0" + fi +} + +# Get file owner username +get_file_owner() { + local file="$1" + $STAT_BSD -f%Su "$file" 2> /dev/null || echo "" +} + +# ============================================================================ +# System Utilities +# ============================================================================ + +# Check if System Integrity Protection is enabled +# Returns: 0 if SIP is enabled, 1 if disabled or cannot determine +is_sip_enabled() { + if ! command -v csrutil > /dev/null 2>&1; then + return 0 + fi + + local sip_status + sip_status=$(csrutil status 2> /dev/null || echo "") + + if echo "$sip_status" | grep -qi "enabled"; then + return 0 + else + return 1 + fi +} + +# Detect CPU architecture +# Returns: "Apple Silicon" or "Intel" +detect_architecture() { + if [[ "$(uname -m)" == "arm64" ]]; then + echo "Apple Silicon" + else + echo "Intel" + fi +} + +# Get free disk space on root volume +# Returns: human-readable string (e.g., "100G") +get_free_space() { + local target="/" + if [[ -d "/System/Volumes/Data" ]]; then + target="/System/Volumes/Data" + fi + + df -h "$target" | awk 'NR==2 {print $4}' +} + +# Get Darwin kernel major version (e.g., 24 for 24.2.0) +# Returns 999 on failure to adopt conservative behavior (assume modern system) +get_darwin_major() { + local kernel + kernel=$(uname -r 2> /dev/null || true) + local major="${kernel%%.*}" + if [[ ! "$major" =~ ^[0-9]+$ ]]; then + # Return high number to skip potentially dangerous operations on unknown systems + major=999 + fi + echo "$major" +} + +# Check if Darwin kernel major version meets minimum +is_darwin_ge() { + local minimum="$1" + local major + major=$(get_darwin_major) + [[ "$major" -ge "$minimum" ]] +} + +# Get optimal parallel jobs for operation type (scan|io|compute|default) +get_optimal_parallel_jobs() { + local operation_type="${1:-default}" + local cpu_cores + cpu_cores=$(sysctl -n hw.ncpu 2> /dev/null || echo 4) + case "$operation_type" in + scan | io) + echo $((cpu_cores * 2)) + ;; + compute) + echo "$cpu_cores" + ;; + *) + echo $((cpu_cores + 2)) + ;; + esac +} + +# ============================================================================ +# User Context Utilities +# ============================================================================ + +is_root_user() { + [[ "$(id -u)" == "0" ]] +} + +get_invoking_user() { + if [[ -n "${_MOLE_INVOKING_USER_CACHE:-}" ]]; then + echo "$_MOLE_INVOKING_USER_CACHE" + return 0 + fi + + local user + if [[ -n "${SUDO_USER:-}" && "${SUDO_USER:-}" != "root" ]]; then + user="$SUDO_USER" + else + user="${USER:-}" + fi + + export _MOLE_INVOKING_USER_CACHE="$user" + echo "$user" +} + +get_invoking_uid() { + if [[ -n "${SUDO_UID:-}" ]]; then + echo "$SUDO_UID" + return 0 + fi + + local uid + uid=$(id -u 2> /dev/null || true) + echo "$uid" +} + +get_invoking_gid() { + if [[ -n "${SUDO_GID:-}" ]]; then + echo "$SUDO_GID" + return 0 + fi + + local gid + gid=$(id -g 2> /dev/null || true) + echo "$gid" +} + +get_invoking_home() { + if [[ -n "${SUDO_USER:-}" && "${SUDO_USER:-}" != "root" ]]; then + get_user_home "$SUDO_USER" + return 0 + fi + + echo "${HOME:-}" +} + +get_user_home() { + local user="$1" + local home="" + + if [[ -z "$user" ]]; then + echo "" + return 0 + fi + + if command -v dscl > /dev/null 2>&1; then + home=$(dscl . -read "/Users/$user" NFSHomeDirectory 2> /dev/null | awk '{print $2}' | head -1 || true) + fi + + if [[ -z "$home" ]]; then + home=$(eval echo "~$user" 2> /dev/null || true) + fi + + if [[ "$home" == "~"* ]]; then + home="" + fi + + echo "$home" +} + +ensure_user_dir() { + local raw_path="$1" + if [[ -z "$raw_path" ]]; then + return 0 + fi + + local target_path="$raw_path" + if [[ "$target_path" == "~"* ]]; then + target_path="${target_path/#\~/$HOME}" + fi + + mkdir -p "$target_path" 2> /dev/null || true + + if ! is_root_user; then + return 0 + fi + + local sudo_user="${SUDO_USER:-}" + if [[ -z "$sudo_user" || "$sudo_user" == "root" ]]; then + return 0 + fi + + local user_home + user_home=$(get_user_home "$sudo_user") + if [[ -z "$user_home" ]]; then + return 0 + fi + user_home="${user_home%/}" + + if [[ "$target_path" != "$user_home" && "$target_path" != "$user_home/"* ]]; then + return 0 + fi + + local owner_uid="${SUDO_UID:-}" + local owner_gid="${SUDO_GID:-}" + if [[ -z "$owner_uid" || -z "$owner_gid" ]]; then + owner_uid=$(id -u "$sudo_user" 2> /dev/null || true) + owner_gid=$(id -g "$sudo_user" 2> /dev/null || true) + fi + + if [[ -z "$owner_uid" || -z "$owner_gid" ]]; then + return 0 + fi + + local dir="$target_path" + while [[ -n "$dir" && "$dir" != "/" ]]; do + # Early stop: if ownership is already correct, no need to continue up the tree + if [[ -d "$dir" ]]; then + local current_uid + current_uid=$("$STAT_BSD" -f%u "$dir" 2> /dev/null || echo "") + if [[ "$current_uid" == "$owner_uid" ]]; then + break + fi + fi + + chown "$owner_uid:$owner_gid" "$dir" 2> /dev/null || true + + if [[ "$dir" == "$user_home" ]]; then + break + fi + dir=$(dirname "$dir") + if [[ "$dir" == "." ]]; then + break + fi + done +} + +ensure_user_file() { + local raw_path="$1" + if [[ -z "$raw_path" ]]; then + return 0 + fi + + local target_path="$raw_path" + if [[ "$target_path" == "~"* ]]; then + target_path="${target_path/#\~/$HOME}" + fi + + ensure_user_dir "$(dirname "$target_path")" + touch "$target_path" 2> /dev/null || true + + if ! is_root_user; then + return 0 + fi + + local sudo_user="${SUDO_USER:-}" + if [[ -z "$sudo_user" || "$sudo_user" == "root" ]]; then + return 0 + fi + + local user_home + user_home=$(get_user_home "$sudo_user") + if [[ -z "$user_home" ]]; then + return 0 + fi + user_home="${user_home%/}" + + if [[ "$target_path" != "$user_home" && "$target_path" != "$user_home/"* ]]; then + return 0 + fi + + local owner_uid="${SUDO_UID:-}" + local owner_gid="${SUDO_GID:-}" + if [[ -z "$owner_uid" || -z "$owner_gid" ]]; then + owner_uid=$(id -u "$sudo_user" 2> /dev/null || true) + owner_gid=$(id -g "$sudo_user" 2> /dev/null || true) + fi + + if [[ -n "$owner_uid" && -n "$owner_gid" ]]; then + chown "$owner_uid:$owner_gid" "$target_path" 2> /dev/null || true + fi +} + +# ============================================================================ +# Formatting Utilities +# ============================================================================ + +# Get brand-friendly localized name for an application +get_brand_name() { + local name="$1" + + # Detect if system primary language is Chinese (Cached) + if [[ -z "${MOLE_IS_CHINESE_SYSTEM:-}" ]]; then + local sys_lang + sys_lang=$(defaults read -g AppleLanguages 2> /dev/null | grep -o 'zh-Hans\|zh-Hant\|zh' | head -1 || echo "") + if [[ -n "$sys_lang" ]]; then + export MOLE_IS_CHINESE_SYSTEM="true" + else + export MOLE_IS_CHINESE_SYSTEM="false" + fi + fi + + local is_chinese="${MOLE_IS_CHINESE_SYSTEM}" + + # Return localized names based on system language + if [[ "$is_chinese" == true ]]; then + # Chinese system - prefer Chinese names + case "$name" in + "qiyimac" | "iQiyi") echo "爱奇艺" ;; + "wechat" | "WeChat") echo "微信" ;; + "QQ") echo "QQ" ;; + "VooV Meeting") echo "腾讯会议" ;; + "dingtalk" | "DingTalk") echo "钉钉" ;; + "NeteaseMusic" | "NetEase Music") echo "网易云音乐" ;; + "BaiduNetdisk" | "Baidu NetDisk") echo "百度网盘" ;; + "alipay" | "Alipay") echo "支付宝" ;; + "taobao" | "Taobao") echo "淘宝" ;; + "futunn" | "Futu NiuNiu") echo "富途牛牛" ;; + "tencent lemon" | "Tencent Lemon Cleaner" | "Tencent Lemon") echo "腾讯柠檬清理" ;; + *) echo "$name" ;; + esac + else + # Non-Chinese system - use English names + case "$name" in + "qiyimac" | "爱奇艺") echo "iQiyi" ;; + "wechat" | "微信") echo "WeChat" ;; + "QQ") echo "QQ" ;; + "腾讯会议") echo "VooV Meeting" ;; + "dingtalk" | "钉钉") echo "DingTalk" ;; + "网易云音乐") echo "NetEase Music" ;; + "百度网盘") echo "Baidu NetDisk" ;; + "alipay" | "支付宝") echo "Alipay" ;; + "taobao" | "淘宝") echo "Taobao" ;; + "富途牛牛") echo "Futu NiuNiu" ;; + "腾讯柠檬清理" | "Tencent Lemon Cleaner") echo "Tencent Lemon" ;; + "keynote" | "Keynote") echo "Keynote" ;; + "pages" | "Pages") echo "Pages" ;; + "numbers" | "Numbers") echo "Numbers" ;; + *) echo "$name" ;; + esac + fi +} + +# Convert bytes to human-readable format (e.g., 1.5GB) +# macOS (since Snow Leopard) uses Base-10 calculation (1 KB = 1000 bytes) +bytes_to_human() { + local bytes="$1" + [[ "$bytes" =~ ^[0-9]+$ ]] || { + echo "0B" + return 1 + } + + # GB: >= 1,000,000,000 bytes + if ((bytes >= 1000000000)); then + local scaled=$(((bytes * 100 + 500000000) / 1000000000)) + printf "%d.%02dGB\n" $((scaled / 100)) $((scaled % 100)) + # MB: >= 1,000,000 bytes + elif ((bytes >= 1000000)); then + local scaled=$(((bytes * 10 + 500000) / 1000000)) + printf "%d.%01dMB\n" $((scaled / 10)) $((scaled % 10)) + # KB: >= 1,000 bytes (round up to nearest KB instead of decimal) + elif ((bytes >= 1000)); then + printf "%dKB\n" $(((bytes + 500) / 1000)) + else + printf "%dB\n" "$bytes" + fi +} + +# Convert kilobytes to human-readable format +# Args: $1 - size in KB +# Returns: formatted string +bytes_to_human_kb() { + bytes_to_human "$((${1:-0} * 1024))" +} + +# ============================================================================ +# Temporary File Management +# ============================================================================ + +# Tracked temporary files and directories +declare -a MOLE_TEMP_FILES=() +declare -a MOLE_TEMP_DIRS=() + +# Create tracked temporary file +create_temp_file() { + local temp + temp=$(mktemp) || return 1 + register_temp_file "$temp" + echo "$temp" +} + +# Create tracked temporary directory +create_temp_dir() { + local temp + temp=$(mktemp -d) || return 1 + register_temp_dir "$temp" + echo "$temp" +} + +# Register existing file for cleanup +register_temp_file() { + MOLE_TEMP_FILES+=("$1") +} + +# Register existing directory for cleanup +register_temp_dir() { + MOLE_TEMP_DIRS+=("$1") +} + +# Create temp file with prefix (for analyze.sh compatibility) +# Compatible with both BSD mktemp (macOS default) and GNU mktemp (coreutils) +mktemp_file() { + local prefix="${1:-mole}" + local temp + local error_msg + # Use TMPDIR if set, otherwise /tmp + # Add .XXXXXX suffix to work with both BSD and GNU mktemp + if ! error_msg=$(mktemp "${TMPDIR:-/tmp}/${prefix}.XXXXXX" 2>&1); then + echo "Error: Failed to create temporary file: $error_msg" >&2 + return 1 + fi + temp="$error_msg" + register_temp_file "$temp" + echo "$temp" +} + +# Cleanup all tracked temp files and directories +cleanup_temp_files() { + stop_inline_spinner 2> /dev/null || true + local file + if [[ ${#MOLE_TEMP_FILES[@]} -gt 0 ]]; then + for file in "${MOLE_TEMP_FILES[@]}"; do + [[ -f "$file" ]] && rm -f "$file" 2> /dev/null || true + done + fi + + if [[ ${#MOLE_TEMP_DIRS[@]} -gt 0 ]]; then + for file in "${MOLE_TEMP_DIRS[@]}"; do + [[ -d "$file" ]] && rm -rf "$file" 2> /dev/null || true # SAFE: cleanup_temp_files + done + fi + + MOLE_TEMP_FILES=() + MOLE_TEMP_DIRS=() +} + +# ============================================================================ +# Section Tracking (for progress indication) +# ============================================================================ + +# Global section tracking variables +TRACK_SECTION=0 +SECTION_ACTIVITY=0 + +# Start a new section +# Args: $1 - section title +start_section() { + TRACK_SECTION=1 + SECTION_ACTIVITY=0 + echo "" + echo -e "${PURPLE_BOLD}${ICON_ARROW} $1${NC}" +} + +# End a section +# Shows "Nothing to tidy" if no activity was recorded +end_section() { + if [[ "${TRACK_SECTION:-0}" == "1" && "${SECTION_ACTIVITY:-0}" == "0" ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} Nothing to tidy" + fi + TRACK_SECTION=0 +} + +# Mark activity in current section +note_activity() { + if [[ "${TRACK_SECTION:-0}" == "1" ]]; then + SECTION_ACTIVITY=1 + fi +} + +# Start a section spinner with optional message +# Usage: start_section_spinner "message" +start_section_spinner() { + local message="${1:-Scanning...}" + stop_inline_spinner 2> /dev/null || true + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "$message" + fi +} + +# Stop spinner and clear the line +# Usage: stop_section_spinner +stop_section_spinner() { + # Always try to stop spinner (function handles empty PID gracefully) + stop_inline_spinner 2> /dev/null || true + # Always clear line to handle edge cases where spinner output remains + # (e.g., spinner was stopped elsewhere but line not cleared) + if [[ -t 1 ]]; then + printf "\r\033[2K" >&2 || true + fi +} + +# Safe terminal line clearing with terminal type detection +# Usage: safe_clear_lines [tty_device] +# Returns: 0 on success, 1 if terminal doesn't support ANSI +safe_clear_lines() { + local lines="${1:-1}" + local tty_device="${2:-/dev/tty}" + + # Use centralized ANSI support check (defined below) + # Note: This forward reference works because functions are parsed before execution + is_ansi_supported 2> /dev/null || return 1 + + # Clear lines one by one (more reliable than multi-line sequences) + local i + for ((i = 0; i < lines; i++)); do + printf "\033[1A\r\033[2K" > "$tty_device" 2> /dev/null || return 1 + done + + return 0 +} + +# Safe single line clear with fallback +# Usage: safe_clear_line [tty_device] +safe_clear_line() { + local tty_device="${1:-/dev/tty}" + + # Use centralized ANSI support check + is_ansi_supported 2> /dev/null || return 1 + + printf "\r\033[2K" > "$tty_device" 2> /dev/null || return 1 + return 0 +} + +# Update progress spinner if enough time has elapsed +# Usage: update_progress_if_needed [interval] +# Example: update_progress_if_needed "$completed" "$total" last_progress_update 2 +# Returns: 0 if updated, 1 if skipped +update_progress_if_needed() { + local completed="$1" + local total="$2" + local last_update_var="$3" # Name of variable holding last update time + local interval="${4:-2}" # Default: update every 2 seconds + + # Get current time + local current_time + current_time=$(get_epoch_seconds) + + # Get last update time from variable + local last_time + eval "last_time=\${$last_update_var:-0}" + [[ "$last_time" =~ ^[0-9]+$ ]] || last_time=0 + + # Check if enough time has elapsed + if [[ $((current_time - last_time)) -ge $interval ]]; then + # Update the spinner with progress + stop_section_spinner + start_section_spinner "Scanning items... $completed/$total" + + # Update the last_update_time variable + eval "$last_update_var=$current_time" + return 0 + fi + + return 1 +} + +# ============================================================================ +# Terminal Compatibility Checks +# ============================================================================ + +# Check if terminal supports ANSI escape codes +# Usage: is_ansi_supported +# Returns: 0 if supported, 1 if not +is_ansi_supported() { + # Check if running in interactive terminal + [[ -t 1 ]] || return 1 + + # Check TERM variable + [[ -n "${TERM:-}" ]] || return 1 + + # Check for known ANSI-compatible terminals + case "$TERM" in + xterm* | vt100 | vt220 | screen* | tmux* | ansi | linux | rxvt* | konsole*) + return 0 + ;; + dumb | unknown) + return 1 + ;; + *) + # Check terminfo database if available + if command -v tput > /dev/null 2>&1; then + # Test if terminal supports colors (good proxy for ANSI support) + local colors=$(tput colors 2> /dev/null || echo "0") + [[ "$colors" -ge 8 ]] && return 0 + fi + return 1 + ;; + esac +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/commands.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/commands.sh new file mode 100644 index 0000000..3d2559e --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/commands.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +# Shared command list for help text and completions. +MOLE_COMMANDS=( + "clean:Free up disk space" + "uninstall:Remove apps completely" + "optimize:Check and maintain system" + "analyze:Explore disk usage" + "status:Monitor system health" + "purge:Remove old project artifacts" + "installer:Find and remove installer files" + "touchid:Configure Touch ID for sudo" + "completion:Setup shell tab completion" + "update:Update to latest version" + "remove:Remove Mole from system" + "help:Show help" + "version:Show version" +) diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/common.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/common.sh new file mode 100755 index 0000000..38f7640 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/common.sh @@ -0,0 +1,186 @@ +#!/bin/bash +# Mole - Common Functions Library +# Main entry point that loads all core modules + +set -euo pipefail + +# Prevent multiple sourcing +if [[ -n "${MOLE_COMMON_LOADED:-}" ]]; then + return 0 +fi +readonly MOLE_COMMON_LOADED=1 + +_MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# Load core modules +source "$_MOLE_CORE_DIR/base.sh" +source "$_MOLE_CORE_DIR/log.sh" + +source "$_MOLE_CORE_DIR/timeout.sh" +source "$_MOLE_CORE_DIR/file_ops.sh" +source "$_MOLE_CORE_DIR/help.sh" +source "$_MOLE_CORE_DIR/ui.sh" +source "$_MOLE_CORE_DIR/app_protection.sh" + +# Load sudo management if available +if [[ -f "$_MOLE_CORE_DIR/sudo.sh" ]]; then + source "$_MOLE_CORE_DIR/sudo.sh" +fi + +# Update via Homebrew +update_via_homebrew() { + local current_version="$1" + local temp_update temp_upgrade + temp_update=$(mktemp_file "brew_update") + temp_upgrade=$(mktemp_file "brew_upgrade") + + # Set up trap for interruption (Ctrl+C) with inline cleanup + trap 'stop_inline_spinner 2>/dev/null; safe_remove "$temp_update" true; safe_remove "$temp_upgrade" true; echo ""; exit 130' INT TERM + + # Update Homebrew + if [[ -t 1 ]]; then + start_inline_spinner "Updating Homebrew..." + else + echo "Updating Homebrew..." + fi + + brew update > "$temp_update" 2>&1 & + local update_pid=$! + wait $update_pid 2> /dev/null || true # Continue even if brew update fails + + if [[ -t 1 ]]; then + stop_inline_spinner + fi + + # Upgrade Mole + if [[ -t 1 ]]; then + start_inline_spinner "Upgrading Mole..." + else + echo "Upgrading Mole..." + fi + + brew upgrade mole > "$temp_upgrade" 2>&1 & + local upgrade_pid=$! + wait $upgrade_pid 2> /dev/null || true # Continue even if brew upgrade fails + + local upgrade_output + upgrade_output=$(cat "$temp_upgrade") + + if [[ -t 1 ]]; then + stop_inline_spinner + fi + + # Clear trap + trap - INT TERM + + # Cleanup temp files + safe_remove "$temp_update" true + safe_remove "$temp_upgrade" true + + if echo "$upgrade_output" | grep -q "already installed"; then + local installed_version + installed_version=$(brew list --versions mole 2> /dev/null | awk '{print $2}') + [[ -z "$installed_version" ]] && installed_version=$(mo --version 2> /dev/null | awk '/Mole version/ {print $3; exit}') + echo "" + echo -e "${GREEN}${ICON_SUCCESS}${NC} Already on latest version, ${installed_version:-$current_version}" + echo "" + elif echo "$upgrade_output" | grep -q "Error:"; then + log_error "Homebrew upgrade failed" + echo "$upgrade_output" | grep "Error:" >&2 + return 1 + else + echo "$upgrade_output" | grep -Ev "^(==>|Updating Homebrew|Warning:)" || true + local new_version + new_version=$(brew list --versions mole 2> /dev/null | awk '{print $2}') + [[ -z "$new_version" ]] && new_version=$(mo --version 2> /dev/null | awk '/Mole version/ {print $3; exit}') + echo "" + echo -e "${GREEN}${ICON_SUCCESS}${NC} Updated to latest version, ${new_version:-$current_version}" + echo "" + fi + + # Clear update cache (suppress errors if cache doesn't exist or is locked) + rm -f "$HOME/.cache/mole/version_check" "$HOME/.cache/mole/update_message" 2> /dev/null || true +} + +# Remove applications from Dock +remove_apps_from_dock() { + if [[ $# -eq 0 ]]; then + return 0 + fi + + local -a targets=() + for arg in "$@"; do + [[ -n "$arg" ]] && targets+=("$arg") + done + + if [[ ${#targets[@]} -eq 0 ]]; then + return 0 + fi + + # Use pure shell (PlistBuddy) to remove items from Dock + # This avoids dependencies on Python 3 or osascript (AppleScript) + local plist="$HOME/Library/Preferences/com.apple.dock.plist" + [[ -f "$plist" ]] || return 0 + + # PlistBuddy is at /usr/libexec/PlistBuddy on macOS + [[ -x /usr/libexec/PlistBuddy ]] || return 0 + + local changed=false + for target in "${targets[@]}"; do + local app_path="$target" + local full_path="" + + if [[ "$app_path" =~ [[:cntrl:]] ]]; then + debug_log "Skipping dock removal for path with control chars: $app_path" + continue + fi + + if [[ -e "$app_path" ]]; then + if full_path=$(cd "$(dirname "$app_path")" 2> /dev/null && pwd); then + full_path="$full_path/$(basename "$app_path")" + else + continue + fi + else + case "$app_path" in + ~/*) full_path="$HOME/${app_path#~/}" ;; + /*) full_path="$app_path" ;; + *) continue ;; + esac + fi + + [[ -z "$full_path" ]] && continue + + local encoded_path="${full_path// /%20}" + + # Find the index of the app in persistent-apps + local i=0 + while true; do + local label + label=$(/usr/libexec/PlistBuddy -c "Print :persistent-apps:$i:tile-data:file-label" "$plist" 2> /dev/null || echo "") + [[ -z "$label" ]] && break + + local url + url=$(/usr/libexec/PlistBuddy -c "Print :persistent-apps:$i:tile-data:file-data:_CFURLString" "$plist" 2> /dev/null || echo "") + [[ -z "$url" ]] && { + i=$((i + 1)) + continue + } + + # Match by URL-encoded path to handle spaces in app names + if [[ -n "$encoded_path" && "$url" == *"$encoded_path"* ]]; then + if /usr/libexec/PlistBuddy -c "Delete :persistent-apps:$i" "$plist" 2> /dev/null; then + changed=true + # After deletion, current index i now points to the next item + continue + fi + fi + i=$((i + 1)) + done + done + + if [[ "$changed" == "true" ]]; then + # Restart Dock to apply changes from the plist + killall Dock 2> /dev/null || true + fi +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/file_ops.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/file_ops.sh new file mode 100644 index 0000000..5c41618 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/file_ops.sh @@ -0,0 +1,581 @@ +#!/bin/bash +# Mole - File Operations +# Safe file and directory manipulation with validation + +set -euo pipefail + +# Prevent multiple sourcing +if [[ -n "${MOLE_FILE_OPS_LOADED:-}" ]]; then + return 0 +fi +readonly MOLE_FILE_OPS_LOADED=1 + +# Error codes for removal operations +readonly MOLE_ERR_SIP_PROTECTED=10 +readonly MOLE_ERR_AUTH_FAILED=11 +readonly MOLE_ERR_READONLY_FS=12 + +# Ensure dependencies are loaded +_MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +if [[ -z "${MOLE_BASE_LOADED:-}" ]]; then + # shellcheck source=lib/core/base.sh + source "$_MOLE_CORE_DIR/base.sh" +fi +if [[ -z "${MOLE_LOG_LOADED:-}" ]]; then + # shellcheck source=lib/core/log.sh + source "$_MOLE_CORE_DIR/log.sh" +fi +if [[ -z "${MOLE_TIMEOUT_LOADED:-}" ]]; then + # shellcheck source=lib/core/timeout.sh + source "$_MOLE_CORE_DIR/timeout.sh" +fi + +# ============================================================================ +# Utility Functions +# ============================================================================ + +# Format duration in seconds to human readable string (e.g., "5 days", "2 months") +format_duration_human() { + local seconds="${1:-0}" + [[ ! "$seconds" =~ ^[0-9]+$ ]] && seconds=0 + + local days=$((seconds / 86400)) + + if [[ $days -eq 0 ]]; then + echo "today" + elif [[ $days -eq 1 ]]; then + echo "1 day" + elif [[ $days -lt 7 ]]; then + echo "${days} days" + elif [[ $days -lt 30 ]]; then + local weeks=$((days / 7)) + [[ $weeks -eq 1 ]] && echo "1 week" || echo "${weeks} weeks" + elif [[ $days -lt 365 ]]; then + local months=$((days / 30)) + [[ $months -eq 1 ]] && echo "1 month" || echo "${months} months" + else + local years=$((days / 365)) + [[ $years -eq 1 ]] && echo "1 year" || echo "${years} years" + fi +} + +# ============================================================================ +# Path Validation +# ============================================================================ + +# Validate path for deletion (absolute, no traversal, not system dir) +validate_path_for_deletion() { + local path="$1" + + # Check path is not empty + if [[ -z "$path" ]]; then + log_error "Path validation failed: empty path" + return 1 + fi + + # Check symlink target if path is a symbolic link + if [[ -L "$path" ]]; then + local link_target + link_target=$(readlink "$path" 2> /dev/null) || { + log_error "Cannot read symlink: $path" + return 1 + } + + # Resolve relative symlinks to absolute paths for validation + local resolved_target="$link_target" + if [[ "$link_target" != /* ]]; then + local link_dir + link_dir=$(dirname "$path") + resolved_target=$(cd "$link_dir" 2> /dev/null && cd "$(dirname "$link_target")" 2> /dev/null && pwd)/$(basename "$link_target") || resolved_target="" + fi + + # Validate resolved target against protected paths + if [[ -n "$resolved_target" ]]; then + case "$resolved_target" in + /System/* | /usr/bin/* | /usr/lib/* | /bin/* | /sbin/* | /private/etc/*) + log_error "Symlink points to protected system path: $path -> $resolved_target" + return 1 + ;; + esac + fi + fi + + # Check path is absolute + if [[ "$path" != /* ]]; then + log_error "Path validation failed: path must be absolute: $path" + return 1 + fi + + # Check for path traversal attempts + # Only reject .. when it appears as a complete path component (/../ or /.. or ../) + # This allows legitimate directory names containing .. (e.g., Firefox's "name..files") + if [[ "$path" =~ (^|/)\.\.(\/|$) ]]; then + log_error "Path validation failed: path traversal not allowed: $path" + return 1 + fi + + # Check path doesn't contain dangerous characters + if [[ "$path" =~ [[:cntrl:]] ]] || [[ "$path" =~ $'\n' ]]; then + log_error "Path validation failed: contains control characters: $path" + return 1 + fi + + # Allow deletion of coresymbolicationd cache (safe system cache that can be rebuilt) + case "$path" in + /System/Library/Caches/com.apple.coresymbolicationd/data | /System/Library/Caches/com.apple.coresymbolicationd/data/*) + return 0 + ;; + esac + + # Allow known safe paths under /private + case "$path" in + /private/tmp | /private/tmp/* | \ + /private/var/tmp | /private/var/tmp/* | \ + /private/var/log | /private/var/log/* | \ + /private/var/folders | /private/var/folders/* | \ + /private/var/db/diagnostics | /private/var/db/diagnostics/* | \ + /private/var/db/DiagnosticPipeline | /private/var/db/DiagnosticPipeline/* | \ + /private/var/db/powerlog | /private/var/db/powerlog/* | \ + /private/var/db/reportmemoryexception | /private/var/db/reportmemoryexception/* | \ + /private/var/db/receipts/*.bom | /private/var/db/receipts/*.plist) + return 0 + ;; + esac + + # Check path isn't critical system directory + case "$path" in + / | /bin | /bin/* | /sbin | /sbin/* | /usr | /usr/bin | /usr/bin/* | /usr/sbin | /usr/sbin/* | /usr/lib | /usr/lib/* | /System | /System/* | /Library/Extensions) + log_error "Path validation failed: critical system directory: $path" + return 1 + ;; + /private) + log_error "Path validation failed: critical system directory: $path" + return 1 + ;; + /etc | /etc/* | /private/etc | /private/etc/*) + log_error "Path validation failed: /etc contains critical system files: $path" + return 1 + ;; + /var | /var/db | /var/db/* | /private/var | /private/var/db | /private/var/db/*) + log_error "Path validation failed: /var/db contains system databases: $path" + return 1 + ;; + esac + + # Check if path is protected (keychains, system settings, etc) + if declare -f should_protect_path > /dev/null 2>&1; then + if should_protect_path "$path"; then + if [[ "${MO_DEBUG:-0}" == "1" ]]; then + log_warning "Path validation: protected path skipped: $path" + fi + return 1 + fi + fi + + return 0 +} + +# ============================================================================ +# Safe Removal Operations +# ============================================================================ + +# Safe wrapper around rm -rf with validation +safe_remove() { + local path="$1" + local silent="${2:-false}" + + # Validate path + if ! validate_path_for_deletion "$path"; then + return 1 + fi + + # Check if path exists + if [[ ! -e "$path" ]]; then + return 0 + fi + + # Dry-run mode: log but don't delete + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + if [[ "${MO_DEBUG:-}" == "1" ]]; then + local file_type="file" + [[ -d "$path" ]] && file_type="directory" + [[ -L "$path" ]] && file_type="symlink" + + local file_size="" + local file_age="" + + if [[ -e "$path" ]]; then + local size_kb + size_kb=$(get_path_size_kb "$path" 2> /dev/null || echo "0") + if [[ "$size_kb" -gt 0 ]]; then + file_size=$(bytes_to_human "$((size_kb * 1024))") + fi + + if [[ -f "$path" || -d "$path" ]] && ! [[ -L "$path" ]]; then + local mod_time + mod_time=$(stat -f%m "$path" 2> /dev/null || echo "0") + local now + now=$(date +%s 2> /dev/null || echo "0") + if [[ "$mod_time" -gt 0 && "$now" -gt 0 ]]; then + file_age=$(((now - mod_time) / 86400)) + fi + fi + fi + + debug_file_action "[DRY RUN] Would remove" "$path" "$file_size" "$file_age" + else + debug_log "[DRY RUN] Would remove: $path" + fi + return 0 + fi + + debug_log "Removing: $path" + + # Calculate size before deletion for logging + local size_kb=0 + local size_human="" + if oplog_enabled; then + if [[ -e "$path" ]]; then + size_kb=$(get_path_size_kb "$path" 2> /dev/null || echo "0") + if [[ "$size_kb" =~ ^[0-9]+$ ]] && [[ "$size_kb" -gt 0 ]]; then + size_human=$(bytes_to_human "$((size_kb * 1024))" 2> /dev/null || echo "${size_kb}KB") + fi + fi + fi + + # Perform the deletion + # Use || to capture the exit code so set -e won't abort on rm failures + local error_msg + local rm_exit=0 + error_msg=$(rm -rf "$path" 2>&1) || rm_exit=$? # safe_remove + + # Preserve interrupt semantics so callers can abort long-running deletions. + if [[ $rm_exit -ge 128 ]]; then + return "$rm_exit" + fi + + if [[ $rm_exit -eq 0 ]]; then + # Log successful removal + log_operation "${MOLE_CURRENT_COMMAND:-clean}" "REMOVED" "$path" "$size_human" + return 0 + else + # Check if it's a permission error + if [[ "$error_msg" == *"Permission denied"* ]] || [[ "$error_msg" == *"Operation not permitted"* ]]; then + MOLE_PERMISSION_DENIED_COUNT=${MOLE_PERMISSION_DENIED_COUNT:-0} + MOLE_PERMISSION_DENIED_COUNT=$((MOLE_PERMISSION_DENIED_COUNT + 1)) + export MOLE_PERMISSION_DENIED_COUNT + debug_log "Permission denied: $path, may need Full Disk Access" + log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "permission denied" + else + [[ "$silent" != "true" ]] && log_error "Failed to remove: $path" + log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "error" + fi + return 1 + fi +} + +# Safe symlink removal (for pre-validated symlinks only) +safe_remove_symlink() { + local path="$1" + local use_sudo="${2:-false}" + + if [[ ! -L "$path" ]]; then + return 1 + fi + + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + debug_log "[DRY RUN] Would remove symlink: $path" + return 0 + fi + + local rm_exit=0 + if [[ "$use_sudo" == "true" ]]; then + sudo rm "$path" 2> /dev/null || rm_exit=$? + else + rm "$path" 2> /dev/null || rm_exit=$? + fi + + if [[ $rm_exit -eq 0 ]]; then + log_operation "${MOLE_CURRENT_COMMAND:-clean}" "REMOVED" "$path" "symlink" + return 0 + else + log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "symlink removal failed" + return 1 + fi +} + +# Safe sudo removal with symlink protection +safe_sudo_remove() { + local path="$1" + + if ! validate_path_for_deletion "$path"; then + log_error "Path validation failed for sudo remove: $path" + return 1 + fi + + if [[ ! -e "$path" ]]; then + return 0 + fi + + if [[ -L "$path" ]]; then + log_error "Refusing to sudo remove symlink: $path" + return 1 + fi + + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + if [[ "${MO_DEBUG:-}" == "1" ]]; then + local file_type="file" + [[ -d "$path" ]] && file_type="directory" + + local file_size="" + local file_age="" + + if sudo test -e "$path" 2> /dev/null; then + local size_kb + size_kb=$(sudo du -skP "$path" 2> /dev/null | awk '{print $1}' || echo "0") + if [[ "$size_kb" -gt 0 ]]; then + file_size=$(bytes_to_human "$((size_kb * 1024))") + fi + + if sudo test -f "$path" 2> /dev/null || sudo test -d "$path" 2> /dev/null; then + local mod_time + mod_time=$(sudo stat -f%m "$path" 2> /dev/null || echo "0") + local now + now=$(date +%s 2> /dev/null || echo "0") + if [[ "$mod_time" -gt 0 && "$now" -gt 0 ]]; then + local age_seconds=$((now - mod_time)) + file_age=$(format_duration_human "$age_seconds") + fi + fi + fi + + log_info "[DRY-RUN] Would sudo remove: $file_type $path" + [[ -n "$file_size" ]] && log_info " Size: $file_size" + [[ -n "$file_age" ]] && log_info " Age: $file_age" + else + log_info "[DRY-RUN] Would sudo remove: $path" + fi + return 0 + fi + + local size_kb=0 + local size_human="" + if oplog_enabled; then + if sudo test -e "$path" 2> /dev/null; then + size_kb=$(sudo du -skP "$path" 2> /dev/null | awk '{print $1}' || echo "0") + if [[ "$size_kb" =~ ^[0-9]+$ ]] && [[ "$size_kb" -gt 0 ]]; then + size_human=$(bytes_to_human "$((size_kb * 1024))" 2> /dev/null || echo "${size_kb}KB") + fi + fi + fi + + local output + local ret=0 + output=$(sudo rm -rf "$path" 2>&1) || ret=$? # safe_remove + + if [[ $ret -eq 0 ]]; then + log_operation "${MOLE_CURRENT_COMMAND:-clean}" "REMOVED" "$path" "$size_human" + return 0 + fi + + case "$output" in + *"Operation not permitted"*) + log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "sip/mdm protected" + return "$MOLE_ERR_SIP_PROTECTED" + ;; + *"Read-only file system"*) + log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "readonly filesystem" + return "$MOLE_ERR_READONLY_FS" + ;; + *"Sorry, try again"* | *"incorrect passphrase"* | *"incorrect credentials"*) + log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "auth failed" + return "$MOLE_ERR_AUTH_FAILED" + ;; + *) + log_error "Failed to remove, sudo: $path" + log_operation "${MOLE_CURRENT_COMMAND:-clean}" "FAILED" "$path" "sudo error" + return 1 + ;; + esac +} + +# ============================================================================ +# Safe Find and Delete Operations +# ============================================================================ + +# Safe file discovery and deletion with depth and age limits +safe_find_delete() { + local base_dir="$1" + local pattern="$2" + local age_days="${3:-7}" + local type_filter="${4:-f}" + + # Validate base directory exists and is not a symlink + if [[ ! -d "$base_dir" ]]; then + log_error "Directory does not exist: $base_dir" + return 1 + fi + + if [[ -L "$base_dir" ]]; then + log_error "Refusing to search symlinked directory: $base_dir" + return 1 + fi + + # Validate type filter + if [[ "$type_filter" != "f" && "$type_filter" != "d" ]]; then + log_error "Invalid type filter: $type_filter, must be 'f' or 'd'" + return 1 + fi + + debug_log "Finding in $base_dir: $pattern, age: ${age_days}d, type: $type_filter" + + local find_args=("-maxdepth" "5" "-name" "$pattern" "-type" "$type_filter") + if [[ "$age_days" -gt 0 ]]; then + find_args+=("-mtime" "+$age_days") + fi + + # Iterate results to respect should_protect_path + while IFS= read -r -d '' match; do + if should_protect_path "$match"; then + continue + fi + safe_remove "$match" true || true + done < <(command find "$base_dir" "${find_args[@]}" -print0 2> /dev/null || true) + + return 0 +} + +# Safe sudo discovery and deletion +safe_sudo_find_delete() { + local base_dir="$1" + local pattern="$2" + local age_days="${3:-7}" + local type_filter="${4:-f}" + + # Validate base directory (use sudo for permission-restricted dirs) + if ! sudo test -d "$base_dir" 2> /dev/null; then + debug_log "Directory does not exist, skipping: $base_dir" + return 0 + fi + + if sudo test -L "$base_dir" 2> /dev/null; then + log_error "Refusing to search symlinked directory: $base_dir" + return 1 + fi + + # Validate type filter + if [[ "$type_filter" != "f" && "$type_filter" != "d" ]]; then + log_error "Invalid type filter: $type_filter, must be 'f' or 'd'" + return 1 + fi + + debug_log "Finding, sudo, in $base_dir: $pattern, age: ${age_days}d, type: $type_filter" + + local find_args=("-maxdepth" "5") + # Skip -name if pattern is "*" (matches everything anyway, but adds overhead) + if [[ "$pattern" != "*" ]]; then + find_args+=("-name" "$pattern") + fi + find_args+=("-type" "$type_filter") + if [[ "$age_days" -gt 0 ]]; then + find_args+=("-mtime" "+$age_days") + fi + + # Iterate results to respect should_protect_path + while IFS= read -r -d '' match; do + if should_protect_path "$match"; then + continue + fi + safe_sudo_remove "$match" || true + done < <(sudo find "$base_dir" "${find_args[@]}" -print0 2> /dev/null || true) + + return 0 +} + +# ============================================================================ +# Size Calculation +# ============================================================================ + +# Get path size in KB (returns 0 if not found) +get_path_size_kb() { + local path="$1" + [[ -z "$path" || ! -e "$path" ]] && { + echo "0" + return + } + + # For .app bundles, prefer mdls logical size as it matches Finder + # (APFS clone/sparse files make 'du' severely underreport apps like Xcode) + if [[ "$path" == *.app || "$path" == *.app/ ]]; then + local mdls_size + mdls_size=$(mdls -name kMDItemLogicalSize -raw "$path" 2> /dev/null || true) + if [[ "$mdls_size" =~ ^[0-9]+$ && "$mdls_size" -gt 0 ]]; then + # Return in KB + echo "$((mdls_size / 1024))" + return + fi + fi + + local size + size=$(command du -skP "$path" 2> /dev/null | awk 'NR==1 {print $1; exit}' || true) + + if [[ "$size" =~ ^[0-9]+$ ]]; then + echo "$size" + else + [[ "${MO_DEBUG:-}" == "1" ]] && debug_log "get_path_size_kb: Failed to get size for $path (returned: $size)" + echo "0" + fi +} + +# Calculate total size for multiple paths +calculate_total_size() { + local files="$1" + local total_kb=0 + + while IFS= read -r file; do + if [[ -n "$file" && -e "$file" ]]; then + local size_kb + size_kb=$(get_path_size_kb "$file") + total_kb=$((total_kb + size_kb)) + fi + done <<< "$files" + + echo "$total_kb" +} + +diagnose_removal_failure() { + local exit_code="$1" + local app_name="${2:-application}" + + local reason="" + local suggestion="" + local touchid_file="/etc/pam.d/sudo" + + case "$exit_code" in + "$MOLE_ERR_SIP_PROTECTED") + reason="protected by macOS (SIP/MDM)" + ;; + "$MOLE_ERR_AUTH_FAILED") + reason="authentication failed" + if [[ -f "$touchid_file" ]] && grep -q "pam_tid.so" "$touchid_file" 2> /dev/null; then + suggestion="Check your credentials or restart Terminal" + else + suggestion="Try 'mole touchid' to enable fingerprint auth" + fi + ;; + "$MOLE_ERR_READONLY_FS") + reason="filesystem is read-only" + suggestion="Check if disk needs repair" + ;; + *) + reason="permission denied" + if [[ -f "$touchid_file" ]] && grep -q "pam_tid.so" "$touchid_file" 2> /dev/null; then + suggestion="Try running again or check file ownership" + else + suggestion="Try 'mole touchid' or check with 'ls -l'" + fi + ;; + esac + + echo "$reason|$suggestion" +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/help.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/help.sh new file mode 100644 index 0000000..6deb945 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/help.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +show_clean_help() { + echo "Usage: mo clean [OPTIONS]" + echo "" + echo "Clean up disk space by removing caches, logs, and temporary files." + echo "" + echo "Options:" + echo " --dry-run, -n Preview cleanup without making changes" + echo " --whitelist Manage protected paths" + echo " --debug Show detailed operation logs" + echo " -h, --help Show this help message" +} + +show_installer_help() { + echo "Usage: mo installer [OPTIONS]" + echo "" + echo "Find and remove installer files (.dmg, .pkg, .iso, .xip, .zip)." + echo "" + echo "Options:" + echo " --dry-run Preview installer cleanup without making changes" + echo " --debug Show detailed operation logs" + echo " -h, --help Show this help message" +} + +show_optimize_help() { + echo "Usage: mo optimize [OPTIONS]" + echo "" + echo "Check and maintain system health, apply optimizations." + echo "" + echo "Options:" + echo " --dry-run Preview optimization without making changes" + echo " --whitelist Manage protected items" + echo " --debug Show detailed operation logs" + echo " -h, --help Show this help message" +} + +show_touchid_help() { + echo "Usage: mo touchid [COMMAND]" + echo "" + echo "Configure Touch ID for sudo authentication." + echo "" + echo "Commands:" + echo " enable Enable Touch ID for sudo" + echo " disable Disable Touch ID for sudo" + echo " status Show current Touch ID status" + echo "" + echo "Options:" + echo " --dry-run Preview Touch ID changes without modifying sudo config" + echo " -h, --help Show this help message" + echo "" + echo "If no command is provided, an interactive menu is shown." +} + +show_uninstall_help() { + echo "Usage: mo uninstall [OPTIONS]" + echo "" + echo "Interactively remove applications and their leftover files." + echo "" + echo "Options:" + echo " --dry-run Preview app uninstallation without making changes" + echo " --whitelist Not supported for uninstall (use clean/optimize)" + echo " --debug Show detailed operation logs" + echo " -h, --help Show this help message" +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/log.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/log.sh new file mode 100644 index 0000000..f045160 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/log.sh @@ -0,0 +1,405 @@ +#!/bin/bash +# Mole - Logging System +# Centralized logging with rotation support + +set -euo pipefail + +# Prevent multiple sourcing +if [[ -n "${MOLE_LOG_LOADED:-}" ]]; then + return 0 +fi +readonly MOLE_LOG_LOADED=1 + +# Ensure base.sh is loaded for colors and icons +if [[ -z "${MOLE_BASE_LOADED:-}" ]]; then + _MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + # shellcheck source=lib/core/base.sh + source "$_MOLE_CORE_DIR/base.sh" +fi + +# ============================================================================ +# Logging Configuration +# ============================================================================ + +readonly MOLE_STATE_DIR="${MOLE_STATE_DIR:-${HOME}/.config/mole}" + +readonly LOG_FILE="${MOLE_STATE_DIR}/mole.log" +readonly DEBUG_LOG_FILE="${MOLE_STATE_DIR}/mole_debug_session.log" +readonly OPERATIONS_LOG_FILE="${MOLE_STATE_DIR}/operations.log" +readonly LOG_MAX_SIZE_DEFAULT=1048576 # 1MB +readonly OPLOG_MAX_SIZE_DEFAULT=5242880 # 5MB + +# Ensure log directory and file exist with correct ownership +ensure_user_file "$LOG_FILE" +if [[ "${MO_NO_OPLOG:-}" != "1" ]]; then + ensure_user_file "$OPERATIONS_LOG_FILE" +fi + +# ============================================================================ +# Log Rotation +# ============================================================================ + +# Rotate log file if it exceeds maximum size +rotate_log_once() { + # Skip if already checked this session + [[ -n "${MOLE_LOG_ROTATED:-}" ]] && return 0 + export MOLE_LOG_ROTATED=1 + + local max_size="$LOG_MAX_SIZE_DEFAULT" + if [[ -f "$LOG_FILE" ]]; then + local size + size=$(get_file_size "$LOG_FILE") + if [[ "$size" -gt "$max_size" ]]; then + mv "$LOG_FILE" "${LOG_FILE}.old" 2> /dev/null || true + ensure_user_file "$LOG_FILE" + fi + fi + + # Rotate operations log (5MB limit) + if [[ "${MO_NO_OPLOG:-}" != "1" ]]; then + local oplog_max_size="$OPLOG_MAX_SIZE_DEFAULT" + if [[ -f "$OPERATIONS_LOG_FILE" ]]; then + local size + size=$(get_file_size "$OPERATIONS_LOG_FILE") + if [[ "$size" -gt "$oplog_max_size" ]]; then + mv "$OPERATIONS_LOG_FILE" "${OPERATIONS_LOG_FILE}.old" 2> /dev/null || true + ensure_user_file "$OPERATIONS_LOG_FILE" + fi + fi + fi +} + +# ============================================================================ +# Logging Functions +# ============================================================================ + +# Get current timestamp (centralized for consistency) +get_timestamp() { + date '+%Y-%m-%d %H:%M:%S' +} + +# Log informational message +log_info() { + echo -e "${BLUE}$1${NC}" + local timestamp + timestamp=$(get_timestamp) + echo "[$timestamp] INFO: $1" >> "$LOG_FILE" 2> /dev/null || true + if [[ "${MO_DEBUG:-}" == "1" ]]; then + echo "[$timestamp] INFO: $1" >> "$DEBUG_LOG_FILE" 2> /dev/null || true + fi +} + +# Log success message +log_success() { + echo -e " ${GREEN}${ICON_SUCCESS}${NC} $1" + local timestamp + timestamp=$(get_timestamp) + echo "[$timestamp] SUCCESS: $1" >> "$LOG_FILE" 2> /dev/null || true + if [[ "${MO_DEBUG:-}" == "1" ]]; then + echo "[$timestamp] SUCCESS: $1" >> "$DEBUG_LOG_FILE" 2> /dev/null || true + fi +} + +# shellcheck disable=SC2329 +log_warning() { + echo -e "${YELLOW}$1${NC}" + local timestamp + timestamp=$(get_timestamp) + echo "[$timestamp] WARNING: $1" >> "$LOG_FILE" 2> /dev/null || true + if [[ "${MO_DEBUG:-}" == "1" ]]; then + echo "[$timestamp] WARNING: $1" >> "$DEBUG_LOG_FILE" 2> /dev/null || true + fi +} + +# shellcheck disable=SC2329 +log_error() { + echo -e "${YELLOW}${ICON_ERROR}${NC} $1" >&2 + local timestamp + timestamp=$(get_timestamp) + echo "[$timestamp] ERROR: $1" >> "$LOG_FILE" 2> /dev/null || true + if [[ "${MO_DEBUG:-}" == "1" ]]; then + echo "[$timestamp] ERROR: $1" >> "$DEBUG_LOG_FILE" 2> /dev/null || true + fi +} + +# shellcheck disable=SC2329 +debug_log() { + if [[ "${MO_DEBUG:-}" == "1" ]]; then + echo -e "${GRAY}[DEBUG]${NC} $*" >&2 + local timestamp + timestamp=$(get_timestamp) + echo "[$timestamp] DEBUG: $*" >> "$DEBUG_LOG_FILE" 2> /dev/null || true + fi +} + +# ============================================================================ +# Operation Logging (Enabled by default) +# ============================================================================ +# Records all file operations for user troubleshooting +# Disable with MO_NO_OPLOG=1 + +oplog_enabled() { + [[ "${MO_NO_OPLOG:-}" != "1" ]] +} + +# Log an operation to the operations log file +# Usage: log_operation [detail] +# Example: log_operation "clean" "REMOVED" "/path/to/file" "15.2MB" +# Example: log_operation "clean" "SKIPPED" "/path/to/file" "whitelist" +# Example: log_operation "uninstall" "REMOVED" "/Applications/App.app" "150MB" +log_operation() { + # Allow disabling via environment variable + oplog_enabled || return 0 + + local command="${1:-unknown}" # clean/uninstall/optimize/purge + local action="${2:-UNKNOWN}" # REMOVED/SKIPPED/FAILED/REBUILT + local path="${3:-}" + local detail="${4:-}" + + # Skip if no path provided + [[ -z "$path" ]] && return 0 + + local timestamp + timestamp=$(get_timestamp) + + local log_line="[$timestamp] [$command] $action $path" + [[ -n "$detail" ]] && log_line+=" ($detail)" + + echo "$log_line" >> "$OPERATIONS_LOG_FILE" 2> /dev/null || true +} + +# Log session start marker +# Usage: log_operation_session_start +log_operation_session_start() { + oplog_enabled || return 0 + + local command="${1:-mole}" + local timestamp + timestamp=$(get_timestamp) + + { + echo "" + echo "# ========== $command session started at $timestamp ==========" + } >> "$OPERATIONS_LOG_FILE" 2> /dev/null || true +} + +# shellcheck disable=SC2329 +log_operation_session_end() { + oplog_enabled || return 0 + + local command="${1:-mole}" + local items="${2:-0}" + local size="${3:-0}" + local timestamp + timestamp=$(get_timestamp) + + local size_human="" + if [[ "$size" =~ ^[0-9]+$ ]] && [[ "$size" -gt 0 ]]; then + size_human=$(bytes_to_human "$((size * 1024))" 2> /dev/null || echo "${size}KB") + else + size_human="0B" + fi + + { + echo "# ========== $command session ended at $timestamp, $items items, $size_human ==========" + } >> "$OPERATIONS_LOG_FILE" 2> /dev/null || true +} + +# Enhanced debug logging for operations +debug_operation_start() { + local operation_name="$1" + local operation_desc="${2:-}" + + if [[ "${MO_DEBUG:-}" == "1" ]]; then + # Output to stderr for immediate feedback + echo -e "${GRAY}[DEBUG] === $operation_name ===${NC}" >&2 + [[ -n "$operation_desc" ]] && echo -e "${GRAY}[DEBUG] $operation_desc${NC}" >&2 + + # Also log to file + { + echo "" + echo "=== $operation_name ===" + [[ -n "$operation_desc" ]] && echo "Description: $operation_desc" + } >> "$DEBUG_LOG_FILE" 2> /dev/null || true + fi +} + +# Log detailed operation information +debug_operation_detail() { + local detail_type="$1" # e.g., "Method", "Target", "Expected Outcome" + local detail_value="$2" + + if [[ "${MO_DEBUG:-}" == "1" ]]; then + # Output to stderr + echo -e "${GRAY}[DEBUG] $detail_type: $detail_value${NC}" >&2 + + # Also log to file + echo "$detail_type: $detail_value" >> "$DEBUG_LOG_FILE" 2> /dev/null || true + fi +} + +# Log individual file action with metadata +debug_file_action() { + local action="$1" # e.g., "Would remove", "Removing" + local file_path="$2" + local file_size="${3:-}" + local file_age="${4:-}" + + if [[ "${MO_DEBUG:-}" == "1" ]]; then + local msg=" * $file_path" + [[ -n "$file_size" ]] && msg+=", $file_size" + [[ -n "$file_age" ]] && msg+=", ${file_age} days old" + + # Output to stderr + echo -e "${GRAY}[DEBUG] $action: $msg${NC}" >&2 + + # Also log to file + echo "$action: $msg" >> "$DEBUG_LOG_FILE" 2> /dev/null || true + fi +} + +# Log risk level for operations +debug_risk_level() { + local risk_level="$1" # LOW, MEDIUM, HIGH + local reason="$2" + + if [[ "${MO_DEBUG:-}" == "1" ]]; then + local color="$GRAY" + case "$risk_level" in + LOW) color="$GREEN" ;; + MEDIUM) color="$YELLOW" ;; + HIGH) color="$RED" ;; + esac + + # Output to stderr with color + echo -e "${GRAY}[DEBUG] Risk Level: ${color}${risk_level}${GRAY}, $reason${NC}" >&2 + + # Also log to file + echo "Risk Level: $risk_level, $reason" >> "$DEBUG_LOG_FILE" 2> /dev/null || true + fi +} + +# Log system information for debugging +log_system_info() { + # Only allow once per session + [[ -n "${MOLE_SYS_INFO_LOGGED:-}" ]] && return 0 + export MOLE_SYS_INFO_LOGGED=1 + + # Reset debug log file for this new session + ensure_user_file "$DEBUG_LOG_FILE" + if ! : > "$DEBUG_LOG_FILE" 2> /dev/null; then + echo -e "${YELLOW}${ICON_WARNING}${NC} Debug log not writable: $DEBUG_LOG_FILE" >&2 + fi + + # Start block in debug log file + { + echo "----------------------------------------------------------------------" + echo "Mole Debug Session, $(date '+%Y-%m-%d %H:%M:%S')" + echo "----------------------------------------------------------------------" + echo "User: $USER" + echo "Hostname: $(hostname)" + echo "Architecture: $(uname -m)" + echo "Kernel: $(uname -r)" + if command -v sw_vers > /dev/null; then + echo "macOS: $(sw_vers -productVersion), $(sw_vers -buildVersion)" + fi + echo "Shell: ${SHELL:-unknown}, ${TERM:-unknown}" + + # Check sudo status non-interactively + if sudo -n true 2> /dev/null; then + echo "Sudo Access: Active" + else + echo "Sudo Access: Required" + fi + echo "----------------------------------------------------------------------" + } >> "$DEBUG_LOG_FILE" 2> /dev/null || true + + # Notification to stderr + echo -e "${GRAY}[DEBUG] Debug logging enabled. Session log: $DEBUG_LOG_FILE${NC}" >&2 +} + +# ============================================================================ +# Command Execution Wrappers +# ============================================================================ + +# Run command silently (ignore errors) +run_silent() { + "$@" > /dev/null 2>&1 || true +} + +# Run command with error logging +run_logged() { + local cmd="$1" + # Log to main file, and also to debug file if enabled + if [[ "${MO_DEBUG:-}" == "1" ]]; then + if ! "$@" 2>&1 | tee -a "$LOG_FILE" | tee -a "$DEBUG_LOG_FILE" > /dev/null; then + log_warning "Command failed: $cmd" + return 1 + fi + else + if ! "$@" 2>&1 | tee -a "$LOG_FILE" > /dev/null; then + log_warning "Command failed: $cmd" + return 1 + fi + fi + return 0 +} + +# ============================================================================ +# Formatted Output +# ============================================================================ + +# Print formatted summary block +print_summary_block() { + local heading="" + local -a details=() + local saw_heading=false + + # Parse arguments + for arg in "$@"; do + if [[ "$saw_heading" == "false" ]]; then + saw_heading=true + heading="$arg" + else + details+=("$arg") + fi + done + + local _tw + _tw=$(tput cols 2> /dev/null || echo 70) + [[ "$_tw" =~ ^[0-9]+$ ]] || _tw=70 + [[ $_tw -gt 70 ]] && _tw=70 + local divider + divider=$(printf '%*s' "$_tw" '' | tr ' ' '=') + + # Print with dividers + echo "" + echo "$divider" + if [[ -n "$heading" ]]; then + echo -e "${BLUE}${heading}${NC}" + fi + + # Print details + for detail in "${details[@]}"; do + [[ -z "$detail" ]] && continue + echo -e "${detail}" + done + echo "$divider" + + # If debug mode is on, remind user about the log file location + if [[ "${MO_DEBUG:-}" == "1" ]]; then + echo -e "${GRAY}Debug session log saved to:${NC} ${DEBUG_LOG_FILE}" + fi +} + +# ============================================================================ +# Initialize Logging +# ============================================================================ + +# Perform log rotation check on module load +rotate_log_once + +# If debug mode is enabled, log system info immediately +if [[ "${MO_DEBUG:-}" == "1" ]]; then + log_system_info +fi diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/sudo.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/sudo.sh new file mode 100644 index 0000000..483497d --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/sudo.sh @@ -0,0 +1,346 @@ +#!/bin/bash +# Sudo Session Manager +# Unified sudo authentication and keepalive management + +set -euo pipefail + +# ============================================================================ +# Touch ID and Clamshell Detection +# ============================================================================ + +check_touchid_support() { + # Check sudo_local first (Sonoma+) + if [[ -f /etc/pam.d/sudo_local ]]; then + grep -q "pam_tid.so" /etc/pam.d/sudo_local 2> /dev/null + return $? + fi + + # Fallback to checking sudo directly + if [[ -f /etc/pam.d/sudo ]]; then + grep -q "pam_tid.so" /etc/pam.d/sudo 2> /dev/null + return $? + fi + return 1 +} + +# Detect clamshell mode (lid closed) +is_clamshell_mode() { + # ioreg is missing (not macOS) -> treat as lid open + if ! command -v ioreg > /dev/null 2>&1; then + return 1 + fi + + # Check if lid is closed; ignore pipeline failures so set -e doesn't exit + local clamshell_state="" + clamshell_state=$( (ioreg -r -k AppleClamshellState -d 4 2> /dev/null | + grep "AppleClamshellState" | + head -1) || true) + + if [[ "$clamshell_state" =~ \"AppleClamshellState\"\ =\ Yes ]]; then + return 0 # Lid is closed + fi + return 1 # Lid is open +} + +_request_password() { + local tty_path="$1" + local attempts=0 + local show_hint=true + + # Extra safety: ensure sudo cache is cleared before password input + sudo -k 2> /dev/null + + # Save original terminal settings and ensure they're restored on exit + local stty_orig + stty_orig=$(stty -g < "$tty_path" 2> /dev/null || echo "") + trap '[[ -n "${stty_orig:-}" ]] && stty "${stty_orig:-}" < "$tty_path" 2> /dev/null || true' RETURN + + while ((attempts < 3)); do + local password="" + + # Show hint on first attempt about Touch ID appearing again + if [[ $show_hint == true ]] && check_touchid_support; then + echo -e "${GRAY}Note: Touch ID dialog may appear once more, just cancel it${NC}" > "$tty_path" + show_hint=false + fi + + printf "${PURPLE}${ICON_ARROW}${NC} Password: " > "$tty_path" + + # Disable terminal echo to hide password input (keep canonical mode for reliable input) + stty -echo < "$tty_path" 2> /dev/null || true + IFS= read -r password < "$tty_path" || password="" + # Restore terminal echo immediately + stty echo < "$tty_path" 2> /dev/null || true + + printf "\n" > "$tty_path" + + if [[ -z "$password" ]]; then + unset password + attempts=$((attempts + 1)) + if [[ $attempts -lt 3 ]]; then + echo -e "${GRAY}${ICON_WARNING}${NC} Password cannot be empty" > "$tty_path" + fi + continue + fi + + # Verify password with sudo + # NOTE: macOS PAM will trigger Touch ID before password auth - this is system behavior + if printf '%s\n' "$password" | sudo -S -p "" -v > /dev/null 2>&1; then + unset password + return 0 + fi + + unset password + attempts=$((attempts + 1)) + if [[ $attempts -lt 3 ]]; then + echo -e "${GRAY}${ICON_WARNING}${NC} Incorrect password, try again" > "$tty_path" + fi + done + + return 1 +} + +request_sudo_access() { + local prompt_msg="${1:-Admin access required}" + + # Check if already have sudo access + if sudo -n true 2> /dev/null; then + return 0 + fi + + # Detect if running in TTY environment + local tty_path="/dev/tty" + local is_gui_mode=false + + if [[ ! -r "$tty_path" || ! -w "$tty_path" ]]; then + tty_path=$(tty 2> /dev/null || echo "") + if [[ -z "$tty_path" || ! -r "$tty_path" || ! -w "$tty_path" ]]; then + is_gui_mode=true + fi + fi + + # GUI mode: use osascript for password dialog + if [[ "$is_gui_mode" == true ]]; then + # Clear sudo cache before attempting authentication + sudo -k 2> /dev/null + + # Display native macOS password dialog + local password + password=$(osascript -e "display dialog \"$prompt_msg\" default answer \"\" with title \"Mole\" with icon caution with hidden answer" -e 'text returned of result' 2> /dev/null) + + if [[ -z "$password" ]]; then + # User cancelled the dialog + unset password + return 1 + fi + + # Attempt sudo authentication with the provided password + if printf '%s\n' "$password" | sudo -S -p "" -v > /dev/null 2>&1; then + unset password + return 0 + fi + + # Password was incorrect + unset password + return 1 + fi + + sudo -k + + # Check if in clamshell mode - if yes, skip Touch ID entirely + if is_clamshell_mode; then + echo -e "${PURPLE}${ICON_ARROW}${NC} ${prompt_msg}" + if _request_password "$tty_path"; then + # Clear all prompt lines (use safe clearing method) + safe_clear_lines 3 "$tty_path" + return 0 + fi + return 1 + fi + + # Not in clamshell mode - try Touch ID if configured + if ! check_touchid_support; then + echo -e "${PURPLE}${ICON_ARROW}${NC} ${prompt_msg}" + if _request_password "$tty_path"; then + # Clear all prompt lines (use safe clearing method) + safe_clear_lines 3 "$tty_path" + return 0 + fi + return 1 + fi + + # Touch ID is available and not in clamshell mode + echo -e "${PURPLE}${ICON_ARROW}${NC} ${prompt_msg} ${GRAY}, Touch ID or password${NC}" + + # Start sudo in background so we can monitor and control it + sudo -v < /dev/null > /dev/null 2>&1 & + local sudo_pid=$! + + # Wait for sudo to complete or timeout (5 seconds) + local elapsed=0 + local timeout=50 # 50 * 0.1s = 5 seconds + while ((elapsed < timeout)); do + if ! kill -0 "$sudo_pid" 2> /dev/null; then + # Process exited + wait "$sudo_pid" 2> /dev/null + local exit_code=$? + if [[ $exit_code -eq 0 ]] && sudo -n true 2> /dev/null; then + # Touch ID succeeded - clear the prompt line + safe_clear_lines 1 "$tty_path" + return 0 + fi + # Touch ID failed or cancelled + break + fi + sleep 0.1 + elapsed=$((elapsed + 1)) + done + + # Touch ID failed/cancelled - clean up thoroughly before password input + + # Kill the sudo process if still running + if kill -0 "$sudo_pid" 2> /dev/null; then + kill -9 "$sudo_pid" 2> /dev/null + wait "$sudo_pid" 2> /dev/null || true + fi + + # Clear sudo state immediately + sudo -k 2> /dev/null + + # IMPORTANT: Wait longer for macOS to fully close Touch ID UI and SecurityAgent + # Without this delay, subsequent sudo calls may re-trigger Touch ID + sleep 1 + + # Clear any leftover prompts on the screen + safe_clear_line "$tty_path" + + # Now use our password input (this should not trigger Touch ID again) + if _request_password "$tty_path"; then + # Clear all prompt lines (use safe clearing method) + safe_clear_lines 3 "$tty_path" + return 0 + fi + return 1 +} + +# ============================================================================ +# Sudo Session Management +# ============================================================================ + +# Global state +MOLE_SUDO_KEEPALIVE_PID="" +MOLE_SUDO_ESTABLISHED="false" + +# Start sudo keepalive +_start_sudo_keepalive() { + # Start background keepalive process with all outputs redirected + # This is critical: command substitution waits for all file descriptors to close + ( + # Initial delay to let sudo cache stabilize after password entry + # This prevents immediately triggering Touch ID again + sleep 2 + + local retry_count=0 + while true; do + if ! sudo -n -v 2> /dev/null; then + retry_count=$((retry_count + 1)) + if [[ $retry_count -ge 3 ]]; then + exit 1 + fi + sleep 5 + continue + fi + retry_count=0 + sleep 30 + kill -0 "$$" 2> /dev/null || exit + done + ) > /dev/null 2>&1 & + + local pid=$! + echo $pid +} + +# Stop sudo keepalive +_stop_sudo_keepalive() { + local pid="${1:-}" + if [[ -n "$pid" ]]; then + kill "$pid" 2> /dev/null || true + wait "$pid" 2> /dev/null || true + fi +} + +# Check if sudo session is active +has_sudo_session() { + sudo -n true 2> /dev/null +} + +# Request administrative access +request_sudo() { + local prompt_msg="${1:-Admin access required}" + + if has_sudo_session; then + return 0 + fi + + # Use the robust implementation from common.sh + if request_sudo_access "$prompt_msg"; then + return 0 + else + return 1 + fi +} + +# Maintain active sudo session with keepalive +ensure_sudo_session() { + local prompt="${1:-Admin access required}" + + # Check if already established + if has_sudo_session && [[ "$MOLE_SUDO_ESTABLISHED" == "true" ]]; then + return 0 + fi + + # Stop old keepalive if exists + if [[ -n "$MOLE_SUDO_KEEPALIVE_PID" ]]; then + _stop_sudo_keepalive "$MOLE_SUDO_KEEPALIVE_PID" + MOLE_SUDO_KEEPALIVE_PID="" + fi + + # Request sudo access + if ! request_sudo "$prompt"; then + MOLE_SUDO_ESTABLISHED="false" + return 1 + fi + + # Start keepalive + MOLE_SUDO_KEEPALIVE_PID=$(_start_sudo_keepalive) + + MOLE_SUDO_ESTABLISHED="true" + return 0 +} + +# Stop sudo session and cleanup +stop_sudo_session() { + if [[ -n "$MOLE_SUDO_KEEPALIVE_PID" ]]; then + _stop_sudo_keepalive "$MOLE_SUDO_KEEPALIVE_PID" + MOLE_SUDO_KEEPALIVE_PID="" + fi + MOLE_SUDO_ESTABLISHED="false" +} + +# Register cleanup on script exit +register_sudo_cleanup() { + trap stop_sudo_session EXIT INT TERM +} + +# Predict if operation requires administrative access +will_need_sudo() { + local -a operations=("$@") + for op in "${operations[@]}"; do + case "$op" in + system_update | appstore_update | macos_update | firewall | touchid | rosetta | system_fix) + return 0 + ;; + esac + done + return 1 +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/timeout.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/timeout.sh new file mode 100644 index 0000000..bcb3c9d --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/timeout.sh @@ -0,0 +1,230 @@ +#!/bin/bash +# Mole - Timeout Control +# Command execution with timeout support + +set -euo pipefail + +# Prevent multiple sourcing +if [[ -n "${MOLE_TIMEOUT_LOADED:-}" ]]; then + return 0 +fi +readonly MOLE_TIMEOUT_LOADED=1 + +# ============================================================================ +# Timeout Command Initialization +# ============================================================================ + +# Initialize timeout command (prefer gtimeout from coreutils, fallback to timeout) +# Sets MO_TIMEOUT_BIN to the available timeout command +# +# Recommendation: Install coreutils for reliable timeout support +# brew install coreutils +# +# Fallback order: +# 1. gtimeout / timeout +# 2. perl helper with dedicated process group cleanup +# 3. shell-based fallback (last resort) +# +# The shell-based fallback has known limitations: +# - May not clean up all child processes +# - Has race conditions in edge cases +# - Less reliable than native timeout/perl helper +if [[ -z "${MO_TIMEOUT_INITIALIZED:-}" ]]; then + MO_TIMEOUT_BIN="" + MO_TIMEOUT_PERL_BIN="" + for candidate in gtimeout timeout; do + if command -v "$candidate" > /dev/null 2>&1; then + MO_TIMEOUT_BIN="$candidate" + if [[ "${MO_DEBUG:-0}" == "1" ]]; then + echo "[TIMEOUT] Using command: $candidate" >&2 + fi + break + fi + done + + if [[ -z "$MO_TIMEOUT_BIN" ]] && command -v perl > /dev/null 2>&1; then + MO_TIMEOUT_PERL_BIN="$(command -v perl)" + if [[ "${MO_DEBUG:-0}" == "1" ]]; then + echo "[TIMEOUT] Using perl fallback: $MO_TIMEOUT_PERL_BIN" >&2 + fi + fi + + # Log warning if no timeout command available + if [[ -z "$MO_TIMEOUT_BIN" && -z "$MO_TIMEOUT_PERL_BIN" ]] && [[ "${MO_DEBUG:-0}" == "1" ]]; then + echo "[TIMEOUT] No timeout command found, using shell fallback" >&2 + echo "[TIMEOUT] Install coreutils for better reliability: brew install coreutils" >&2 + fi + + export MO_TIMEOUT_INITIALIZED=1 +fi + +# ============================================================================ +# Timeout Execution +# ============================================================================ + +# Run command with timeout +# Uses gtimeout/timeout if available, falls back to shell-based implementation +# +# Args: +# $1 - duration in seconds (0 or invalid = no timeout) +# $@ - command and arguments to execute +# +# Returns: +# Command exit code, or 124 if timed out (matches gtimeout behavior) +# +# Environment: +# MO_DEBUG - Set to 1 to enable debug logging to stderr +# +# Implementation notes: +# - Prefers gtimeout (coreutils) or timeout for reliability +# - Shell fallback uses SIGTERM → SIGKILL escalation +# - Attempts process group cleanup to handle child processes +# - Returns exit code 124 on timeout (standard timeout exit code) +# +# Known limitations of shell-based fallback: +# - Race condition: If command exits during signal delivery, the signal +# may target a reused PID (very rare, requires quick PID reuse) +# - Zombie processes: Brief zombies until wait completes +# - Nested children: SIGKILL may not reach all descendants +# - No process group: Cannot guarantee cleanup of detached children +# +# For mission-critical timeouts, install coreutils. +run_with_timeout() { + local duration="${1:-0}" + shift || true + + # No timeout if duration is invalid or zero + if [[ ! "$duration" =~ ^[0-9]+(\.[0-9]+)?$ ]] || [[ $(echo "$duration <= 0" | bc -l 2> /dev/null) -eq 1 ]]; then + "$@" + return $? + fi + + # Use timeout command if available (preferred path) + if [[ -n "${MO_TIMEOUT_BIN:-}" ]]; then + if [[ "${MO_DEBUG:-0}" == "1" ]]; then + echo "[TIMEOUT] Running with ${duration}s timeout: $*" >&2 + fi + "$MO_TIMEOUT_BIN" "$duration" "$@" + return $? + fi + + # Use perl helper when timeout command is unavailable. + if [[ -n "${MO_TIMEOUT_PERL_BIN:-}" ]]; then + if [[ "${MO_DEBUG:-0}" == "1" ]]; then + echo "[TIMEOUT] Perl fallback, ${duration}s: $*" >&2 + fi + # shellcheck disable=SC2016 # Embedded Perl uses Perl variables inside single quotes. + "$MO_TIMEOUT_PERL_BIN" -e ' + use strict; + use warnings; + use POSIX qw(:sys_wait_h setsid); + use Time::HiRes qw(time sleep); + + my $duration = 0 + shift @ARGV; + $duration = 1 if $duration <= 0; + + my $pid = fork(); + defined $pid or exit 125; + + if ($pid == 0) { + setsid() or exit 125; + exec @ARGV; + exit 127; + } + + my $deadline = time() + $duration; + + while (1) { + my $result = waitpid($pid, WNOHANG); + if ($result == $pid) { + if (WIFEXITED($?)) { + exit WEXITSTATUS($?); + } + if (WIFSIGNALED($?)) { + exit 128 + WTERMSIG($?); + } + exit 1; + } + + if (time() >= $deadline) { + kill "TERM", -$pid; + sleep 0.5; + + for (1 .. 6) { + $result = waitpid($pid, WNOHANG); + if ($result == $pid) { + exit 124; + } + sleep 0.25; + } + + kill "KILL", -$pid; + waitpid($pid, 0); + exit 124; + } + + sleep 0.1; + } + ' "$duration" "$@" + return $? + fi + + # ======================================================================== + # Shell-based fallback implementation + # ======================================================================== + + if [[ "${MO_DEBUG:-0}" == "1" ]]; then + echo "[TIMEOUT] Shell fallback, ${duration}s: $*" >&2 + fi + + # Start command in background + "$@" & + local cmd_pid=$! + + # Start timeout killer in background + ( + # Wait for timeout duration + sleep "$duration" + + # Check if process still exists + if kill -0 "$cmd_pid" 2> /dev/null; then + # Try to kill process group first (negative PID), fallback to single process + # Process group kill is best effort - may not work if setsid was used + kill -TERM -"$cmd_pid" 2> /dev/null || kill -TERM "$cmd_pid" 2> /dev/null || true + + # Grace period for clean shutdown + sleep 2 + + # Escalate to SIGKILL if still alive + if kill -0 "$cmd_pid" 2> /dev/null; then + kill -KILL -"$cmd_pid" 2> /dev/null || kill -KILL "$cmd_pid" 2> /dev/null || true + fi + fi + ) & + local killer_pid=$! + + # Wait for command to complete + local exit_code=0 + set +e + wait "$cmd_pid" 2> /dev/null + exit_code=$? + set -e + + # Clean up killer process + if kill -0 "$killer_pid" 2> /dev/null; then + kill "$killer_pid" 2> /dev/null || true + wait "$killer_pid" 2> /dev/null || true + fi + + # Check if command was killed by timeout (exit codes 143=SIGTERM, 137=SIGKILL) + if [[ $exit_code -eq 143 || $exit_code -eq 137 ]]; then + # Command was killed by timeout + if [[ "${MO_DEBUG:-0}" == "1" ]]; then + echo "[TIMEOUT] Command timed out after ${duration}s" >&2 + fi + return 124 + fi + + # Command completed normally (or with its own error) + return "$exit_code" +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/ui.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/ui.sh new file mode 100755 index 0000000..421d29a --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/core/ui.sh @@ -0,0 +1,490 @@ +#!/bin/bash +# Mole - UI Components +# Terminal UI utilities: cursor control, keyboard input, spinners, menus + +set -euo pipefail + +if [[ -n "${MOLE_UI_LOADED:-}" ]]; then + return 0 +fi +readonly MOLE_UI_LOADED=1 + +_MOLE_CORE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +[[ -z "${MOLE_BASE_LOADED:-}" ]] && source "$_MOLE_CORE_DIR/base.sh" + +# Cursor control +clear_screen() { printf '\033[2J\033[H'; } +hide_cursor() { [[ -t 1 ]] && printf '\033[?25l' >&2 || true; } +show_cursor() { [[ -t 1 ]] && printf '\033[?25h' >&2 || true; } + +# Calculate display width (CJK characters count as 2) +get_display_width() { + local str="$1" + + # Optimized pure bash implementation without forks + local width + + # Save current locale + local old_lc="${LC_ALL:-}" + + # Get Char Count (UTF-8) + # We must export ensuring it applies to the expansion (though just assignment often works in newer bash, export is safer for all subshells/cmds) + export LC_ALL=en_US.UTF-8 + local char_count=${#str} + + # Get Byte Count (C) + export LC_ALL=C + local byte_count=${#str} + + # Restore Locale immediately + if [[ -n "$old_lc" ]]; then + export LC_ALL="$old_lc" + else + unset LC_ALL + fi + + if [[ $byte_count -eq $char_count ]]; then + echo "$char_count" + return + fi + + # CJK Heuristic: + # Most CJK chars are 3 bytes in UTF-8 and width 2. + # ASCII chars are 1 byte and width 1. + # Width ~= CharCount + (ByteCount - CharCount) / 2 + # "中" (1 char, 3 bytes) -> 1 + (2)/2 = 2. + # "A" (1 char, 1 byte) -> 1 + 0 = 1. + # This is an approximation but very fast and sufficient for App names. + # Integer arithmetic in bash automatically handles floor. + local extra_bytes=$((byte_count - char_count)) + local padding=$((extra_bytes / 2)) + width=$((char_count + padding)) + + # Adjust for zero-width joiners and emoji variation selectors (common in filenames/emojis) + # These characters add bytes but no visible width; subtract their count if present. + local zwj=$'\u200d' # zero-width joiner + local vs16=$'\ufe0f' # emoji variation selector + local zero_width=0 + + local without_zwj=${str//$zwj/} + zero_width=$((zero_width + (char_count - ${#without_zwj}))) + + local without_vs=${str//$vs16/} + zero_width=$((zero_width + (char_count - ${#without_vs}))) + + if ((zero_width > 0 && width > zero_width)); then + width=$((width - zero_width)) + fi + + echo "$width" +} + +# Truncate string by display width (handles CJK) +truncate_by_display_width() { + local str="$1" + local max_width="$2" + local current_width + current_width=$(get_display_width "$str") + + if [[ $current_width -le $max_width ]]; then + echo "$str" + return + fi + + # Fallback: Use pure bash character iteration + # Since we need to know the width of *each* character to truncate at the right spot, + # we cannot just use the total width formula on the whole string. + # However, iterating char-by-char and calling the optimized get_display_width function + # is now much faster because it doesn't fork 'wc'. + + # CRITICAL: Switch to UTF-8 for correct character iteration + local old_lc="${LC_ALL:-}" + export LC_ALL=en_US.UTF-8 + + local truncated="" + local width=0 + local i=0 + local char char_width + local strlen=${#str} # Re-calculate in UTF-8 + + # Optimization: If total width <= max_width, return original string (checked above) + + while [[ $i -lt $strlen ]]; do + char="${str:$i:1}" + + # Inlined width calculation for minimal overhead to avoid recursion overhead + # We are already in UTF-8, so ${#char} is char length (1). + # We need byte length for the heuristic. + # But switching locale inside loop is disastrous for perf. + # Logic: If char is ASCII (1 byte), width 1. + # If char is wide (3 bytes), width 2. + # How to detect byte size without switching locale? + # printf %s "$char" | wc -c ? Slow. + # Check against ASCII range? + # Fast ASCII check: if [[ "$char" < $'\x7f' ]]; then ... + + if [[ "$char" =~ [[:ascii:]] ]]; then + char_width=1 + else + # Assume wide for non-ascii in this context (simplified) + # Or use LC_ALL=C inside? No. + # Most non-ASCII in filenames are either CJK (width 2) or heavy symbols. + # Let's assume 2 for simplicity in this fast loop as we know we are usually dealing with CJK. + char_width=2 + fi + + if ((width + char_width + 3 > max_width)); then + break + fi + + truncated+="$char" + width=$((width + char_width)) + i=$((i + 1)) + done + + # Restore locale + if [[ -n "$old_lc" ]]; then + export LC_ALL="$old_lc" + else + unset LC_ALL + fi + + echo "${truncated}..." +} + +# Read single keyboard input +read_key() { + local key rest read_status + IFS= read -r -s -n 1 key + read_status=$? + [[ $read_status -ne 0 ]] && { + echo "QUIT" + return 0 + } + + if [[ "${MOLE_READ_KEY_FORCE_CHAR:-}" == "1" ]]; then + [[ -z "$key" ]] && { + echo "ENTER" + return 0 + } + case "$key" in + $'\n' | $'\r') echo "ENTER" ;; + $'\x7f' | $'\x08') echo "DELETE" ;; + $'\x15') echo "CLEAR_LINE" ;; # Ctrl+U (often mapped from Cmd+Delete in terminals) + $'\x1b') + if IFS= read -r -s -n 1 -t 1 rest 2> /dev/null; then + if [[ "$rest" == "[" ]]; then + if IFS= read -r -s -n 1 -t 1 rest2 2> /dev/null; then + case "$rest2" in + "A") echo "UP" ;; + "B") echo "DOWN" ;; + "C") echo "RIGHT" ;; + "D") echo "LEFT" ;; + "3") + IFS= read -r -s -n 1 -t 1 rest3 2> /dev/null + [[ "$rest3" == "~" ]] && echo "DELETE" || echo "OTHER" + ;; + *) echo "OTHER" ;; + esac + else + echo "QUIT" + fi + elif [[ "$rest" == "O" ]]; then + if IFS= read -r -s -n 1 -t 1 rest2 2> /dev/null; then + case "$rest2" in + "A") echo "UP" ;; + "B") echo "DOWN" ;; + "C") echo "RIGHT" ;; + "D") echo "LEFT" ;; + *) echo "OTHER" ;; + esac + else echo "OTHER"; fi + else + echo "QUIT" + fi + else + echo "QUIT" + fi + ;; + ' ') echo "SPACE" ;; # Allow space in filter mode for selection + [[:print:]]) echo "CHAR:$key" ;; + *) echo "OTHER" ;; + esac + return 0 + fi + + [[ -z "$key" ]] && { + echo "ENTER" + return 0 + } + case "$key" in + $'\n' | $'\r') echo "ENTER" ;; + ' ') echo "SPACE" ;; + 'q' | 'Q') echo "QUIT" ;; + 'R') echo "RETRY" ;; + 'm' | 'M') echo "MORE" ;; + 'u' | 'U') echo "UPDATE" ;; + 't' | 'T') echo "TOUCHID" ;; + 'j' | 'J') echo "DOWN" ;; + 'k' | 'K') echo "UP" ;; + 'h' | 'H') echo "LEFT" ;; + 'l' | 'L') echo "RIGHT" ;; + $'\x03') echo "QUIT" ;; + $'\x7f' | $'\x08') echo "DELETE" ;; + $'\x15') echo "CLEAR_LINE" ;; # Ctrl+U + $'\x1b') + if IFS= read -r -s -n 1 -t 1 rest 2> /dev/null; then + if [[ "$rest" == "[" ]]; then + if IFS= read -r -s -n 1 -t 1 rest2 2> /dev/null; then + case "$rest2" in + "A") echo "UP" ;; "B") echo "DOWN" ;; + "C") echo "RIGHT" ;; "D") echo "LEFT" ;; + "3") + IFS= read -r -s -n 1 -t 1 rest3 2> /dev/null + [[ "$rest3" == "~" ]] && echo "DELETE" || echo "OTHER" + ;; + *) echo "OTHER" ;; + esac + else echo "QUIT"; fi + elif [[ "$rest" == "O" ]]; then + if IFS= read -r -s -n 1 -t 1 rest2 2> /dev/null; then + case "$rest2" in + "A") echo "UP" ;; "B") echo "DOWN" ;; + "C") echo "RIGHT" ;; "D") echo "LEFT" ;; + *) echo "OTHER" ;; + esac + else echo "OTHER"; fi + else echo "OTHER"; fi + else echo "QUIT"; fi + ;; + [[:print:]]) echo "CHAR:$key" ;; + *) echo "OTHER" ;; + esac +} + +drain_pending_input() { + local drained=0 + while IFS= read -r -s -n 1 -t 0.01 _ 2> /dev/null; do + drained=$((drained + 1)) + [[ $drained -gt 100 ]] && break + done +} + +# Format menu option display +show_menu_option() { + local number="$1" + local text="$2" + local selected="$3" + + if [[ "$selected" == "true" ]]; then + echo -e "${CYAN}${ICON_ARROW} $number. $text${NC}" + else + echo " $number. $text" + fi +} + +# Background spinner implementation +INLINE_SPINNER_PID="" +INLINE_SPINNER_STOP_FILE="" + +# Keep spinner message on one line and avoid wrapping/noisy output on narrow terminals. +format_spinner_message() { + local message="$1" + message="${message//$'\r'/ }" + message="${message//$'\n'/ }" + + local cols=80 + if command -v tput > /dev/null 2>&1; then + cols=$(tput cols 2> /dev/null || echo "80") + fi + [[ "$cols" =~ ^[0-9]+$ ]] || cols=80 + + # Reserve space for prefix + spinner char + spacing. + local available=$((cols - 8)) + if [[ $available -lt 20 ]]; then + available=20 + fi + + if [[ ${#message} -gt $available ]]; then + if [[ $available -gt 3 ]]; then + message="${message:0:$((available - 3))}..." + else + message="${message:0:$available}" + fi + fi + + printf "%s" "$message" +} + +start_inline_spinner() { + stop_inline_spinner 2> /dev/null || true + local message="$1" + local display_message + display_message=$(format_spinner_message "$message") + + if [[ -t 1 ]]; then + # Create unique stop flag file for this spinner instance + INLINE_SPINNER_STOP_FILE="${TMPDIR:-/tmp}/mole_spinner_$$_$RANDOM.stop" + + ( + local stop_file="$INLINE_SPINNER_STOP_FILE" + local chars + chars="$(mo_spinner_chars)" + [[ -z "$chars" ]] && chars="|/-\\" + local i=0 + + # Clear line on first output to prevent text remnants from previous messages + printf "\r\033[2K" >&2 || true + + # Cooperative exit: check for stop file instead of relying on signals + while [[ ! -f "$stop_file" ]]; do + local c="${chars:$((i % ${#chars})):1}" + # Output to stderr to avoid interfering with stdout + printf "\r${MOLE_SPINNER_PREFIX:-}${BLUE}%s${NC} %s" "$c" "$display_message" >&2 || break + i=$((i + 1)) + sleep 0.05 + done + + # Clean up stop file before exiting + rm -f "$stop_file" 2> /dev/null || true + exit 0 + ) & + INLINE_SPINNER_PID=$! + disown "$INLINE_SPINNER_PID" 2> /dev/null || true + else + echo -n " ${BLUE}|${NC} $display_message" >&2 || true + fi +} + +stop_inline_spinner() { + if [[ -n "$INLINE_SPINNER_PID" ]]; then + # Cooperative stop: create stop file to signal spinner to exit + if [[ -n "$INLINE_SPINNER_STOP_FILE" ]]; then + touch "$INLINE_SPINNER_STOP_FILE" 2> /dev/null || true + fi + + # Wait briefly for cooperative exit + local wait_count=0 + while kill -0 "$INLINE_SPINNER_PID" 2> /dev/null && [[ $wait_count -lt 5 ]]; do + sleep 0.05 2> /dev/null || true + wait_count=$((wait_count + 1)) + done + + # Only use SIGKILL as last resort if process is stuck + if kill -0 "$INLINE_SPINNER_PID" 2> /dev/null; then + kill -KILL "$INLINE_SPINNER_PID" 2> /dev/null || true + fi + + wait "$INLINE_SPINNER_PID" 2> /dev/null || true + + # Cleanup + rm -f "$INLINE_SPINNER_STOP_FILE" 2> /dev/null || true + INLINE_SPINNER_PID="" + INLINE_SPINNER_STOP_FILE="" + + # Clear the line - use \033[2K to clear entire line, not just to end + [[ -t 1 ]] && printf "\r\033[2K" >&2 || true + fi +} + +# Get spinner characters +mo_spinner_chars() { + local chars="|/-\\" + [[ -z "$chars" ]] && chars="|/-\\" + printf "%s" "$chars" +} + +# Format relative time for compact display (e.g., 3d ago) +format_last_used_summary() { + local value="$1" + + case "$value" in + "" | "Unknown") + echo "Unknown" + return 0 + ;; + "Never" | "Recent" | "Today" | "Yesterday" | "This year" | "Old") + echo "$value" + return 0 + ;; + esac + + if [[ $value =~ ^([0-9]+)[[:space:]]+days?\ ago$ ]]; then + echo "${BASH_REMATCH[1]}d ago" + return 0 + fi + if [[ $value =~ ^([0-9]+)[[:space:]]+weeks?\ ago$ ]]; then + echo "${BASH_REMATCH[1]}w ago" + return 0 + fi + if [[ $value =~ ^([0-9]+)[[:space:]]+months?\ ago$ ]]; then + echo "${BASH_REMATCH[1]}m ago" + return 0 + fi + if [[ $value =~ ^([0-9]+)[[:space:]]+month\(s\)\ ago$ ]]; then + echo "${BASH_REMATCH[1]}m ago" + return 0 + fi + if [[ $value =~ ^([0-9]+)[[:space:]]+years?\ ago$ ]]; then + echo "${BASH_REMATCH[1]}y ago" + return 0 + fi + echo "$value" +} + +# Check if terminal has Full Disk Access +# Returns 0 if FDA is granted, 1 if denied, 2 if unknown +has_full_disk_access() { + # Cache the result to avoid repeated checks + if [[ -n "${MOLE_HAS_FDA:-}" ]]; then + if [[ "$MOLE_HAS_FDA" == "1" ]]; then + return 0 + elif [[ "$MOLE_HAS_FDA" == "unknown" ]]; then + return 2 + else + return 1 + fi + fi + + # Test access to protected directories that require FDA + # Strategy: Try to access directories that are commonly protected + # If ANY of them are accessible, we likely have FDA + # If ALL fail, we definitely don't have FDA + local -a protected_dirs=( + "$HOME/Library/Safari/LocalStorage" + "$HOME/Library/Mail/V10" + "$HOME/Library/Messages/chat.db" + ) + + local accessible_count=0 + local tested_count=0 + + for test_path in "${protected_dirs[@]}"; do + # Only test when the protected path exists + if [[ -e "$test_path" ]]; then + tested_count=$((tested_count + 1)) + # Try to stat the ACTUAL protected path - this requires FDA + if stat "$test_path" > /dev/null 2>&1; then + accessible_count=$((accessible_count + 1)) + fi + fi + done + + # Three possible outcomes: + # 1. tested_count = 0: Can't determine (test paths don't exist) → unknown + # 2. tested_count > 0 && accessible_count > 0: Has FDA → yes + # 3. tested_count > 0 && accessible_count = 0: No FDA → no + if [[ $tested_count -eq 0 ]]; then + # Can't determine - test paths don't exist, treat as unknown + export MOLE_HAS_FDA="unknown" + return 2 + elif [[ $accessible_count -gt 0 ]]; then + # At least one path is accessible → has FDA + export MOLE_HAS_FDA=1 + return 0 + else + # Tested paths exist but not accessible → no FDA + export MOLE_HAS_FDA=0 + return 1 + fi +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/autofix.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/autofix.sh new file mode 100644 index 0000000..eb76fb4 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/autofix.sh @@ -0,0 +1,191 @@ +#!/bin/bash +# Auto-fix Manager +# Unified auto-fix suggestions and execution + +set -euo pipefail + +# Show system suggestions with auto-fix markers +show_suggestions() { + local has_suggestions=false + local can_auto_fix=false + local -a auto_fix_items=() + local -a manual_items=() + local skip_security_autofix=false + if [[ "${MOLE_SECURITY_FIXES_SHOWN:-}" == "true" ]]; then + skip_security_autofix=true + fi + + # Security suggestions + if [[ "$skip_security_autofix" == "false" && -n "${FIREWALL_DISABLED:-}" && "${FIREWALL_DISABLED}" == "true" ]]; then + auto_fix_items+=("Enable Firewall for better security") + has_suggestions=true + can_auto_fix=true + fi + + if [[ -n "${FILEVAULT_DISABLED:-}" && "${FILEVAULT_DISABLED}" == "true" ]]; then + manual_items+=("Enable FileVault|System Settings → Privacy & Security → FileVault") + has_suggestions=true + fi + + # Configuration suggestions + if [[ "$skip_security_autofix" == "false" && -n "${TOUCHID_NOT_CONFIGURED:-}" && "${TOUCHID_NOT_CONFIGURED}" == "true" ]]; then + auto_fix_items+=("Enable Touch ID for sudo") + has_suggestions=true + can_auto_fix=true + fi + + if [[ -n "${ROSETTA_NOT_INSTALLED:-}" && "${ROSETTA_NOT_INSTALLED}" == "true" ]]; then + auto_fix_items+=("Install Rosetta 2 for Intel app support") + has_suggestions=true + can_auto_fix=true + fi + + # Health suggestions + if [[ -n "${CACHE_SIZE_GB:-}" ]]; then + local cache_gb="${CACHE_SIZE_GB:-0}" + if (($(echo "$cache_gb > 5" | bc -l 2> /dev/null || echo 0))); then + manual_items+=("Free up ${cache_gb}GB by cleaning caches|Run: mo clean") + has_suggestions=true + fi + fi + + if [[ -n "${BREW_HAS_WARNINGS:-}" && "${BREW_HAS_WARNINGS}" == "true" ]]; then + manual_items+=("Fix Homebrew warnings|Run: brew doctor to see details") + has_suggestions=true + fi + + if [[ -n "${DISK_FREE_GB:-}" && "${DISK_FREE_GB:-0}" -lt 50 ]]; then + if [[ -z "${CACHE_SIZE_GB:-}" ]] || (($(echo "${CACHE_SIZE_GB:-0} <= 5" | bc -l 2> /dev/null || echo 1))); then + manual_items+=("Low disk space, ${DISK_FREE_GB}GB free|Run: mo analyze to find large files") + has_suggestions=true + fi + fi + + # Display suggestions + echo -e "${BLUE}${ICON_ARROW}${NC} Suggestions" + + if [[ "$has_suggestions" == "false" ]]; then + echo -e " ${GREEN}✓${NC} All looks good" + export HAS_AUTO_FIX_SUGGESTIONS="false" + return + fi + + # Show auto-fix items + if [[ ${#auto_fix_items[@]} -gt 0 ]]; then + for item in "${auto_fix_items[@]}"; do + echo -e " ${GRAY}${ICON_WARNING}${NC} ${item} ${GREEN}[auto]${NC}" + done + fi + + # Show manual items + if [[ ${#manual_items[@]} -gt 0 ]]; then + for item in "${manual_items[@]}"; do + local title="${item%%|*}" + local hint="${item#*|}" + echo -e " ${GRAY}${ICON_WARNING}${NC} ${title}" + echo -e " ${GRAY}${hint}${NC}" + done + fi + + # Export for use in auto-fix + export HAS_AUTO_FIX_SUGGESTIONS="$can_auto_fix" +} + +# Ask user if they want to auto-fix +# Returns: 0 if yes, 1 if no +ask_for_auto_fix() { + if [[ "${HAS_AUTO_FIX_SUGGESTIONS:-false}" != "true" ]]; then + return 1 + fi + + echo -ne "${PURPLE}${ICON_ARROW}${NC} Auto-fix issues now? ${GRAY}Enter confirm / Space cancel${NC}: " + + local key + if ! key=$(read_key); then + echo "no" + echo "" + return 1 + fi + + if [[ "$key" == "ENTER" ]]; then + echo "yes" + echo "" + return 0 + else + echo "no" + echo "" + return 1 + fi +} + +# Perform auto-fixes +# Returns: number of fixes applied +perform_auto_fix() { + local fixed_count=0 + local -a fixed_items=() + + # Ensure sudo access + if ! has_sudo_session; then + if ! ensure_sudo_session "System fixes require admin access"; then + echo -e "${YELLOW}Skipping auto fixes, admin authentication required${NC}" + echo "" + return 0 + fi + fi + + # Fix Firewall + if [[ -n "${FIREWALL_DISABLED:-}" && "${FIREWALL_DISABLED}" == "true" ]]; then + echo -e "${BLUE}Enabling Firewall...${NC}" + if sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate on > /dev/null 2>&1; then + echo -e "${GREEN}✓${NC} Firewall enabled" + fixed_count=$((fixed_count + 1)) + fixed_items+=("Firewall enabled") + else + echo -e "${RED}✗${NC} Failed to enable Firewall" + fi + echo "" + fi + + # Fix Touch ID + if [[ -n "${TOUCHID_NOT_CONFIGURED:-}" && "${TOUCHID_NOT_CONFIGURED}" == "true" ]]; then + echo -e "${BLUE}${ICON_ARROW}${NC} Configuring Touch ID for sudo..." + local pam_file="/etc/pam.d/sudo" + if sudo bash -c "grep -q 'pam_tid.so' '$pam_file' 2>/dev/null || sed -i '' '2i\\ +auth sufficient pam_tid.so +' '$pam_file'" 2> /dev/null; then + echo -e "${GREEN}✓${NC} Touch ID configured" + fixed_count=$((fixed_count + 1)) + fixed_items+=("Touch ID configured for sudo") + else + echo -e "${RED}✗${NC} Failed to configure Touch ID" + fi + echo "" + fi + + # Install Rosetta 2 + if [[ -n "${ROSETTA_NOT_INSTALLED:-}" && "${ROSETTA_NOT_INSTALLED}" == "true" ]]; then + echo -e "${BLUE}Installing Rosetta 2...${NC}" + if sudo softwareupdate --install-rosetta --agree-to-license 2>&1 | grep -qE "(Installing|Installed|already installed)"; then + echo -e "${GREEN}✓${NC} Rosetta 2 installed" + fixed_count=$((fixed_count + 1)) + fixed_items+=("Rosetta 2 installed") + else + echo -e "${RED}✗${NC} Failed to install Rosetta 2" + fi + echo "" + fi + + if [[ $fixed_count -gt 0 ]]; then + AUTO_FIX_SUMMARY="Auto fixes applied: ${fixed_count} issues" + if [[ ${#fixed_items[@]} -gt 0 ]]; then + AUTO_FIX_DETAILS=$(printf '%s\n' "${fixed_items[@]}") + else + AUTO_FIX_DETAILS="" + fi + else + AUTO_FIX_SUMMARY="Auto fixes skipped: No changes were required" + AUTO_FIX_DETAILS="" + fi + export AUTO_FIX_SUMMARY AUTO_FIX_DETAILS + return 0 +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/purge_paths.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/purge_paths.sh new file mode 100644 index 0000000..aa34819 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/purge_paths.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# Purge paths management functionality +# Opens config file for editing and shows current status + +set -euo pipefail + +# Get script directory and source dependencies +_MOLE_MANAGE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$_MOLE_MANAGE_DIR/../core/common.sh" +# Only source project.sh if not already loaded (has readonly vars) +if [[ -z "${PURGE_TARGETS:-}" ]]; then + source "$_MOLE_MANAGE_DIR/../clean/project.sh" +fi + +# Config file path (use :- to avoid re-declaration if already set) +PURGE_PATHS_CONFIG="${PURGE_PATHS_CONFIG:-$HOME/.config/mole/purge_paths}" + +# Ensure config file exists with helpful template +ensure_config_template() { + if [[ ! -f "$PURGE_PATHS_CONFIG" ]]; then + ensure_user_dir "$(dirname "$PURGE_PATHS_CONFIG")" + cat > "$PURGE_PATHS_CONFIG" << 'EOF' +# Mole Purge Paths - Directories to scan for project artifacts +# Add one path per line (supports ~ for home directory) +# Delete all paths or this file to use defaults +# +# Example: +# ~/Documents/MyProjects +# ~/Work/ClientA +# ~/Work/ClientB +EOF + fi +} + +# Main management function +manage_purge_paths() { + ensure_config_template + + local display_config="${PURGE_PATHS_CONFIG/#$HOME/~}" + + # Clear screen + if [[ -t 1 ]]; then + printf '\033[2J\033[H' + fi + + echo -e "${PURPLE_BOLD}Purge Paths Configuration${NC}" + echo "" + + # Show current status + echo -e "${YELLOW}Current Scan Paths:${NC}" + + # Reload config + load_purge_config + + if [[ ${#PURGE_SEARCH_PATHS[@]} -gt 0 ]]; then + for path in "${PURGE_SEARCH_PATHS[@]}"; do + local display_path="${path/#$HOME/~}" + if [[ -d "$path" ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} $display_path" + else + echo -e " ${GRAY}${ICON_EMPTY}${NC} $display_path${GRAY}, not found${NC}" + fi + done + fi + + # Check if using custom config + local custom_count=0 + if [[ -f "$PURGE_PATHS_CONFIG" ]]; then + while IFS= read -r line; do + line="${line#"${line%%[![:space:]]*}"}" + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" || "$line" =~ ^# ]] && continue + custom_count=$((custom_count + 1)) + done < "$PURGE_PATHS_CONFIG" + fi + + echo "" + if [[ $custom_count -gt 0 ]]; then + echo -e "${GRAY}Using custom config with $custom_count paths${NC}" + else + echo -e "${GRAY}Using ${#DEFAULT_PURGE_SEARCH_PATHS[@]} default paths${NC}" + fi + + echo "" + echo -e "${YELLOW}Default Paths:${NC}" + for path in "${DEFAULT_PURGE_SEARCH_PATHS[@]}"; do + echo -e " ${GRAY}-${NC} ${path/#$HOME/~}" + done + + echo "" + echo -e "${YELLOW}Config File:${NC} $display_config" + echo "" + + # Open in editor + local editor="${EDITOR:-${VISUAL:-vim}}" + echo -e "Opening in ${CYAN}$editor${NC}..." + echo -e "${GRAY}Save and exit to apply changes. Leave empty to use defaults.${NC}" + echo "" + + # Wait for user to read + sleep 1 + + # Open editor + "$editor" "$PURGE_PATHS_CONFIG" + + # Reload and show updated status + load_purge_config + + echo "" + echo -e "${GREEN}${ICON_SUCCESS}${NC} Configuration updated" + echo -e "${GRAY}Run 'mo purge' to clean with new paths${NC}" + echo "" +} + +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + manage_purge_paths +fi diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/update.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/update.sh new file mode 100644 index 0000000..2dc4027 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/update.sh @@ -0,0 +1,169 @@ +#!/bin/bash +# Update Manager +# Unified update execution for all update types + +set -euo pipefail + +# Format Homebrew update details for display +format_brew_update_detail() { + local total="${BREW_OUTDATED_COUNT:-0}" + if [[ -z "$total" || "$total" -le 0 ]]; then + return + fi + + local -a details=() + local formulas="${BREW_FORMULA_OUTDATED_COUNT:-0}" + local casks="${BREW_CASK_OUTDATED_COUNT:-0}" + + ((formulas > 0)) && details+=("${formulas} formula") + ((casks > 0)) && details+=("${casks} cask") + + local detail_str="${total} updates" + if ((${#details[@]} > 0)); then + detail_str="$( + IFS=', ' + printf '%s' "${details[*]}" + )" + fi + printf "%s" "$detail_str" +} + +# Keep for compatibility with existing callers/tests. +format_brew_update_label() { + local detail + detail=$(format_brew_update_detail || true) + [[ -n "$detail" ]] && printf "Homebrew, %s" "$detail" +} + +populate_brew_update_counts_if_unset() { + local need_probe=false + [[ -z "${BREW_OUTDATED_COUNT:-}" ]] && need_probe=true + [[ -z "${BREW_FORMULA_OUTDATED_COUNT:-}" ]] && need_probe=true + [[ -z "${BREW_CASK_OUTDATED_COUNT:-}" ]] && need_probe=true + + if [[ "$need_probe" == "false" ]]; then + return 0 + fi + + local formula_count="${BREW_FORMULA_OUTDATED_COUNT:-0}" + local cask_count="${BREW_CASK_OUTDATED_COUNT:-0}" + + if command -v brew > /dev/null 2>&1; then + local formula_outdated="" + local cask_outdated="" + + formula_outdated=$(run_with_timeout 8 brew outdated --formula --quiet 2> /dev/null || true) + cask_outdated=$(run_with_timeout 8 brew outdated --cask --quiet 2> /dev/null || true) + + formula_count=$(printf '%s\n' "$formula_outdated" | awk 'NF {count++} END {print count + 0}') + cask_count=$(printf '%s\n' "$cask_outdated" | awk 'NF {count++} END {print count + 0}') + fi + + BREW_FORMULA_OUTDATED_COUNT="$formula_count" + BREW_CASK_OUTDATED_COUNT="$cask_count" + BREW_OUTDATED_COUNT="$((formula_count + cask_count))" +} + +brew_has_outdated() { + local kind="${1:-formula}" + command -v brew > /dev/null 2>&1 || return 1 + + if [[ "$kind" == "cask" ]]; then + brew outdated --cask --quiet 2> /dev/null | grep -q . + else + brew outdated --quiet 2> /dev/null | grep -q . + fi +} + +# Ask user if they want to update +# Returns: 0 if yes, 1 if no +ask_for_updates() { + populate_brew_update_counts_if_unset + + local has_updates=false + if [[ -n "${BREW_OUTDATED_COUNT:-}" && "${BREW_OUTDATED_COUNT:-0}" -gt 0 ]]; then + has_updates=true + fi + + if [[ -n "${APPSTORE_UPDATE_COUNT:-}" && "${APPSTORE_UPDATE_COUNT:-0}" -gt 0 ]]; then + has_updates=true + fi + + if [[ -n "${MACOS_UPDATE_AVAILABLE:-}" && "${MACOS_UPDATE_AVAILABLE}" == "true" ]]; then + has_updates=true + fi + + if [[ -n "${MOLE_UPDATE_AVAILABLE:-}" && "${MOLE_UPDATE_AVAILABLE}" == "true" ]]; then + has_updates=true + fi + + if [[ "$has_updates" == "false" ]]; then + return 1 + fi + + if [[ "${MOLE_UPDATE_AVAILABLE:-}" == "true" ]]; then + echo -ne "${YELLOW}Update Mole now?${NC} ${GRAY}Enter confirm / ESC cancel${NC}: " + + local key + if ! key=$(read_key); then + echo "skip" + return 1 + fi + + if [[ "$key" == "ENTER" ]]; then + echo "yes" + return 0 + fi + fi + + if [[ -n "${BREW_OUTDATED_COUNT:-}" && "${BREW_OUTDATED_COUNT:-0}" -gt 0 ]]; then + echo -e " ${GRAY}${ICON_REVIEW}${NC} Run ${GREEN}brew upgrade${NC} to update" + fi + if [[ -n "${MACOS_UPDATE_AVAILABLE:-}" && "${MACOS_UPDATE_AVAILABLE}" == "true" ]]; then + echo -e " ${GRAY}${ICON_REVIEW}${NC} Open ${GREEN}System Settings${NC} → ${GREEN}General${NC} → ${GREEN}Software Update${NC}" + fi + if [[ -n "${APPSTORE_UPDATE_COUNT:-}" && "${APPSTORE_UPDATE_COUNT:-0}" -gt 0 ]]; then + echo -e " ${GRAY}${ICON_REVIEW}${NC} Open ${GREEN}App Store${NC} → ${GREEN}Updates${NC}" + fi + + return 1 +} + +# Perform all pending updates +# Returns: 0 if all succeeded, 1 if some failed +perform_updates() { + # Only handle Mole updates here; Homebrew/App Store/macOS are manual (tips shown in ask_for_updates) + local updated_count=0 + local total_count=0 + + if [[ -n "${MOLE_UPDATE_AVAILABLE:-}" && "${MOLE_UPDATE_AVAILABLE}" == "true" ]]; then + echo -e "${BLUE}Updating Mole...${NC}" + local mole_bin="${SCRIPT_DIR}/../../mole" + [[ ! -f "$mole_bin" ]] && mole_bin=$(command -v mole 2> /dev/null || echo "") + + if [[ -x "$mole_bin" ]]; then + if "$mole_bin" update 2>&1 | grep -qE "(Updated|latest version)"; then + echo -e "${GREEN}${ICON_SUCCESS}${NC} Mole updated" + reset_mole_cache + updated_count=$((updated_count + 1)) + else + echo -e "${RED}✗${NC} Mole update failed" + fi + else + echo -e "${RED}✗${NC} Mole executable not found" + fi + echo "" + total_count=1 + fi + + if [[ $total_count -eq 0 ]]; then + echo -e "${GRAY}No updates to perform${NC}" + return 0 + elif [[ $updated_count -eq $total_count ]]; then + echo -e "${GREEN}All updates completed, ${updated_count}/${total_count}${NC}" + return 0 + else + echo -e "${RED}Update failed, ${updated_count}/${total_count}${NC}" + return 1 + fi +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/whitelist.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/whitelist.sh new file mode 100755 index 0000000..41259ac --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/manage/whitelist.sh @@ -0,0 +1,430 @@ +#!/bin/bash +# Whitelist management functionality +# Shows actual files that would be deleted by dry-run + +set -euo pipefail + +# Get script directory and source dependencies +_MOLE_MANAGE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +source "$_MOLE_MANAGE_DIR/../core/common.sh" +source "$_MOLE_MANAGE_DIR/../ui/menu_simple.sh" + +# Config file paths +readonly WHITELIST_CONFIG_CLEAN="$HOME/.config/mole/whitelist" +readonly WHITELIST_CONFIG_OPTIMIZE="$HOME/.config/mole/whitelist_optimize" +readonly WHITELIST_CONFIG_OPTIMIZE_LEGACY="$HOME/.config/mole/whitelist_checks" + +# Default whitelist patterns defined in lib/core/common.sh: +# - DEFAULT_WHITELIST_PATTERNS +# - FINDER_METADATA_SENTINEL + +# Save whitelist patterns to config (defaults to "clean" for legacy callers) +save_whitelist_patterns() { + local mode="clean" + if [[ $# -gt 0 ]]; then + case "$1" in + clean | optimize) + mode="$1" + shift + ;; + esac + fi + + local -a patterns + patterns=("$@") + + local config_file + local header_text + + if [[ "$mode" == "optimize" ]]; then + config_file="$WHITELIST_CONFIG_OPTIMIZE" + header_text="# Mole Optimization Whitelist - These checks will be skipped during optimization" + else + config_file="$WHITELIST_CONFIG_CLEAN" + header_text="# Mole Whitelist - Protected paths won't be deleted\n# Default protections: Playwright browsers, HuggingFace models, Maven repo, Ollama models, Surge Mac, R renv, Finder metadata\n# Add one pattern per line to keep items safe." + fi + + ensure_user_file "$config_file" + + echo -e "$header_text" > "$config_file" + + if [[ ${#patterns[@]} -gt 0 ]]; then + local -a unique_patterns=() + for pattern in "${patterns[@]}"; do + local duplicate="false" + if [[ ${#unique_patterns[@]} -gt 0 ]]; then + for existing in "${unique_patterns[@]}"; do + if patterns_equivalent "$pattern" "$existing"; then + duplicate="true" + break + fi + done + fi + [[ "$duplicate" == "true" ]] && continue + unique_patterns+=("$pattern") + done + + if [[ ${#unique_patterns[@]} -gt 0 ]]; then + printf '\n' >> "$config_file" + for pattern in "${unique_patterns[@]}"; do + echo "$pattern" >> "$config_file" + done + fi + fi +} + +# Get all cache items with their patterns +get_all_cache_items() { + # Format: "display_name|pattern|category" + cat << 'EOF' +Apple Mail cache|$HOME/Library/Caches/com.apple.mail/*|system_cache +Gradle build cache (Android Studio, Gradle projects)|$HOME/.gradle/caches/*|ide_cache +Gradle daemon processes cache|$HOME/.gradle/daemon/*|ide_cache +Xcode DerivedData (build outputs, indexes)|$HOME/Library/Developer/Xcode/DerivedData/*|ide_cache +Xcode archives (built app packages)|$HOME/Library/Developer/Xcode/Archives/*|ide_cache +Xcode internal cache files|$HOME/Library/Caches/com.apple.dt.Xcode/*|ide_cache +Xcode iOS device support symbols|$HOME/Library/Developer/Xcode/iOS DeviceSupport/*/Symbols/System/Library/Caches/*|ide_cache +Maven local repository (Java dependencies)|$HOME/.m2/repository/*|ide_cache +JetBrains IDEs data (IntelliJ, PyCharm, WebStorm, GoLand)|$HOME/Library/Application Support/JetBrains/*|ide_cache +JetBrains IDEs cache|$HOME/Library/Caches/JetBrains/*|ide_cache +Android Studio cache and indexes|$HOME/Library/Caches/Google/AndroidStudio*/*|ide_cache +Android build cache|$HOME/.android/build-cache/*|ide_cache +VS Code runtime cache|$HOME/Library/Application Support/Code/Cache/*|ide_cache +VS Code extension and update cache|$HOME/Library/Application Support/Code/CachedData/*|ide_cache +VS Code system cache (Cursor, VSCodium)|$HOME/Library/Caches/com.microsoft.VSCode/*|ide_cache +Cursor editor cache|$HOME/Library/Caches/com.todesktop.230313mzl4w4u92/*|ide_cache +Bazel build cache|$HOME/.cache/bazel/*|compiler_cache +Go build cache|$HOME/Library/Caches/go-build/*|compiler_cache +Go module cache|$HOME/go/pkg/mod/*|compiler_cache +Rust Cargo registry cache|$HOME/.cargo/registry/cache/*|compiler_cache +Rust documentation cache|$HOME/.rustup/toolchains/*/share/doc/*|compiler_cache +Rustup toolchain downloads|$HOME/.rustup/downloads/*|compiler_cache +ccache compiler cache|$HOME/.ccache/*|compiler_cache +sccache distributed compiler cache|$HOME/.cache/sccache/*|compiler_cache +SBT Scala build cache|$HOME/.sbt/*|compiler_cache +Ivy dependency cache|$HOME/.ivy2/cache/*|compiler_cache +Turbo monorepo build cache|$HOME/.turbo/*|compiler_cache +Next.js build cache|$HOME/.next/*|compiler_cache +Vite build cache|$HOME/.vite/*|compiler_cache +Parcel bundler cache|$HOME/.parcel-cache/*|compiler_cache +pre-commit hooks cache|$HOME/.cache/pre-commit/*|compiler_cache +Ruff Python linter cache|$HOME/.cache/ruff/*|compiler_cache +MyPy type checker cache|$HOME/.cache/mypy/*|compiler_cache +Pytest test cache|$HOME/.pytest_cache/*|compiler_cache +Flutter SDK cache|$HOME/.cache/flutter/*|compiler_cache +Swift Package Manager cache|$HOME/.cache/swift-package-manager/*|compiler_cache +Zig compiler cache|$HOME/.cache/zig/*|compiler_cache +Deno cache|$HOME/Library/Caches/deno/*|compiler_cache +CocoaPods cache (iOS dependencies)|$HOME/Library/Caches/CocoaPods/*|package_manager +npm package cache|$HOME/.npm/_cacache/*|package_manager +pip Python package cache|$HOME/.cache/pip/*|package_manager +uv Python package cache|$HOME/.cache/uv/*|package_manager +R renv global cache (virtual environments)|$HOME/Library/Caches/org.R-project.R/R/renv/*|package_manager +Homebrew downloaded packages|$HOME/Library/Caches/Homebrew/*|package_manager +Yarn package manager cache|$HOME/.cache/yarn/*|package_manager +pnpm package store|$HOME/Library/pnpm/store/*|package_manager +Composer PHP dependencies cache|$HOME/.composer/cache/*|package_manager +RubyGems cache|$HOME/.gem/cache/*|package_manager +Conda packages cache|$HOME/.conda/pkgs/*|package_manager +Anaconda packages cache|$HOME/anaconda3/pkgs/*|package_manager +PyTorch model cache|$HOME/.cache/torch/*|ai_ml_cache +TensorFlow model and dataset cache|$HOME/.cache/tensorflow/*|ai_ml_cache +HuggingFace models and datasets|$HOME/.cache/huggingface/*|ai_ml_cache +Playwright browser binaries|$HOME/Library/Caches/ms-playwright*|ai_ml_cache +Selenium WebDriver binaries|$HOME/.cache/selenium/*|ai_ml_cache +Ollama local AI models|$HOME/.ollama/models/*|ai_ml_cache +Weights & Biases ML experiments cache|$HOME/.cache/wandb/*|ai_ml_cache +Safari web browser cache|$HOME/Library/Caches/com.apple.Safari/*|browser_cache +Chrome browser cache|$HOME/Library/Caches/Google/Chrome/*|browser_cache +Firefox browser cache|$HOME/Library/Caches/Firefox/*|browser_cache +Brave browser cache|$HOME/Library/Caches/BraveSoftware/Brave-Browser/*|browser_cache +Surge proxy cache|$HOME/Library/Caches/com.nssurge.surge-mac/*|network_tools +Surge configuration and data|$HOME/Library/Application Support/com.nssurge.surge-mac/*|network_tools +Docker Desktop image cache|$HOME/Library/Containers/com.docker.docker/Data/*|container_cache +Podman container cache|$HOME/.local/share/containers/cache/*|container_cache +Font cache|$HOME/Library/Caches/com.apple.FontRegistry/*|system_cache +Spotlight metadata cache|$HOME/Library/Caches/com.apple.spotlight/*|system_cache +CloudKit cache|$HOME/Library/Caches/CloudKit/*|system_cache +Trash|$HOME/.Trash|system_cache +EOF + # Add FINDER_METADATA with constant reference + echo "Finder metadata, .DS_Store|$FINDER_METADATA_SENTINEL|system_cache" +} + +# Get all optimize items with their patterns +get_optimize_whitelist_items() { + # Format: "display_name|pattern|category" + cat << 'EOF' +macOS Firewall check|firewall|security_check +Gatekeeper check|gatekeeper|security_check +macOS system updates check|check_macos_updates|update_check +Mole updates check|check_mole_update|update_check +Homebrew health check (doctor)|check_brew_health|health_check +SIP status check|check_sip|security_check +FileVault status check|check_filevault|security_check +TouchID sudo check|check_touchid|config_check +Rosetta 2 check|check_rosetta|config_check +Git configuration check|check_git_config|config_check +Login items check|check_login_items|config_check +EOF +} + +patterns_equivalent() { + local first="${1/#~/$HOME}" + local second="${2/#~/$HOME}" + + # Only exact string match, no glob expansion + [[ "$first" == "$second" ]] && return 0 + return 1 +} + +load_whitelist() { + local mode="${1:-clean}" + local -a patterns=() + local config_file + local legacy_file="" + + if [[ "$mode" == "optimize" ]]; then + config_file="$WHITELIST_CONFIG_OPTIMIZE" + legacy_file="$WHITELIST_CONFIG_OPTIMIZE_LEGACY" + else + config_file="$WHITELIST_CONFIG_CLEAN" + fi + + local using_legacy="false" + if [[ ! -f "$config_file" && -n "$legacy_file" && -f "$legacy_file" ]]; then + config_file="$legacy_file" + using_legacy="true" + fi + + if [[ -f "$config_file" ]]; then + while IFS= read -r line; do + # shellcheck disable=SC2295 + line="${line#"${line%%[![:space:]]*}"}" + # shellcheck disable=SC2295 + line="${line%"${line##*[![:space:]]}"}" + [[ -z "$line" || "$line" =~ ^# ]] && continue + patterns+=("$line") + done < "$config_file" + else + if [[ "$mode" == "clean" ]]; then + patterns=("${DEFAULT_WHITELIST_PATTERNS[@]}") + elif [[ "$mode" == "optimize" ]]; then + patterns=("${DEFAULT_OPTIMIZE_WHITELIST_PATTERNS[@]}") + fi + fi + + if [[ ${#patterns[@]} -gt 0 ]]; then + local -a unique_patterns=() + for pattern in "${patterns[@]}"; do + local duplicate="false" + if [[ ${#unique_patterns[@]} -gt 0 ]]; then + for existing in "${unique_patterns[@]}"; do + if patterns_equivalent "$pattern" "$existing"; then + duplicate="true" + break + fi + done + fi + [[ "$duplicate" == "true" ]] && continue + unique_patterns+=("$pattern") + done + CURRENT_WHITELIST_PATTERNS=("${unique_patterns[@]}") + + # Migrate legacy optimize config to the new path automatically + if [[ "$mode" == "optimize" && "$using_legacy" == "true" && "$config_file" != "$WHITELIST_CONFIG_OPTIMIZE" ]]; then + save_whitelist_patterns "$mode" "${CURRENT_WHITELIST_PATTERNS[@]}" + fi + else + CURRENT_WHITELIST_PATTERNS=() + fi +} + +is_whitelisted() { + local pattern="$1" + local check_pattern="${pattern/#\~/$HOME}" + + if [[ ${#CURRENT_WHITELIST_PATTERNS[@]} -eq 0 ]]; then + return 1 + fi + + for existing in "${CURRENT_WHITELIST_PATTERNS[@]}"; do + local existing_expanded="${existing/#\~/$HOME}" + # Only use exact string match to prevent glob expansion security issues + if [[ "$check_pattern" == "$existing_expanded" ]]; then + return 0 + fi + done + return 1 +} + +manage_whitelist() { + local mode="${1:-clean}" + manage_whitelist_categories "$mode" +} + +manage_whitelist_categories() { + local mode="$1" + + # Load currently enabled patterns from both sources + load_whitelist "$mode" + + # Build cache items list + local -a cache_items=() + local -a cache_patterns=() + local -a menu_options=() + local index=0 + + # Choose source based on mode + local items_source + local menu_title + local active_config_file + + if [[ "$mode" == "optimize" ]]; then + items_source=$(get_optimize_whitelist_items) + active_config_file="$WHITELIST_CONFIG_OPTIMIZE" + local display_config="${active_config_file/#$HOME/~}" + menu_title="Whitelist Manager, Select system checks to ignore +${GRAY}Edit: ${display_config}${NC}" + else + items_source=$(get_all_cache_items) + active_config_file="$WHITELIST_CONFIG_CLEAN" + local display_config="${active_config_file/#$HOME/~}" + menu_title="Whitelist Manager, Select caches to protect +${GRAY}Edit: ${display_config}${NC}" + fi + + while IFS='|' read -r display_name pattern _; do + # Expand $HOME in pattern + pattern="${pattern/\$HOME/$HOME}" + + cache_items+=("$display_name") + cache_patterns+=("$pattern") + menu_options+=("$display_name") + + index=$((index + 1)) + done <<< "$items_source" + + # Identify custom patterns (not in predefined list) + local -a custom_patterns=() + if [[ ${#CURRENT_WHITELIST_PATTERNS[@]} -gt 0 ]]; then + for current_pattern in "${CURRENT_WHITELIST_PATTERNS[@]}"; do + local is_predefined=false + for predefined_pattern in "${cache_patterns[@]}"; do + if patterns_equivalent "$current_pattern" "$predefined_pattern"; then + is_predefined=true + break + fi + done + if [[ "$is_predefined" == "false" ]]; then + custom_patterns+=("$current_pattern") + fi + done + fi + + # Prioritize already-selected items to appear first + local -a selected_cache_items=() + local -a selected_cache_patterns=() + local -a selected_menu_options=() + local -a remaining_cache_items=() + local -a remaining_cache_patterns=() + local -a remaining_menu_options=() + + for ((i = 0; i < ${#cache_patterns[@]}; i++)); do + if is_whitelisted "${cache_patterns[i]}"; then + selected_cache_items+=("${cache_items[i]}") + selected_cache_patterns+=("${cache_patterns[i]}") + selected_menu_options+=("${menu_options[i]}") + else + remaining_cache_items+=("${cache_items[i]}") + remaining_cache_patterns+=("${cache_patterns[i]}") + remaining_menu_options+=("${menu_options[i]}") + fi + done + + cache_items=() + cache_patterns=() + menu_options=() + if [[ ${#selected_cache_items[@]} -gt 0 ]]; then + cache_items=("${selected_cache_items[@]}") + cache_patterns=("${selected_cache_patterns[@]}") + menu_options=("${selected_menu_options[@]}") + fi + if [[ ${#remaining_cache_items[@]} -gt 0 ]]; then + cache_items+=("${remaining_cache_items[@]}") + cache_patterns+=("${remaining_cache_patterns[@]}") + menu_options+=("${remaining_menu_options[@]}") + fi + + if [[ ${#selected_cache_patterns[@]} -gt 0 ]]; then + local -a preselected_indices=() + for ((i = 0; i < ${#selected_cache_patterns[@]}; i++)); do + preselected_indices+=("$i") + done + local IFS=',' + export MOLE_PRESELECTED_INDICES="${preselected_indices[*]}" + else + unset MOLE_PRESELECTED_INDICES + fi + + MOLE_SELECTION_RESULT="" + paginated_multi_select "$menu_title" "${menu_options[@]}" + unset MOLE_PRESELECTED_INDICES + local exit_code=$? + + # Normal exit or cancel + if [[ $exit_code -ne 0 ]]; then + return 1 + fi + + # Convert selected indices to patterns + local -a selected_patterns=() + if [[ -n "$MOLE_SELECTION_RESULT" ]]; then + local -a selected_indices + IFS=',' read -ra selected_indices <<< "$MOLE_SELECTION_RESULT" + for idx in "${selected_indices[@]}"; do + if [[ $idx -ge 0 && $idx -lt ${#cache_patterns[@]} ]]; then + local pattern="${cache_patterns[$idx]}" + # Convert back to portable format with ~ + pattern="${pattern/#$HOME/~}" + selected_patterns+=("$pattern") + fi + done + fi + + # Merge custom patterns with selected patterns + local -a all_patterns=() + if [[ ${#selected_patterns[@]} -gt 0 ]]; then + all_patterns=("${selected_patterns[@]}") + fi + if [[ ${#custom_patterns[@]} -gt 0 ]]; then + for custom_pattern in "${custom_patterns[@]}"; do + all_patterns+=("$custom_pattern") + done + fi + + # Save to whitelist config (bash 3.2 + set -u safe) + if [[ ${#all_patterns[@]} -gt 0 ]]; then + save_whitelist_patterns "$mode" "${all_patterns[@]}" + else + save_whitelist_patterns "$mode" + fi + + local total_protected=$((${#selected_patterns[@]} + ${#custom_patterns[@]})) + local -a summary_lines=() + summary_lines+=("Whitelist Updated") + if [[ ${#custom_patterns[@]} -gt 0 ]]; then + summary_lines+=("Protected ${#selected_patterns[@]} predefined + ${#custom_patterns[@]} custom patterns") + else + summary_lines+=("Protected ${total_protected} caches") + fi + local display_config="${active_config_file/#$HOME/~}" + summary_lines+=("Config: ${GRAY}${display_config}${NC}") + + print_summary_block "${summary_lines[@]}" + printf '\n' +} + +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + manage_whitelist +fi diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/optimize/maintenance.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/optimize/maintenance.sh new file mode 100644 index 0000000..a81c9cf --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/optimize/maintenance.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# System Configuration Maintenance Module. +# Fix broken preferences and login items. + +set -euo pipefail + +# Remove corrupted preference files. +fix_broken_preferences() { + local prefs_dir="$HOME/Library/Preferences" + [[ -d "$prefs_dir" ]] || return 0 + + local broken_count=0 + + while IFS= read -r plist_file; do + [[ -f "$plist_file" ]] || continue + + local filename + filename=$(basename "$plist_file") + case "$filename" in + com.apple.* | .GlobalPreferences* | loginwindow.plist) + continue + ;; + esac + + plutil -lint "$plist_file" > /dev/null 2>&1 && continue + + safe_remove "$plist_file" true > /dev/null 2>&1 || true + broken_count=$((broken_count + 1)) + done < <(command find "$prefs_dir" -maxdepth 1 -name "*.plist" -type f 2> /dev/null || true) + + # Check ByHost preferences. + local byhost_dir="$prefs_dir/ByHost" + if [[ -d "$byhost_dir" ]]; then + while IFS= read -r plist_file; do + [[ -f "$plist_file" ]] || continue + + local filename + filename=$(basename "$plist_file") + case "$filename" in + com.apple.* | .GlobalPreferences*) + continue + ;; + esac + + plutil -lint "$plist_file" > /dev/null 2>&1 && continue + + safe_remove "$plist_file" true > /dev/null 2>&1 || true + broken_count=$((broken_count + 1)) + done < <(command find "$byhost_dir" -name "*.plist" -type f 2> /dev/null || true) + fi + + echo "$broken_count" +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/optimize/tasks.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/optimize/tasks.sh new file mode 100644 index 0000000..0f69863 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/optimize/tasks.sh @@ -0,0 +1,817 @@ +#!/bin/bash +# Optimization Tasks + +set -euo pipefail + +# Config constants (override via env). +readonly MOLE_TM_THIN_TIMEOUT=180 +readonly MOLE_TM_THIN_VALUE=9999999999 +readonly MOLE_SQLITE_MAX_SIZE=104857600 # 100MB + +# Dry-run aware output. +opt_msg() { + local message="$1" + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + echo -e " ${YELLOW}${ICON_DRY_RUN}${NC} $message" + else + echo -e " ${GREEN}${ICON_SUCCESS}${NC} $message" + fi +} + +run_launchctl_unload() { + local plist_file="$1" + local need_sudo="${2:-false}" + + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + return 0 + fi + + if [[ "$need_sudo" == "true" ]]; then + sudo launchctl unload "$plist_file" 2> /dev/null || true + else + launchctl unload "$plist_file" 2> /dev/null || true + fi +} + +needs_permissions_repair() { + local owner + owner=$(stat -f %Su "$HOME" 2> /dev/null || echo "") + if [[ -n "$owner" && "$owner" != "$USER" ]]; then + return 0 + fi + + local -a paths=( + "$HOME" + "$HOME/Library" + "$HOME/Library/Preferences" + ) + local path + for path in "${paths[@]}"; do + if [[ -e "$path" && ! -w "$path" ]]; then + return 0 + fi + done + + return 1 +} + +has_bluetooth_hid_connected() { + local bt_report + bt_report=$(system_profiler SPBluetoothDataType 2> /dev/null || echo "") + if ! echo "$bt_report" | grep -q "Connected: Yes"; then + return 1 + fi + + if echo "$bt_report" | grep -Eiq "Keyboard|Trackpad|Mouse|HID"; then + return 0 + fi + + return 1 +} + +is_ac_power() { + pmset -g batt 2> /dev/null | grep -q "AC Power" +} + +is_memory_pressure_high() { + if ! command -v memory_pressure > /dev/null 2>&1; then + return 1 + fi + + local mp_output + mp_output=$(memory_pressure -Q 2> /dev/null || echo "") + if echo "$mp_output" | grep -Eiq "warning|critical"; then + return 0 + fi + + return 1 +} + +flush_dns_cache() { + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + MOLE_DNS_FLUSHED=1 + return 0 + fi + + if sudo dscacheutil -flushcache 2> /dev/null && sudo killall -HUP mDNSResponder 2> /dev/null; then + MOLE_DNS_FLUSHED=1 + return 0 + fi + return 1 +} + +# Basic system maintenance. +opt_system_maintenance() { + if flush_dns_cache; then + opt_msg "DNS cache flushed" + fi + + local spotlight_status + spotlight_status=$(mdutil -s / 2> /dev/null || echo "") + if echo "$spotlight_status" | grep -qi "Indexing disabled"; then + echo -e " ${GRAY}${ICON_EMPTY}${NC} Spotlight indexing disabled" + else + opt_msg "Spotlight index verified" + fi +} + +# Refresh Finder caches (QuickLook/icon services). +opt_cache_refresh() { + local total_cache_size=0 + + if [[ "${MO_DEBUG:-}" == "1" ]]; then + debug_operation_start "Finder Cache Refresh" "Refresh QuickLook thumbnails and icon services" + debug_operation_detail "Method" "Remove cache files and rebuild via qlmanage" + debug_operation_detail "Expected outcome" "Faster Finder preview generation, fixed icon display issues" + debug_risk_level "LOW" "Caches are automatically rebuilt" + + local -a cache_targets=( + "$HOME/Library/Caches/com.apple.QuickLook.thumbnailcache" + "$HOME/Library/Caches/com.apple.iconservices.store" + "$HOME/Library/Caches/com.apple.iconservices" + ) + + debug_operation_detail "Files to be removed" "" + for target_path in "${cache_targets[@]}"; do + if [[ -e "$target_path" ]]; then + local size_kb + size_kb=$(get_path_size_kb "$target_path" 2> /dev/null || echo "0") + local size_human="unknown" + if [[ "$size_kb" -gt 0 ]]; then + size_human=$(bytes_to_human "$((size_kb * 1024))") + fi + debug_file_action " Will remove" "$target_path" "$size_human" "" + fi + done + fi + + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + qlmanage -r cache > /dev/null 2>&1 || true + qlmanage -r > /dev/null 2>&1 || true + fi + + local -a cache_targets=( + "$HOME/Library/Caches/com.apple.QuickLook.thumbnailcache" + "$HOME/Library/Caches/com.apple.iconservices.store" + "$HOME/Library/Caches/com.apple.iconservices" + ) + + for target_path in "${cache_targets[@]}"; do + if [[ -e "$target_path" ]]; then + if ! should_protect_path "$target_path"; then + local size_kb + size_kb=$(get_path_size_kb "$target_path" 2> /dev/null || echo "0") + if [[ "$size_kb" =~ ^[0-9]+$ ]]; then + total_cache_size=$((total_cache_size + size_kb)) + fi + safe_remove "$target_path" true > /dev/null 2>&1 || true + fi + fi + done + + export OPTIMIZE_CACHE_CLEANED_KB="${total_cache_size}" + opt_msg "QuickLook thumbnails refreshed" + opt_msg "Icon services cache rebuilt" +} + +# Removed: opt_maintenance_scripts - macOS handles log rotation automatically via launchd + +# Removed: opt_radio_refresh - Interrupts active user connections (WiFi, Bluetooth), degrading UX + +# Old saved states cleanup. +opt_saved_state_cleanup() { + if [[ "${MO_DEBUG:-}" == "1" ]]; then + debug_operation_start "App Saved State Cleanup" "Remove old application saved states" + debug_operation_detail "Method" "Find and remove .savedState folders older than $MOLE_SAVED_STATE_AGE_DAYS days" + debug_operation_detail "Location" "$HOME/Library/Saved Application State" + debug_operation_detail "Expected outcome" "Reduced disk usage, apps start with clean state" + debug_risk_level "LOW" "Old saved states, apps will create new ones" + fi + + local state_dir="$HOME/Library/Saved Application State" + + if [[ -d "$state_dir" ]]; then + while IFS= read -r -d '' state_path; do + if should_protect_path "$state_path"; then + continue + fi + safe_remove "$state_path" true > /dev/null 2>&1 || true + done < <(command find "$state_dir" -type d -name "*.savedState" -mtime "+$MOLE_SAVED_STATE_AGE_DAYS" -print0 2> /dev/null) + fi + + opt_msg "App saved states optimized" +} + +# Removed: opt_swap_cleanup - Direct virtual memory operations pose system crash risk + +# Removed: opt_startup_cache - Modern macOS has no such mechanism + +# Removed: opt_local_snapshots - Deletes user Time Machine recovery points, breaks backup continuity + +opt_fix_broken_configs() { + local spinner_started="false" + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking preferences..." + spinner_started="true" + fi + + local broken_prefs=$(fix_broken_preferences) + + if [[ "$spinner_started" == "true" ]]; then + stop_inline_spinner + fi + + export OPTIMIZE_CONFIGS_REPAIRED="${broken_prefs}" + if [[ $broken_prefs -gt 0 ]]; then + opt_msg "Repaired $broken_prefs corrupted preference files" + else + opt_msg "All preference files valid" + fi +} + +# DNS cache refresh. +opt_network_optimization() { + if [[ "${MO_DEBUG:-}" == "1" ]]; then + debug_operation_start "Network Optimization" "Refresh DNS cache and restart mDNSResponder" + debug_operation_detail "Method" "Flush DNS cache via dscacheutil and killall mDNSResponder" + debug_operation_detail "Expected outcome" "Faster DNS resolution, fixed network connectivity issues" + debug_risk_level "LOW" "DNS cache is automatically rebuilt" + fi + + if [[ "${MOLE_DNS_FLUSHED:-0}" == "1" ]]; then + opt_msg "DNS cache already refreshed" + opt_msg "mDNSResponder already restarted" + return 0 + fi + + if flush_dns_cache; then + opt_msg "DNS cache refreshed" + opt_msg "mDNSResponder restarted" + else + echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to refresh DNS cache" + fi +} + +# SQLite vacuum for Mail/Messages/Safari (safety checks applied). +opt_sqlite_vacuum() { + if [[ "${MO_DEBUG:-}" == "1" ]]; then + debug_operation_start "Database Optimization" "Vacuum SQLite databases for Mail, Safari, and Messages" + debug_operation_detail "Method" "Run VACUUM command on databases after integrity check" + debug_operation_detail "Safety checks" "Skip if apps are running, verify integrity first, 20s timeout" + debug_operation_detail "Expected outcome" "Reduced database size, faster app performance" + debug_risk_level "LOW" "Only optimizes databases, does not delete data" + fi + + if ! command -v sqlite3 > /dev/null 2>&1; then + echo -e " ${GRAY}-${NC} Database optimization already optimal, sqlite3 unavailable" + return 0 + fi + + local -a busy_apps=() + local -a check_apps=("Mail" "Safari" "Messages") + local app + for app in "${check_apps[@]}"; do + if pgrep -x "$app" > /dev/null 2>&1; then + busy_apps+=("$app") + fi + done + + if [[ ${#busy_apps[@]} -gt 0 ]]; then + echo -e " ${YELLOW}${ICON_WARNING}${NC} Close these apps before database optimization: ${busy_apps[*]}" + return 0 + fi + + local spinner_started="false" + if [[ "${MOLE_DRY_RUN:-0}" != "1" && -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Optimizing databases..." + spinner_started="true" + fi + + local -a db_paths=( + "$HOME/Library/Mail/V*/MailData/Envelope Index*" + "$HOME/Library/Messages/chat.db" + "$HOME/Library/Safari/History.db" + "$HOME/Library/Safari/TopSites.db" + ) + + local vacuumed=0 + local timed_out=0 + local failed=0 + local skipped=0 + + for pattern in "${db_paths[@]}"; do + while IFS= read -r db_file; do + [[ ! -f "$db_file" ]] && continue + [[ "$db_file" == *"-wal" || "$db_file" == *"-shm" ]] && continue + + should_protect_path "$db_file" && continue + + if ! file "$db_file" 2> /dev/null | grep -q "SQLite"; then + continue + fi + + # Skip large DBs (>100MB). + local file_size + file_size=$(get_file_size "$db_file") + if [[ "$file_size" -gt "$MOLE_SQLITE_MAX_SIZE" ]]; then + skipped=$((skipped + 1)) + continue + fi + + # Skip if freelist is tiny (already compact). + local page_info="" + page_info=$(run_with_timeout 5 sqlite3 "$db_file" "PRAGMA page_count; PRAGMA freelist_count;" 2> /dev/null || echo "") + local page_count="" + local freelist_count="" + page_count=$(echo "$page_info" | awk 'NR==1 {print $1}' 2> /dev/null || echo "") + freelist_count=$(echo "$page_info" | awk 'NR==2 {print $1}' 2> /dev/null || echo "") + if [[ "$page_count" =~ ^[0-9]+$ && "$freelist_count" =~ ^[0-9]+$ && "$page_count" -gt 0 ]]; then + if ((freelist_count * 100 < page_count * 5)); then + skipped=$((skipped + 1)) + continue + fi + fi + + # Verify integrity before VACUUM. + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + local integrity_check="" + set +e + integrity_check=$(run_with_timeout 10 sqlite3 "$db_file" "PRAGMA integrity_check;" 2> /dev/null) + local integrity_status=$? + set -e + + if [[ $integrity_status -ne 0 ]] || ! echo "$integrity_check" | grep -q "ok"; then + skipped=$((skipped + 1)) + continue + fi + fi + + local exit_code=0 + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + set +e + run_with_timeout 20 sqlite3 "$db_file" "VACUUM;" 2> /dev/null + exit_code=$? + set -e + + if [[ $exit_code -eq 0 ]]; then + vacuumed=$((vacuumed + 1)) + elif [[ $exit_code -eq 124 ]]; then + timed_out=$((timed_out + 1)) + else + failed=$((failed + 1)) + fi + else + vacuumed=$((vacuumed + 1)) + fi + done < <(compgen -G "$pattern" || true) + done + + if [[ "$spinner_started" == "true" ]]; then + stop_inline_spinner + fi + + export OPTIMIZE_DATABASES_COUNT="${vacuumed}" + if [[ $vacuumed -gt 0 ]]; then + opt_msg "Optimized $vacuumed databases for Mail, Safari, Messages" + elif [[ $timed_out -eq 0 && $failed -eq 0 ]]; then + opt_msg "All databases already optimized" + else + echo -e " ${YELLOW}${ICON_WARNING}${NC} Database optimization incomplete" + fi + + if [[ $skipped -gt 0 ]]; then + opt_msg "Already optimal for $skipped databases" + fi + + if [[ $timed_out -gt 0 ]]; then + echo -e " ${YELLOW}${ICON_WARNING}${NC} Timed out on $timed_out databases" + fi + + if [[ $failed -gt 0 ]]; then + echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed on $failed databases" + fi +} + +# LaunchServices rebuild ("Open with" issues). +opt_launch_services_rebuild() { + if [[ "${MO_DEBUG:-}" == "1" ]]; then + debug_operation_start "LaunchServices Rebuild" "Rebuild LaunchServices database" + debug_operation_detail "Method" "Run lsregister -gc then force rescan with -r -f on local, user, and system domains" + debug_operation_detail "Purpose" "Fix \"Open with\" menu issues, file associations, and stale app metadata" + debug_operation_detail "Expected outcome" "Correct app associations, fixed duplicate entries, fewer stale app listings" + debug_risk_level "LOW" "Database is automatically rebuilt" + fi + + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Repairing LaunchServices..." + fi + + local lsregister + lsregister=$(get_lsregister_path) + + if [[ -n "$lsregister" ]]; then + local success=0 + + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + set +e + "$lsregister" -gc > /dev/null 2>&1 || true + "$lsregister" -r -f -domain local -domain user -domain system > /dev/null 2>&1 + success=$? + if [[ $success -ne 0 ]]; then + "$lsregister" -r -f -domain local -domain user > /dev/null 2>&1 + success=$? + fi + set -e + else + success=0 + fi + + if [[ -t 1 ]]; then + stop_inline_spinner + fi + + if [[ $success -eq 0 ]]; then + opt_msg "LaunchServices repaired" + opt_msg "File associations refreshed" + else + echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to rebuild LaunchServices" + fi + else + if [[ -t 1 ]]; then + stop_inline_spinner + fi + echo -e " ${YELLOW}${ICON_WARNING}${NC} lsregister not found" + fi +} + +# Font cache rebuild. +opt_font_cache_rebuild() { + if [[ "${MO_DEBUG:-}" == "1" ]]; then + debug_operation_start "Font Cache Rebuild" "Clear and rebuild font cache" + debug_operation_detail "Method" "Run atsutil databases -remove" + debug_operation_detail "Safety checks" "Skip when browsers are running to avoid cache rebuild conflicts" + debug_operation_detail "Expected outcome" "Fixed font display issues, removed corrupted font cache" + debug_risk_level "LOW" "System automatically rebuilds font database" + fi + + local success=false + + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + # Some browsers (notably Firefox) can keep stale GPU/text caches in /var/folders if + # system font databases are reset while browser/helper processes are still running. + local -a running_browsers=() + if pgrep -if "Firefox|org\\.mozilla\\.firefox|firefox-gpu-helper" > /dev/null 2>&1; then + running_browsers+=("Firefox") + fi + + local browser_name + local -a browser_checks=( + "Safari" + "Google Chrome" + "Chromium" + "Brave Browser" + "Microsoft Edge" + "Arc" + "Opera" + "Vivaldi" + "Zen Browser" + "Helium" + ) + for browser_name in "${browser_checks[@]}"; do + if pgrep -ix "$browser_name" > /dev/null 2>&1; then + running_browsers+=("$browser_name") + fi + done + + if [[ ${#running_browsers[@]} -gt 0 ]]; then + local running_list + running_list=$(printf "%s, " "${running_browsers[@]}") + running_list="${running_list%, }" + echo -e " ${YELLOW}${ICON_WARNING}${NC} Skipped font cache rebuild because browsers are running: ${running_list}" + echo -e " ${GRAY}${ICON_REVIEW}${NC} ${GRAY}Quit browsers completely, then rerun optimize if font issues persist${NC}" + return 0 + fi + + if sudo atsutil databases -remove > /dev/null 2>&1; then + success=true + fi + else + success=true + fi + + if [[ "$success" == "true" ]]; then + opt_msg "Font cache cleared" + opt_msg "System will rebuild font database automatically" + else + echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to clear font cache" + fi +} + +# Removed high-risk optimizations: +# - opt_startup_items_cleanup: Risk of deleting legitimate app helpers +# - opt_dyld_cache_update: Low benefit, time-consuming, auto-managed by macOS +# - opt_system_services_refresh: Risk of data loss when killing system services + +# Memory pressure relief. +opt_memory_pressure_relief() { + if [[ "${MO_DEBUG:-}" == "1" ]]; then + debug_operation_start "Memory Pressure Relief" "Release inactive memory if pressure is high" + debug_operation_detail "Method" "Run purge command to clear inactive memory" + debug_operation_detail "Condition" "Only runs if memory pressure is warning/critical" + debug_operation_detail "Expected outcome" "More available memory, improved responsiveness" + debug_risk_level "LOW" "Safe system command, does not affect active processes" + fi + + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + if ! is_memory_pressure_high; then + opt_msg "Memory pressure already optimal" + return 0 + fi + + if sudo purge > /dev/null 2>&1; then + opt_msg "Inactive memory released" + opt_msg "System responsiveness improved" + else + echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to release memory pressure" + fi + else + opt_msg "Inactive memory released" + opt_msg "System responsiveness improved" + fi +} + +# Network stack reset (route + ARP). +opt_network_stack_optimize() { + local route_flushed="false" + local arp_flushed="false" + + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + local route_ok=true + local dns_ok=true + + if ! route -n get default > /dev/null 2>&1; then + route_ok=false + fi + if ! dscacheutil -q host -a name "example.com" > /dev/null 2>&1; then + dns_ok=false + fi + + if [[ "$route_ok" == "true" && "$dns_ok" == "true" ]]; then + opt_msg "Network stack already optimal" + return 0 + fi + + if sudo route -n flush > /dev/null 2>&1; then + route_flushed="true" + fi + + if sudo arp -a -d > /dev/null 2>&1; then + arp_flushed="true" + fi + else + route_flushed="true" + arp_flushed="true" + fi + + if [[ "$route_flushed" == "true" ]]; then + opt_msg "Network routing table refreshed" + fi + if [[ "$arp_flushed" == "true" ]]; then + opt_msg "ARP cache cleared" + else + if [[ "$route_flushed" == "true" ]]; then + return 0 + fi + echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to optimize network stack" + fi +} + +# User directory permissions repair. +opt_disk_permissions_repair() { + if [[ "${MO_DEBUG:-}" == "1" ]]; then + debug_operation_start "Disk Permissions Repair" "Reset user directory permissions" + debug_operation_detail "Method" "Run diskutil resetUserPermissions on user home directory" + debug_operation_detail "Condition" "Only runs if permissions issues are detected" + debug_operation_detail "Expected outcome" "Fixed file access issues, correct ownership" + debug_risk_level "MEDIUM" "Requires sudo, modifies permissions" + fi + + local user_id + user_id=$(id -u) + + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + if ! needs_permissions_repair; then + opt_msg "User directory permissions already optimal" + return 0 + fi + + if [[ -t 1 ]]; then + start_inline_spinner "Repairing disk permissions..." + fi + + local success=false + if sudo diskutil resetUserPermissions / "$user_id" > /dev/null 2>&1; then + success=true + fi + + if [[ -t 1 ]]; then + stop_inline_spinner + fi + + if [[ "$success" == "true" ]]; then + opt_msg "User directory permissions repaired" + opt_msg "File access issues resolved" + else + echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to repair permissions, may not be needed" + fi + else + opt_msg "User directory permissions repaired" + opt_msg "File access issues resolved" + fi +} + +# Bluetooth reset (skip if HID/audio active). +opt_bluetooth_reset() { + if [[ "${MO_DEBUG:-}" == "1" ]]; then + debug_operation_start "Bluetooth Reset" "Restart Bluetooth daemon" + debug_operation_detail "Method" "Kill bluetoothd daemon (auto-restarts)" + debug_operation_detail "Safety" "Skips if active Bluetooth keyboard/mouse/audio detected" + debug_operation_detail "Expected outcome" "Fixed Bluetooth connectivity issues" + debug_risk_level "LOW" "Daemon auto-restarts, connections auto-reconnect" + fi + + local spinner_started="false" + if [[ -t 1 ]]; then + MOLE_SPINNER_PREFIX=" " start_inline_spinner "Checking Bluetooth..." + spinner_started="true" + fi + + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + if has_bluetooth_hid_connected; then + if [[ "$spinner_started" == "true" ]]; then + stop_inline_spinner + fi + opt_msg "Bluetooth already optimal" + return 0 + fi + + local bt_audio_active=false + + local audio_info + audio_info=$(system_profiler SPAudioDataType 2> /dev/null || echo "") + + local default_output + default_output=$(echo "$audio_info" | awk '/Default Output Device: Yes/,/^$/' 2> /dev/null || echo "") + + if echo "$default_output" | grep -qi "Transport:.*Bluetooth"; then + bt_audio_active=true + fi + + if [[ "$bt_audio_active" == "false" ]]; then + if system_profiler SPBluetoothDataType 2> /dev/null | grep -q "Connected: Yes"; then + local -a media_apps=("Music" "Spotify" "VLC" "QuickTime Player" "TV" "Podcasts" "Safari" "Google Chrome" "Chrome" "Firefox" "Arc" "IINA" "mpv") + for app in "${media_apps[@]}"; do + if pgrep -x "$app" > /dev/null 2>&1; then + bt_audio_active=true + break + fi + done + fi + fi + + if [[ "$bt_audio_active" == "true" ]]; then + if [[ "$spinner_started" == "true" ]]; then + stop_inline_spinner + fi + opt_msg "Bluetooth already optimal" + return 0 + fi + + if sudo pkill -TERM bluetoothd > /dev/null 2>&1; then + sleep 1 + if pgrep -x bluetoothd > /dev/null 2>&1; then + sudo pkill -KILL bluetoothd > /dev/null 2>&1 || true + fi + if [[ "$spinner_started" == "true" ]]; then + stop_inline_spinner + fi + opt_msg "Bluetooth module restarted" + opt_msg "Connectivity issues resolved" + else + if [[ "$spinner_started" == "true" ]]; then + stop_inline_spinner + fi + opt_msg "Bluetooth already optimal" + fi + else + if [[ "$spinner_started" == "true" ]]; then + stop_inline_spinner + fi + opt_msg "Bluetooth module restarted" + opt_msg "Connectivity issues resolved" + fi +} + +# Spotlight index check/rebuild (only if slow). +opt_spotlight_index_optimize() { + local spotlight_status + spotlight_status=$(mdutil -s / 2> /dev/null || echo "") + + if echo "$spotlight_status" | grep -qi "Indexing disabled"; then + echo -e " ${GRAY}${ICON_EMPTY}${NC} Spotlight indexing is disabled" + return 0 + fi + + if echo "$spotlight_status" | grep -qi "Indexing enabled" && ! echo "$spotlight_status" | grep -qi "Indexing and searching disabled"; then + local slow_count=0 + local test_start test_end test_duration + for _ in 1 2; do + test_start=$(get_epoch_seconds) + mdfind "kMDItemFSName == 'Applications'" > /dev/null 2>&1 || true + test_end=$(get_epoch_seconds) + test_duration=$((test_end - test_start)) + if [[ $test_duration -gt 3 ]]; then + slow_count=$((slow_count + 1)) + fi + sleep 1 + done + + if [[ $slow_count -ge 2 ]]; then + if ! is_ac_power; then + opt_msg "Spotlight index already optimal" + return 0 + fi + + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + echo -e " ${BLUE}${ICON_INFO}${NC} Spotlight search is slow, rebuilding index, may take 1-2 hours" + if sudo mdutil -E / > /dev/null 2>&1; then + opt_msg "Spotlight index rebuild started" + echo -e " ${GRAY}Indexing will continue in background${NC}" + else + echo -e " ${YELLOW}${ICON_WARNING}${NC} Failed to rebuild Spotlight index" + fi + else + opt_msg "Spotlight index rebuild started" + fi + else + opt_msg "Spotlight index already optimal" + fi + else + opt_msg "Spotlight index verified" + fi +} + +# Dock cache refresh. +opt_dock_refresh() { + local dock_support="$HOME/Library/Application Support/Dock" + local refreshed=false + + if [[ -d "$dock_support" ]]; then + while IFS= read -r db_file; do + if [[ -f "$db_file" ]]; then + safe_remove "$db_file" true > /dev/null 2>&1 && refreshed=true + fi + done < <(command find "$dock_support" -name "*.db" -type f 2> /dev/null || true) + fi + + local dock_plist="$HOME/Library/Preferences/com.apple.dock.plist" + if [[ -f "$dock_plist" ]]; then + touch "$dock_plist" 2> /dev/null || true + fi + + if [[ "${MOLE_DRY_RUN:-0}" != "1" ]]; then + killall Dock 2> /dev/null || true + fi + + if [[ "$refreshed" == "true" ]]; then + opt_msg "Dock cache cleared" + fi + opt_msg "Dock refreshed" +} + +# Dispatch optimization by action name. +execute_optimization() { + local action="$1" + local path="${2:-}" + + case "$action" in + system_maintenance) opt_system_maintenance ;; + cache_refresh) opt_cache_refresh ;; + saved_state_cleanup) opt_saved_state_cleanup ;; + fix_broken_configs) opt_fix_broken_configs ;; + network_optimization) opt_network_optimization ;; + sqlite_vacuum) opt_sqlite_vacuum ;; + launch_services_rebuild) opt_launch_services_rebuild ;; + font_cache_rebuild) opt_font_cache_rebuild ;; + dock_refresh) opt_dock_refresh ;; + memory_pressure_relief) opt_memory_pressure_relief ;; + network_stack_optimize) opt_network_stack_optimize ;; + disk_permissions_repair) opt_disk_permissions_repair ;; + bluetooth_reset) opt_bluetooth_reset ;; + spotlight_index_optimize) opt_spotlight_index_optimize ;; + *) + echo -e "${YELLOW}${ICON_ERROR}${NC} Unknown action: $action" + return 1 + ;; + esac +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/ui/app_selector.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/ui/app_selector.sh new file mode 100755 index 0000000..add9015 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/ui/app_selector.sh @@ -0,0 +1,203 @@ +#!/bin/bash +# App selection functionality + +set -euo pipefail + +# Note: get_display_width() is now defined in lib/core/ui.sh + +# Format app info for display +format_app_display() { + local display_name="$1" size="$2" last_used="$3" + + # Use common function from ui.sh to format last used time + local compact_last_used + compact_last_used=$(format_last_used_summary "$last_used") + if [[ -z "$compact_last_used" || "$compact_last_used" == "Never" ]]; then + compact_last_used="Unknown" + fi + + # Format size + local size_str="N/A" + [[ "$size" != "0" && "$size" != "" && "$size" != "Unknown" ]] && size_str="$size" + + # Calculate available width for app name based on terminal width + # Accept pre-calculated max_name_width (5th param) to avoid recalculation in loops + local terminal_width="${4:-$(tput cols 2> /dev/null || echo 80)}" + local max_name_width="${5:-}" + local available_width + + if [[ -n "$max_name_width" ]]; then + # Use pre-calculated width from caller + available_width=$max_name_width + else + # Fallback: calculate it (slower, but works for standalone calls) + # Fixed elements: " ○ " (4) + " " (1) + size (9) + " | " (3) + max_last (7) = 24 + local fixed_width=24 + available_width=$((terminal_width - fixed_width)) + + # Dynamic minimum for better spacing on wide terminals + local min_width=18 + if [[ $terminal_width -ge 120 ]]; then + min_width=48 + elif [[ $terminal_width -ge 100 ]]; then + min_width=38 + elif [[ $terminal_width -ge 80 ]]; then + min_width=25 + fi + + [[ $available_width -lt $min_width ]] && available_width=$min_width + [[ $available_width -gt 60 ]] && available_width=60 + fi + + # Truncate long names if needed (based on display width, not char count) + local truncated_name + truncated_name=$(truncate_by_display_width "$display_name" "$available_width") + + # Get actual display width after truncation + local current_display_width + current_display_width=$(get_display_width "$truncated_name") + + # Calculate padding needed + # Formula: char_count + (available_width - display_width) = padding to add + local char_count=${#truncated_name} + local padding_needed=$((available_width - current_display_width)) + local printf_width=$((char_count + padding_needed)) + + # Use dynamic column width with corrected padding + printf "%-*s %9s | %s" "$printf_width" "$truncated_name" "$size_str" "$compact_last_used" +} + +# Global variable to store selection result (bash 3.2 compatible) +MOLE_SELECTION_RESULT="" + +# Main app selection function +# shellcheck disable=SC2154 # apps_data is set by caller +select_apps_for_uninstall() { + if [[ ${#apps_data[@]} -eq 0 ]]; then + log_warning "No applications available for uninstallation" + return 1 + fi + + # Build menu options + # Show loading for large lists (formatting can be slow due to width calculations) + local app_count=${#apps_data[@]} + local terminal_width=$(tput cols 2> /dev/null || echo 80) + if [[ $app_count -gt 100 ]]; then + if [[ -t 2 ]]; then + printf "\rPreparing %d applications... " "$app_count" >&2 + fi + fi + + # Pre-scan to get actual max name width + local max_name_width=0 + for app_data in "${apps_data[@]}"; do + IFS='|' read -r _ _ display_name _ _ _ _ <<< "$app_data" + local name_width=$(get_display_width "$display_name") + [[ $name_width -gt $max_name_width ]] && max_name_width=$name_width + done + # Constrain based on terminal width: fixed=24, min varies by terminal width, max=60 + local fixed_width=24 + local available=$((terminal_width - fixed_width)) + + # Dynamic minimum: wider terminals get larger minimum for better spacing + local min_width=18 + if [[ $terminal_width -ge 120 ]]; then + min_width=48 # Wide terminals: very generous spacing + elif [[ $terminal_width -ge 100 ]]; then + min_width=38 # Medium-wide terminals: generous spacing + elif [[ $terminal_width -ge 80 ]]; then + min_width=25 # Standard terminals + fi + + [[ $max_name_width -lt $min_width ]] && max_name_width=$min_width + [[ $available -lt $max_name_width ]] && max_name_width=$available + [[ $max_name_width -gt 60 ]] && max_name_width=60 + + local -a menu_options=() + local epochs_csv="" + local sizekb_csv="" + local -a names_arr=() + local has_epoch_metadata=false + local has_size_metadata=false + local idx=0 + for app_data in "${apps_data[@]}"; do + IFS='|' read -r epoch _ display_name _ size last_used size_kb <<< "$app_data" + menu_options+=("$(format_app_display "$display_name" "$size" "$last_used" "$terminal_width" "$max_name_width")") + [[ "${epoch:-0}" =~ ^[0-9]+$ && "${epoch:-0}" -gt 0 ]] && has_epoch_metadata=true + [[ "${size_kb:-0}" =~ ^[0-9]+$ && "${size_kb:-0}" -gt 0 ]] && has_size_metadata=true + if [[ $idx -eq 0 ]]; then + epochs_csv="${epoch:-0}" + sizekb_csv="${size_kb:-0}" + else + epochs_csv+=",${epoch:-0}" + sizekb_csv+=",${size_kb:-0}" + fi + names_arr+=("$display_name") + idx=$((idx + 1)) + done + # Use newline separator for names (safe for names with commas) + local names_newline + names_newline=$(printf '%s\n' "${names_arr[@]}") + + # Clear loading message + if [[ $app_count -gt 100 ]]; then + if [[ -t 2 ]]; then + printf "\r\033[K" >&2 + fi + fi + + # Expose metadata for the paginated menu (optional inputs) + # - MOLE_MENU_META_EPOCHS: numeric last_used_epoch per item + # - MOLE_MENU_META_SIZEKB: numeric size in KB per item + # The menu will gracefully fallback if these are unset or malformed. + if [[ $has_epoch_metadata == true ]]; then + export MOLE_MENU_META_EPOCHS="$epochs_csv" + else + unset MOLE_MENU_META_EPOCHS + fi + if [[ $has_size_metadata == true ]]; then + export MOLE_MENU_META_SIZEKB="$sizekb_csv" + else + unset MOLE_MENU_META_SIZEKB + fi + export MOLE_MENU_FILTER_NAMES="$names_newline" + + # Use paginated menu - result will be stored in MOLE_SELECTION_RESULT + # Note: paginated_multi_select enters alternate screen and handles clearing + MOLE_SELECTION_RESULT="" + paginated_multi_select "Select Apps to Remove" "${menu_options[@]}" + local exit_code=$? + + # Clean env leakage for safety + unset MOLE_MENU_META_EPOCHS MOLE_MENU_META_SIZEKB MOLE_MENU_FILTER_NAMES + # leave MOLE_MENU_SORT_DEFAULT untouched if user set it globally + + if [[ $exit_code -ne 0 ]]; then + return 1 + fi + + if [[ -z "$MOLE_SELECTION_RESULT" ]]; then + echo "No apps selected" + return 1 + fi + + # Build selected apps array (global variable in bin/uninstall.sh) + selected_apps=() + + # Parse indices and build selected apps array + IFS=',' read -r -a indices_array <<< "$MOLE_SELECTION_RESULT" + + for idx in "${indices_array[@]}"; do + if [[ "$idx" =~ ^[0-9]+$ ]] && [[ $idx -ge 0 ]] && [[ $idx -lt ${#apps_data[@]} ]]; then + selected_apps+=("${apps_data[idx]}") + fi + done + + return 0 +} + +# Export function for external use +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + echo "This is a library file. Source it from other scripts." >&2 + exit 1 +fi diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/ui/menu_paginated.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/ui/menu_paginated.sh new file mode 100755 index 0000000..c241fc1 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/ui/menu_paginated.sh @@ -0,0 +1,884 @@ +#!/bin/bash +# Paginated menu with arrow key navigation + +set -euo pipefail + +# Terminal control functions +enter_alt_screen() { + if command -v tput > /dev/null 2>&1 && [[ -t 1 ]]; then + tput smcup 2> /dev/null || true + fi +} +leave_alt_screen() { + if command -v tput > /dev/null 2>&1 && [[ -t 1 ]]; then + tput rmcup 2> /dev/null || true + fi +} + +# Get terminal height with fallback +_pm_get_terminal_height() { + local height=0 + + # Try stty size first (most reliable, real-time) + # Use /dev/null | awk '{print $1}') + fi + + # Fallback to tput + if [[ -z "$height" || $height -le 0 ]]; then + if command -v tput > /dev/null 2>&1; then + height=$(tput lines 2> /dev/null || echo "24") + else + height=24 + fi + fi + + echo "$height" +} + +# Calculate dynamic items per page based on terminal height +_pm_calculate_items_per_page() { + local term_height=$(_pm_get_terminal_height) + # Reserved: header(1) + blank(1) + blank(1) + footer(1-2) = 4-5 rows + # Use 5 to be safe (leaves 1 row buffer when footer wraps to 2 lines) + local reserved=5 + local available=$((term_height - reserved)) + + # Ensure minimum and maximum bounds + if [[ $available -lt 1 ]]; then + echo 1 + elif [[ $available -gt 50 ]]; then + echo 50 + else + echo "$available" + fi +} + +# Parse CSV into newline list (Bash 3.2) +_pm_parse_csv_to_array() { + local csv="${1:-}" + if [[ -z "$csv" ]]; then + return 0 + fi + local IFS=',' + for _tok in $csv; do + printf "%s\n" "$_tok" + done +} + +# Main paginated multi-select menu function +paginated_multi_select() { + local title="$1" + shift + local -a items=("$@") + local external_alt_screen=false + if [[ "${MOLE_MANAGED_ALT_SCREEN:-}" == "1" || "${MOLE_MANAGED_ALT_SCREEN:-}" == "true" ]]; then + external_alt_screen=true + fi + + # Validation + if [[ ${#items[@]} -eq 0 ]]; then + echo "No items provided" >&2 + return 1 + fi + + local total_items=${#items[@]} + local items_per_page=$(_pm_calculate_items_per_page) + local cursor_pos=0 + local top_index=0 + local sort_mode="${MOLE_MENU_SORT_MODE:-${MOLE_MENU_SORT_DEFAULT:-date}}" # date|name|size + local sort_reverse="${MOLE_MENU_SORT_REVERSE:-false}" + local filter_text="" # Filter keyword + local filter_text_lower="" + + # Metadata (optional) + # epochs[i] -> last_used_epoch (numeric) for item i + # sizekb[i] -> size in KB (numeric) for item i + # filter_names[i] -> name for filtering (if not set, use items[i]) + local -a epochs=() + local -a sizekb=() + local -a filter_names=() + local has_metadata="false" + local has_filter_names="false" + if [[ -n "${MOLE_MENU_META_EPOCHS:-}" ]]; then + while IFS= read -r v; do epochs+=("${v:-0}"); done < <(_pm_parse_csv_to_array "$MOLE_MENU_META_EPOCHS") + has_metadata="true" + fi + if [[ -n "${MOLE_MENU_META_SIZEKB:-}" ]]; then + while IFS= read -r v; do sizekb+=("${v:-0}"); done < <(_pm_parse_csv_to_array "$MOLE_MENU_META_SIZEKB") + has_metadata="true" + fi + if [[ -n "${MOLE_MENU_FILTER_NAMES:-}" ]]; then + while IFS= read -r v; do filter_names+=("$v"); done <<< "$MOLE_MENU_FILTER_NAMES" + has_filter_names="true" + fi + + # If no metadata, force name sorting and disable sorting controls + if [[ "$has_metadata" == "false" && "$sort_mode" != "name" ]]; then + sort_mode="name" + fi + + # Index mappings + local -a orig_indices=() + local -a view_indices=() + local -a filter_targets_lower=() + local i + for ((i = 0; i < total_items; i++)); do + orig_indices[i]=$i + view_indices[i]=$i + local filter_target + if [[ $has_filter_names == true && -n "${filter_names[i]:-}" ]]; then + filter_target="${filter_names[i]}" + else + filter_target="${items[i]}" + fi + local filter_target_lower + filter_target_lower=$(printf "%s" "$filter_target" | LC_ALL=C tr '[:upper:]' '[:lower:]') + filter_targets_lower[i]="$filter_target_lower" + done + + local -a selected=() + local selected_count=0 # Cache selection count to avoid O(n) loops on every draw + + # Initialize selection array + for ((i = 0; i < total_items; i++)); do + selected[i]=false + done + + if [[ -n "${MOLE_PRESELECTED_INDICES:-}" ]]; then + local cleaned_preselect="${MOLE_PRESELECTED_INDICES//[[:space:]]/}" + local -a initial_indices=() + IFS=',' read -ra initial_indices <<< "$cleaned_preselect" + for idx in "${initial_indices[@]}"; do + if [[ "$idx" =~ ^[0-9]+$ && $idx -ge 0 && $idx -lt $total_items ]]; then + # Only count if not already selected (handles duplicates) + if [[ ${selected[idx]} != true ]]; then + selected[idx]=true + selected_count=$((selected_count + 1)) + fi + fi + done + fi + + # Preserve original TTY settings so we can restore them reliably + local original_stty="" + if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then + original_stty=$(stty -g 2> /dev/null || echo "") + fi + + restore_terminal() { + show_cursor + if [[ -n "${original_stty-}" ]]; then + stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || stty echo icanon 2> /dev/null || true + else + stty sane 2> /dev/null || stty echo icanon 2> /dev/null || true + fi + if [[ "${external_alt_screen:-false}" == false ]]; then + leave_alt_screen + fi + } + + # Cleanup function + cleanup() { + trap - EXIT INT TERM + unset MOLE_READ_KEY_FORCE_CHAR + export MOLE_MENU_SORT_MODE="${sort_mode:-name}" + export MOLE_MENU_SORT_REVERSE="${sort_reverse:-false}" + restore_terminal + } + + # Interrupt handler + # shellcheck disable=SC2329 + handle_interrupt() { + cleanup + exit 130 # Standard exit code for Ctrl+C + } + + trap cleanup EXIT + trap handle_interrupt INT TERM + + # Setup terminal - preserve interrupt character + stty -echo -icanon intr ^C 2> /dev/null || true + if [[ $external_alt_screen == false ]]; then + enter_alt_screen + # Clear screen once on entry to alt screen + printf "\033[2J\033[H" >&2 + else + printf "\033[H" >&2 + fi + hide_cursor + + # Helper functions + # shellcheck disable=SC2329 + print_line() { printf "\r\033[2K%s\n" "$1" >&2; } + + # Print footer lines wrapping only at separators + _print_wrapped_controls() { + local sep="$1" + shift + local -a segs=("$@") + + local cols="${COLUMNS:-}" + [[ -z "$cols" ]] && cols=$(tput cols 2> /dev/null || echo 80) + [[ "$cols" =~ ^[0-9]+$ ]] || cols=80 + + _strip_ansi_len() { + local text="$1" + local stripped + stripped=$(printf "%s" "$text" | LC_ALL=C awk '{gsub(/\033\[[0-9;]*[A-Za-z]/,""); print}' || true) + [[ -z "$stripped" ]] && stripped="$text" + printf "%d" "${#stripped}" + } + + local line="" s candidate + local clear_line=$'\r\033[2K' + for s in "${segs[@]}"; do + if [[ -z "$line" ]]; then + candidate="$s" + else + candidate="$line${sep}${s}" + fi + local candidate_len + candidate_len=$(_strip_ansi_len "$candidate") + [[ -z "$candidate_len" ]] && candidate_len=0 + if ((candidate_len > cols)); then + printf "%s%s\n" "$clear_line" "$line" >&2 + line="$s" + else + line="$candidate" + fi + done + printf "%s%s\n" "$clear_line" "$line" >&2 + } + + local sort_cache_key="" + local -a sorted_indices_cache=() + local filter_cache_key="" + local filter_cache_text_lower="" + local -a filter_cache_indices=() + + ensure_sorted_indices() { + local requested_key="${sort_mode}:${sort_reverse}:${has_metadata}" + if [[ "$requested_key" == "$sort_cache_key" && ${#sorted_indices_cache[@]} -gt 0 ]]; then + return + fi + + if [[ "$has_metadata" == "false" ]]; then + sorted_indices_cache=("${orig_indices[@]}") + sort_cache_key="$requested_key" + return + fi + + # Build sort key once; filtering should reuse this cached order. + local sort_key + if [[ "$sort_mode" == "date" ]]; then + # Date: ascending by default (oldest first) + sort_key="-k1,1n" + [[ "$sort_reverse" == "true" ]] && sort_key="-k1,1nr" + elif [[ "$sort_mode" == "size" ]]; then + # Size: descending by default (largest first) + sort_key="-k1,1nr" + [[ "$sort_reverse" == "true" ]] && sort_key="-k1,1n" + else + # Name: ascending by default (A to Z) + sort_key="-k1,1f" + [[ "$sort_reverse" == "true" ]] && sort_key="-k1,1fr" + fi + + local tmpfile + tmpfile=$(mktemp 2> /dev/null) || tmpfile="" + if [[ -n "$tmpfile" ]]; then + local k id + for id in "${orig_indices[@]}"; do + case "$sort_mode" in + date) k="${epochs[id]:-0}" ;; + size) k="${sizekb[id]:-0}" ;; + name | *) k="${items[id]}|${id}" ;; + esac + printf "%s\t%s\n" "$k" "$id" >> "$tmpfile" + done + + sorted_indices_cache=() + while IFS=$'\t' read -r _key _id; do + [[ -z "$_id" ]] && continue + sorted_indices_cache+=("$_id") + done < <(LC_ALL=C sort -t $'\t' $sort_key -- "$tmpfile" 2> /dev/null) + + rm -f "$tmpfile" + else + sorted_indices_cache=("${orig_indices[@]}") + fi + sort_cache_key="$requested_key" + } + + # Rebuild the view_indices applying filter over cached sort order + rebuild_view() { + ensure_sorted_indices + + if [[ -n "$filter_text_lower" ]]; then + local -a source_indices=() + if [[ "$filter_cache_key" == "$sort_cache_key" && + "$filter_text_lower" == "$filter_cache_text_lower"* && + ${#filter_cache_indices[@]} -gt 0 ]]; then + source_indices=("${filter_cache_indices[@]}") + else + if [[ ${#sorted_indices_cache[@]} -gt 0 ]]; then + source_indices=("${sorted_indices_cache[@]}") + else + source_indices=() + fi + fi + + view_indices=() + local id + for id in "${source_indices[@]}"; do + if [[ "${filter_targets_lower[id]:-}" == *"$filter_text_lower"* ]]; then + view_indices+=("$id") + fi + done + + filter_cache_key="$sort_cache_key" + filter_cache_text_lower="$filter_text_lower" + if [[ ${#view_indices[@]} -gt 0 ]]; then + filter_cache_indices=("${view_indices[@]}") + else + filter_cache_indices=() + fi + else + if [[ ${#sorted_indices_cache[@]} -gt 0 ]]; then + view_indices=("${sorted_indices_cache[@]}") + else + view_indices=() + fi + filter_cache_key="$sort_cache_key" + filter_cache_text_lower="" + if [[ ${#view_indices[@]} -gt 0 ]]; then + filter_cache_indices=("${view_indices[@]}") + else + filter_cache_indices=() + fi + fi + + # Clamp cursor into visible range + local visible_count=${#view_indices[@]} + local max_top + if [[ $visible_count -gt $items_per_page ]]; then + max_top=$((visible_count - items_per_page)) + else + max_top=0 + fi + [[ $top_index -gt $max_top ]] && top_index=$max_top + local current_visible=$((visible_count - top_index)) + [[ $current_visible -gt $items_per_page ]] && current_visible=$items_per_page + if [[ $cursor_pos -ge $current_visible ]]; then + cursor_pos=$((current_visible > 0 ? current_visible - 1 : 0)) + fi + [[ $cursor_pos -lt 0 ]] && cursor_pos=0 + } + + # Initial view (default sort) + rebuild_view + + render_item() { + # $1: visible row index (0..items_per_page-1 in current window) + # $2: is_current flag + local vrow=$1 is_current=$2 + local idx=$((top_index + vrow)) + local real="${view_indices[idx]:--1}" + [[ $real -lt 0 ]] && return + local checkbox="$ICON_EMPTY" + [[ ${selected[real]} == true ]] && checkbox="$ICON_SOLID" + + if [[ $is_current == true ]]; then + printf "\r\033[2K${CYAN}${ICON_ARROW} %s %s${NC}\n" "$checkbox" "${items[real]}" >&2 + else + printf "\r\033[2K %s %s\n" "$checkbox" "${items[real]}" >&2 + fi + } + + draw_header() { + printf "\033[1;1H" >&2 + if [[ -n "$filter_text" ]]; then + printf "\r\033[2K${PURPLE_BOLD}%s${NC} ${YELLOW}/ Filter: ${filter_text}_${NC} ${GRAY}(%d/%d)${NC}\n" "${title}" "${#view_indices[@]}" "$total_items" >&2 + elif [[ -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then + printf "\r\033[2K${PURPLE_BOLD}%s${NC} ${YELLOW}/ Filter: _ ${NC}${GRAY}(type to search)${NC}\n" "${title}" >&2 + else + printf "\r\033[2K${PURPLE_BOLD}%s${NC} ${GRAY}%d/%d selected${NC}\n" "${title}" "$selected_count" "$total_items" >&2 + fi + } + + # Handle filter character input (reduces code duplication) + # Returns 0 if character was handled, 1 if not in filter mode + handle_filter_char() { + local char="$1" + if [[ -z "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then + return 1 + fi + if [[ "$char" =~ ^[[:print:]]$ ]]; then + local char_lower + char_lower=$(printf "%s" "$char" | LC_ALL=C tr '[:upper:]' '[:lower:]') + filter_text+="$char" + filter_text_lower+="$char_lower" + rebuild_view + cursor_pos=0 + top_index=0 + need_full_redraw=true + fi + return 0 + } + + # Draw the complete menu + draw_menu() { + items_per_page=$(_pm_calculate_items_per_page) + local clear_line=$'\r\033[2K' + + printf "\033[H" >&2 + + draw_header + + # Visible slice + local visible_total=${#view_indices[@]} + if [[ $visible_total -eq 0 ]]; then + printf "${clear_line}No items available\n" >&2 + for ((i = 0; i < items_per_page; i++)); do + printf "${clear_line}\n" >&2 + done + printf "${clear_line}${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space | Enter | Q Exit${NC}\n" >&2 + printf "${clear_line}" >&2 + return + fi + + local visible_count=$((visible_total - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + [[ $visible_count -le 0 ]] && visible_count=1 + if [[ $cursor_pos -ge $visible_count ]]; then + cursor_pos=$((visible_count - 1)) + [[ $cursor_pos -lt 0 ]] && cursor_pos=0 + fi + + printf "${clear_line}\n" >&2 + + # Items for current window + local start_idx=$top_index + local end_idx=$((top_index + items_per_page - 1)) + [[ $end_idx -ge $visible_total ]] && end_idx=$((visible_total - 1)) + + for ((i = start_idx; i <= end_idx; i++)); do + [[ $i -lt 0 ]] && continue + local is_current=false + [[ $((i - start_idx)) -eq $cursor_pos ]] && is_current=true + render_item $((i - start_idx)) $is_current + done + + # Fill empty slots to clear previous content + local items_shown=$((end_idx - start_idx + 1)) + [[ $items_shown -lt 0 ]] && items_shown=0 + for ((i = items_shown; i < items_per_page; i++)); do + printf "${clear_line}\n" >&2 + done + + printf "${clear_line}\n" >&2 + + # Build sort status + local sort_label="" + case "$sort_mode" in + date) sort_label="Date" ;; + name) sort_label="Name" ;; + size) sort_label="Size" ;; + esac + local sort_status="${sort_label}" + + # Footer: single line with controls + local sep=" ${GRAY}|${NC} " + + # Helper to calculate display length without ANSI codes + _calc_len() { + local text="$1" + local stripped + stripped=$(printf "%s" "$text" | LC_ALL=C awk '{gsub(/\033\[[0-9;]*[A-Za-z]/,""); print}') + printf "%d" "${#stripped}" + } + + # Common menu items + local nav="${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN}${NC}" + local space_select="${GRAY}Space Select${NC}" + local enter="${GRAY}Enter${NC}" + local exit="${GRAY}Q Exit${NC}" + + local reverse_arrow="↑" + [[ "$sort_reverse" == "true" ]] && reverse_arrow="↓" + + local sort_ctrl="${GRAY}S ${sort_status}${NC}" + local order_ctrl="${GRAY}O ${reverse_arrow}${NC}" + local filter_ctrl="${GRAY}/ Filter${NC}" + + if [[ -n "$filter_text" ]]; then + local -a _segs_filter=("${GRAY}Backspace${NC}" "${GRAY}Ctrl+U Clear${NC}" "${GRAY}ESC Clear${NC}") + _print_wrapped_controls "$sep" "${_segs_filter[@]}" + elif [[ "$has_metadata" == "true" ]]; then + # With metadata: show sort controls + local term_width="${COLUMNS:-}" + [[ -z "$term_width" ]] && term_width=$(tput cols 2> /dev/null || echo 80) + [[ "$term_width" =~ ^[0-9]+$ ]] || term_width=80 + + # Full controls + local -a _segs=("$nav" "$space_select" "$enter" "$sort_ctrl" "$order_ctrl" "$filter_ctrl" "$exit") + + # Calculate width + local total_len=0 seg_count=${#_segs[@]} + for i in "${!_segs[@]}"; do + total_len=$((total_len + $(_calc_len "${_segs[i]}"))) + [[ $i -lt $((seg_count - 1)) ]] && total_len=$((total_len + 3)) + done + + # Level 1: Remove "Space Select" if too wide + if [[ $total_len -gt $term_width ]]; then + _segs=("$nav" "$enter" "$sort_ctrl" "$order_ctrl" "$filter_ctrl" "$exit") + + total_len=0 + seg_count=${#_segs[@]} + for i in "${!_segs[@]}"; do + total_len=$((total_len + $(_calc_len "${_segs[i]}"))) + [[ $i -lt $((seg_count - 1)) ]] && total_len=$((total_len + 3)) + done + + # Level 2: Remove sort label if still too wide + if [[ $total_len -gt $term_width ]]; then + _segs=("$nav" "$enter" "$order_ctrl" "$filter_ctrl" "$exit") + fi + fi + + _print_wrapped_controls "$sep" "${_segs[@]}" + else + # Without metadata: basic controls + local -a _segs_simple=("$nav" "$space_select" "$enter" "$filter_ctrl" "$exit") + _print_wrapped_controls "$sep" "${_segs_simple[@]}" + fi + printf "${clear_line}" >&2 + } + + # Track previous cursor position for incremental rendering + local prev_cursor_pos=$cursor_pos + local prev_top_index=$top_index + local need_full_redraw=true + + # Main interaction loop + while true; do + if [[ "$need_full_redraw" == "true" ]]; then + draw_menu + need_full_redraw=false + # Update tracking variables after full redraw + prev_cursor_pos=$cursor_pos + prev_top_index=$top_index + fi + + local key + key=$(read_key) + + case "$key" in + "QUIT") + if [[ -n "$filter_text" || -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then + filter_text="" + filter_text_lower="" + unset MOLE_READ_KEY_FORCE_CHAR + rebuild_view + cursor_pos=0 + top_index=0 + need_full_redraw=true + else + cleanup + return 1 + fi + ;; + "UP") + if [[ ${#view_indices[@]} -eq 0 ]]; then + : + elif [[ $cursor_pos -gt 0 ]]; then + local old_cursor=$cursor_pos + ((cursor_pos--)) + local new_cursor=$cursor_pos + + if [[ -n "$filter_text" || -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then + draw_header + fi + + local old_row=$((old_cursor + 3)) + local new_row=$((new_cursor + 3)) + + printf "\033[%d;1H" "$old_row" >&2 + render_item "$old_cursor" false + printf "\033[%d;1H" "$new_row" >&2 + render_item "$new_cursor" true + + printf "\033[%d;1H" "$((items_per_page + 4))" >&2 + + prev_cursor_pos=$cursor_pos + continue + elif [[ $top_index -gt 0 ]]; then + ((top_index--)) + + if [[ -n "$filter_text" || -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then + draw_header + fi + + local start_idx=$top_index + local end_idx=$((top_index + items_per_page - 1)) + local visible_total=${#view_indices[@]} + [[ $end_idx -ge $visible_total ]] && end_idx=$((visible_total - 1)) + + for ((i = start_idx; i <= end_idx; i++)); do + local row=$((i - start_idx + 3)) + printf "\033[%d;1H" "$row" >&2 + local is_current=false + [[ $((i - start_idx)) -eq $cursor_pos ]] && is_current=true + render_item $((i - start_idx)) $is_current + done + + printf "\033[%d;1H" "$((items_per_page + 4))" >&2 + + prev_cursor_pos=$cursor_pos + prev_top_index=$top_index + continue + fi + ;; + "DOWN") + if [[ ${#view_indices[@]} -eq 0 ]]; then + : + else + local absolute_index=$((top_index + cursor_pos)) + local last_index=$((${#view_indices[@]} - 1)) + if [[ $absolute_index -lt $last_index ]]; then + local visible_count=$((${#view_indices[@]} - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + + if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then + local old_cursor=$cursor_pos + cursor_pos=$((cursor_pos + 1)) + local new_cursor=$cursor_pos + + if [[ -n "$filter_text" || -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then + draw_header + fi + + local old_row=$((old_cursor + 3)) + local new_row=$((new_cursor + 3)) + + printf "\033[%d;1H" "$old_row" >&2 + render_item "$old_cursor" false + printf "\033[%d;1H" "$new_row" >&2 + render_item "$new_cursor" true + + printf "\033[%d;1H" "$((items_per_page + 4))" >&2 + + prev_cursor_pos=$cursor_pos + continue + elif [[ $((top_index + visible_count)) -lt ${#view_indices[@]} ]]; then + top_index=$((top_index + 1)) + visible_count=$((${#view_indices[@]} - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + if [[ $cursor_pos -ge $visible_count ]]; then + cursor_pos=$((visible_count - 1)) + fi + + if [[ -n "$filter_text" || -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then + draw_header + fi + + local start_idx=$top_index + local end_idx=$((top_index + items_per_page - 1)) + local visible_total=${#view_indices[@]} + [[ $end_idx -ge $visible_total ]] && end_idx=$((visible_total - 1)) + + for ((i = start_idx; i <= end_idx; i++)); do + local row=$((i - start_idx + 3)) + printf "\033[%d;1H" "$row" >&2 + local is_current=false + [[ $((i - start_idx)) -eq $cursor_pos ]] && is_current=true + render_item $((i - start_idx)) $is_current + done + + printf "\033[%d;1H" "$((items_per_page + 4))" >&2 + + prev_cursor_pos=$cursor_pos + prev_top_index=$top_index + continue + fi + fi + fi + ;; + "SPACE") + local idx=$((top_index + cursor_pos)) + if [[ $idx -lt ${#view_indices[@]} ]]; then + local real="${view_indices[idx]}" + if [[ ${selected[real]} == true ]]; then + selected[real]=false + ((selected_count--)) + else + selected[real]=true + selected_count=$((selected_count + 1)) + fi + + # Incremental update: only redraw header (for count) and current row + # Header is at row 1 + printf "\033[1;1H\033[2K${PURPLE_BOLD}%s${NC} ${GRAY}%d/%d selected${NC}\n" "${title}" "$selected_count" "$total_items" >&2 + + # Redraw current item row (+3: row 1=header, row 2=blank, row 3=first item) + local item_row=$((cursor_pos + 3)) + printf "\033[%d;1H" "$item_row" >&2 + render_item "$cursor_pos" true + + # Move cursor to footer to avoid visual artifacts (items + header + 2 blanks) + printf "\033[%d;1H" "$((items_per_page + 4))" >&2 + + continue # Skip full redraw + fi + ;; + "CHAR:s" | "CHAR:S") + if handle_filter_char "${key#CHAR:}"; then + : # Handled as filter input + elif [[ "$has_metadata" == "true" ]]; then + case "$sort_mode" in + date) sort_mode="name" ;; + name) sort_mode="size" ;; + size) sort_mode="date" ;; + esac + rebuild_view + need_full_redraw=true + fi + ;; + "CHAR:j") + if handle_filter_char "${key#CHAR:}"; then + : # Handled as filter input + elif [[ ${#view_indices[@]} -gt 0 ]]; then + local absolute_index=$((top_index + cursor_pos)) + local last_index=$((${#view_indices[@]} - 1)) + if [[ $absolute_index -lt $last_index ]]; then + local visible_count=$((${#view_indices[@]} - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then + cursor_pos=$((cursor_pos + 1)) + elif [[ $((top_index + visible_count)) -lt ${#view_indices[@]} ]]; then + top_index=$((top_index + 1)) + fi + need_full_redraw=true + fi + fi + ;; + "CHAR:k") + if handle_filter_char "${key#CHAR:}"; then + : # Handled as filter input + elif [[ ${#view_indices[@]} -gt 0 ]]; then + if [[ $cursor_pos -gt 0 ]]; then + ((cursor_pos--)) + need_full_redraw=true + elif [[ $top_index -gt 0 ]]; then + ((top_index--)) + need_full_redraw=true + fi + fi + ;; + "CHAR:o" | "CHAR:O") + if handle_filter_char "${key#CHAR:}"; then + : # Handled as filter input + elif [[ "$has_metadata" == "true" ]]; then + if [[ "$sort_reverse" == "true" ]]; then + sort_reverse="false" + else + sort_reverse="true" + fi + rebuild_view + need_full_redraw=true + fi + ;; + "CHAR:/" | "CHAR:?") + if [[ -n "${MOLE_READ_KEY_FORCE_CHAR:-}" ]]; then + unset MOLE_READ_KEY_FORCE_CHAR + else + export MOLE_READ_KEY_FORCE_CHAR=1 + fi + need_full_redraw=true + ;; + "DELETE") + if [[ -n "$filter_text" ]]; then + filter_text="${filter_text%?}" + filter_text_lower="${filter_text_lower%?}" + if [[ -z "$filter_text" ]]; then + filter_text_lower="" + unset MOLE_READ_KEY_FORCE_CHAR + fi + rebuild_view + cursor_pos=0 + top_index=0 + need_full_redraw=true + fi + ;; + "CLEAR_LINE") + if [[ -n "$filter_text" ]]; then + filter_text="" + filter_text_lower="" + rebuild_view + cursor_pos=0 + top_index=0 + need_full_redraw=true + fi + ;; + "CHAR:"*) + handle_filter_char "${key#CHAR:}" || true + ;; + "ENTER") + # Smart Enter behavior + # 1. Check if any items are already selected + local has_selection=false + for ((i = 0; i < total_items; i++)); do + if [[ ${selected[i]} == true ]]; then + has_selection=true + break + fi + done + + # 2. If nothing selected, auto-select current item + if [[ $has_selection == false ]]; then + local idx=$((top_index + cursor_pos)) + if [[ $idx -lt ${#view_indices[@]} ]]; then + local real="${view_indices[idx]}" + selected[real]=true + selected_count=$((selected_count + 1)) + fi + fi + + # 3. Confirm and exit with current selections + local -a selected_indices=() + for ((i = 0; i < total_items; i++)); do + if [[ ${selected[i]} == true ]]; then + selected_indices+=("$i") + fi + done + + local final_result="" + if [[ ${#selected_indices[@]} -gt 0 ]]; then + local IFS=',' + final_result="${selected_indices[*]}" + fi + + trap - EXIT INT TERM + MOLE_SELECTION_RESULT="$final_result" + unset MOLE_READ_KEY_FORCE_CHAR + export MOLE_MENU_SORT_MODE="${sort_mode:-name}" + export MOLE_MENU_SORT_REVERSE="${sort_reverse:-false}" + restore_terminal + return 0 + ;; + esac + + # Drain any accumulated input after processing (e.g., mouse wheel events) + # This prevents buffered events from causing jumps, without blocking keyboard input + drain_pending_input + done +} + +# Export function for external use +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + echo "This is a library file. Source it from other scripts." >&2 + exit 1 +fi diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/ui/menu_simple.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/ui/menu_simple.sh new file mode 100755 index 0000000..0dd4607 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/ui/menu_simple.sh @@ -0,0 +1,318 @@ +#!/bin/bash +# Paginated menu with arrow key navigation + +set -euo pipefail + +# Terminal control functions +enter_alt_screen() { tput smcup 2> /dev/null || true; } +leave_alt_screen() { tput rmcup 2> /dev/null || true; } + +# Get terminal height with fallback +_ms_get_terminal_height() { + local height=0 + + # Try stty size first (most reliable, real-time) + # Use /dev/null | awk '{print $1}') + fi + + # Fallback to tput + if [[ -z "$height" || $height -le 0 ]]; then + if command -v tput > /dev/null 2>&1; then + height=$(tput lines 2> /dev/null || echo "24") + else + height=24 + fi + fi + + echo "$height" +} + +# Calculate dynamic items per page based on terminal height +_ms_calculate_items_per_page() { + local term_height=$(_ms_get_terminal_height) + # Layout: header(1) + spacing(1) + items + spacing(1) + footer(1) + clear(1) = 5 fixed lines + local reserved=6 # Increased to prevent header from being overwritten + local available=$((term_height - reserved)) + + # Ensure minimum and maximum bounds + if [[ $available -lt 1 ]]; then + echo 1 + elif [[ $available -gt 50 ]]; then + echo 50 + else + echo "$available" + fi +} + +# Main paginated multi-select menu function +paginated_multi_select() { + local title="$1" + shift + local -a items=("$@") + local external_alt_screen=false + if [[ "${MOLE_MANAGED_ALT_SCREEN:-}" == "1" || "${MOLE_MANAGED_ALT_SCREEN:-}" == "true" ]]; then + external_alt_screen=true + fi + + # Validation + if [[ ${#items[@]} -eq 0 ]]; then + echo "No items provided" >&2 + return 1 + fi + + local total_items=${#items[@]} + local items_per_page=$(_ms_calculate_items_per_page) + local cursor_pos=0 + local top_index=0 + local -a selected=() + + # Initialize selection array + for ((i = 0; i < total_items; i++)); do + selected[i]=false + done + + if [[ -n "${MOLE_PRESELECTED_INDICES:-}" ]]; then + local cleaned_preselect="${MOLE_PRESELECTED_INDICES//[[:space:]]/}" + local -a initial_indices=() + IFS=',' read -ra initial_indices <<< "$cleaned_preselect" + for idx in "${initial_indices[@]}"; do + if [[ "$idx" =~ ^[0-9]+$ && $idx -ge 0 && $idx -lt $total_items ]]; then + selected[idx]=true + fi + done + fi + + # Preserve original TTY settings so we can restore them reliably + local original_stty="" + if [[ -t 0 ]] && command -v stty > /dev/null 2>&1; then + original_stty=$(stty -g 2> /dev/null || echo "") + fi + + restore_terminal() { + show_cursor + if [[ -n "${original_stty-}" ]]; then + stty "${original_stty}" 2> /dev/null || stty sane 2> /dev/null || stty echo icanon 2> /dev/null || true + else + stty sane 2> /dev/null || stty echo icanon 2> /dev/null || true + fi + if [[ "${external_alt_screen:-false}" == false ]]; then + leave_alt_screen + fi + } + + # Cleanup function + cleanup() { + trap - EXIT INT TERM + restore_terminal + } + + # Interrupt handler + # shellcheck disable=SC2329 + handle_interrupt() { + cleanup + exit 130 # Standard exit code for Ctrl+C + } + + trap cleanup EXIT + trap handle_interrupt INT TERM + + # Setup terminal - preserve interrupt character + stty -echo -icanon intr ^C 2> /dev/null || true + if [[ $external_alt_screen == false ]]; then + enter_alt_screen + # Clear screen once on entry to alt screen + printf "\033[2J\033[H" >&2 + else + printf "\033[H" >&2 + fi + hide_cursor + + # Helper functions + # shellcheck disable=SC2329 + print_line() { printf "\r\033[2K%s\n" "$1" >&2; } + + render_item() { + local idx=$1 is_current=$2 + local checkbox="$ICON_EMPTY" + [[ ${selected[idx]} == true ]] && checkbox="$ICON_SOLID" + + if [[ $is_current == true ]]; then + printf "\r\033[2K${CYAN}${ICON_ARROW} %s %s${NC}\n" "$checkbox" "${items[idx]}" >&2 + else + printf "\r\033[2K %s %s\n" "$checkbox" "${items[idx]}" >&2 + fi + } + + # Draw the complete menu + draw_menu() { + # Recalculate items_per_page dynamically to handle window resize + items_per_page=$(_ms_calculate_items_per_page) + + # Move to home position without clearing (reduces flicker) + printf "\033[H" >&2 + + # Clear each line as we go instead of clearing entire screen + local clear_line="\r\033[2K" + + # Count selections for header display + local selected_count=0 + for ((i = 0; i < total_items; i++)); do + [[ ${selected[i]} == true ]] && selected_count=$((selected_count + 1)) + done + + # Header + printf "${clear_line}${PURPLE_BOLD}%s${NC} ${GRAY}%d/%d selected${NC}\n" "${title}" "$selected_count" "$total_items" >&2 + + if [[ $total_items -eq 0 ]]; then + printf "${clear_line}${GRAY}No items available${NC}\n" >&2 + printf "${clear_line}\n" >&2 + printf "${clear_line}${GRAY}Q${NC} Quit\n" >&2 + printf "${clear_line}" >&2 + return + fi + + if [[ $top_index -gt $((total_items - 1)) ]]; then + if [[ $total_items -gt $items_per_page ]]; then + top_index=$((total_items - items_per_page)) + else + top_index=0 + fi + fi + + local visible_count=$((total_items - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + [[ $visible_count -le 0 ]] && visible_count=1 + if [[ $cursor_pos -ge $visible_count ]]; then + cursor_pos=$((visible_count - 1)) + [[ $cursor_pos -lt 0 ]] && cursor_pos=0 + fi + + printf "${clear_line}\n" >&2 + + # Items for current window + local start_idx=$top_index + local end_idx=$((top_index + items_per_page - 1)) + [[ $end_idx -ge $total_items ]] && end_idx=$((total_items - 1)) + + for ((i = start_idx; i <= end_idx; i++)); do + [[ $i -lt 0 ]] && continue + local is_current=false + [[ $((i - start_idx)) -eq $cursor_pos ]] && is_current=true + render_item $i $is_current + done + + # Fill empty slots to clear previous content + local items_shown=$((end_idx - start_idx + 1)) + [[ $items_shown -lt 0 ]] && items_shown=0 + for ((i = items_shown; i < items_per_page; i++)); do + printf "${clear_line}\n" >&2 + done + + # Clear any remaining lines at bottom + printf "${clear_line}\n" >&2 + printf "${clear_line}${GRAY}${ICON_NAV_UP}${ICON_NAV_DOWN} | Space | Enter | Q Exit${NC}\n" >&2 + + # Clear one more line to ensure no artifacts + printf "${clear_line}" >&2 + } + + # Main interaction loop + while true; do + draw_menu + local key=$(read_key) + + case "$key" in + "QUIT") + cleanup + return 1 + ;; + "UP") + if [[ $total_items -eq 0 ]]; then + : + elif [[ $cursor_pos -gt 0 ]]; then + ((cursor_pos--)) + elif [[ $top_index -gt 0 ]]; then + ((top_index--)) + fi + ;; + "DOWN") + if [[ $total_items -eq 0 ]]; then + : + else + local absolute_index=$((top_index + cursor_pos)) + if [[ $absolute_index -lt $((total_items - 1)) ]]; then + local visible_count=$((total_items - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + + if [[ $cursor_pos -lt $((visible_count - 1)) ]]; then + cursor_pos=$((cursor_pos + 1)) + elif [[ $((top_index + visible_count)) -lt $total_items ]]; then + top_index=$((top_index + 1)) + visible_count=$((total_items - top_index)) + [[ $visible_count -gt $items_per_page ]] && visible_count=$items_per_page + if [[ $cursor_pos -ge $visible_count ]]; then + cursor_pos=$((visible_count - 1)) + fi + fi + fi + fi + ;; + "SPACE") + local idx=$((top_index + cursor_pos)) + if [[ $idx -lt $total_items ]]; then + if [[ ${selected[idx]} == true ]]; then + selected[idx]=false + else + selected[idx]=true + fi + fi + ;; + "ALL") + for ((i = 0; i < total_items; i++)); do + selected[i]=true + done + ;; + "NONE") + for ((i = 0; i < total_items; i++)); do + selected[i]=false + done + ;; + "ENTER") + # Store result in global variable instead of returning via stdout + local -a selected_indices=() + for ((i = 0; i < total_items; i++)); do + if [[ ${selected[i]} == true ]]; then + selected_indices+=("$i") + fi + done + + # Allow empty selection - don't auto-select cursor position + # This fixes the bug where unselecting all items would still select the last cursor position + local final_result="" + if [[ ${#selected_indices[@]} -gt 0 ]]; then + local IFS=',' + final_result="${selected_indices[*]}" + fi + + # Remove the trap to avoid cleanup on normal exit + trap - EXIT INT TERM + + # Store result in global variable + MOLE_SELECTION_RESULT="$final_result" + + # Manually cleanup terminal before returning + restore_terminal + + return 0 + ;; + esac + done +} + +# Export function for external use +if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then + echo "This is a library file. Source it from other scripts." >&2 + exit 1 +fi diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/uninstall/batch.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/uninstall/batch.sh new file mode 100755 index 0000000..8a22f9a --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/uninstall/batch.sh @@ -0,0 +1,808 @@ +#!/bin/bash + +set -euo pipefail + +# Ensure common.sh is loaded. +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)" +[[ -z "${MOLE_COMMON_LOADED:-}" ]] && source "$SCRIPT_DIR/lib/core/common.sh" + +# Load Homebrew cask support (provides get_brew_cask_name, brew_uninstall_cask) +[[ -f "$SCRIPT_DIR/lib/uninstall/brew.sh" ]] && source "$SCRIPT_DIR/lib/uninstall/brew.sh" + +# Batch uninstall with a single confirmation. + +get_lsregister_path() { + echo "/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister" +} + +is_uninstall_dry_run() { + [[ "${MOLE_DRY_RUN:-0}" == "1" ]] +} + +# High-performance sensitive data detection (pure Bash, no subprocess) +# Faster than grep for batch operations, especially when processing many apps +has_sensitive_data() { + local files="$1" + [[ -z "$files" ]] && return 1 + + while IFS= read -r file; do + [[ -z "$file" ]] && continue + + # Use Bash native pattern matching (faster than spawning grep) + case "$file" in + */.warp* | */.config/* | */themes/* | */settings/* | */User\ Data/* | \ + */.ssh/* | */.gnupg/* | */Documents/* | */Preferences/*.plist | \ + */Desktop/* | */Downloads/* | */Movies/* | */Music/* | */Pictures/* | \ + */.password* | */.token* | */.auth* | */keychain* | \ + */Passwords/* | */Accounts/* | */Cookies/* | \ + */.aws/* | */.docker/config.json | */.kube/* | \ + */credentials/* | */secrets/*) + return 0 # Found sensitive data + ;; + esac + done <<< "$files" + + return 1 # Not found +} + +# Decode and validate base64 file list (safe for set -e). +decode_file_list() { + local encoded="$1" + local app_name="$2" + local decoded + + # macOS uses -D, GNU uses -d. Always return 0 for set -e safety. + if ! decoded=$(printf '%s' "$encoded" | base64 -D 2> /dev/null); then + if ! decoded=$(printf '%s' "$encoded" | base64 -d 2> /dev/null); then + log_error "Failed to decode file list for $app_name" >&2 + echo "" + return 0 # Return success with empty string + fi + fi + + if [[ "$decoded" =~ $'\0' ]]; then + log_warning "File list for $app_name contains null bytes, rejecting" >&2 + echo "" + return 0 # Return success with empty string + fi + + while IFS= read -r line; do + if [[ -n "$line" && ! "$line" =~ ^/ ]]; then + log_warning "Invalid path in file list for $app_name: $line" >&2 + echo "" + return 0 # Return success with empty string + fi + done <<< "$decoded" + + echo "$decoded" + return 0 +} +# Note: find_app_files() and calculate_total_size() are in lib/core/common.sh. + +# Stop Launch Agents/Daemons for an app. +# Security: bundle_id is validated to be reverse-DNS format before use in find patterns +stop_launch_services() { + local bundle_id="$1" + local has_system_files="${2:-false}" + + if is_uninstall_dry_run; then + debug_log "[DRY RUN] Would unload launch services for bundle: $bundle_id" + return 0 + fi + + [[ -z "$bundle_id" || "$bundle_id" == "unknown" ]] && return 0 + + # Validate bundle_id format: must be reverse-DNS style (e.g., com.example.app) + # This prevents glob injection attacks if bundle_id contains special characters + if [[ ! "$bundle_id" =~ ^[a-zA-Z0-9][-a-zA-Z0-9]*(\.[a-zA-Z0-9][-a-zA-Z0-9]*)+$ ]]; then + debug_log "Invalid bundle_id format for LaunchAgent search: $bundle_id" + return 0 + fi + + if [[ -d ~/Library/LaunchAgents ]]; then + while IFS= read -r -d '' plist; do + launchctl unload "$plist" 2> /dev/null || true + done < <(find ~/Library/LaunchAgents -maxdepth 1 -name "${bundle_id}*.plist" -print0 2> /dev/null) + fi + + if [[ "$has_system_files" == "true" ]]; then + if [[ -d /Library/LaunchAgents ]]; then + while IFS= read -r -d '' plist; do + sudo launchctl unload "$plist" 2> /dev/null || true + done < <(find /Library/LaunchAgents -maxdepth 1 -name "${bundle_id}*.plist" -print0 2> /dev/null) + fi + if [[ -d /Library/LaunchDaemons ]]; then + while IFS= read -r -d '' plist; do + sudo launchctl unload "$plist" 2> /dev/null || true + done < <(find /Library/LaunchDaemons -maxdepth 1 -name "${bundle_id}*.plist" -print0 2> /dev/null) + fi + fi +} + +# Unregister app bundle from LaunchServices before deleting files. +# This helps remove stale app entries from Spotlight's app results list. +unregister_app_bundle() { + local app_path="$1" + + [[ -n "$app_path" && -e "$app_path" ]] || return 0 + [[ "$app_path" == *.app ]] || return 0 + + local lsregister + lsregister=$(get_lsregister_path) + [[ -x "$lsregister" ]] || return 0 + + [[ "${MOLE_DRY_RUN:-0}" == "1" ]] && return 0 + + set +e + "$lsregister" -u "$app_path" > /dev/null 2>&1 + set -e +} + +# Compact and rebuild LaunchServices after uninstall batch to clear stale app metadata. +refresh_launch_services_after_uninstall() { + local lsregister + lsregister=$(get_lsregister_path) + [[ -x "$lsregister" ]] || return 0 + + [[ "${MOLE_DRY_RUN:-0}" == "1" ]] && return 0 + + local success=0 + set +e + # Add 10s timeout to prevent hanging (gc is usually fast) + # run_with_timeout falls back to shell implementation if timeout command unavailable + run_with_timeout 10 "$lsregister" -gc > /dev/null 2>&1 || true + # Add 15s timeout for rebuild (can be slow on some systems) + run_with_timeout 15 "$lsregister" -r -f -domain local -domain user -domain system > /dev/null 2>&1 + success=$? + # 124 = timeout exit code (from run_with_timeout or timeout command) + if [[ $success -eq 124 ]]; then + debug_log "LaunchServices rebuild timed out, trying lighter version" + run_with_timeout 10 "$lsregister" -r -f -domain local -domain user > /dev/null 2>&1 + success=$? + elif [[ $success -ne 0 ]]; then + run_with_timeout 10 "$lsregister" -r -f -domain local -domain user > /dev/null 2>&1 + success=$? + fi + set -e + + [[ $success -eq 0 || $success -eq 124 ]] +} + +# Remove macOS Login Items for an app +remove_login_item() { + local app_name="$1" + local bundle_id="$2" + + if is_uninstall_dry_run; then + debug_log "[DRY RUN] Would remove login item: ${app_name:-$bundle_id}" + return 0 + fi + + # Skip if no identifiers provided + [[ -z "$app_name" && -z "$bundle_id" ]] && return 0 + + # Strip .app suffix if present (login items don't include it) + local clean_name="${app_name%.app}" + + # Remove from Login Items using index-based deletion (handles broken items) + if [[ -n "$clean_name" ]]; then + # Escape double quotes and backslashes for AppleScript + local escaped_name="${clean_name//\\/\\\\}" + escaped_name="${escaped_name//\"/\\\"}" + + osascript <<- EOF > /dev/null 2>&1 || true + tell application "System Events" + try + set itemCount to count of login items + -- Delete in reverse order to avoid index shifting + repeat with i from itemCount to 1 by -1 + try + set itemName to name of login item i + if itemName is "$escaped_name" then + delete login item i + end if + end try + end repeat + end try + end tell + EOF + fi +} + +# Remove files (handles symlinks, optional sudo). +# Security: All paths pass validate_path_for_deletion() before any deletion. +remove_file_list() { + local file_list="$1" + local use_sudo="${2:-false}" + local count=0 + + while IFS= read -r file; do + [[ -n "$file" && -e "$file" ]] || continue + + if ! validate_path_for_deletion "$file"; then + continue + fi + + if [[ -L "$file" ]]; then + safe_remove_symlink "$file" "$use_sudo" && ((++count)) || true + else + if [[ "$use_sudo" == "true" ]]; then + if is_uninstall_dry_run; then + debug_log "[DRY RUN] Would sudo remove: $file" + ((++count)) + else + safe_sudo_remove "$file" && ((++count)) || true + fi + else + safe_remove "$file" true && ((++count)) || true + fi + fi + done <<< "$file_list" + + echo "$count" +} + +# Batch uninstall with single confirmation. +batch_uninstall_applications() { + local total_size_freed=0 + + # shellcheck disable=SC2154 + if [[ ${#selected_apps[@]} -eq 0 ]]; then + log_warning "No applications selected for uninstallation" + return 0 + fi + + local old_trap_int old_trap_term + old_trap_int=$(trap -p INT) + old_trap_term=$(trap -p TERM) + + _cleanup_sudo_keepalive() { + if command -v stop_sudo_session > /dev/null 2>&1; then + stop_sudo_session + fi + } + + _restore_uninstall_traps() { + _cleanup_sudo_keepalive + if [[ -n "$old_trap_int" ]]; then + eval "$old_trap_int" + else + trap - INT + fi + if [[ -n "$old_trap_term" ]]; then + eval "$old_trap_term" + else + trap - TERM + fi + } + + # Trap to clean up spinner, sudo keepalive, and uninstall mode on interrupt + trap 'stop_inline_spinner 2>/dev/null; _cleanup_sudo_keepalive; unset MOLE_UNINSTALL_MODE; echo ""; _restore_uninstall_traps; return 130' INT TERM + + # Pre-scan: running apps, sudo needs, size. + local -a running_apps=() + local -a sudo_apps=() + local total_estimated_size=0 + local -a app_details=() + + # Cache current user outside loop + local current_user=$(whoami) + + if [[ -t 1 ]]; then start_inline_spinner "Scanning files..."; fi + for selected_app in "${selected_apps[@]}"; do + [[ -z "$selected_app" ]] && continue + IFS='|' read -r _ app_path app_name bundle_id _ _ <<< "$selected_app" + + # Check running app by bundle executable if available + local exec_name="" + local info_plist="$app_path/Contents/Info.plist" + if [[ -e "$info_plist" ]]; then + exec_name=$(defaults read "$info_plist" CFBundleExecutable 2> /dev/null || echo "") + fi + if pgrep -qx "${exec_name:-$app_name}" 2> /dev/null; then + running_apps+=("$app_name") + fi + + local cask_name="" is_brew_cask="false" + local resolved_path=$(readlink "$app_path" 2> /dev/null || echo "") + if [[ "$resolved_path" == */Caskroom/* ]]; then + # Extract cask name using bash parameter expansion (faster than sed) + local tmp="${resolved_path#*/Caskroom/}" + cask_name="${tmp%%/*}" + [[ -n "$cask_name" ]] && is_brew_cask="true" + elif command -v get_brew_cask_name > /dev/null 2>&1; then + local detected_cask + detected_cask=$(get_brew_cask_name "$app_path" 2> /dev/null || true) + if [[ -n "$detected_cask" ]]; then + cask_name="$detected_cask" + is_brew_cask="true" + fi + fi + + # Check if sudo is needed + local needs_sudo=false + local app_owner=$(get_file_owner "$app_path") + if [[ ! -w "$(dirname "$app_path")" ]] || + [[ "$app_owner" == "root" ]] || + [[ -n "$app_owner" && "$app_owner" != "$current_user" ]]; then + needs_sudo=true + fi + + local app_size_kb=$(get_path_size_kb "$app_path" || echo "0") + local related_files=$(find_app_files "$bundle_id" "$app_name" || true) + local diag_user + diag_user=$(get_diagnostic_report_paths_for_app "$app_path" "$app_name" "$HOME/Library/Logs/DiagnosticReports" || true) + [[ -n "$diag_user" ]] && related_files=$( + [[ -n "$related_files" ]] && echo "$related_files" + echo "$diag_user" + ) + local related_size_kb=$(calculate_total_size "$related_files" || echo "0") + # system_files is a newline-separated string, not an array. + # shellcheck disable=SC2178,SC2128 + local system_files=$(find_app_system_files "$bundle_id" "$app_name" || true) + local diag_system + diag_system=$(get_diagnostic_report_paths_for_app "$app_path" "$app_name" "/Library/Logs/DiagnosticReports" || true) + # shellcheck disable=SC2128 + local system_size_kb=$(calculate_total_size "$system_files" || echo "0") + local diag_system_size_kb=$(calculate_total_size "$diag_system" || echo "0") + local total_kb=$((app_size_kb + related_size_kb + system_size_kb + diag_system_size_kb)) + total_estimated_size=$((total_estimated_size + total_kb)) + + # shellcheck disable=SC2128 + if [[ -n "$system_files" || -n "$diag_system" ]]; then + needs_sudo=true + fi + + if [[ "$needs_sudo" == "true" ]]; then + sudo_apps+=("$app_name") + fi + + # Check for sensitive user data once. + local has_sensitive_data="false" + if has_sensitive_data "$related_files" 2> /dev/null; then + has_sensitive_data="true" + fi + + # Store details for later use (base64 keeps lists on one line). + local encoded_files + encoded_files=$(printf '%s' "$related_files" | base64 | tr -d '\n' || echo "") + local encoded_system_files + encoded_system_files=$(printf '%s' "$system_files" | base64 | tr -d '\n' || echo "") + local encoded_diag_system + encoded_diag_system=$(printf '%s' "$diag_system" | base64 | tr -d '\n' || echo "") + app_details+=("$app_name|$app_path|$bundle_id|$total_kb|$encoded_files|$encoded_system_files|$has_sensitive_data|$needs_sudo|$is_brew_cask|$cask_name|$encoded_diag_system") + done + if [[ -t 1 ]]; then stop_inline_spinner; fi + + local size_display=$(bytes_to_human "$((total_estimated_size * 1024))") + + echo -e "\n${PURPLE_BOLD}Files to be removed:${NC}" + + # Warn if brew cask apps are present. + local has_brew_cask=false + for detail in "${app_details[@]}"; do + IFS='|' read -r _ _ _ _ _ _ _ _ is_brew_cask_flag _ <<< "$detail" + [[ "$is_brew_cask_flag" == "true" ]] && has_brew_cask=true + done + + if [[ "$has_brew_cask" == "true" ]]; then + echo -e "${GRAY}${ICON_WARNING} Homebrew apps will be fully cleaned, --zap removes configs and data${NC}" + fi + + echo "" + + for detail in "${app_details[@]}"; do + IFS='|' read -r app_name app_path bundle_id total_kb encoded_files encoded_system_files has_sensitive_data needs_sudo_flag is_brew_cask cask_name encoded_diag_system <<< "$detail" + local app_size_display=$(bytes_to_human "$((total_kb * 1024))") + + local brew_tag="" + [[ "$is_brew_cask" == "true" ]] && brew_tag=" ${CYAN}[Brew]${NC}" + echo -e "${BLUE}${ICON_CONFIRM}${NC} ${app_name}${brew_tag} ${GRAY}, ${app_size_display}${NC}" + + # Show detailed file list for ALL apps (brew casks leave user data behind) + local related_files=$(decode_file_list "$encoded_files" "$app_name") + local system_files=$(decode_file_list "$encoded_system_files" "$app_name") + local diag_system_display + diag_system_display=$(decode_file_list "$encoded_diag_system" "$app_name") + [[ -n "$diag_system_display" ]] && system_files=$( + [[ -n "$system_files" ]] && echo "$system_files" + echo "$diag_system_display" + ) + + echo -e " ${GREEN}${ICON_SUCCESS}${NC} ${app_path/$HOME/~}" + + # Show all related files so users can fully review before deletion. + while IFS= read -r file; do + if [[ -n "$file" && -e "$file" ]]; then + echo -e " ${GREEN}${ICON_SUCCESS}${NC} ${file/$HOME/~}" + fi + done <<< "$related_files" + + # Show all system files so users can fully review before deletion. + while IFS= read -r file; do + if [[ -n "$file" && -e "$file" ]]; then + echo -e " ${BLUE}${ICON_WARNING}${NC} System: $file" + fi + done <<< "$system_files" + done + + # Confirmation before requesting sudo. + local app_total=${#selected_apps[@]} + local app_text="app" + [[ $app_total -gt 1 ]] && app_text="apps" + + echo "" + local removal_note="Remove ${app_total} ${app_text}" + [[ -n "$size_display" ]] && removal_note+=", ${size_display}" + if [[ ${#running_apps[@]} -gt 0 ]]; then + removal_note+=" ${YELLOW}[Running]${NC}" + fi + echo -ne "${PURPLE}${ICON_ARROW}${NC} ${removal_note} ${GREEN}Enter${NC} confirm, ${GRAY}ESC${NC} cancel: " + + drain_pending_input # Clean up any pending input before confirmation + IFS= read -r -s -n1 key || key="" + drain_pending_input # Clean up any escape sequence remnants + case "$key" in + $'\e' | q | Q) + echo "" + echo "" + _restore_uninstall_traps + return 0 + ;; + "" | $'\n' | $'\r' | y | Y) + echo "" # Move to next line + ;; + *) + echo "" + echo "" + _restore_uninstall_traps + return 0 + ;; + esac + + # Enable uninstall mode - allows deletion of data-protected apps (VPNs, dev tools, etc.) + # that user explicitly chose to uninstall. System-critical components remain protected. + export MOLE_UNINSTALL_MODE=1 + + # Request sudo if needed for non-Homebrew removal operations. + # Note: Homebrew resets sudo timestamp at process startup, so pre-auth would + # cause duplicate password prompts in cask-only flows. + if [[ ${#sudo_apps[@]} -gt 0 && "${MOLE_DRY_RUN:-0}" != "1" ]]; then + if ! ensure_sudo_session "Admin required for system apps: ${sudo_apps[*]}"; then + echo "" + log_error "Admin access denied" + _restore_uninstall_traps + return 1 + fi + fi + + # Perform uninstallations with per-app progress feedback + local success_count=0 failed_count=0 + local brew_apps_removed=0 # Track successful brew uninstalls for silent autoremove + local -a failed_items=() + local -a success_items=() + local current_index=0 + for detail in "${app_details[@]}"; do + current_index=$((current_index + 1)) + IFS='|' read -r app_name app_path bundle_id total_kb encoded_files encoded_system_files has_sensitive_data needs_sudo is_brew_cask cask_name encoded_diag_system <<< "$detail" + local related_files=$(decode_file_list "$encoded_files" "$app_name") + local system_files=$(decode_file_list "$encoded_system_files" "$app_name") + local diag_system=$(decode_file_list "$encoded_diag_system" "$app_name") + local reason="" + local suggestion="" + + # Show progress for current app + local brew_tag="" + [[ "$is_brew_cask" == "true" ]] && brew_tag=" ${CYAN}[Brew]${NC}" + if [[ -t 1 ]]; then + if [[ ${#app_details[@]} -gt 1 ]]; then + start_inline_spinner "[$current_index/${#app_details[@]}] Uninstalling ${app_name}${brew_tag}..." + else + start_inline_spinner "Uninstalling ${app_name}${brew_tag}..." + fi + fi + + # Stop Launch Agents/Daemons before removal. + local has_system_files="false" + [[ -n "$system_files" ]] && has_system_files="true" + + stop_launch_services "$bundle_id" "$has_system_files" + unregister_app_bundle "$app_path" + + # Remove from Login Items + remove_login_item "$app_name" "$bundle_id" + + if ! force_kill_app "$app_name" "$app_path"; then + reason="still running" + fi + + # Remove the application only if not running. + # Stop spinner before any removal attempt (avoids mixed output on errors) + [[ -t 1 ]] && stop_inline_spinner + + local used_brew_successfully=false + if [[ -z "$reason" ]]; then + if [[ "$is_brew_cask" == "true" && -n "$cask_name" ]]; then + # Use brew_uninstall_cask helper (handles env vars, timeout, verification) + if brew_uninstall_cask "$cask_name" "$app_path"; then + used_brew_successfully=true + else + # Fallback to manual removal if brew fails + if [[ "$needs_sudo" == true ]]; then + if ! safe_sudo_remove "$app_path"; then + reason="brew failed, manual removal failed" + fi + else + if ! safe_remove "$app_path" true; then + reason="brew failed, manual removal failed" + fi + fi + fi + elif [[ "$needs_sudo" == true ]]; then + if [[ -L "$app_path" ]]; then + local link_target + link_target=$(readlink "$app_path" 2> /dev/null) + if [[ -n "$link_target" ]]; then + local resolved_target="$link_target" + if [[ "$link_target" != /* ]]; then + local link_dir + link_dir=$(dirname "$app_path") + resolved_target=$(cd "$link_dir" 2> /dev/null && cd "$(dirname "$link_target")" 2> /dev/null && pwd)/$(basename "$link_target") 2> /dev/null || echo "" + fi + case "$resolved_target" in + /System/* | /usr/bin/* | /usr/lib/* | /bin/* | /sbin/* | /private/etc/*) + reason="protected system symlink, cannot remove" + ;; + *) + if ! safe_remove_symlink "$app_path" "true"; then + reason="failed to remove symlink" + fi + ;; + esac + else + if ! safe_remove_symlink "$app_path" "true"; then + reason="failed to remove symlink" + fi + fi + else + if is_uninstall_dry_run; then + if ! safe_remove "$app_path" true; then + reason="dry-run path validation failed" + fi + else + local ret=0 + safe_sudo_remove "$app_path" || ret=$? + if [[ $ret -ne 0 ]]; then + local diagnosis + diagnosis=$(diagnose_removal_failure "$ret" "$app_name") + IFS='|' read -r reason suggestion <<< "$diagnosis" + fi + fi + fi + else + if ! safe_remove "$app_path" true; then + if [[ ! -w "$(dirname "$app_path")" ]]; then + reason="parent directory not writable" + else + reason="remove failed, check permissions" + fi + fi + fi + fi + + # Remove related files if app removal succeeded. + if [[ -z "$reason" ]]; then + remove_file_list "$related_files" "false" > /dev/null + + if [[ "$used_brew_successfully" == "true" ]]; then + remove_file_list "$diag_system" "true" > /dev/null + else + local system_all="$system_files" + if [[ -n "$diag_system" ]]; then + if [[ -n "$system_all" ]]; then + system_all+=$'\n' + fi + system_all+="$diag_system" + fi + remove_file_list "$system_all" "true" > /dev/null + fi + + # Defaults writes are side effects that should never run in dry-run mode. + if [[ -n "$bundle_id" && "$bundle_id" != "unknown" ]]; then + if is_uninstall_dry_run; then + debug_log "[DRY RUN] Would clear defaults domain: $bundle_id" + else + if defaults read "$bundle_id" &> /dev/null; then + defaults delete "$bundle_id" 2> /dev/null || true + fi + fi + + # ByHost preferences (machine-specific). + if [[ -d "$HOME/Library/Preferences/ByHost" ]]; then + if [[ "$bundle_id" =~ ^[A-Za-z0-9._-]+$ ]]; then + while IFS= read -r -d '' plist_file; do + safe_remove "$plist_file" true > /dev/null || true + done < <(command find "$HOME/Library/Preferences/ByHost" -maxdepth 1 -type f -name "${bundle_id}.*.plist" -print0 2> /dev/null || true) + else + debug_log "Skipping ByHost cleanup, invalid bundle id: $bundle_id" + fi + fi + fi + + # Show success + if [[ -t 1 ]]; then + if [[ ${#app_details[@]} -gt 1 ]]; then + echo -e "${GREEN}${ICON_SUCCESS}${NC} [$current_index/${#app_details[@]}] ${app_name}" + else + echo -e "${GREEN}${ICON_SUCCESS}${NC} ${app_name}" + fi + fi + + total_size_freed=$((total_size_freed + total_kb)) + success_count=$((success_count + 1)) + [[ "$used_brew_successfully" == "true" ]] && brew_apps_removed=$((brew_apps_removed + 1)) + files_cleaned=$((files_cleaned + 1)) + total_items=$((total_items + 1)) + success_items+=("$app_path") + else + if [[ -t 1 ]]; then + if [[ ${#app_details[@]} -gt 1 ]]; then + echo -e "${ICON_ERROR} [$current_index/${#app_details[@]}] ${app_name} ${GRAY}, $reason${NC}" + else + echo -e "${ICON_ERROR} ${app_name} failed: $reason" + fi + if [[ -n "${suggestion:-}" ]]; then + echo -e "${GRAY} ${ICON_REVIEW} ${suggestion}${NC}" + fi + fi + + failed_count=$((failed_count + 1)) + failed_items+=("$app_name:$reason:${suggestion:-}") + fi + done + + # Summary + local freed_display + freed_display=$(bytes_to_human "$((total_size_freed * 1024))") + + local summary_status="success" + local -a summary_details=() + + if [[ $success_count -gt 0 ]]; then + local success_text="app" + [[ $success_count -gt 1 ]] && success_text="apps" + local success_line="Removed ${success_count} ${success_text}" + if is_uninstall_dry_run; then + success_line="Would remove ${success_count} ${success_text}" + fi + if [[ -n "$freed_display" ]]; then + if is_uninstall_dry_run; then + success_line+=", would free ${GREEN}${freed_display}${NC}" + else + success_line+=", freed ${GREEN}${freed_display}${NC}" + fi + fi + + # Format app list with max 3 per line. + if [[ ${#success_items[@]} -gt 0 ]]; then + local idx=0 + local is_first_line=true + local current_line="" + + for success_path in "${success_items[@]}"; do + local display_name + display_name=$(basename "$success_path" .app) + local display_item="${GREEN}${display_name}${NC}" + + if ((idx % 3 == 0)); then + if [[ -n "$current_line" ]]; then + summary_details+=("$current_line") + fi + if [[ "$is_first_line" == true ]]; then + current_line="${success_line}: $display_item" + is_first_line=false + else + current_line="$display_item" + fi + else + current_line="$current_line, $display_item" + fi + idx=$((idx + 1)) + done + if [[ -n "$current_line" ]]; then + summary_details+=("$current_line") + fi + else + summary_details+=("$success_line") + fi + fi + + if [[ $failed_count -gt 0 ]]; then + summary_status="warn" + + local failed_names=() + for item in "${failed_items[@]}"; do + local name=${item%%:*} + failed_names+=("$name") + done + local failed_list="${failed_names[*]}" + + local reason_summary="could not be removed" + local suggestion_text="" + if [[ $failed_count -eq 1 ]]; then + # Extract reason and suggestion from format: app:reason:suggestion + local item="${failed_items[0]}" + local without_app="${item#*:}" + local first_reason="${without_app%%:*}" + local first_suggestion="${without_app#*:}" + + # If suggestion is same as reason, there was no suggestion part + # Also check if suggestion is empty + if [[ "$first_suggestion" != "$first_reason" && -n "$first_suggestion" ]]; then + suggestion_text="${GRAY}${ICON_REVIEW} ${first_suggestion}${NC}" + fi + + case "$first_reason" in + still*running*) reason_summary="is still running" ;; + remove*failed*) reason_summary="could not be removed" ;; + permission*denied*) reason_summary="permission denied" ;; + owned*by*) reason_summary="$first_reason, try with sudo" ;; + *) reason_summary="$first_reason" ;; + esac + fi + summary_details+=("${ICON_LIST} Failed: ${RED}${failed_list}${NC} ${reason_summary}") + if [[ -n "$suggestion_text" ]]; then + summary_details+=("$suggestion_text") + fi + fi + + if [[ $success_count -eq 0 && $failed_count -eq 0 ]]; then + summary_status="info" + summary_details+=("No applications were uninstalled.") + fi + + local title="Uninstall complete" + if [[ "$summary_status" == "warn" ]]; then + title="Uninstall incomplete" + fi + if is_uninstall_dry_run; then + title="Uninstall dry run complete" + fi + + echo "" + print_summary_block "$title" "${summary_details[@]}" + printf '\n' + + # Run brew autoremove silently in background to avoid interrupting UX. + if [[ $brew_apps_removed -gt 0 && "${MOLE_DRY_RUN:-0}" != "1" ]]; then + ( + HOMEBREW_NO_ENV_HINTS=1 HOMEBREW_NO_AUTO_UPDATE=1 NONINTERACTIVE=1 \ + run_with_timeout 30 brew autoremove > /dev/null 2>&1 || true + ) & + disown $! 2> /dev/null || true + fi + + # Clean up Dock entries for uninstalled apps. + if [[ $success_count -gt 0 && ${#success_items[@]} -gt 0 ]]; then + if is_uninstall_dry_run; then + log_info "[DRY RUN] Would refresh LaunchServices and update Dock entries" + else + ( + remove_apps_from_dock "${success_items[@]}" > /dev/null 2>&1 || true + refresh_launch_services_after_uninstall > /dev/null 2>&1 || true + ) & + disown $! 2> /dev/null || true + fi + fi + + _cleanup_sudo_keepalive + + # Disable uninstall mode + unset MOLE_UNINSTALL_MODE + + _restore_uninstall_traps + unset -f _restore_uninstall_traps + + total_size_cleaned=$((total_size_cleaned + total_size_freed)) + unset failed_items +} diff --git a/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/uninstall/brew.sh b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/uninstall/brew.sh new file mode 100644 index 0000000..012ca53 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Sources/AtlasCoreAdapters/Resources/MoleRuntime/lib/uninstall/brew.sh @@ -0,0 +1,225 @@ +#!/bin/bash +# Mole - Homebrew Cask Uninstallation Support +# Detects Homebrew-managed casks via Caskroom linkage and uninstalls them via brew + +set -euo pipefail + +# Prevent multiple sourcing +if [[ -n "${MOLE_BREW_UNINSTALL_LOADED:-}" ]]; then + return 0 +fi +readonly MOLE_BREW_UNINSTALL_LOADED=1 + +# Resolve a path to its absolute real path (follows symlinks) +# Args: $1 - path to resolve +# Returns: Absolute resolved path, or empty string on failure +resolve_path() { + local p="$1" + [[ -e "$p" ]] || return 1 + + # macOS 12.3+ and Linux have realpath + if realpath "$p" 2> /dev/null; then + return 0 + fi + + # Fallback: use cd -P to resolve directory, then append basename + local dir base + dir=$(cd -P "$(dirname "$p")" 2> /dev/null && pwd) || return 1 + base=$(basename "$p") + echo "$dir/$base" +} + +# Check if Homebrew is installed and accessible +# Returns: 0 if brew is available, 1 otherwise +is_homebrew_available() { + command -v brew > /dev/null 2>&1 +} + +# Extract cask token from a Caskroom path +# Args: $1 - path (must be inside Caskroom) +# Prints: cask token to stdout +# Returns: 0 if valid token extracted, 1 otherwise +_extract_cask_token_from_path() { + local path="$1" + + # Check if path is inside Caskroom + case "$path" in + /opt/homebrew/Caskroom/* | /usr/local/Caskroom/*) ;; + *) return 1 ;; + esac + + # Extract token from path: /opt/homebrew/Caskroom///... + local token + token="${path#*/Caskroom/}" # Remove everything up to and including Caskroom/ + token="${token%%/*}" # Take only the first path component + + # Validate token looks like a valid cask name (lowercase alphanumeric with hyphens) + if [[ -n "$token" && "$token" =~ ^[a-z0-9][a-z0-9-]*$ ]]; then + echo "$token" + return 0 + fi + + return 1 +} + +# Stage 1: Deterministic detection via fully resolved path +# Fast, no false positives - follows all symlinks +_detect_cask_via_resolved_path() { + local app_path="$1" + local resolved + if resolved=$(resolve_path "$app_path") && [[ -n "$resolved" ]]; then + _extract_cask_token_from_path "$resolved" && return 0 + fi + return 1 +} + +# Stage 2: Search Caskroom by app bundle name using find +# Catches apps where the .app in /Applications doesn't link to Caskroom +# Only succeeds if exactly one cask matches (avoids wrong uninstall) +_detect_cask_via_caskroom_search() { + local app_bundle_name="$1" + [[ -z "$app_bundle_name" ]] && return 1 + + local -a tokens=() + local room match token + + for room in "/opt/homebrew/Caskroom" "/usr/local/Caskroom"; do + [[ -d "$room" ]] || continue + while IFS= read -r match; do + [[ -n "$match" ]] || continue + token=$(_extract_cask_token_from_path "$match" 2> /dev/null) || continue + [[ -n "$token" ]] && tokens+=("$token") + done < <(find "$room" -maxdepth 3 -name "$app_bundle_name" 2> /dev/null) + done + + # Need at least one token + ((${#tokens[@]} > 0)) || return 1 + + # Deduplicate and check count + local -a uniq + IFS=$'\n' read -r -d '' -a uniq < <(printf '%s\n' "${tokens[@]}" | sort -u && printf '\0') || true + + # Only succeed if exactly one unique token found and it's installed + if ((${#uniq[@]} == 1)) && [[ -n "${uniq[0]}" ]]; then + HOMEBREW_NO_ENV_HINTS=1 brew list --cask 2> /dev/null | grep -qxF "${uniq[0]}" || return 1 + echo "${uniq[0]}" + return 0 + fi + + return 1 +} + +# Stage 3: Check if app_path is a direct symlink to Caskroom +_detect_cask_via_symlink_check() { + local app_path="$1" + [[ -L "$app_path" ]] || return 1 + + local target + target=$(readlink "$app_path" 2> /dev/null) || return 1 + _extract_cask_token_from_path "$target" +} + +# Stage 4: Query brew list --cask and verify with brew info (slowest fallback) +_detect_cask_via_brew_list() { + local app_path="$1" + local app_bundle_name="$2" + local app_name_lower + app_name_lower=$(echo "${app_bundle_name%.app}" | LC_ALL=C tr '[:upper:]' '[:lower:]') + + local cask_name + cask_name=$(HOMEBREW_NO_ENV_HINTS=1 brew list --cask 2> /dev/null | grep -Fix "$app_name_lower") || return 1 + + # Verify this cask actually owns this app path + HOMEBREW_NO_ENV_HINTS=1 brew info --cask "$cask_name" 2> /dev/null | grep -qF "$app_path" || return 1 + echo "$cask_name" +} + +# Get Homebrew cask name for an app +# Uses multi-stage detection (fast to slow, deterministic to heuristic): +# 1. Resolve symlinks fully, check if path is in Caskroom (fast, deterministic) +# 2. Search Caskroom by app bundle name using find +# 3. Check if app is a direct symlink to Caskroom +# 4. Query brew list --cask and verify with brew info (slowest) +# +# Args: $1 - app_path +# Prints: cask token to stdout if brew-managed +# Returns: 0 if Homebrew-managed, 1 otherwise +get_brew_cask_name() { + local app_path="$1" + [[ -z "$app_path" || ! -e "$app_path" ]] && return 1 + is_homebrew_available || return 1 + + local app_bundle_name + app_bundle_name=$(basename "$app_path") + + # Try each detection method in order (fast to slow) + _detect_cask_via_resolved_path "$app_path" && return 0 + _detect_cask_via_caskroom_search "$app_bundle_name" && return 0 + _detect_cask_via_symlink_check "$app_path" && return 0 + _detect_cask_via_brew_list "$app_path" "$app_bundle_name" && return 0 + + return 1 +} + +# Uninstall a Homebrew cask and verify removal +# Args: $1 - cask_name, $2 - app_path (optional, for verification) +# Returns: 0 on success, 1 on failure +brew_uninstall_cask() { + local cask_name="$1" + local app_path="${2:-}" + + if [[ "${MOLE_DRY_RUN:-0}" == "1" ]]; then + debug_log "[DRY RUN] Would brew uninstall --cask --zap $cask_name" + return 0 + fi + + is_homebrew_available || return 1 + [[ -z "$cask_name" ]] && return 1 + + debug_log "Attempting brew uninstall --cask --zap $cask_name" + + local uninstall_ok=false + local brew_exit=0 + + # Calculate timeout based on app size (large apps need more time) + local timeout=300 # Default 5 minutes + if [[ -n "$app_path" && -d "$app_path" ]]; then + local size_gb=$(($(get_path_size_kb "$app_path") / 1048576)) + if [[ $size_gb -gt 15 ]]; then + timeout=900 # 15 minutes for very large apps (Xcode, Adobe, etc.) + elif [[ $size_gb -gt 5 ]]; then + timeout=600 # 10 minutes for large apps + fi + debug_log "App size: ${size_gb}GB, timeout: ${timeout}s" + fi + + # Run with timeout to prevent hangs from problematic cask scripts + local brew_exit=0 + if HOMEBREW_NO_ENV_HINTS=1 HOMEBREW_NO_AUTO_UPDATE=1 NONINTERACTIVE=1 \ + run_with_timeout "$timeout" brew uninstall --cask --zap "$cask_name" 2>&1; then + uninstall_ok=true + else + brew_exit=$? + debug_log "brew uninstall timeout or failed with exit code: $brew_exit" + # Exit code 124 indicates timeout from run_with_timeout + # On timeout, fail immediately without verification to avoid inconsistent state + if [[ $brew_exit -eq 124 ]]; then + debug_log "brew uninstall timed out after ${timeout}s, returning failure" + return 1 + fi + fi + + # Verify removal (only if not timed out) + local cask_gone=true app_gone=true + HOMEBREW_NO_ENV_HINTS=1 brew list --cask 2> /dev/null | grep -qxF "$cask_name" && cask_gone=false + [[ -n "$app_path" && -e "$app_path" ]] && app_gone=false + + # Success: uninstall worked and both are gone, or already uninstalled + if $cask_gone && $app_gone; then + debug_log "Successfully uninstalled cask '$cask_name'" + return 0 + fi + + debug_log "brew uninstall failed: cask_gone=$cask_gone app_gone=$app_gone" + return 1 +} diff --git a/Packages/AtlasCoreAdapters/Tests/AtlasCoreAdaptersTests/MacAppsInventoryAdapterTests.swift b/Packages/AtlasCoreAdapters/Tests/AtlasCoreAdaptersTests/MacAppsInventoryAdapterTests.swift new file mode 100644 index 0000000..224f63f --- /dev/null +++ b/Packages/AtlasCoreAdapters/Tests/AtlasCoreAdaptersTests/MacAppsInventoryAdapterTests.swift @@ -0,0 +1,43 @@ +import XCTest +@testable import AtlasCoreAdapters + +final class MacAppsInventoryAdapterTests: XCTestCase { + func testCollectInstalledAppsBuildsStructuredFootprints() async throws { + let sandboxURL = FileManager.default.temporaryDirectory.appendingPathComponent(UUID().uuidString, isDirectory: true) + let appsRoot = sandboxURL.appendingPathComponent("Applications", isDirectory: true) + let homeRoot = sandboxURL.appendingPathComponent("Home", isDirectory: true) + let appURL = appsRoot.appendingPathComponent("Sample.app", isDirectory: true) + let contentsURL = appURL.appendingPathComponent("Contents", isDirectory: true) + let executableURL = contentsURL.appendingPathComponent("MacOS/sample") + let infoPlistURL = contentsURL.appendingPathComponent("Info.plist") + let leftoverURL = homeRoot.appendingPathComponent("Library/Application Support/Sample", isDirectory: true) + + try FileManager.default.createDirectory(at: executableURL.deletingLastPathComponent(), withIntermediateDirectories: true) + try FileManager.default.createDirectory(at: leftoverURL, withIntermediateDirectories: true) + try Data(repeating: 0x1, count: 1024).write(to: executableURL) + try """ + + + + + CFBundleIdentifier + com.example.Sample + CFBundleName + Sample + CFBundlePackageType + APPL + + + """.data(using: .utf8)!.write(to: infoPlistURL) + + let adapter = MacAppsInventoryAdapter(searchRoots: [appsRoot], homeDirectoryURL: homeRoot) + let apps = try await adapter.collectInstalledApps() + + XCTAssertEqual(apps.count, 1) + XCTAssertEqual(apps.first?.name, "Sample") + XCTAssertEqual(apps.first?.bundleIdentifier, "com.example.Sample") + XCTAssertTrue(apps.first?.bundlePath.hasSuffix("/Applications/Sample.app") == true) + XCTAssertEqual(apps.first?.leftoverItems, 1) + XCTAssertGreaterThan(apps.first?.bytes ?? 0, 0) + } +} diff --git a/Packages/AtlasCoreAdapters/Tests/AtlasCoreAdaptersTests/MoleHealthAdapterTests.swift b/Packages/AtlasCoreAdapters/Tests/AtlasCoreAdaptersTests/MoleHealthAdapterTests.swift new file mode 100644 index 0000000..3f77220 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Tests/AtlasCoreAdaptersTests/MoleHealthAdapterTests.swift @@ -0,0 +1,12 @@ +import XCTest +@testable import AtlasCoreAdapters + +final class MoleHealthAdapterTests: XCTestCase { + func testCollectHealthSnapshotParsesStructuredJSON() async throws { + let snapshot = try await MoleHealthAdapter().collectHealthSnapshot() + + XCTAssertGreaterThanOrEqual(snapshot.memoryTotalGB, 0) + XCTAssertGreaterThan(snapshot.diskTotalGB, 0) + XCTAssertFalse(snapshot.optimizations.isEmpty) + } +} diff --git a/Packages/AtlasCoreAdapters/Tests/AtlasCoreAdaptersTests/MoleSmartCleanAdapterTests.swift b/Packages/AtlasCoreAdapters/Tests/AtlasCoreAdaptersTests/MoleSmartCleanAdapterTests.swift new file mode 100644 index 0000000..757cb74 --- /dev/null +++ b/Packages/AtlasCoreAdapters/Tests/AtlasCoreAdaptersTests/MoleSmartCleanAdapterTests.swift @@ -0,0 +1,43 @@ +import XCTest +@testable import AtlasCoreAdapters + +final class MoleSmartCleanAdapterTests: XCTestCase { + func testParseFindingsBuildsStructuredSmartCleanItems() { + let sample = """ + ➤ Browsers + → Chrome old versions, 2 dirs, 1.37GB dry + + ➤ Developer tools + → npm cache · would clean + → Xcode runtime volumes · 2 unused, 1 in use + • Runtime volumes total: 3.50GB (unused 2.25GB, in-use 1.25GB) + → JetBrains Toolbox · would remove 3 old versions (4.00GB), keeping 1 most recent + + ➤ Orphaned data + → Would remove 4 orphaned launch agent(s), 12MB + """ + + let findings = MoleSmartCleanAdapter.parseFindings(from: sample) + + XCTAssertEqual(findings.first?.title, "JetBrains Toolbox") + XCTAssertEqual(findings.first?.bytes, Int64(4.0 * 1024 * 1024 * 1024)) + XCTAssertTrue(findings.contains(where: { $0.title == "Chrome old versions" && $0.category == "Browsers" })) + XCTAssertTrue(findings.contains(where: { $0.title == "Xcode runtime volumes" && $0.bytes == Int64(2.25 * 1024 * 1024 * 1024) })) + XCTAssertTrue(findings.contains(where: { $0.category == "Orphaned data" && $0.risk.rawValue == "advanced" })) + } + + func testParseDetailedFindingsBuildsExecutableTargets() throws { + let fileURL = FileManager.default.temporaryDirectory.appendingPathComponent(UUID().uuidString).appendingPathExtension("tsv") + try FileManager.default.createDirectory(at: fileURL.deletingLastPathComponent(), withIntermediateDirectories: true) + try """ +Developer tools /Users/test/Library/Developer/Xcode/DerivedData/ProjectA 1024 +Developer tools /Users/test/Library/Developer/Xcode/DerivedData/ProjectB 2048 +Browsers /Users/test/Library/Caches/Google/Chrome/Default/Cache_Data 512 +""".write(to: fileURL, atomically: true, encoding: .utf8) + + let findings = MoleSmartCleanAdapter.parseDetailedFindings(from: fileURL) + + XCTAssertTrue(findings.contains(where: { $0.title == "Xcode DerivedData" && ($0.targetPaths?.count ?? 0) == 2 })) + XCTAssertTrue(findings.contains(where: { $0.title == "Chrome cache" && ($0.targetPaths?.first?.contains("Chrome/Default") ?? false) })) + } +} diff --git a/Packages/AtlasDesignSystem/README.md b/Packages/AtlasDesignSystem/README.md new file mode 100644 index 0000000..8bbc69a --- /dev/null +++ b/Packages/AtlasDesignSystem/README.md @@ -0,0 +1,5 @@ +# AtlasDesignSystem + +## Responsibility + +- Colors, typography, spacing, card styles, risk badges, and reusable UI primitives diff --git a/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasBrand.swift b/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasBrand.swift new file mode 100644 index 0000000..609264e --- /dev/null +++ b/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasBrand.swift @@ -0,0 +1,465 @@ +// MARK: - Atlas Brand Identity & Design Tokens +// +// Brand Concept: "Calm Authority" +// Atlas — like a cartographer mapping your system's terrain. +// Precise, trustworthy, and quietly confident. +// +// Visual Language: +// - Cool indigo base with warm amber highlights +// - Generous whitespace, constrained reading width +// - Three elevation tiers for clear visual hierarchy +// - Rounded, organic shapes (continuous corners) +// - Subtle glassmorphism on cards +// - Motion: snappy but never bouncy +// +// Color Story: +// - Indigo (Primary): trust, depth, tech-premium +// - Amber (Accent): warmth, discovery, the gold on a map +// - Semantic tones keep green/orange/red for system states + +import AppKit +import SwiftUI + +// MARK: - Color Tokens + +/// Atlas brand color palette — all colors adapt to light/dark automatically. +public enum AtlasColor { + + // ── Brand ────────────────────────────────────────── + + /// Primary brand teal — used for key actions and active states. + public static let brand = Color("AtlasBrand", bundle: .module) + + /// Fresh mint accent — used for highlights, badges, and discovery cues. + public static let accent = Color("AtlasAccent", bundle: .module) + + // ── Semantic ─────────────────────────────────────── + + public static let success = Color(nsColor: .systemGreen) + public static let warning = Color(nsColor: .systemOrange) + public static let danger = Color(nsColor: .systemRed) + public static let info = Color(nsColor: .systemBlue) + + // ── Surfaces ─────────────────────────────────────── + + /// App canvas — top of gradient. + public static let canvasTop = Color(nsColor: .windowBackgroundColor) + /// App canvas — bottom of gradient. + public static let canvasBottom = Color(nsColor: .underPageBackgroundColor) + + /// Card surface that adapts to light/dark. + public static var card: Color { + Color(nsColor: .controlBackgroundColor) + } + + /// Raised card overlay — glassmorphic tint. + public static var cardRaised: Color { + if NSApp.effectiveAppearance.bestMatch(from: [.aqua, .darkAqua]) == .darkAqua { + return Color.white.opacity(0.06) + } else { + return Color.white.opacity(0.65) + } + } + + // ── Text ─────────────────────────────────────────── + + /// Primary text on canvas. + public static let textPrimary = Color.primary + /// Secondary / muted text. + public static let textSecondary = Color.secondary + /// Tertiary text — footnotes, timestamps. + public static let textTertiary = Color.secondary.opacity(0.6) + + // ── Border ───────────────────────────────────────── + + /// Subtle card border. + public static let border = Color.primary.opacity(0.08) + /// Emphasis border — used on prominent cards and focus states. + public static let borderEmphasis = Color.primary.opacity(0.14) +} + +// MARK: - Typography Tokens + +/// Centralized type scale. All fonts use `.rounded` design for brand warmth. +public enum AtlasTypography { + + // ── Display ──────────────────────────────────────── + + /// Screen title — the large bold header on each feature screen. + public static let screenTitle = Font.system(size: 34, weight: .bold, design: .rounded) + /// Hero metric — the single most important number on a dashboard. + public static let heroMetric = Font.system(size: 40, weight: .bold, design: .rounded) + + // ── Heading ──────────────────────────────────────── + + /// Section heading inside a card or screen area. + public static let sectionTitle = Font.title3.weight(.semibold) + /// Card metric value — secondary metrics in grids. + public static let cardMetric = Font.system(size: 28, weight: .bold, design: .rounded) + + // ── Label ────────────────────────────────────────── + + /// Semibold label for metric titles, sidebar primary text, etc. + public static let label = Font.subheadline.weight(.semibold) + /// Headline weight for row titles. + public static let rowTitle = Font.headline + /// Standard body text. + public static let body = Font.subheadline + /// Small secondary body text. + public static let bodySmall = Font.caption + + // ── Caption ──────────────────────────────────────── + + /// Chip labels, footnotes, overlines. + public static let caption = Font.caption.weight(.semibold) + /// Extra-small legal and timestamp text. + public static let captionSmall = Font.caption2 +} + +// MARK: - Spacing Tokens + +/// Consistent spacing scale based on a 4pt grid. +public enum AtlasSpacing { + /// 4pt — minimal inner padding. + public static let xxs: CGFloat = 4 + /// 6pt — tight insets, chip padding. + public static let xs: CGFloat = 6 + /// 8pt — compact row spacing. + public static let sm: CGFloat = 8 + /// 12pt — default inner element gap. + public static let md: CGFloat = 12 + /// 16pt — card inner padding, section gaps. + public static let lg: CGFloat = 16 + /// 20pt — generous card padding. + public static let xl: CGFloat = 20 + /// 24pt — screen-level vertical rhythm. + public static let xxl: CGFloat = 24 + /// 28pt — screen horizontal margin. + public static let screenH: CGFloat = 28 + /// 32pt — large section separation. + public static let section: CGFloat = 32 +} + +// MARK: - Radius Tokens + +/// Corner radius scale — all use `.continuous` style. +public enum AtlasRadius { + /// 8pt — small elements: chips, tags. + public static let sm: CGFloat = 8 + /// 12pt — inline cards, callouts. + public static let md: CGFloat = 12 + /// 16pt — detail rows, compact cards. + public static let lg: CGFloat = 16 + /// 20pt — standard cards and info cards. + public static let xl: CGFloat = 20 + /// 24pt — prominent/hero cards. + public static let xxl: CGFloat = 24 +} + +// MARK: - Elevation Tokens + +/// Three-tier elevation system for visual hierarchy. +public enum AtlasElevation: Sendable { + /// Flat — no shadow, subtle border only. For nested/secondary content. + case flat + /// Raised — default card level. Gentle lift. + case raised + /// Prominent — hero cards, primary action areas. Strong presence. + case prominent + + public var shadowRadius: CGFloat { + switch self { + case .flat: return 0 + case .raised: return 18 + case .prominent: return 28 + } + } + + public var shadowY: CGFloat { + switch self { + case .flat: return 0 + case .raised: return 10 + case .prominent: return 16 + } + } + + public var shadowOpacity: Double { + switch self { + case .flat: return 0 + case .raised: return 0.05 + case .prominent: return 0.09 + } + } + + public var cornerRadius: CGFloat { + switch self { + case .flat: return AtlasRadius.lg + case .raised: return AtlasRadius.xl + case .prominent: return AtlasRadius.xxl + } + } + + public var borderOpacity: Double { + switch self { + case .flat: return 0.04 + case .raised: return 0.08 + case .prominent: return 0.12 + } + } +} + +// MARK: - Animation Tokens + +/// Standardized animation curves and durations. +public enum AtlasMotion { + /// Fast micro-interaction — hover, press, chip. + public static let fast = Animation.snappy(duration: 0.15) + /// Standard transition — selection, toggle, card state. + public static let standard = Animation.snappy(duration: 0.22) + /// Slow emphasis — page transitions, hero reveals. + public static let slow = Animation.snappy(duration: 0.35) + /// Spring for playful feedback — completion, celebration. + public static let spring = Animation.spring(response: 0.45, dampingFraction: 0.7) +} + +// MARK: - Layout Tokens + +/// Shared layout constants. +public enum AtlasLayout { + /// Maximum content reading width — prevents overly long text lines. + public static let maxReadingWidth: CGFloat = 960 + /// Standard 3-column metric grid definition. + public static let metricColumns: [GridItem] = [ + GridItem(.flexible(minimum: 220), spacing: AtlasSpacing.lg), + GridItem(.flexible(minimum: 220), spacing: AtlasSpacing.lg), + GridItem(.flexible(minimum: 220), spacing: AtlasSpacing.lg), + ] + /// 2-column grid for wider cards. + public static let wideColumns: [GridItem] = [ + GridItem(.flexible(minimum: 300), spacing: AtlasSpacing.lg), + GridItem(.flexible(minimum: 300), spacing: AtlasSpacing.lg), + ] + /// Sidebar width range. + public static let sidebarMinWidth: CGFloat = 230 + public static let sidebarIdealWidth: CGFloat = 260 + /// Sidebar icon container size (pill-style like System Settings). + public static let sidebarIconSize: CGFloat = 32 +} + +// MARK: - Icon Tokens + +/// Named SF Symbol references for consistent iconography. +public enum AtlasIcon { + // ── Navigation ───────────────────────────────────── + public static let overview = "gauge.with.dots.needle.33percent" + public static let smartClean = "sparkles" + public static let apps = "square.grid.2x2" + public static let history = "clock.arrow.circlepath" + public static let permissions = "lock.shield" + public static let settings = "gearshape" + public static let storage = "internaldrive" + + // ── Toolbar ──────────────────────────────────────── + public static let taskCenter = "list.bullet.rectangle.portrait" + public static let refresh = "arrow.clockwise" + + // ── Status ───────────────────────────────────────── + public static let success = "checkmark.circle.fill" + public static let warning = "exclamationmark.triangle.fill" + public static let danger = "xmark.octagon.fill" + public static let info = "info.circle.fill" + public static let neutral = "circle.fill" + + // ── Actions ──────────────────────────────────────── + public static let scan = "magnifyingglass" + public static let clean = "trash" + public static let restore = "arrow.uturn.backward" + public static let preview = "eye" + public static let grant = "hand.raised" +} + +// MARK: - Brand Helpers + +/// Convenience for building elevation-aware card backgrounds. +public func atlasCardBackground(tone: AtlasTone = .neutral, elevation: AtlasElevation = .raised) -> some View { + let cr = elevation.cornerRadius + return ZStack { + RoundedRectangle(cornerRadius: cr, style: .continuous) + .fill(AtlasColor.card) + .background( + RoundedRectangle(cornerRadius: cr, style: .continuous) + .fill(tone.softFill.opacity(0.55)) + ) + .shadow( + color: Color.black.opacity(elevation.shadowOpacity), + radius: elevation.shadowRadius, + x: 0, + y: elevation.shadowY + ) + + // Prominent cards get a subtle top-left inner glow + if elevation == .prominent { + RoundedRectangle(cornerRadius: cr, style: .continuous) + .fill( + LinearGradient( + colors: [Color.white.opacity(0.08), Color.clear], + startPoint: .topLeading, + endPoint: .center + ) + ) + } + } +} + +/// Convenience for building elevation-aware card borders. +public func atlasCardBorder(tone: AtlasTone = .neutral, elevation: AtlasElevation = .raised) -> some View { + RoundedRectangle(cornerRadius: elevation.cornerRadius, style: .continuous) + .strokeBorder( + LinearGradient( + colors: [ + tone.border.opacity(elevation.borderOpacity / 0.08), + Color.primary.opacity(elevation.borderOpacity), + ], + startPoint: .topLeading, + endPoint: .bottomTrailing + ), + lineWidth: elevation == .prominent ? 1.5 : 1 + ) +} + +// MARK: - View Modifiers + +/// Applies Atlas card styling (background + border + elevation) to any view. +public struct AtlasCardModifier: ViewModifier { + let tone: AtlasTone + let elevation: AtlasElevation + let padding: CGFloat + + public init(tone: AtlasTone = .neutral, elevation: AtlasElevation = .raised, padding: CGFloat = AtlasSpacing.xl) { + self.tone = tone + self.elevation = elevation + self.padding = padding + } + + public func body(content: Content) -> some View { + content + .padding(padding) + .background(atlasCardBackground(tone: tone, elevation: elevation)) + .overlay(atlasCardBorder(tone: tone, elevation: elevation)) + } +} + +public extension View { + /// Wraps the view in an Atlas-styled card with the given tone and elevation. + func atlasCard(tone: AtlasTone = .neutral, elevation: AtlasElevation = .raised, padding: CGFloat = AtlasSpacing.xl) -> some View { + modifier(AtlasCardModifier(tone: tone, elevation: elevation, padding: padding)) + } +} + +/// Hover + press microinteraction for interactive cards. +public struct AtlasHoverModifier: ViewModifier { + @State private var isHovered = false + + public func body(content: Content) -> some View { + content + .scaleEffect(isHovered ? 1.008 : 1.0) + .shadow( + color: Color.black.opacity(isHovered ? 0.08 : 0), + radius: isHovered ? 24 : 0, + y: isHovered ? 12 : 0 + ) + .animation(AtlasMotion.fast, value: isHovered) + .onHover { hovering in + isHovered = hovering + } + } +} + +public extension View { + /// Adds subtle hover lift effect to the view. + func atlasHover() -> some View { + modifier(AtlasHoverModifier()) + } +} + +// MARK: - Button Styles + +/// Primary filled button — the single most important action on screen. +public struct AtlasPrimaryButtonStyle: ButtonStyle { + @Environment(\.isEnabled) private var isEnabled + + public init() {} + + public func makeBody(configuration: Configuration) -> some View { + configuration.label + .font(AtlasTypography.label) + .foregroundStyle(.white) + .padding(.horizontal, AtlasSpacing.xxl) + .padding(.vertical, AtlasSpacing.md) + .background( + Capsule(style: .continuous) + .fill(isEnabled ? AtlasColor.brand : AtlasColor.brand.opacity(0.4)) + ) + .shadow( + color: AtlasColor.brand.opacity(configuration.isPressed ? 0 : 0.25), + radius: configuration.isPressed ? 4 : 12, + y: configuration.isPressed ? 2 : 6 + ) + .scaleEffect(configuration.isPressed ? 0.97 : 1.0) + .animation(AtlasMotion.fast, value: configuration.isPressed) + } +} + +/// Secondary outlined button — supporting actions. +public struct AtlasSecondaryButtonStyle: ButtonStyle { + public init() {} + + public func makeBody(configuration: Configuration) -> some View { + configuration.label + .font(AtlasTypography.label) + .foregroundStyle(AtlasColor.brand) + .padding(.horizontal, AtlasSpacing.xxl) + .padding(.vertical, AtlasSpacing.md) + .background( + Capsule(style: .continuous) + .fill(AtlasColor.brand.opacity(configuration.isPressed ? 0.08 : 0.04)) + ) + .overlay( + Capsule(style: .continuous) + .strokeBorder(AtlasColor.brand.opacity(0.3), lineWidth: 1.5) + ) + .scaleEffect(configuration.isPressed ? 0.97 : 1.0) + .animation(AtlasMotion.fast, value: configuration.isPressed) + } +} + +/// Ghost/tertiary button — minimal weight, for infrequent actions. +public struct AtlasGhostButtonStyle: ButtonStyle { + public init() {} + + public func makeBody(configuration: Configuration) -> some View { + configuration.label + .font(AtlasTypography.label) + .foregroundStyle(AtlasColor.brand) + .padding(.horizontal, AtlasSpacing.lg) + .padding(.vertical, AtlasSpacing.sm) + .background( + Capsule(style: .continuous) + .fill(configuration.isPressed ? AtlasColor.brand.opacity(0.06) : Color.clear) + ) + .scaleEffect(configuration.isPressed ? 0.97 : 1.0) + .animation(AtlasMotion.fast, value: configuration.isPressed) + } +} + +public extension ButtonStyle where Self == AtlasPrimaryButtonStyle { + static var atlasPrimary: AtlasPrimaryButtonStyle { AtlasPrimaryButtonStyle() } +} + +public extension ButtonStyle where Self == AtlasSecondaryButtonStyle { + static var atlasSecondary: AtlasSecondaryButtonStyle { AtlasSecondaryButtonStyle() } +} + +public extension ButtonStyle where Self == AtlasGhostButtonStyle { + static var atlasGhost: AtlasGhostButtonStyle { AtlasGhostButtonStyle() } +} diff --git a/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasDesignSystem.swift b/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasDesignSystem.swift new file mode 100644 index 0000000..69f7213 --- /dev/null +++ b/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/AtlasDesignSystem.swift @@ -0,0 +1,567 @@ +import Foundation +import SwiftUI + +public enum AtlasTone: Sendable { + case neutral + case success + case warning + case danger + + public var tint: Color { + switch self { + case .neutral: + return AtlasColor.brand + case .success: + return AtlasColor.success + case .warning: + return AtlasColor.warning + case .danger: + return AtlasColor.danger + } + } + + public var fill: Color { + tint.opacity(0.12) + } + + public var softFill: Color { + tint.opacity(0.08) + } + + public var border: Color { + tint.opacity(0.18) + } + + public var symbol: String { + switch self { + case .neutral: + return "circle.fill" + case .success: + return "checkmark.circle.fill" + case .warning: + return "exclamationmark.triangle.fill" + case .danger: + return "xmark.octagon.fill" + } + } +} + +public enum AtlasFormatters { + public static func byteCount(_ bytes: Int64) -> String { + ByteCountFormatter.string(fromByteCount: bytes, countStyle: .file) + } + + public static func relativeDate(_ date: Date) -> String { + let formatter = RelativeDateTimeFormatter() + formatter.unitsStyle = .full + return formatter.localizedString(for: date, relativeTo: Date()) + } + + public static func shortDate(_ date: Date) -> String { + date.formatted(date: .abbreviated, time: .shortened) + } +} + +public struct AtlasScreen: View { + private let title: String + private let subtitle: String + private let useScrollView: Bool + private let content: Content + + public init(title: String, subtitle: String, useScrollView: Bool = true, @ViewBuilder content: () -> Content) { + self.title = title + self.subtitle = subtitle + self.useScrollView = useScrollView + self.content = content() + } + + public var body: some View { + ZStack { + LinearGradient( + colors: [AtlasColor.canvasTop, AtlasColor.canvasBottom], + startPoint: .top, + endPoint: .bottom + ) + .ignoresSafeArea() + + Group { + if useScrollView { + ScrollView { + contentStack + } + } else { + contentStack + } + } + } + } + + private var contentStack: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xxl) { + header + content + } + .frame(maxWidth: AtlasLayout.maxReadingWidth, maxHeight: .infinity, alignment: .topLeading) + .padding(.horizontal, AtlasSpacing.screenH) + .padding(.vertical, AtlasSpacing.xxl) + .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) + } + + private var header: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.sm) { + Text(title) + .font(AtlasTypography.screenTitle) + + Text(subtitle) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } +} + +public struct AtlasMetricCard: View { + private let title: String + private let value: String + private let detail: String + private let tone: AtlasTone + private let systemImage: String? + private let elevation: AtlasElevation + + public init( + title: String, + value: String, + detail: String, + tone: AtlasTone = .neutral, + systemImage: String? = nil, + elevation: AtlasElevation = .raised + ) { + self.title = title + self.value = value + self.detail = detail + self.tone = tone + self.systemImage = systemImage + self.elevation = elevation + } + + public var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + HStack(alignment: .center, spacing: AtlasSpacing.md) { + if let systemImage { + Image(systemName: systemImage) + .font(.headline) + .foregroundStyle(tone.tint) + .accessibilityHidden(true) + } + + Text(title) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + } + + Text(value) + .font(elevation == .prominent ? AtlasTypography.heroMetric : AtlasTypography.cardMetric) + .foregroundStyle(.primary) + .contentTransition(.numericText()) + + Text(detail) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(AtlasSpacing.xl) + .background(atlasCardBackground(tone: tone, elevation: elevation)) + .overlay(atlasCardBorder(tone: tone, elevation: elevation)) + .accessibilityElement(children: .ignore) + .accessibilityLabel(Text(title)) + .accessibilityValue(Text(value)) + .accessibilityHint(Text(detail)) + } +} + +public struct AtlasInfoCard: View { + private let title: String + private let subtitle: String? + private let tone: AtlasTone + private let content: Content + + public init( + title: String, + subtitle: String? = nil, + tone: AtlasTone = .neutral, + @ViewBuilder content: () -> Content + ) { + self.title = title + self.subtitle = subtitle + self.tone = tone + self.content = content() + } + + public var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + if !title.isEmpty || subtitle != nil { + VStack(alignment: .leading, spacing: AtlasSpacing.xs) { + if !title.isEmpty { + Text(title) + .font(AtlasTypography.sectionTitle) + } + + if let subtitle { + Text(subtitle) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + } + + content + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(AtlasSpacing.xxl) + .background(atlasCardBackground(tone: tone)) + .overlay(atlasCardBorder(tone: tone)) + } +} + +public struct AtlasCallout: View { + private let title: String + private let detail: String + private let tone: AtlasTone + private let systemImage: String? + + public init( + title: String, + detail: String, + tone: AtlasTone = .neutral, + systemImage: String? = nil + ) { + self.title = title + self.detail = detail + self.tone = tone + self.systemImage = systemImage + } + + public var body: some View { + HStack(alignment: .top, spacing: AtlasSpacing.lg) { + Image(systemName: systemImage ?? tone.symbol) + .font(.headline) + .foregroundStyle(tone.tint) + .frame(width: 24) + .accessibilityHidden(true) + + VStack(alignment: .leading, spacing: AtlasSpacing.xs) { + Text(title) + .font(AtlasTypography.rowTitle) + + Text(detail) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + .padding(AtlasSpacing.lg) + .frame(maxWidth: .infinity, alignment: .leading) + .background( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .fill(tone.softFill) + ) + .overlay( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .strokeBorder(tone.border, lineWidth: 1) + ) + .accessibilityElement(children: .ignore) + .accessibilityLabel(Text(title)) + .accessibilityValue(Text(detail)) + } +} + +public struct AtlasDetailRow: View { + private let title: String + private let subtitle: String + private let footnote: String? + private let systemImage: String? + private let tone: AtlasTone + private let isInteractive: Bool + private let trailing: Trailing + + public init( + title: String, + subtitle: String, + footnote: String? = nil, + systemImage: String? = nil, + tone: AtlasTone = .neutral, + isInteractive: Bool = false, + @ViewBuilder trailing: () -> Trailing + ) { + self.title = title + self.subtitle = subtitle + self.footnote = footnote + self.systemImage = systemImage + self.tone = tone + self.isInteractive = isInteractive + self.trailing = trailing() + } + + public var body: some View { + Group { + if isInteractive { + rowBody + .atlasHover() + } else { + rowBody + } + } + .accessibilityElement(children: .contain) + } + + private var rowBody: some View { + HStack(alignment: .top, spacing: AtlasSpacing.lg) { + if let systemImage { + ZStack { + Circle() + .fill(tone.softFill) + .frame(width: AtlasLayout.sidebarIconSize + 4, height: AtlasLayout.sidebarIconSize + 4) + + Image(systemName: systemImage) + .font(.headline) + .foregroundStyle(tone.tint) + .accessibilityHidden(true) + } + } + + VStack(alignment: .leading, spacing: AtlasSpacing.xs) { + Text(title) + .font(AtlasTypography.rowTitle) + + Text(subtitle) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + + if let footnote { + Text(footnote) + .font(AtlasTypography.captionSmall) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + + Spacer(minLength: AtlasSpacing.lg) + + trailing + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(AtlasSpacing.lg) + .background( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .fill(AtlasColor.cardRaised) + ) + .overlay( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .strokeBorder(AtlasColor.border, lineWidth: 1) + ) + } +} + +public extension AtlasDetailRow where Trailing == EmptyView { + init( + title: String, + subtitle: String, + footnote: String? = nil, + systemImage: String? = nil, + tone: AtlasTone = .neutral, + isInteractive: Bool = false + ) { + self.init(title: title, subtitle: subtitle, footnote: footnote, systemImage: systemImage, tone: tone, isInteractive: isInteractive) { + EmptyView() + } + } +} + +public struct AtlasKeyValueRow: View { + private let title: String + private let value: String + private let detail: String? + + public init(title: String, value: String, detail: String? = nil) { + self.title = title + self.value = value + self.detail = detail + } + + public var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + HStack(alignment: .top, spacing: AtlasSpacing.md) { + Text(title) + .font(AtlasTypography.rowTitle) + + Spacer(minLength: AtlasSpacing.lg) + + Text(value) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + .multilineTextAlignment(.trailing) + } + + if let detail { + Text(detail) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + } + .padding(.vertical, AtlasSpacing.xxs) + .accessibilityElement(children: .ignore) + .accessibilityLabel(Text(title)) + .accessibilityValue(Text(value)) + .accessibilityHint(Text(detail ?? "")) + } +} + +public struct AtlasStatusChip: View { + private let label: String + private let tone: AtlasTone + + public init(_ label: String, tone: AtlasTone) { + self.label = label + self.tone = tone + } + + public var body: some View { + Text(label) + .font(AtlasTypography.caption) + .padding(.horizontal, AtlasSpacing.md) + .padding(.vertical, AtlasSpacing.xs) + .background( + Capsule(style: .continuous) + .fill(tone.fill) + ) + .overlay( + Capsule(style: .continuous) + .strokeBorder(tone.border, lineWidth: 1) + ) + .foregroundStyle(tone.tint) + .accessibilityElement(children: .ignore) + .accessibilityLabel(Text(label)) + } +} + +public struct AtlasEmptyState: View { + private let title: String + private let detail: String + private let systemImage: String + private let tone: AtlasTone + + public init(title: String, detail: String, systemImage: String, tone: AtlasTone = .neutral) { + self.title = title + self.detail = detail + self.systemImage = systemImage + self.tone = tone + } + + public var body: some View { + VStack(spacing: AtlasSpacing.lg) { + ZStack { + Circle() + .strokeBorder(tone.border, lineWidth: 0.5) + .frame(width: 80, height: 80) + + Circle() + .fill( + LinearGradient( + colors: [tone.softFill, tone.softFill.opacity(0.3)], + startPoint: .topLeading, + endPoint: .bottomTrailing + ) + ) + .frame(width: 72, height: 72) + + Image(systemName: systemImage) + .font(.system(size: 28, weight: .semibold)) + .foregroundStyle(tone.tint) + .accessibilityHidden(true) + } + + VStack(spacing: AtlasSpacing.xs) { + Text(title) + .font(AtlasTypography.rowTitle) + + Text(detail) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .multilineTextAlignment(.center) + .fixedSize(horizontal: false, vertical: true) + } + } + .frame(maxWidth: .infinity) + .padding(AtlasSpacing.section) + .background( + RoundedRectangle(cornerRadius: AtlasRadius.xl, style: .continuous) + .fill(Color.primary.opacity(0.03)) + ) + .overlay( + RoundedRectangle(cornerRadius: AtlasRadius.xl, style: .continuous) + .strokeBorder(Color.primary.opacity(0.06), lineWidth: 1) + ) + .accessibilityElement(children: .ignore) + .accessibilityLabel(Text(title)) + .accessibilityValue(Text(detail)) + } +} + +public struct AtlasLoadingState: View { + private let title: String + private let detail: String + private let progress: Double? + @State private var pulsePhase = false + + public init(title: String, detail: String, progress: Double? = nil) { + self.title = title + self.detail = detail + self.progress = progress + } + + public var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + HStack(spacing: AtlasSpacing.md) { + ProgressView() + .controlSize(.small) + .accessibilityHidden(true) + + Text(title) + .font(AtlasTypography.rowTitle) + } + + Text(detail) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + + if let progress { + ProgressView(value: progress, total: 1) + .controlSize(.large) + } + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(AtlasSpacing.xl) + .background( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .fill(Color.primary.opacity(pulsePhase ? 0.05 : 0.03)) + ) + .overlay( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .strokeBorder(Color.primary.opacity(0.08), lineWidth: 1) + ) + .onAppear { + withAnimation(.easeInOut(duration: 1.5).repeatForever(autoreverses: true)) { + pulsePhase = true + } + } + .accessibilityElement(children: .ignore) + .accessibilityLabel(Text(title)) + .accessibilityValue(Text(progress.map { "\(Int(($0 * 100).rounded())) percent complete" } ?? detail)) + .accessibilityHint(Text(detail)) + } +} diff --git a/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/Resources/AtlasColors.xcassets/AtlasAccent.colorset/Contents.json b/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/Resources/AtlasColors.xcassets/AtlasAccent.colorset/Contents.json new file mode 100644 index 0000000..33a3fb4 --- /dev/null +++ b/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/Resources/AtlasColors.xcassets/AtlasAccent.colorset/Contents.json @@ -0,0 +1,44 @@ +{ + "colors" : [ + { + "idiom" : "universal", + "appearances" : [ + { + "appearance" : "luminosity", + "value" : "light" + } + ], + "color" : { + "color-space" : "srgb", + "components" : { + "red" : "0.2039215686", + "green" : "0.8274509804", + "blue" : "0.6000000000", + "alpha" : "1.0000000000" + } + } + }, + { + "idiom" : "universal", + "appearances" : [ + { + "appearance" : "luminosity", + "value" : "dark" + } + ], + "color" : { + "color-space" : "srgb", + "components" : { + "red" : "0.3215686275", + "green" : "0.8862745098", + "blue" : "0.7098039216", + "alpha" : "1.0000000000" + } + } + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/Resources/AtlasColors.xcassets/AtlasBrand.colorset/Contents.json b/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/Resources/AtlasColors.xcassets/AtlasBrand.colorset/Contents.json new file mode 100644 index 0000000..871645f --- /dev/null +++ b/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/Resources/AtlasColors.xcassets/AtlasBrand.colorset/Contents.json @@ -0,0 +1,44 @@ +{ + "colors" : [ + { + "idiom" : "universal", + "appearances" : [ + { + "appearance" : "luminosity", + "value" : "light" + } + ], + "color" : { + "color-space" : "srgb", + "components" : { + "red" : "0.0588235294", + "green" : "0.4627450980", + "blue" : "0.4313725490", + "alpha" : "1.0000000000" + } + } + }, + { + "idiom" : "universal", + "appearances" : [ + { + "appearance" : "luminosity", + "value" : "dark" + } + ], + "color" : { + "color-space" : "srgb", + "components" : { + "red" : "0.0784313725", + "green" : "0.5647058824", + "blue" : "0.5215686275", + "alpha" : "1.0000000000" + } + } + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/Resources/AtlasColors.xcassets/Contents.json b/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/Resources/AtlasColors.xcassets/Contents.json new file mode 100644 index 0000000..73c0059 --- /dev/null +++ b/Packages/AtlasDesignSystem/Sources/AtlasDesignSystem/Resources/AtlasColors.xcassets/Contents.json @@ -0,0 +1,6 @@ +{ + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/Packages/AtlasDomain/README.md b/Packages/AtlasDomain/README.md new file mode 100644 index 0000000..32d0ad8 --- /dev/null +++ b/Packages/AtlasDomain/README.md @@ -0,0 +1,15 @@ +# AtlasDomain + +## Responsibility + +- Core models and domain rules + +## Planned Types + +- `Finding` +- `ActionPlan` +- `ActionItem` +- `TaskRun` +- `RecoveryItem` +- `AppFootprint` +- `PermissionState` diff --git a/Packages/AtlasDomain/Sources/AtlasDomain/AtlasDomain.swift b/Packages/AtlasDomain/Sources/AtlasDomain/AtlasDomain.swift new file mode 100644 index 0000000..c1472df --- /dev/null +++ b/Packages/AtlasDomain/Sources/AtlasDomain/AtlasDomain.swift @@ -0,0 +1,772 @@ +import Foundation + +public enum AtlasRoute: String, CaseIterable, Codable, Hashable, Identifiable, Sendable { + case overview + case smartClean + case apps + case history + case permissions + case settings + + public var id: String { rawValue } + + public var title: String { + switch self { + case .overview: + return AtlasL10n.string("route.overview.title") + case .smartClean: + return AtlasL10n.string("route.smartclean.title") + case .apps: + return AtlasL10n.string("route.apps.title") + case .history: + return AtlasL10n.string("route.history.title") + case .permissions: + return AtlasL10n.string("route.permissions.title") + case .settings: + return AtlasL10n.string("route.settings.title") + } + } + + public var subtitle: String { + switch self { + case .overview: + return AtlasL10n.string("route.overview.subtitle") + case .smartClean: + return AtlasL10n.string("route.smartclean.subtitle") + case .apps: + return AtlasL10n.string("route.apps.subtitle") + case .history: + return AtlasL10n.string("route.history.subtitle") + case .permissions: + return AtlasL10n.string("route.permissions.subtitle") + case .settings: + return AtlasL10n.string("route.settings.subtitle") + } + } + + public var systemImage: String { + switch self { + case .overview: + return "rectangle.grid.2x2" + case .smartClean: + return "sparkles" + case .apps: + return "square.stack.3d.up" + case .history: + return "clock.arrow.circlepath" + case .permissions: + return "lock.shield" + case .settings: + return "gearshape" + } + } +} + +public enum RiskLevel: String, CaseIterable, Codable, Hashable, Sendable { + case safe + case review + case advanced + + public var title: String { + switch self { + case .safe: + return AtlasL10n.string("risk.safe") + case .review: + return AtlasL10n.string("risk.review") + case .advanced: + return AtlasL10n.string("risk.advanced") + } + } +} + +public struct Finding: Identifiable, Codable, Hashable, Sendable { + public var id: UUID + public var title: String + public var detail: String + public var bytes: Int64 + public var risk: RiskLevel + public var category: String + public var targetPaths: [String]? + + public init( + id: UUID = UUID(), + title: String, + detail: String, + bytes: Int64, + risk: RiskLevel, + category: String, + targetPaths: [String]? = nil + ) { + self.id = id + self.title = title + self.detail = detail + self.bytes = bytes + self.risk = risk + self.category = category + self.targetPaths = targetPaths + } +} + +public struct ActionItem: Identifiable, Codable, Hashable, Sendable { + public enum Kind: String, Codable, Hashable, Sendable { + case removeCache + case removeApp + case archiveFile + case inspectPermission + } + + public var id: UUID + public var title: String + public var detail: String + public var kind: Kind + public var recoverable: Bool + + public init( + id: UUID = UUID(), + title: String, + detail: String, + kind: Kind, + recoverable: Bool + ) { + self.id = id + self.title = title + self.detail = detail + self.kind = kind + self.recoverable = recoverable + } +} + +public struct ActionPlan: Identifiable, Codable, Hashable, Sendable { + public var id: UUID + public var title: String + public var items: [ActionItem] + public var estimatedBytes: Int64 + + public init( + id: UUID = UUID(), + title: String, + items: [ActionItem], + estimatedBytes: Int64 + ) { + self.id = id + self.title = title + self.items = items + self.estimatedBytes = estimatedBytes + } +} + +public enum TaskKind: String, Codable, Hashable, Sendable { + case scan + case executePlan + case uninstallApp + case restore + case inspectPermissions + + public var title: String { + switch self { + case .scan: + return AtlasL10n.string("taskkind.scan") + case .executePlan: + return AtlasL10n.string("taskkind.executePlan") + case .uninstallApp: + return AtlasL10n.string("taskkind.uninstallApp") + case .restore: + return AtlasL10n.string("taskkind.restore") + case .inspectPermissions: + return AtlasL10n.string("taskkind.inspectPermissions") + } + } +} + +public enum TaskStatus: String, Codable, Hashable, Sendable { + case queued + case running + case completed + case failed + case cancelled + + public var title: String { + switch self { + case .queued: + return AtlasL10n.string("taskstatus.queued") + case .running: + return AtlasL10n.string("taskstatus.running") + case .completed: + return AtlasL10n.string("taskstatus.completed") + case .failed: + return AtlasL10n.string("taskstatus.failed") + case .cancelled: + return AtlasL10n.string("taskstatus.cancelled") + } + } +} + +public struct TaskRun: Identifiable, Codable, Hashable, Sendable { + public var id: UUID + public var kind: TaskKind + public var status: TaskStatus + public var summary: String + public var startedAt: Date + public var finishedAt: Date? + + public init( + id: UUID = UUID(), + kind: TaskKind, + status: TaskStatus, + summary: String, + startedAt: Date, + finishedAt: Date? = nil + ) { + self.id = id + self.kind = kind + self.status = status + self.summary = summary + self.startedAt = startedAt + self.finishedAt = finishedAt + } +} + +public enum RecoveryPayload: Codable, Hashable, Sendable { + case finding(Finding) + case app(AppFootprint) +} + +public struct RecoveryPathMapping: Codable, Hashable, Sendable { + public var originalPath: String + public var trashedPath: String + + public init(originalPath: String, trashedPath: String) { + self.originalPath = originalPath + self.trashedPath = trashedPath + } +} + +public struct RecoveryItem: Identifiable, Codable, Hashable, Sendable { + public var id: UUID + public var title: String + public var detail: String + public var originalPath: String + public var bytes: Int64 + public var deletedAt: Date + public var expiresAt: Date? + public var payload: RecoveryPayload? + public var restoreMappings: [RecoveryPathMapping]? + + public init( + id: UUID = UUID(), + title: String, + detail: String, + originalPath: String, + bytes: Int64, + deletedAt: Date, + expiresAt: Date? = nil, + payload: RecoveryPayload? = nil, + restoreMappings: [RecoveryPathMapping]? = nil + ) { + self.id = id + self.title = title + self.detail = detail + self.originalPath = originalPath + self.bytes = bytes + self.deletedAt = deletedAt + self.expiresAt = expiresAt + self.payload = payload + self.restoreMappings = restoreMappings + } +} + +public struct AppFootprint: Identifiable, Codable, Hashable, Sendable { + public var id: UUID + public var name: String + public var bundleIdentifier: String + public var bundlePath: String + public var bytes: Int64 + public var leftoverItems: Int + + public init( + id: UUID = UUID(), + name: String, + bundleIdentifier: String, + bundlePath: String, + bytes: Int64, + leftoverItems: Int + ) { + self.id = id + self.name = name + self.bundleIdentifier = bundleIdentifier + self.bundlePath = bundlePath + self.bytes = bytes + self.leftoverItems = leftoverItems + } +} + +public enum PermissionKind: String, Codable, CaseIterable, Hashable, Sendable, Identifiable { + case fullDiskAccess + case accessibility + case notifications + + public var id: String { rawValue } + + public var isRequiredForCurrentWorkflows: Bool { + switch self { + case .fullDiskAccess: + return true + case .accessibility, .notifications: + return false + } + } + + public var title: String { + switch self { + case .fullDiskAccess: + return AtlasL10n.string("permission.fullDiskAccess") + case .accessibility: + return AtlasL10n.string("permission.accessibility") + case .notifications: + return AtlasL10n.string("permission.notifications") + } + } + + public var systemImage: String { + switch self { + case .fullDiskAccess: + return "externaldrive.badge.checkmark" + case .accessibility: + return "figure.wave" + case .notifications: + return "bell.badge" + } + } +} + +public struct PermissionState: Identifiable, Codable, Hashable, Sendable { + public var kind: PermissionKind + public var isGranted: Bool + public var rationale: String + + public init(kind: PermissionKind, isGranted: Bool, rationale: String) { + self.kind = kind + self.isGranted = isGranted + self.rationale = rationale + } + + public var id: PermissionKind { kind } +} + +public struct AtlasOptimizationRecommendation: Identifiable, Codable, Hashable, Sendable { + public var id: String { action } + public var category: String + public var name: String + public var detail: String + public var action: String + public var isSafe: Bool + + public init(category: String, name: String, detail: String, action: String, isSafe: Bool) { + self.category = category + self.name = name + self.detail = detail + self.action = action + self.isSafe = isSafe + } +} + +public struct AtlasHealthSnapshot: Codable, Hashable, Sendable { + public var memoryUsedGB: Double + public var memoryTotalGB: Double + public var diskUsedGB: Double + public var diskTotalGB: Double + public var diskUsedPercent: Double + public var uptimeDays: Double + public var optimizations: [AtlasOptimizationRecommendation] + + public init( + memoryUsedGB: Double, + memoryTotalGB: Double, + diskUsedGB: Double, + diskTotalGB: Double, + diskUsedPercent: Double, + uptimeDays: Double, + optimizations: [AtlasOptimizationRecommendation] + ) { + self.memoryUsedGB = memoryUsedGB + self.memoryTotalGB = memoryTotalGB + self.diskUsedGB = diskUsedGB + self.diskTotalGB = diskTotalGB + self.diskUsedPercent = diskUsedPercent + self.uptimeDays = uptimeDays + self.optimizations = optimizations + } +} + +public struct StorageInsight: Identifiable, Codable, Hashable, Sendable { + public var id: UUID + public var title: String + public var path: String + public var bytes: Int64 + public var ageDescription: String + + public init( + id: UUID = UUID(), + title: String, + path: String, + bytes: Int64, + ageDescription: String + ) { + self.id = id + self.title = title + self.path = path + self.bytes = bytes + self.ageDescription = ageDescription + } +} + +public struct AtlasSettings: Codable, Hashable, Sendable { + public var recoveryRetentionDays: Int + public var notificationsEnabled: Bool + public var excludedPaths: [String] + public var language: AtlasLanguage + public var acknowledgementText: String + public var thirdPartyNoticesText: String + + public init( + recoveryRetentionDays: Int, + notificationsEnabled: Bool, + excludedPaths: [String], + language: AtlasLanguage = .default, + acknowledgementText: String? = nil, + thirdPartyNoticesText: String? = nil + ) { + self.recoveryRetentionDays = recoveryRetentionDays + self.notificationsEnabled = notificationsEnabled + self.excludedPaths = excludedPaths + self.language = language + self.acknowledgementText = acknowledgementText ?? AtlasL10n.acknowledgement(language: language) + self.thirdPartyNoticesText = thirdPartyNoticesText ?? AtlasL10n.thirdPartyNotices(language: language) + } + + private enum CodingKeys: String, CodingKey { + case recoveryRetentionDays + case notificationsEnabled + case excludedPaths + case language + case acknowledgementText + case thirdPartyNoticesText + } + + public init(from decoder: Decoder) throws { + let container = try decoder.container(keyedBy: CodingKeys.self) + let language = try container.decodeIfPresent(AtlasLanguage.self, forKey: .language) ?? .default + self.recoveryRetentionDays = try container.decodeIfPresent(Int.self, forKey: .recoveryRetentionDays) ?? 7 + self.notificationsEnabled = try container.decodeIfPresent(Bool.self, forKey: .notificationsEnabled) ?? true + self.excludedPaths = try container.decodeIfPresent([String].self, forKey: .excludedPaths) ?? [] + self.language = language + self.acknowledgementText = try container.decodeIfPresent(String.self, forKey: .acknowledgementText) + ?? AtlasL10n.acknowledgement(language: language) + self.thirdPartyNoticesText = try container.decodeIfPresent(String.self, forKey: .thirdPartyNoticesText) + ?? AtlasL10n.thirdPartyNotices(language: language) + } + + public func encode(to encoder: Encoder) throws { + var container = encoder.container(keyedBy: CodingKeys.self) + try container.encode(recoveryRetentionDays, forKey: .recoveryRetentionDays) + try container.encode(notificationsEnabled, forKey: .notificationsEnabled) + try container.encode(excludedPaths, forKey: .excludedPaths) + try container.encode(language, forKey: .language) + try container.encode(acknowledgementText, forKey: .acknowledgementText) + try container.encode(thirdPartyNoticesText, forKey: .thirdPartyNoticesText) + } +} + +public enum AtlasScaffoldFixtures { + private static func uuid(_ value: String) -> UUID { + UUID(uuidString: value) ?? UUID() + } + + private static let now = Date() + + public static var findings: [Finding] { + findings(language: AtlasL10n.currentLanguage) + } + + public static func findings(language: AtlasLanguage) -> [Finding] { + [ + Finding( + id: uuid("00000000-0000-0000-0000-000000000001"), + title: AtlasL10n.string("fixture.finding.derivedData.title", language: language), + detail: AtlasL10n.string("fixture.finding.derivedData.detail", language: language), + bytes: 18_400_000_000, + risk: .safe, + category: "Developer" + ), + Finding( + id: uuid("00000000-0000-0000-0000-000000000002"), + title: AtlasL10n.string("fixture.finding.browserCaches.title", language: language), + detail: AtlasL10n.string("fixture.finding.browserCaches.detail", language: language), + bytes: 4_800_000_000, + risk: .safe, + category: "System" + ), + Finding( + id: uuid("00000000-0000-0000-0000-000000000003"), + title: AtlasL10n.string("fixture.finding.oldRuntimes.title", language: language), + detail: AtlasL10n.string("fixture.finding.oldRuntimes.detail", language: language), + bytes: 12_100_000_000, + risk: .review, + category: "Developer" + ), + Finding( + id: uuid("00000000-0000-0000-0000-000000000004"), + title: AtlasL10n.string("fixture.finding.launchAgents.title", language: language), + detail: AtlasL10n.string("fixture.finding.launchAgents.detail", language: language), + bytes: 820_000_000, + risk: .advanced, + category: "Apps" + ), + ] + } + + public static var actionPlan: ActionPlan { + actionPlan(language: AtlasL10n.currentLanguage) + } + + public static func actionPlan(language: AtlasLanguage) -> ActionPlan { + ActionPlan( + id: uuid("00000000-0000-0000-0000-000000000010"), + title: AtlasL10n.string("fixture.plan.reclaimCommonClutter.title", language: language), + items: [ + ActionItem( + id: uuid("00000000-0000-0000-0000-000000000011"), + title: AtlasL10n.string("fixture.plan.item.moveDerivedData.title", language: language), + detail: AtlasL10n.string("fixture.plan.item.moveDerivedData.detail", language: language), + kind: .removeCache, + recoverable: true + ), + ActionItem( + id: uuid("00000000-0000-0000-0000-000000000012"), + title: AtlasL10n.string("fixture.plan.item.reviewRuntimes.title", language: language), + detail: AtlasL10n.string("fixture.plan.item.reviewRuntimes.detail", language: language), + kind: .archiveFile, + recoverable: true + ), + ActionItem( + id: uuid("00000000-0000-0000-0000-000000000013"), + title: AtlasL10n.string("fixture.plan.item.inspectAgents.title", language: language), + detail: AtlasL10n.string("fixture.plan.item.inspectAgents.detail", language: language), + kind: .inspectPermission, + recoverable: false + ), + ], + estimatedBytes: 23_200_000_000 + ) + } + + public static let apps: [AppFootprint] = [ + AppFootprint( + id: uuid("00000000-0000-0000-0000-000000000020"), + name: "Final Cut Pro", + bundleIdentifier: "com.apple.FinalCut", + bundlePath: "/Applications/Final Cut Pro.app", + bytes: 9_600_000_000, + leftoverItems: 6 + ), + AppFootprint( + id: uuid("00000000-0000-0000-0000-000000000021"), + name: "Xcode", + bundleIdentifier: "com.apple.dt.Xcode", + bundlePath: "/Applications/Xcode.app", + bytes: 34_800_000_000, + leftoverItems: 12 + ), + AppFootprint( + id: uuid("00000000-0000-0000-0000-000000000022"), + name: "Docker", + bundleIdentifier: "com.docker.docker", + bundlePath: "/Applications/Docker.app", + bytes: 7_400_000_000, + leftoverItems: 8 + ), + ] + + public static var taskRuns: [TaskRun] { + taskRuns(language: AtlasL10n.currentLanguage) + } + + public static func taskRuns(language: AtlasLanguage) -> [TaskRun] { + [ + TaskRun( + id: uuid("00000000-0000-0000-0000-000000000030"), + kind: .scan, + status: .completed, + summary: AtlasL10n.string("fixture.task.scan.summary", language: language), + startedAt: now.addingTimeInterval(-9_000), + finishedAt: now.addingTimeInterval(-8_940) + ), + TaskRun( + id: uuid("00000000-0000-0000-0000-000000000031"), + kind: .executePlan, + status: .running, + summary: AtlasL10n.string("fixture.task.execute.summary", language: language), + startedAt: now.addingTimeInterval(-800) + ), + TaskRun( + id: uuid("00000000-0000-0000-0000-000000000032"), + kind: .inspectPermissions, + status: .completed, + summary: AtlasL10n.string("fixture.task.permissions.summary", language: language), + startedAt: now.addingTimeInterval(-300), + finishedAt: now.addingTimeInterval(-285) + ), + ] + } + + public static var recoveryItems: [RecoveryItem] { + recoveryItems(language: AtlasL10n.currentLanguage) + } + + public static func recoveryItems(language: AtlasLanguage) -> [RecoveryItem] { + [ + RecoveryItem( + id: uuid("00000000-0000-0000-0000-000000000040"), + title: AtlasL10n.string("fixture.recovery.chromeCache.title", language: language), + detail: AtlasL10n.string("fixture.recovery.chromeCache.detail", language: language), + originalPath: "~/Library/Caches/Google/Chrome", + bytes: 1_200_000_000, + deletedAt: now.addingTimeInterval(-86_400), + expiresAt: now.addingTimeInterval(518_400), + payload: .finding( + Finding( + title: AtlasL10n.string("fixture.recovery.chromeCache.title", language: language), + detail: AtlasL10n.string("fixture.recovery.chromeCache.payload", language: language), + bytes: 1_200_000_000, + risk: .safe, + category: "Browsers" + ) + ) + ), + RecoveryItem( + id: uuid("00000000-0000-0000-0000-000000000041"), + title: AtlasL10n.string("fixture.recovery.simulatorSupport.title", language: language), + detail: AtlasL10n.string("fixture.recovery.simulatorSupport.detail", language: language), + originalPath: "~/Library/Developer/Xcode/iOS DeviceSupport", + bytes: 3_400_000_000, + deletedAt: now.addingTimeInterval(-172_800), + expiresAt: now.addingTimeInterval(432_000), + payload: .finding( + Finding( + title: AtlasL10n.string("fixture.recovery.simulatorSupport.title", language: language), + detail: AtlasL10n.string("fixture.recovery.simulatorSupport.payload", language: language), + bytes: 3_400_000_000, + risk: .review, + category: "Developer" + ) + ) + ), + ] + } + + public static var permissions: [PermissionState] { + permissions(language: AtlasL10n.currentLanguage) + } + + public static func permissions(language: AtlasLanguage) -> [PermissionState] { + [ + PermissionState( + kind: .fullDiskAccess, + isGranted: false, + rationale: AtlasL10n.string("fixture.permission.fullDiskAccess.rationale", language: language) + ), + PermissionState( + kind: .accessibility, + isGranted: false, + rationale: AtlasL10n.string("fixture.permission.accessibility.rationale", language: language) + ), + PermissionState( + kind: .notifications, + isGranted: true, + rationale: AtlasL10n.string("fixture.permission.notifications.rationale", language: language) + ), + ] + } + + public static var healthSnapshot: AtlasHealthSnapshot { + healthSnapshot(language: AtlasL10n.currentLanguage) + } + + public static func healthSnapshot(language: AtlasLanguage) -> AtlasHealthSnapshot { + AtlasHealthSnapshot( + memoryUsedGB: 14.2, + memoryTotalGB: 24.0, + diskUsedGB: 303.0, + diskTotalGB: 460.0, + diskUsedPercent: 65.9, + uptimeDays: 6.4, + optimizations: [ + AtlasOptimizationRecommendation( + category: "system", + name: AtlasL10n.string("fixture.health.optimization.dns.title", language: language), + detail: AtlasL10n.string("fixture.health.optimization.dns.detail", language: language), + action: "system_maintenance", + isSafe: true + ), + AtlasOptimizationRecommendation( + category: "system", + name: AtlasL10n.string("fixture.health.optimization.finder.title", language: language), + detail: AtlasL10n.string("fixture.health.optimization.finder.detail", language: language), + action: "cache_refresh", + isSafe: true + ), + AtlasOptimizationRecommendation( + category: "system", + name: AtlasL10n.string("fixture.health.optimization.memory.title", language: language), + detail: AtlasL10n.string("fixture.health.optimization.memory.detail", language: language), + action: "memory_pressure_relief", + isSafe: true + ), + ] + ) + } + + public static var storageInsights: [StorageInsight] { + storageInsights(language: AtlasL10n.currentLanguage) + } + + public static func storageInsights(language: AtlasLanguage) -> [StorageInsight] { + [ + StorageInsight( + id: uuid("00000000-0000-0000-0000-000000000050"), + title: AtlasL10n.string("fixture.storage.downloads.title", language: language), + path: "~/Downloads", + bytes: 13_100_000_000, + ageDescription: AtlasL10n.string("fixture.storage.downloads.age", language: language) + ), + StorageInsight( + id: uuid("00000000-0000-0000-0000-000000000051"), + title: AtlasL10n.string("fixture.storage.movies.title", language: language), + path: "~/Movies/Exports", + bytes: 21_400_000_000, + ageDescription: AtlasL10n.string("fixture.storage.movies.age", language: language) + ), + StorageInsight( + id: uuid("00000000-0000-0000-0000-000000000052"), + title: AtlasL10n.string("fixture.storage.installers.title", language: language), + path: "~/Desktop/Installers", + bytes: 6_200_000_000, + ageDescription: AtlasL10n.string("fixture.storage.installers.age", language: language) + ), + ] + } + + public static let settings: AtlasSettings = settings(language: .default) + + public static func settings(language: AtlasLanguage) -> AtlasSettings { + AtlasSettings( + recoveryRetentionDays: 7, + notificationsEnabled: true, + excludedPaths: [ + "~/Projects/ActiveClientWork", + "~/Movies/Exports", + ], + language: language + ) + } +} diff --git a/Packages/AtlasDomain/Sources/AtlasDomain/AtlasLocalization.swift b/Packages/AtlasDomain/Sources/AtlasDomain/AtlasLocalization.swift new file mode 100644 index 0000000..1a99998 --- /dev/null +++ b/Packages/AtlasDomain/Sources/AtlasDomain/AtlasLocalization.swift @@ -0,0 +1,101 @@ +import Foundation + +public enum AtlasLanguage: String, CaseIterable, Codable, Hashable, Sendable, Identifiable { + case zhHans = "zh-Hans" + case en = "en" + + public static let `default`: AtlasLanguage = .zhHans + + public var id: String { rawValue } + + public init(localeIdentifier: String) { + let normalized = localeIdentifier.lowercased() + if normalized.hasPrefix("en") { + self = .en + } else { + self = .zhHans + } + } + + public var locale: Locale { + Locale(identifier: rawValue) + } + + public var displayName: String { + switch self { + case .zhHans: + return "简体中文" + case .en: + return "English" + } + } +} + +public enum AtlasL10n { + private static let stateLock = NSLock() + private static var storedLanguage: AtlasLanguage = .default + + public static var currentLanguage: AtlasLanguage { + stateLock.withLock { + storedLanguage + } + } + + public static func setCurrentLanguage(_ language: AtlasLanguage) { + stateLock.withLock { + storedLanguage = language + } + } + + public static func string(_ key: String, language: AtlasLanguage? = nil, _ arguments: CVarArg...) -> String { + string(key, language: language, arguments: arguments) + } + + public static func string(_ key: String, language: AtlasLanguage? = nil, arguments: [CVarArg]) -> String { + let resolvedLanguage = language ?? currentLanguage + let format = bundle(for: resolvedLanguage).localizedString(forKey: key, value: nil, table: nil) + guard !arguments.isEmpty else { + return format + } + return String(format: format, locale: resolvedLanguage.locale, arguments: arguments) + } + + public static func localizedCategory(_ rawCategory: String, language: AtlasLanguage? = nil) -> String { + switch rawCategory.lowercased() { + case "developer": + return string("category.developer", language: language) + case "system": + return string("category.system", language: language) + case "apps": + return string("category.apps", language: language) + case "browsers": + return string("category.browsers", language: language) + default: + return rawCategory + } + } + + public static func acknowledgement(language: AtlasLanguage? = nil) -> String { + string("settings.acknowledgement.body", language: language) + } + + public static func thirdPartyNotices(language: AtlasLanguage? = nil) -> String { + string("settings.notices.body", language: language) + } + + private static func bundle(for language: AtlasLanguage) -> Bundle { + guard let path = Bundle.module.path(forResource: language.rawValue, ofType: "lproj"), + let bundle = Bundle(path: path) else { + return Bundle.module + } + return bundle + } +} + +private extension NSLock { + func withLock(_ body: () -> T) -> T { + lock() + defer { unlock() } + return body() + } +} diff --git a/Packages/AtlasDomain/Sources/AtlasDomain/Resources/en.lproj/Localizable.strings b/Packages/AtlasDomain/Sources/AtlasDomain/Resources/en.lproj/Localizable.strings new file mode 100644 index 0000000..dfb90ec --- /dev/null +++ b/Packages/AtlasDomain/Sources/AtlasDomain/Resources/en.lproj/Localizable.strings @@ -0,0 +1,561 @@ +"app.name" = "Atlas for Mac"; +"language.zhHans" = "Simplified Chinese"; +"language.en" = "English"; + +"route.overview.title" = "Overview"; +"route.overview.subtitle" = "See health, reclaimable space, and the next safe step first."; +"route.smartclean.title" = "Smart Clean"; +"route.smartclean.subtitle" = "Build and review a cleanup plan before you run anything."; +"route.apps.title" = "Apps"; +"route.apps.subtitle" = "Review app footprints, leftovers, and uninstall plans before you remove anything."; +"route.history.title" = "History"; +"route.history.subtitle" = "Track runs, outcomes, and recovery entry points."; +"route.permissions.title" = "Permissions"; +"route.permissions.subtitle" = "Explain why access is needed before asking for it."; +"route.settings.title" = "Settings"; +"route.settings.subtitle" = "Preferences, exclusions, acknowledgement, and notices."; + +"risk.safe" = "Safe"; +"risk.review" = "Review"; +"risk.advanced" = "Advanced"; + +"taskkind.scan" = "Smart Clean Scan"; +"taskkind.executePlan" = "Run Cleanup Plan"; +"taskkind.uninstallApp" = "Uninstall App"; +"taskkind.restore" = "Restore Item"; +"taskkind.inspectPermissions" = "Check Permission Status"; + +"taskstatus.queued" = "Queued"; +"taskstatus.running" = "Running"; +"taskstatus.completed" = "Completed"; +"taskstatus.failed" = "Failed"; +"taskstatus.cancelled" = "Cancelled"; + +"permission.fullDiskAccess" = "Full Disk Access"; +"permission.accessibility" = "Accessibility"; +"permission.notifications" = "Notifications"; + +"category.developer" = "Developer"; +"category.system" = "System"; +"category.apps" = "Apps"; +"category.browsers" = "Browsers"; + +"settings.acknowledgement.body" = "Atlas for Mac includes software derived from the open-source project Mole by tw93 and contributors, used under the MIT License. Atlas for Mac is an independent product and is not affiliated with or endorsed by the original authors."; +"settings.notices.body" = "This product includes software derived from the open-source project Mole by tw93 and contributors, used under the MIT License."; +"fixture.finding.derivedData.title" = "Xcode Derived Data"; +"fixture.finding.derivedData.detail" = "Build artifacts and indexes from older projects."; +"fixture.finding.browserCaches.title" = "Browser caches"; +"fixture.finding.browserCaches.detail" = "WebKit and Chromium cache folders with low recovery risk."; +"fixture.finding.oldRuntimes.title" = "Old simulator runtimes"; +"fixture.finding.oldRuntimes.detail" = "Unused iOS simulator assets that need review before deletion."; +"fixture.finding.launchAgents.title" = "Launch-agent leftovers"; +"fixture.finding.launchAgents.detail" = "Background helpers tied to removed apps and older experiments."; + +"fixture.plan.reclaimCommonClutter.title" = "Reclaim common clutter"; +"fixture.plan.item.moveDerivedData.title" = "Move Xcode Derived Data to Trash"; +"fixture.plan.item.moveDerivedData.detail" = "Keep recovery available from History and Recovery."; +"fixture.plan.item.reviewRuntimes.title" = "Review simulator runtimes"; +"fixture.plan.item.reviewRuntimes.detail" = "Ask the worker to confirm the plan details before execution."; +"fixture.plan.item.inspectAgents.title" = "Inspect launch agents"; +"fixture.plan.item.inspectAgents.detail" = "Require helper validation before removing privileged items."; + +"fixture.task.scan.summary" = "Scanned 214 locations and generated a cleanup plan that can free 35.3 GB."; +"fixture.task.execute.summary" = "Moving safe cache items into recovery storage."; +"fixture.task.permissions.summary" = "Permission status refreshed without prompting the user."; + +"fixture.recovery.chromeCache.title" = "Chrome cache bundle"; +"fixture.recovery.chromeCache.detail" = "Recoverable cache package from the previous Smart Clean run."; +"fixture.recovery.chromeCache.payload" = "Recovered cache data from the previous Smart Clean run."; +"fixture.recovery.simulatorSupport.title" = "Legacy simulator device support"; +"fixture.recovery.simulatorSupport.detail" = "This item still needs review and will clear after one week."; +"fixture.recovery.simulatorSupport.payload" = "Simulator support files waiting for review."; + +"fixture.permission.fullDiskAccess.rationale" = "Needed only when scanning system-wide caches, app leftovers, and protected Library paths."; +"fixture.permission.accessibility.rationale" = "Needed later for app uninstall flows that need to close running apps safely."; +"fixture.permission.notifications.rationale" = "Used for long-running task completion and recovery reminders."; + +"fixture.health.optimization.dns.title" = "DNS & Spotlight Check"; +"fixture.health.optimization.dns.detail" = "Refresh DNS cache and verify Spotlight status."; +"fixture.health.optimization.finder.title" = "Finder Cache Refresh"; +"fixture.health.optimization.finder.detail" = "Refresh QuickLook thumbnails and icon services cache."; +"fixture.health.optimization.memory.title" = "Memory Optimization"; +"fixture.health.optimization.memory.detail" = "Release inactive memory to improve system responsiveness."; + +"fixture.storage.downloads.title" = "Downloads archive"; +"fixture.storage.downloads.age" = "Mostly older than 90 days"; +"fixture.storage.movies.title" = "Movies export folder"; +"fixture.storage.movies.age" = "Large media files from the last 6 months"; +"fixture.storage.installers.title" = "Unused installers"; +"fixture.storage.installers.age" = "Mostly disk images older than 30 days"; +"application.error.workerRejected" = "Worker rejected request (%@): %@"; +"xpc.error.encodingFailed" = "Could not encode the background worker request: %@"; +"xpc.error.decodingFailed" = "Could not decode the background worker response: %@"; +"xpc.error.invalidResponse" = "The background worker returned an invalid response. Fully quit and reopen Atlas; if it still fails, reinstall the current build."; +"xpc.error.connectionUnavailable" = "The background worker is unavailable. Fully quit Atlas and reopen the installed copy from Applications; if it still fails, remove older copies and reinstall. System detail: %@"; +"xpc.error.timedOut" = "The background worker timed out after %.1f seconds. Try again; if it keeps happening, reopen Atlas."; +"application.plan.inspectPrivileged" = "Inspect privileged cleanup for %@"; +"application.plan.reviewFinding" = "Review %@"; +"application.plan.reviewSelected.one" = "Review 1 selected finding"; +"application.plan.reviewSelected.other" = "Review %d selected findings"; +"application.scan.completed" = "Smart Clean scan completed and a cleanup plan is ready."; +"application.preview.updated.one" = "Updated the cleanup plan for 1 action."; +"application.preview.updated.other" = "Updated the cleanup plan for %d actions."; +"application.plan.executed" = "Cleanup plan completed."; +"application.recovery.completed" = "Recovery restore completed."; +"application.apps.loaded.one" = "Loaded 1 app footprint."; +"application.apps.loaded.other" = "Loaded %d app footprints."; +"application.apps.previewUpdated" = "Updated the uninstall plan for %@."; +"application.apps.uninstallCompleted" = "App uninstall completed."; + +"infrastructure.permission.fullDiskAccess.granted" = "Atlas can inspect protected user data locations required for broader cleanup coverage."; +"infrastructure.permission.fullDiskAccess.needed" = "Needed only when scanning protected Library areas such as Mail, Safari, or Messages data. If you just enabled it in System Settings, fully quit and reopen Atlas before you check again."; +"infrastructure.permission.accessibility.granted" = "Atlas can coordinate app shutdown for safer uninstall flows."; +"infrastructure.permission.accessibility.needed" = "Needed later when uninstall flows must close running apps safely before cleanup."; +"infrastructure.permission.notifications.granted" = "Atlas can send completion and recovery reminders for long-running tasks."; +"infrastructure.permission.notifications.needed" = "Used for long-running task completion and recovery reminders."; +"infrastructure.scan.completed.one" = "Scanned 1 finding group and generated a cleanup plan."; +"infrastructure.scan.completed.other" = "Scanned %d finding groups and generated a cleanup plan."; +"infrastructure.execute.summary.clean.one" = "Moved 1 Smart Clean item into recovery."; +"infrastructure.execute.summary.clean.other" = "Moved %d Smart Clean items into recovery."; +"infrastructure.execute.summary.clean.mixed" = "Moved %d Smart Clean items into recovery; %d advanced items still need review."; +"infrastructure.restore.summary.one" = "Restored 1 item back into the workspace."; +"infrastructure.restore.summary.other" = "Restored %d items back into the workspace."; +"infrastructure.apps.loaded.one" = "Loaded 1 app footprint."; +"infrastructure.apps.loaded.other" = "Loaded %d app footprints."; +"infrastructure.apps.preview.summary" = "Generated an uninstall plan for %@."; +"infrastructure.apps.uninstall.summary" = "Moved %@ and its leftovers into recovery."; +"infrastructure.recovery.app.detail.one" = "Recoverable uninstall bundle with 1 leftover item."; +"infrastructure.recovery.app.detail.other" = "Recoverable uninstall bundle with %d leftover items."; +"infrastructure.plan.review.one" = "Review 1 selected finding"; +"infrastructure.plan.review.other" = "Review %d selected findings"; +"infrastructure.plan.uninstall.title" = "Uninstall %@"; +"infrastructure.plan.uninstall.moveBundle.title" = "Move %@ bundle into recovery"; +"infrastructure.plan.uninstall.moveBundle.detail" = "Trash the bundle at %@ while keeping the uninstall visible in History."; +"infrastructure.plan.uninstall.archive.one" = "Archive 1 leftover item"; +"infrastructure.plan.uninstall.archive.other" = "Archive %d leftover items"; +"infrastructure.plan.uninstall.archive.detail" = "Support files, caches, and launch items remain reviewable through recovery history."; +"infrastructure.action.reviewUninstall" = "Review uninstall plan for %@"; +"infrastructure.action.inspectPrivileged" = "Inspect privileged cleanup for %@"; +"infrastructure.action.archiveRecovery" = "Archive %@ into recovery"; +"infrastructure.action.moveToTrash" = "Move %@ to Trash"; +"app.search.prompt" = "Search findings, apps, history"; +"app.search.hint" = "Searches the visible content in the current route."; +"app.search.prompt.route" = "Search %@"; +"app.search.hint.route" = "Searches the visible content in %@."; + +"commands.navigate.menu" = "Navigate"; +"commands.actions.menu" = "Actions"; +"commands.taskcenter.open" = "Open Task Center"; +"commands.taskcenter.close" = "Close Task Center"; +"commands.actions.refreshCurrent" = "Refresh Current Screen"; +"commands.actions.runScan" = "Run Smart Clean Scan"; +"commands.actions.refreshApps" = "Refresh App Footprints"; +"commands.actions.refreshPermissions" = "Check Permission Status"; +"commands.actions.refreshHealth" = "Refresh System Snapshot"; + +"toolbar.taskcenter" = "Task Center"; +"toolbar.taskcenter.help" = "Open recent task activity (⌘7)"; +"toolbar.taskcenter.accessibilityLabel" = "Open Task Center"; +"toolbar.taskcenter.accessibilityHint" = "Shows recent task activity and a shortcut into History."; +"toolbar.permissions" = "Permissions"; +"toolbar.permissions.help" = "Check permission status (⌥⌘P)"; +"toolbar.permissions.accessibilityLabel" = "Open Permissions and check status"; +"toolbar.permissions.accessibilityHint" = "Takes you to the Permissions screen and checks the current permission snapshot."; +"toolbar.settings" = "Settings"; +"toolbar.settings.help" = "Open settings (⌘6)"; +"toolbar.settings.accessibilityLabel" = "Open Settings"; +"toolbar.settings.accessibilityHint" = "Takes you to preferences, retention, notifications, and notices."; +"sidebar.route.hint" = "Keyboard shortcut Command-%@."; + +"common.days" = "days"; +"common.enabled" = "Enabled"; +"common.disabled" = "Disabled"; +"common.granted" = "Granted"; +"common.neededLater" = "Not Needed Yet"; +"common.recoverable" = "Recoverable"; +"common.manualReview" = "Needs Review"; +"common.visible" = "Visible"; + +"model.scan.ready" = "Ready to build a cleanup plan."; +"model.scan.submitting" = "Starting a Smart Clean scan and building a cleanup plan…"; +"model.apps.ready" = "Ready to inspect app footprints and build uninstall plans."; +"model.apps.refreshing" = "Refreshing app footprints and uninstall plans…"; +"model.permissions.ready" = "Check permission status whenever you want an updated access snapshot."; +"model.permissions.refreshing" = "Checking permission status…"; +"model.permissions.summary.one" = "%d of %d tracked permissions is currently available."; +"model.permissions.summary.other" = "%d of %d tracked permissions are currently available."; +"model.taskcenter.none" = "No active tasks"; +"model.taskcenter.matching.one" = "1 matching task"; +"model.taskcenter.matching.other" = "%d matching tasks"; +"model.taskcenter.active.one" = "1 active task"; +"model.taskcenter.active.other" = "%d active tasks"; + +"taskcenter.title" = "Task Center"; +"taskcenter.callout.empty.title" = "No matching task activity"; +"taskcenter.callout.empty.detail" = "Run a scan, run a cleanup plan, uninstall an app, or restore an item to start the task timeline."; +"taskcenter.callout.active.title" = "Recent task activity stays visible here"; +"taskcenter.callout.active.detail" = "Open History for the full audit trail and available recovery items."; +"taskcenter.empty.title" = "No tasks yet"; +"taskcenter.empty.detail" = "Try another search, or run a new Smart Clean scan to populate the task timeline."; +"taskcenter.openHistory" = "Open History"; +"taskcenter.openHistory.hint" = "Moves to the History screen with the full audit trail and recovery items."; +"taskcenter.timeline.finished" = "Started %@ • Finished %@"; +"taskcenter.timeline.running" = "Started %@ • Still in progress"; + +"overview.screen.title" = "Overview"; +"overview.screen.subtitle" = "See what matters first, understand the trade-offs, and move into the next safe maintenance step with confidence."; +"overview.callout.ready.title" = "Atlas is ready for the current core workflows"; +"overview.callout.ready.detail" = "The core permissions required right now are available. Optional access such as Accessibility or notifications no longer marks the app as limited."; +"overview.callout.limited.title" = "You can keep going in limited mode"; +"overview.callout.limited.detail" = "At least one permission required for the current workflows is still unavailable, so Atlas stays in limited mode until that access is ready."; +"overview.metric.reclaimable.title" = "Reclaimable Space"; +"overview.metric.reclaimable.detail" = "Estimated from the current cleanup plan and recent workspace state. After a plan runs, this value recalculates from the remaining items."; +"overview.metric.findings.title" = "Findings Ready"; +"overview.metric.findings.detail" = "Grouped into Safe, Review, and Advanced lanes before you execute anything."; +"overview.metric.permissions.title" = "Permissions Ready"; +"overview.metric.permissions.ready" = "Atlas has the access it needs for the current workflows."; +"overview.metric.permissions.limited" = "A permission required for the current workflows is still missing, so Atlas remains in limited mode."; +"overview.snapshot.title" = "System Snapshot"; +"overview.snapshot.subtitle" = "High-signal health data and the safest next optimizations."; +"overview.snapshot.loading.title" = "Refreshing system snapshot"; +"overview.snapshot.loading.detail" = "Atlas is collecting the latest health information before updating the recommendations."; +"overview.snapshot.memory.title" = "Memory Used"; +"overview.snapshot.memory.detail" = "A quick signal for memory pressure and long-lived background load."; +"overview.snapshot.disk.title" = "Disk Used"; +"overview.snapshot.disk.detail" = "%@/%@ GB on the current system volume."; +"overview.snapshot.uptime.title" = "Uptime"; +"overview.snapshot.uptime.detail" = "Useful for spotting stale caches, long sessions, and restart candidates."; +"overview.snapshot.callout.warning.title" = "Storage pressure is worth addressing soon"; +"overview.snapshot.callout.warning.detail" = "Start with the Safe lane in Smart Clean to reclaim space without widening risk."; +"overview.snapshot.callout.ok.title" = "Your system looks stable enough for a safe cleanup pass"; +"overview.snapshot.callout.ok.detail" = "Review the recommended maintenance items below, then open Smart Clean when you want a deeper plan."; +"overview.snapshot.empty.title" = "System snapshot unavailable"; +"overview.snapshot.empty.detail" = "Atlas can still show persisted findings and recovery data while it waits for the next health refresh."; +"overview.actions.title" = "Recommended Actions"; +"overview.actions.subtitle" = "The highest-value findings from the current workspace snapshot."; +"overview.actions.empty.title" = "No matching findings"; +"overview.actions.empty.detail" = "Run Smart Clean again or clear the current search to repopulate the recommended actions list."; +"overview.activity.title" = "Recent Activity"; +"overview.activity.subtitle" = "See what Atlas changed recently and which actions are still recoverable."; +"overview.activity.empty.title" = "No recent activity"; +"overview.activity.empty.detail" = "Your task timeline will appear here after the next scan, execution, or restore action."; +"overview.activity.timeline.finished" = "Started %@ • Finished %@"; +"overview.activity.timeline.running" = "Started %@ • Still in progress"; +"overview.risk.safe" = "Low-risk cleanup"; +"overview.risk.review" = "Review before removing"; +"overview.risk.advanced" = "Advanced inspection recommended"; + +"smartclean.screen.title" = "Smart Clean"; +"smartclean.screen.subtitle" = "Turn scan results into a clear cleanup plan before you decide to run anything destructive."; +"smartclean.controls.title" = "Scan & Plan"; +"smartclean.controls.subtitle" = "Run a scan, update the plan, then choose the safest next step."; +"smartclean.loading.scan" = "Smart Clean is scanning"; +"smartclean.loading.execute" = "Running the reviewed cleanup plan"; +"smartclean.action.runScan" = "Run Scan"; +"smartclean.action.runScan.hint" = "Starts a new Smart Clean scan and rebuilds the current cleanup plan."; +"smartclean.action.refreshPreview" = "Update Plan"; +"smartclean.action.refreshPreview.hint" = "Regenerates the cleanup plan from current findings without running anything."; +"smartclean.action.execute" = "Run Plan"; +"smartclean.action.execute.hint" = "Runs the reviewed cleanup plan and keeps recoverable work visible in History."; +"smartclean.primary.scan.title" = "Start with a fresh scan"; +"smartclean.primary.scan.detail" = "Scan first when you do not yet have a current plan to review."; +"smartclean.primary.refresh.title" = "Refresh the current plan"; +"smartclean.primary.refresh.detail" = "Update the plan before you trust it, especially after the findings or validation state changed."; +"smartclean.primary.execute.title" = "Run the reviewed plan"; +"smartclean.primary.execute.detail" = "The current plan is ready, so execution becomes the single primary action."; +"smartclean.metric.previewSize.title" = "Estimated Space"; +"smartclean.metric.previewSize.detail" = "Estimated space the current cleanup plan can free. After a plan runs, this value recalculates from the remaining items."; +"smartclean.metric.actions.title" = "Selected Actions"; +"smartclean.metric.actions.detail" = "Every step is listed before Atlas changes anything."; +"smartclean.metric.review.title" = "Needs Review"; +"smartclean.metric.review.none" = "Everything in this plan is recoverable."; +"smartclean.metric.review.some" = "These items deserve a closer look before execution."; +"smartclean.preview.title" = "Current Cleanup Plan"; +"smartclean.execution.real" = "Can Run"; +"smartclean.execution.reviewOnly" = "Review Only"; +"smartclean.execution.coverage.full" = "All %d steps in this plan can run directly"; +"smartclean.execution.coverage.partial" = "Only %d of %d steps in this plan can run directly"; +"smartclean.execution.coverage.full.detail" = "These steps will really move items to Trash and support restore when a recovery path is available."; +"smartclean.execution.coverage.partial.detail" = "%d remaining step(s) still need review or are not supported for direct execution yet."; +"smartclean.preview.metric.space.title" = "This Plan Can Free"; +"smartclean.preview.metric.space.detail.one" = "Estimated across 1 planned step."; +"smartclean.preview.metric.space.detail.other" = "Estimated across %d planned steps."; +"smartclean.preview.callout.safe.title" = "This plan stays mostly in the Safe lane"; +"smartclean.preview.callout.safe.detail" = "Most selected steps should remain recoverable through History and Recovery."; +"smartclean.preview.empty.detail" = "Run a scan or update the plan to turn current findings into concrete cleanup steps. If you just ran a plan, this section shows only the remaining items."; +"smartclean.preview.callout.review.detail" = "Check the highlighted steps before you run the plan so you understand what stays recoverable and what needs extra judgment."; +"smartclean.preview.empty.title" = "No cleanup plan yet"; +"smartclean.preview.empty.detail" = "Run a scan or update the plan to turn current findings into concrete cleanup steps."; +"smartclean.empty.title" = "No matching findings"; +"smartclean.empty.detail" = "Clear the current search or run another scan to repopulate the Smart Clean lanes."; +"smartclean.status.scanning" = "Scanning for reclaimable clutter"; +"smartclean.status.executing" = "Applying the reviewed cleanup plan"; +"smartclean.status.empty" = "Run a fresh scan to generate a cleanup plan"; +"smartclean.status.cached" = "This plan has not been revalidated yet"; +"smartclean.status.revalidationFailed" = "Could not update the current plan"; +"smartclean.cached.title" = "This plan is from a previous result"; +"smartclean.revalidationFailed.title" = "Revalidation failed, so this still shows the previous plan"; +"smartclean.cached.detail" = "Run a fresh scan or update the plan before you execute it. What you see here is the last saved plan, not a verified current result."; +"smartclean.status.empty.detail" = "Atlas always turns scan results into a cleanup plan before anything runs, so you can review risk and recoverability first."; +"smartclean.status.ready" = "Review the plan, then run it"; +"smartclean.status.ready.detail" = "%d findings are ready. Start with the Safe lane, then review anything marked Review or Advanced before you run the plan."; +"smartclean.support.removeCache" = "Good candidate for a reversible cleanup step."; +"smartclean.support.removeApp" = "Review the footprint before you uninstall so leftovers are expected."; +"smartclean.support.archiveFile" = "Atlas will keep this in a reversible path when possible."; +"smartclean.support.inspectPermission" = "Requires extra attention before Atlas can safely proceed."; +"smartclean.section.safe" = "High-confidence cleanup candidates that should stay easy to recover."; +"smartclean.section.review" = "Worth a quick review before you remove them."; +"smartclean.section.advanced" = "Potentially sensitive items that deserve an informed decision."; +"smartclean.expectation.safe" = "Usually recoverable"; +"smartclean.expectation.review" = "Review before removal"; +"smartclean.expectation.advanced" = "Inspect carefully"; + +"apps.screen.title" = "Apps"; +"apps.screen.subtitle" = "Review each app's footprint, leftovers, and recovery path before you uninstall it."; +"apps.callout.default.title" = "Review the uninstall plan before you remove anything"; +"apps.callout.default.detail" = "Atlas lists large app footprints first, then builds an uninstall plan so you can review leftovers and recovery expectations."; +"apps.callout.preview.title" = "The uninstall plan is ready"; +"apps.callout.preview.detail" = "Review the steps below to confirm what will be removed, what stays recoverable, and which leftovers need extra attention."; +"apps.maintenance.title" = "App Maintenance"; +"apps.maintenance.subtitle" = "Refresh the local app inventory and review high-impact uninstall candidates first."; +"apps.inventory.title" = "App Inventory"; +"apps.inventory.subtitle" = "Refresh the inventory, then choose one app at a time instead of making uninstall decisions in a long list."; +"apps.loading.title" = "Refreshing app footprints"; +"apps.metric.listed.title" = "Apps Listed"; +"apps.metric.listed.detail" = "Installed apps Atlas can inspect and turn into uninstall plans."; +"apps.metric.footprint.title" = "Total App Footprint"; +"apps.metric.footprint.detail" = "Combined size across the current app inventory."; +"apps.metric.leftovers.title" = "Leftover Files"; +"apps.metric.leftovers.detail" = "Extra files Atlas can include in the uninstall plan before anything is removed."; +"apps.refresh.action" = "Refresh App Footprints"; +"apps.refresh.running" = "Refreshing…"; +"apps.refresh.hint" = "Refreshes the installed app inventory and recalculates footprints."; +"apps.preview.title" = "Uninstall Plan"; +"apps.preview.metric.size.title" = "Estimated Space"; +"apps.preview.metric.size.detail" = "Estimated space this uninstall plan would remove, including leftovers."; +"apps.preview.metric.actions.title" = "Plan Steps"; +"apps.preview.metric.actions.detail" = "Every uninstall step is listed before Atlas removes anything."; +"apps.preview.metric.recoverable.title" = "Recoverable Steps"; +"apps.preview.metric.recoverable.detail" = "These steps stay visible in History and Recovery when supported."; +"apps.preview.callout.safe.title" = "This uninstall plan stays mostly recoverable"; +"apps.preview.callout.safe.detail" = "Atlas preserves a recovery path for the selected app and related files where possible."; +"apps.preview.callout.review.title" = "Some steps in this uninstall plan need a closer review"; +"apps.preview.callout.review.detail" = "Review each step so the uninstall and leftover cleanup match your expectations."; +"apps.preview.row.recoverable" = "Recoverable through History when supported."; +"apps.preview.row.review" = "Run only after review."; +"apps.preview.action" = "Review Plan"; +"apps.preview.running" = "Building Plan…"; +"apps.preview.hint" = "Builds the uninstall plan for this app before anything is removed."; +"apps.uninstall.action" = "Run Uninstall"; +"apps.uninstall.running" = "Uninstalling…"; +"apps.uninstall.hint" = "Runs the reviewed uninstall plan for this app and tracks recoverable items when supported."; +"apps.list.title" = "Installed App Footprints"; +"apps.list.subtitle" = "Review app footprint, bundle ID, leftover count, and uninstall actions in one place."; +"apps.list.empty.title" = "No matching apps"; +"apps.list.empty.detail" = "Try another search term or refresh the app list to inspect the latest uninstall candidates."; +"apps.list.row.footnote" = "%@ • %d leftover files"; +"apps.list.row.leftovers" = "%d leftover files"; +"apps.browser.title" = "Review One App at a Time"; +"apps.browser.subtitle" = "Pick an app from the grouped list, inspect its footprint, then build the uninstall plan before you run it."; +"apps.group.large" = "Large Footprints"; +"apps.group.leftovers" = "Has Leftovers"; +"apps.group.other" = "Other Apps"; +"apps.detail.title" = "App Details"; +"apps.detail.empty.title" = "Select an app"; +"apps.detail.empty.detail" = "Choose an app from the list to inspect its footprint and build an uninstall plan."; +"apps.detail.size" = "App footprint"; +"apps.detail.leftovers" = "Leftover files"; +"apps.detail.path" = "Bundle path"; +"apps.detail.callout.preview.title" = "Build the uninstall plan first"; +"apps.detail.callout.preview.detail" = "Atlas keeps uninstall confidence high by showing the exact steps before anything is removed."; +"apps.detail.callout.ready.title" = "This app is ready for reviewed uninstall"; +"apps.detail.callout.ready.detail" = "The plan below shows what will be removed and what remains recoverable."; + +"history.screen.title" = "History"; +"history.screen.subtitle" = "See what ran, what changed, and what you can still restore before recovery retention expires."; +"history.callout.empty.title" = "History is ready even when recovery is empty"; +"history.callout.empty.detail" = "Your audit trail still records completed work, and future recoverable actions will appear here automatically."; +"history.callout.expiring.title" = "Some recovery items expire soon"; +"history.callout.expiring.detail" = "Open the recovery list first so you can restore anything you still need before the retention window closes."; +"history.callout.running.title" = "A recent task is still in progress"; +"history.callout.running.detail" = "Keep the timeline open if you want to confirm when it finishes and whether it creates new recovery items."; +"history.callout.recovery.title" = "Recovery-first cleanup is active"; +"history.callout.recovery.detail" = "Each recoverable action stays visible until its retention window ends, so you can reverse decisions with confidence."; +"history.metric.activity.title" = "Visible events"; +"history.metric.activity.detail.empty" = "Run a scan or cleanup action to build the audit trail."; +"history.metric.activity.detail.latest" = "Latest update %@"; +"history.metric.running.title" = "In progress"; +"history.metric.running.detail.none" = "No tasks are still running."; +"history.metric.running.detail.active" = "Running tasks stay pinned in the timeline until they finish."; +"history.metric.recovery.title" = "Recoverable now"; +"history.metric.recovery.detail.none" = "Nothing is waiting in recovery."; +"history.metric.recovery.detail.available" = "%@ total size available to restore."; +"history.browser.title" = "Browse History"; +"history.browser.subtitle" = "Start with what you can still restore, then open the archive when you need older task records."; +"history.browser.section.recovery" = "Can Restore"; +"history.browser.section.archive" = "Archive"; +"history.browser.summary.archive.one" = "1 archived task record"; +"history.browser.summary.archive.other" = "%d archived task records"; +"history.browser.summary.recovery.one" = "1 recoverable item"; +"history.browser.summary.recovery.other" = "%d recoverable items"; +"history.runs.title" = "Archive"; +"history.runs.subtitle" = "Current and past task records grouped so older activity does not overwhelm the page."; +"history.runs.empty.title" = "No matching tasks"; +"history.runs.empty.detail" = "Run a scan or clear the current search to inspect the latest task timeline."; +"history.timeline.latest" = "Latest"; +"history.timeline.meta.started" = "Started %@"; +"history.timeline.meta.finished" = "Finished %@"; +"history.timeline.meta.running" = "Still in progress"; +"history.recovery.title" = "Recoverable Items"; +"history.recovery.subtitle" = "Check the original location and retention window before restoring."; +"history.recovery.empty.title" = "No matching recovery items"; +"history.recovery.empty.detail" = "Recovery-first deletion is active, but no items currently match the search."; +"history.recovery.badge.available" = "Recoverable"; +"history.recovery.badge.expiring" = "Expiring soon"; +"history.recovery.filter.all" = "All"; +"history.recovery.filter.expiring" = "Expiring"; +"history.recovery.filtered.empty.title" = "No items in this filter"; +"history.recovery.filtered.empty.detail" = "Try another recovery category or clear the current filter to see everything that is still recoverable."; +"history.recovery.group.expiring" = "Expiring Soon"; +"history.recovery.group.apps" = "Apps"; +"history.recovery.group.developer" = "Developer"; +"history.recovery.group.browsers" = "Browsers"; +"history.recovery.group.system" = "System"; +"history.recovery.group.other" = "Other"; +"history.recovery.meta.deleted" = "Deleted %@"; +"history.recovery.meta.expires" = "Recoverable until %@"; +"history.recovery.meta.noexpiry" = "Retention window active"; +"history.recovery.path.label" = "Original location"; +"history.archive.group.active" = "Still Running"; +"history.archive.group.recent" = "Recent"; +"history.archive.group.older" = "Older Archive"; +"history.detail.title" = "Details"; +"history.detail.empty.title" = "Select an item"; +"history.detail.empty.detail" = "Choose a timeline event or recovery item to inspect its details here."; +"history.detail.task.status" = "Status"; +"history.detail.task.started" = "Started"; +"history.detail.task.finished" = "Finished"; +"history.detail.task.finished.running" = "Still in progress"; +"history.detail.task.callout.queued.title" = "This task is queued"; +"history.detail.task.callout.queued.detail" = "It has been accepted into the workflow, but execution has not started yet."; +"history.detail.task.callout.running.title" = "This task is still running"; +"history.detail.task.callout.running.detail" = "Keep this detail open if you want to confirm when the work completes."; +"history.detail.task.callout.completed.title" = "This task finished successfully"; +"history.detail.task.callout.completed.detail" = "Use the summary and timestamps below to confirm what changed."; +"history.detail.task.callout.failed.title" = "This task did not finish cleanly"; +"history.detail.task.callout.failed.detail" = "Review the summary and recent activity before retrying the workflow."; +"history.detail.recovery.size" = "Size"; +"history.detail.recovery.deleted" = "Deleted"; +"history.detail.recovery.window" = "Retention window"; +"history.detail.recovery.window.open" = "Still recoverable"; +"history.detail.recovery.callout.available.title" = "This item is still recoverable"; +"history.detail.recovery.callout.available.detail" = "Restore it whenever you are ready while the retention window remains open."; +"history.detail.recovery.callout.expiring.title" = "Restore soon if you still need this"; +"history.detail.recovery.callout.expiring.detail" = "This recovery item is close to the end of its retention window."; +"history.restore.action" = "Restore"; +"history.restore.running" = "Restoring…"; +"history.restore.hint" = "Restores this item while its recovery window is still open."; +"history.run.footnote.finished" = "Started %@ • Finished %@"; +"history.run.footnote.running" = "Started %@ • Still in progress"; +"history.recovery.footnote.deleted" = "Deleted %@"; +"history.recovery.footnote.expires" = "Recoverable until %@"; + +"permissions.screen.title" = "Permissions"; +"permissions.screen.subtitle" = "Explain why access matters, keep limited mode useful, and open System Settings only when a workflow truly needs it."; +"permissions.callout.ready.title" = "Core access is ready"; +"permissions.callout.ready.detail" = "The permissions required for the current workflows are already available. The remaining permissions can wait until the related workflow actually needs them."; +"permissions.callout.limited.title" = "Atlas is still in limited mode"; +"permissions.callout.limited.detail" = "At least one permission required for the current workflows is still unavailable. You can keep browsing and doing partial scans, but deeper actions stay limited until the status refreshes."; +"permissions.next.title" = "Next Step"; +"permissions.next.subtitle" = "Show the one access decision that matters most right now, then keep the rest in context."; +"permissions.next.missing.title" = "Next: %@"; +"permissions.next.ready.title" = "No urgent permission work"; +"permissions.next.ready.detail" = "%d of %d tracked permissions are already available."; +"permissions.controls.title" = "Access Overview"; +"permissions.controls.subtitle" = "Check the current access snapshot and understand why each permission matters before you open System Settings."; +"permissions.loading.title" = "Refreshing permission status"; +"permissions.metric.granted.title" = "Granted"; +"permissions.metric.granted.detail" = "Permissions Atlas can already rely on for the current workflow set."; +"permissions.metric.required.title" = "Required Now"; +"permissions.metric.required.detail" = "The core permissions that decide whether Atlas can leave limited mode."; +"permissions.metric.later.title" = "Not Needed Yet"; +"permissions.metric.later.detail" = "These can stay off without putting Atlas into limited mode until a related workflow asks for them."; +"permissions.metric.tracked.title" = "Tracked Permissions"; +"permissions.metric.tracked.detail" = "The minimum set Atlas surfaces for the frozen MVP workflows."; +"permissions.refresh" = "Check Permission Status"; +"permissions.refresh.hint" = "Checks the current permission snapshot without opening System Settings."; +"permissions.requiredSection.title" = "Required Now"; +"permissions.requiredSection.subtitle" = "These permissions decide whether Atlas can fully support the current workflows."; +"permissions.optionalSection.title" = "Can Wait"; +"permissions.optionalSection.subtitle" = "Keep these collapsed until a related workflow actually needs them."; +"permissions.optionalSection.disclosure" = "Optional permissions"; +"permissions.optionalSection.count.one" = "%d pending"; +"permissions.optionalSection.count.other" = "%d pending"; +"permissions.status.title" = "Permission Details"; +"permissions.status.subtitle" = "Each card explains what the permission unlocks, when it matters, and whether it can wait."; +"permissions.empty.title" = "No matching permission states"; +"permissions.empty.detail" = "Refresh permissions or clear the current search to inspect the access model."; +"permissions.row.ready" = "Already available for workflows that depend on this permission."; +"permissions.row.required" = "This permission is required for the current core workflows."; +"permissions.row.optional" = "This permission can wait until a related workflow actually needs it."; +"permissions.status.required" = "Needs Access"; +"permissions.status.optional" = "Can Wait"; +"permissions.grant.action" = "Open System Settings"; +"permissions.grant.notifications" = "Request Notifications"; +"permissions.support.fullDiskAccess" = "Needed only when you want deeper coverage in protected Library locations. If you just enabled it, fully quit and reopen Atlas before checking again."; +"permissions.support.accessibility" = "Needed later when Atlas must close running apps safely before uninstalling."; +"permissions.support.notifications" = "Optional, but useful for long-running task and recovery reminders."; + +"settings.screen.title" = "Settings"; +"settings.screen.subtitle" = "Adjust language, recovery retention, notifications, exclusions, and legal information in one place."; +"settings.callout.title" = "Atlas stores state locally and keeps destructive work auditable"; +"settings.callout.detail" = "Recovery retention, notifications, exclusions, acknowledgements, and notices are all visible here so users can understand how the product behaves before they rely on it."; +"settings.panel.title" = "Preference Center"; +"settings.panel.subtitle" = "Switch between active preferences, recovery behavior, and trust information without scrolling through one long page."; +"settings.panel.general" = "General"; +"settings.panel.recovery" = "Recovery"; +"settings.panel.trust" = "Trust"; +"settings.general.title" = "General"; +"settings.general.subtitle" = "Set the language, recovery retention, and notifications you use day to day."; +"settings.language.title" = "Interface Language"; +"settings.language.detail" = "Atlas currently supports Simplified Chinese and English."; +"settings.language.picker" = "Select language"; +"settings.language.hint" = "Switch between Simplified Chinese and English. Simplified Chinese is the default."; +"settings.retention.title" = "Recovery retention"; +"settings.retention.value" = "%d days"; +"settings.retention.detail" = "Recoverable cleanup stays available until this window expires."; +"settings.retention.adjust" = "Adjust retention"; +"settings.retention.hint" = "Changes how many days recoverable items remain available."; +"settings.recoveryPanel.title" = "Recovery behavior"; +"settings.recoveryPanel.subtitle" = "Control how long recoverable results stay available and which paths stay out of plans."; +"settings.notifications.title" = "Task notifications"; +"settings.notifications.detail" = "Helpful when scans, plan updates, or cleanup take long enough to leave the foreground."; +"settings.notifications.toggle" = "Enable task notifications"; +"settings.notifications.hint" = "Turns task and recovery notifications on or off."; +"settings.distribution.title" = "Distribution"; +"settings.distribution.value" = "Developer ID + Notarization"; +"settings.distribution.detail" = "The frozen MVP assumes direct distribution rather than a Mac App Store release."; +"settings.exclusions.title" = "Rules & Exclusions"; +"settings.exclusions.subtitle" = "These paths stay out of scan results and cleanup plans."; +"settings.exclusions.empty.title" = "No exclusions configured"; +"settings.exclusions.empty.detail" = "Atlas will scan the default coverage set until you add exclusions in a later iteration."; +"settings.exclusions.row.subtitle" = "Excluded from cleanup recommendations and plans."; +"settings.trust.title" = "Trust"; +"settings.trust.subtitle" = "These promises explain how Atlas keeps cleanup and recovery safe to use."; +"settings.trust.ack.title" = "Open-source acknowledgement"; +"settings.trust.ack.subtitle" = "Visible in-app so users can understand the product lineage."; +"settings.trust.notices.title" = "Third-party notices"; +"settings.trust.notices.subtitle" = "Visible in-app alongside the acknowledgement material."; +"settings.trust.destructive.title" = "Destructive actions"; +"settings.trust.destructive.subtitle" = "Destructive work is presented as recoverable, reviewable, and auditable whenever possible."; +"settings.trust.destructive.badge" = "Recovery-first"; +"settings.trust.documents.title" = "Reference Documents"; +"settings.trust.documents.subtitle" = "Open acknowledgements and third-party notices only when you need the full text."; +"settings.trust.documents.ack" = "View Acknowledgement"; +"settings.trust.documents.notices" = "View Notices"; +"settings.legal.title" = "Legal"; +"settings.legal.subtitle" = "Acknowledgements and third-party notices"; +"settings.acknowledgement.title" = "Acknowledgement"; +"settings.acknowledgement.subtitle" = "The in-app attribution text users can read without leaving the product."; +"settings.notices.title" = "Third-Party Notices"; +"settings.notices.subtitle" = "A concise notice surface for shipped third-party attributions."; diff --git a/Packages/AtlasDomain/Sources/AtlasDomain/Resources/zh-Hans.lproj/Localizable.strings b/Packages/AtlasDomain/Sources/AtlasDomain/Resources/zh-Hans.lproj/Localizable.strings new file mode 100644 index 0000000..7b158c5 --- /dev/null +++ b/Packages/AtlasDomain/Sources/AtlasDomain/Resources/zh-Hans.lproj/Localizable.strings @@ -0,0 +1,561 @@ +"app.name" = "Atlas for Mac"; +"language.zhHans" = "简体中文"; +"language.en" = "English"; + +"route.overview.title" = "概览"; +"route.overview.subtitle" = "查看健康摘要、预计可释放空间和下一步安全操作。"; +"route.smartclean.title" = "智能清理"; +"route.smartclean.subtitle" = "先生成并复核清理计划,再决定是否执行。"; +"route.apps.title" = "应用"; +"route.apps.subtitle" = "先查看应用占用、残留和卸载计划,再决定是否移除。"; +"route.history.title" = "历史"; +"route.history.subtitle" = "跟踪任务、结果和恢复入口。"; +"route.permissions.title" = "权限"; +"route.permissions.subtitle" = "先解释原因,再请求访问。"; +"route.settings.title" = "设置"; +"route.settings.subtitle" = "偏好、排除项、致谢和通知。"; + +"risk.safe" = "安全"; +"risk.review" = "复核"; +"risk.advanced" = "高级"; + +"taskkind.scan" = "智能清理扫描"; +"taskkind.executePlan" = "执行清理计划"; +"taskkind.uninstallApp" = "卸载应用"; +"taskkind.restore" = "恢复项目"; +"taskkind.inspectPermissions" = "检查权限状态"; + +"taskstatus.queued" = "排队中"; +"taskstatus.running" = "进行中"; +"taskstatus.completed" = "已完成"; +"taskstatus.failed" = "失败"; +"taskstatus.cancelled" = "已取消"; + +"permission.fullDiskAccess" = "完全磁盘访问"; +"permission.accessibility" = "辅助功能"; +"permission.notifications" = "通知"; + +"category.developer" = "开发"; +"category.system" = "系统"; +"category.apps" = "应用"; +"category.browsers" = "浏览器"; + +"settings.acknowledgement.body" = "Atlas for Mac 包含源自开源项目 Mole(作者 tw93 及贡献者)的软件,并依据 MIT 许可证使用。Atlas for Mac 是独立产品,与原作者不存在关联或背书关系。"; +"settings.notices.body" = "本产品包含源自开源项目 Mole(作者 tw93 及贡献者)的软件,并依据 MIT 许可证使用。"; +"fixture.finding.derivedData.title" = "Xcode 派生数据"; +"fixture.finding.derivedData.detail" = "来自旧项目的构建产物和索引。"; +"fixture.finding.browserCaches.title" = "浏览器缓存"; +"fixture.finding.browserCaches.detail" = "低风险的 WebKit 和 Chromium 缓存目录。"; +"fixture.finding.oldRuntimes.title" = "旧模拟器运行时"; +"fixture.finding.oldRuntimes.detail" = "需要删除前先复核的闲置 iOS 模拟器资源。"; +"fixture.finding.launchAgents.title" = "启动代理残留"; +"fixture.finding.launchAgents.detail" = "与已删除应用或旧实验相关的后台辅助项。"; + +"fixture.plan.reclaimCommonClutter.title" = "回收常见杂项"; +"fixture.plan.item.moveDerivedData.title" = "将 Xcode 派生数据移入废纸篓"; +"fixture.plan.item.moveDerivedData.detail" = "保留恢复路径,可在历史和恢复中找回。"; +"fixture.plan.item.reviewRuntimes.title" = "复核模拟器运行时"; +"fixture.plan.item.reviewRuntimes.detail" = "执行前先让 worker 确认计划细节。"; +"fixture.plan.item.inspectAgents.title" = "检查启动代理"; +"fixture.plan.item.inspectAgents.detail" = "移除受权限影响的项目之前,需要先通过 helper 校验。"; + +"fixture.task.scan.summary" = "已扫描 214 个位置,并生成预计可释放 35.3 GB 的清理计划。"; +"fixture.task.execute.summary" = "正在将安全缓存项移入恢复区。"; +"fixture.task.permissions.summary" = "权限状态已刷新,无需立即弹窗请求。"; + +"fixture.recovery.chromeCache.title" = "Chrome 缓存包"; +"fixture.recovery.chromeCache.detail" = "来自上一次智能清理的可恢复缓存包。"; +"fixture.recovery.chromeCache.payload" = "从上一次智能清理中恢复的缓存数据。"; +"fixture.recovery.simulatorSupport.title" = "旧版模拟器设备支持"; +"fixture.recovery.simulatorSupport.detail" = "这是仍需复核的项目,保留一周后才会清除。"; +"fixture.recovery.simulatorSupport.payload" = "等待复核的模拟器支持文件。"; + +"fixture.permission.fullDiskAccess.rationale" = "只有在扫描系统级缓存、应用残留和受保护的 Library 路径时才需要。"; +"fixture.permission.accessibility.rationale" = "卸载流程需要安全关闭正在运行的应用时,再请求此权限即可。"; +"fixture.permission.notifications.rationale" = "用于长任务完成通知和恢复提醒。"; + +"fixture.health.optimization.dns.title" = "DNS 与 Spotlight 检查"; +"fixture.health.optimization.dns.detail" = "刷新 DNS 缓存并确认 Spotlight 状态。"; +"fixture.health.optimization.finder.title" = "Finder 缓存刷新"; +"fixture.health.optimization.finder.detail" = "刷新 QuickLook 缩略图与图标服务缓存。"; +"fixture.health.optimization.memory.title" = "内存优化"; +"fixture.health.optimization.memory.detail" = "释放非活跃内存以改善系统响应。"; + +"fixture.storage.downloads.title" = "下载归档"; +"fixture.storage.downloads.age" = "大部分文件已超过 90 天"; +"fixture.storage.movies.title" = "影片导出目录"; +"fixture.storage.movies.age" = "最近 6 个月内生成的大型媒体文件"; +"fixture.storage.installers.title" = "未使用的安装器"; +"fixture.storage.installers.age" = "大部分磁盘镜像已超过 30 天"; +"application.error.workerRejected" = "Worker 拒绝了请求(%@):%@"; +"xpc.error.encodingFailed" = "无法编码后台请求:%@"; +"xpc.error.decodingFailed" = "无法解析后台响应:%@"; +"xpc.error.invalidResponse" = "后台工作组件返回了无效响应。请完全退出并重新打开 Atlas;若仍失败,请重新安装当前版本。"; +"xpc.error.connectionUnavailable" = "后台工作组件不可用。请完全退出 Atlas,并从“应用程序”文件夹重新打开当前安装版本;若仍失败,请删除旧版本后重新安装。系统详情:%@"; +"xpc.error.timedOut" = "后台工作组件响应超时(%.1f 秒)。请重试;若持续出现,请重新打开 Atlas。"; +"application.plan.inspectPrivileged" = "检查 %@ 的受权限影响清理项"; +"application.plan.reviewFinding" = "复核 %@"; +"application.plan.reviewSelected.one" = "复核 1 个已选发现项"; +"application.plan.reviewSelected.other" = "复核 %d 个已选发现项"; +"application.scan.completed" = "智能清理扫描已完成,并已生成清理计划。"; +"application.preview.updated.one" = "已根据 1 个操作更新清理计划。"; +"application.preview.updated.other" = "已根据 %d 个操作更新清理计划。"; +"application.plan.executed" = "清理计划已执行完成。"; +"application.recovery.completed" = "恢复操作已完成。"; +"application.apps.loaded.one" = "已载入 1 个应用占用项。"; +"application.apps.loaded.other" = "已载入 %d 个应用占用项。"; +"application.apps.previewUpdated" = "已更新 %@ 的卸载计划。"; +"application.apps.uninstallCompleted" = "应用卸载已完成。"; + +"infrastructure.permission.fullDiskAccess.granted" = "Atlas 已可检查更广范围的受保护用户数据位置。"; +"infrastructure.permission.fullDiskAccess.needed" = "只有在扫描 Mail、Safari 或 Messages 等受保护的 Library 区域时才需要。若你刚在系统设置里开启,请完全退出并重新打开 Atlas 后再检查权限状态。"; +"infrastructure.permission.accessibility.granted" = "Atlas 已可协调关闭应用,以支持更安全的卸载流程。"; +"infrastructure.permission.accessibility.needed" = "当卸载流程需要安全关闭正在运行的应用时,再请求此权限即可。"; +"infrastructure.permission.notifications.granted" = "Atlas 已可为长任务发送完成和恢复提醒。"; +"infrastructure.permission.notifications.needed" = "用于长任务完成通知和恢复提醒。"; +"infrastructure.scan.completed.one" = "已扫描 1 组发现项,并生成清理计划。"; +"infrastructure.scan.completed.other" = "已扫描 %d 组发现项,并生成清理计划。"; +"infrastructure.execute.summary.clean.one" = "已将 1 个智能清理项目移入恢复区。"; +"infrastructure.execute.summary.clean.other" = "已将 %d 个智能清理项目移入恢复区。"; +"infrastructure.execute.summary.clean.mixed" = "已将 %d 个智能清理项目移入恢复区;仍有 %d 个高级项目需复核。"; +"infrastructure.restore.summary.one" = "已将 1 个项目恢复回工作区。"; +"infrastructure.restore.summary.other" = "已将 %d 个项目恢复回工作区。"; +"infrastructure.apps.loaded.one" = "已载入 1 个应用占用项。"; +"infrastructure.apps.loaded.other" = "已载入 %d 个应用占用项。"; +"infrastructure.apps.preview.summary" = "已为 %@ 生成卸载计划。"; +"infrastructure.apps.uninstall.summary" = "已将 %@ 及其残留移入恢复区。"; +"infrastructure.recovery.app.detail.one" = "可恢复的卸载包,包含 1 个残留项目。"; +"infrastructure.recovery.app.detail.other" = "可恢复的卸载包,包含 %d 个残留项目。"; +"infrastructure.plan.review.one" = "复核 1 个已选发现项"; +"infrastructure.plan.review.other" = "复核 %d 个已选发现项"; +"infrastructure.plan.uninstall.title" = "卸载 %@"; +"infrastructure.plan.uninstall.moveBundle.title" = "将 %@ 应用包移入恢复区"; +"infrastructure.plan.uninstall.moveBundle.detail" = "将位于 %@ 的应用包移入废纸篓,同时在历史中保留本次卸载记录。"; +"infrastructure.plan.uninstall.archive.one" = "归档 1 个残留项目"; +"infrastructure.plan.uninstall.archive.other" = "归档 %d 个残留项目"; +"infrastructure.plan.uninstall.archive.detail" = "支持文件、缓存和启动项仍会通过恢复历史保留可追溯性。"; +"infrastructure.action.reviewUninstall" = "复核 %@ 的卸载计划"; +"infrastructure.action.inspectPrivileged" = "检查 %@ 的受权限影响清理项"; +"infrastructure.action.archiveRecovery" = "将 %@ 归档到恢复区"; +"infrastructure.action.moveToTrash" = "将 %@ 移入废纸篓"; +"app.search.prompt" = "搜索发现项、应用、历史"; +"app.search.hint" = "搜索当前页面中可见的内容。"; +"app.search.prompt.route" = "搜索 %@"; +"app.search.hint.route" = "搜索 %@ 中当前可见的内容。"; + +"commands.navigate.menu" = "导航"; +"commands.actions.menu" = "操作"; +"commands.taskcenter.open" = "打开任务中心"; +"commands.taskcenter.close" = "关闭任务中心"; +"commands.actions.refreshCurrent" = "刷新当前页面"; +"commands.actions.runScan" = "运行智能清理扫描"; +"commands.actions.refreshApps" = "刷新应用占用"; +"commands.actions.refreshPermissions" = "检查权限状态"; +"commands.actions.refreshHealth" = "刷新系统快照"; + +"toolbar.taskcenter" = "任务中心"; +"toolbar.taskcenter.help" = "打开最近任务活动(⌘7)"; +"toolbar.taskcenter.accessibilityLabel" = "打开任务中心"; +"toolbar.taskcenter.accessibilityHint" = "显示最近任务活动,并可快速跳转到历史页面。"; +"toolbar.permissions" = "权限"; +"toolbar.permissions.help" = "检查权限状态(⌥⌘P)"; +"toolbar.permissions.accessibilityLabel" = "打开权限并检查状态"; +"toolbar.permissions.accessibilityHint" = "进入权限页面并检查当前权限快照。"; +"toolbar.settings" = "设置"; +"toolbar.settings.help" = "打开设置(⌘6)"; +"toolbar.settings.accessibilityLabel" = "打开设置"; +"toolbar.settings.accessibilityHint" = "进入偏好、保留策略、通知和说明页面。"; +"sidebar.route.hint" = "键盘快捷键 Command-%@。"; + +"common.days" = "天"; +"common.enabled" = "已开启"; +"common.disabled" = "已关闭"; +"common.granted" = "已授予"; +"common.neededLater" = "暂不需要"; +"common.recoverable" = "可恢复"; +"common.manualReview" = "需复核"; +"common.visible" = "可见"; + +"model.scan.ready" = "准备开始扫描并生成清理计划。"; +"model.scan.submitting" = "正在开始智能清理扫描并生成清理计划…"; +"model.apps.ready" = "准备检查应用占用并生成卸载计划。"; +"model.apps.refreshing" = "正在刷新应用占用并更新卸载计划…"; +"model.permissions.ready" = "需要时可随时检查权限状态。"; +"model.permissions.refreshing" = "正在检查权限状态…"; +"model.permissions.summary.one" = "%d / %d 个跟踪权限当前可用。"; +"model.permissions.summary.other" = "%d / %d 个跟踪权限当前可用。"; +"model.taskcenter.none" = "当前没有活动任务"; +"model.taskcenter.matching.one" = "1 个匹配任务"; +"model.taskcenter.matching.other" = "%d 个匹配任务"; +"model.taskcenter.active.one" = "1 个活动任务"; +"model.taskcenter.active.other" = "%d 个活动任务"; + +"taskcenter.title" = "任务中心"; +"taskcenter.callout.empty.title" = "当前没有匹配的任务活动"; +"taskcenter.callout.empty.detail" = "运行扫描、执行清理计划、卸载应用或恢复项目后,任务时间线就会出现在这里。"; +"taskcenter.callout.active.title" = "最近任务活动会集中显示在这里"; +"taskcenter.callout.active.detail" = "想看完整审计轨迹和可恢复项目时,可以打开历史页面。"; +"taskcenter.empty.title" = "还没有任务"; +"taskcenter.empty.detail" = "试试新的搜索词,或者运行一次智能清理扫描来填充任务时间线。"; +"taskcenter.openHistory" = "打开历史"; +"taskcenter.openHistory.hint" = "进入历史页面,查看完整审计轨迹和恢复项。"; +"taskcenter.timeline.finished" = "开始于 %@ • 结束于 %@"; +"taskcenter.timeline.running" = "开始于 %@ • 仍在进行中"; + +"overview.screen.title" = "概览"; +"overview.screen.subtitle" = "先看最重要的信息,再在理解取舍的前提下执行下一步安全维护。"; +"overview.callout.ready.title" = "Atlas 已为当前主流程做好准备"; +"overview.callout.ready.detail" = "当前主流程所需的核心权限已经就绪。像辅助功能、通知这类可稍后授予的权限,不会再把状态标记为受限。"; +"overview.callout.limited.title" = "你仍然可以在受限模式下继续使用"; +"overview.callout.limited.detail" = "当前仍缺少至少一项主流程必需权限,因此 Atlas 会保持受限模式;补齐后就会自动恢复为就绪状态。"; +"overview.metric.reclaimable.title" = "预计可释放空间"; +"overview.metric.reclaimable.detail" = "基于当前清理计划和最近的工作区状态估算。执行后会按剩余项目重新计算。"; +"overview.metric.findings.title" = "待处理发现项"; +"overview.metric.findings.detail" = "在真正执行前,Atlas 会先将其分为安全、复核和高级三个分区。"; +"overview.metric.permissions.title" = "权限就绪度"; +"overview.metric.permissions.ready" = "Atlas 已具备当前流程所需的权限。"; +"overview.metric.permissions.limited" = "仍有主流程必需权限未就绪,因此当前显示为受限模式。"; +"overview.snapshot.title" = "系统快照"; +"overview.snapshot.subtitle" = "高信号健康数据,以及最适合优先处理的维护建议。"; +"overview.snapshot.loading.title" = "正在刷新系统快照"; +"overview.snapshot.loading.detail" = "Atlas 正在收集最新健康数据,然后更新推荐内容。"; +"overview.snapshot.memory.title" = "已用内存"; +"overview.snapshot.memory.detail" = "用于快速判断内存压力和长期后台负载。"; +"overview.snapshot.disk.title" = "磁盘占用"; +"overview.snapshot.disk.detail" = "当前系统卷已使用 %@ / %@ GB。"; +"overview.snapshot.uptime.title" = "运行时长"; +"overview.snapshot.uptime.detail" = "有助于发现陈旧缓存、长时间会话和重启时机。"; +"overview.snapshot.callout.warning.title" = "当前存储压力较高,值得尽快处理"; +"overview.snapshot.callout.warning.detail" = "建议先从智能清理中的“安全”分区开始,在不扩大风险的前提下释放空间。"; +"overview.snapshot.callout.ok.title" = "当前系统状态稳定,适合进行一次安全清理"; +"overview.snapshot.callout.ok.detail" = "先看下方维护建议,再在需要时进入智能清理做更深一步的规划。"; +"overview.snapshot.empty.title" = "暂时无法获取系统快照"; +"overview.snapshot.empty.detail" = "Atlas 仍然可以显示已持久化的发现项和恢复数据,并等待下一次健康刷新。"; +"overview.actions.title" = "推荐操作"; +"overview.actions.subtitle" = "当前工作区里最值得优先处理的高价值发现项。"; +"overview.actions.empty.title" = "没有匹配的发现项"; +"overview.actions.empty.detail" = "重新运行智能清理,或者清空当前搜索,以重新生成推荐操作列表。"; +"overview.activity.title" = "最近活动"; +"overview.activity.subtitle" = "查看 Atlas 最近做了什么,以及哪些操作仍可恢复。"; +"overview.activity.empty.title" = "最近没有活动"; +"overview.activity.empty.detail" = "下一次扫描、执行或恢复操作后,这里就会出现任务时间线。"; +"overview.activity.timeline.finished" = "开始于 %@ • 结束于 %@"; +"overview.activity.timeline.running" = "开始于 %@ • 仍在进行中"; +"overview.risk.safe" = "低风险清理"; +"overview.risk.review" = "建议删除前先复核"; +"overview.risk.advanced" = "建议高级检查"; + +"smartclean.screen.title" = "智能清理"; +"smartclean.screen.subtitle" = "先把扫描结果变成清晰的清理计划,再决定是否执行任何具有破坏性的操作。"; +"smartclean.controls.title" = "扫描与计划"; +"smartclean.controls.subtitle" = "先运行扫描,再更新计划,然后选择下一步最安全的动作。"; +"smartclean.loading.scan" = "正在扫描可回收杂项"; +"smartclean.loading.execute" = "正在执行已复核的清理计划"; +"smartclean.action.runScan" = "运行扫描"; +"smartclean.action.runScan.hint" = "开始新的智能清理扫描,并重建当前清理计划。"; +"smartclean.action.refreshPreview" = "更新计划"; +"smartclean.action.refreshPreview.hint" = "基于当前发现项重新生成清理计划,不会立即执行任何操作。"; +"smartclean.action.execute" = "执行计划"; +"smartclean.action.execute.hint" = "执行已复核的清理计划,并在历史中保留可恢复项目。"; +"smartclean.primary.scan.title" = "先运行新的扫描"; +"smartclean.primary.scan.detail" = "当你还没有当前可复核的计划时,扫描应该是唯一主动作。"; +"smartclean.primary.refresh.title" = "先更新当前计划"; +"smartclean.primary.refresh.detail" = "在信任这份计划之前,先刷新它,尤其是在发现项或验证状态发生变化后。"; +"smartclean.primary.execute.title" = "执行已复核的计划"; +"smartclean.primary.execute.detail" = "当前计划已经可用,因此执行应成为唯一主动作。"; +"smartclean.metric.previewSize.title" = "预计释放空间"; +"smartclean.metric.previewSize.detail" = "当前清理计划预计可释放的空间。执行后会按剩余项目重新计算。"; +"smartclean.metric.actions.title" = "已选操作"; +"smartclean.metric.actions.detail" = "Atlas 会在真正修改之前先列出每一个步骤。"; +"smartclean.metric.review.title" = "需要复核"; +"smartclean.metric.review.none" = "这份计划中的所有项目都可恢复。"; +"smartclean.metric.review.some" = "这些项目在执行前值得再看一眼。"; +"smartclean.preview.title" = "当前清理计划"; +"smartclean.execution.real" = "可直接执行"; +"smartclean.execution.reviewOnly" = "仅供复核"; +"smartclean.execution.coverage.full" = "这份计划中的 %d 个步骤都可直接执行"; +"smartclean.execution.coverage.partial" = "这份计划中有 %d/%d 个步骤可直接执行"; +"smartclean.execution.coverage.full.detail" = "这些步骤会真正移动到废纸篓,并在有恢复路径时支持恢复。"; +"smartclean.execution.coverage.partial.detail" = "其余 %d 个步骤仍需复核,或当前还不支持直接执行。"; +"smartclean.preview.metric.space.title" = "这份计划预计释放"; +"smartclean.preview.metric.space.detail.one" = "按当前 1 个计划步骤估算。"; +"smartclean.preview.metric.space.detail.other" = "按当前 %d 个计划步骤估算。"; +"smartclean.preview.callout.safe.title" = "当前计划主要来自“安全”分区"; +"smartclean.preview.callout.safe.detail" = "大多数已选步骤都可以在历史和恢复中找回。"; +"smartclean.preview.empty.detail" = "运行一次扫描或更新计划,把当前发现项变成具体的清理步骤。若刚执行完计划,这里只显示剩余项目。"; +"smartclean.preview.callout.review.detail" = "建议在执行前检查高亮步骤,确认哪些仍可恢复、哪些需要额外判断。"; +"smartclean.preview.empty.title" = "还没有清理计划"; +"smartclean.preview.empty.detail" = "运行一次扫描或更新计划,把当前发现项变成具体的清理步骤。"; +"smartclean.empty.title" = "没有匹配的发现项"; +"smartclean.empty.detail" = "清空当前搜索,或重新运行一次扫描来填充智能清理分区。"; +"smartclean.status.scanning" = "正在扫描可回收杂项"; +"smartclean.status.executing" = "正在应用已复核的清理方案"; +"smartclean.status.empty" = "运行新的扫描以生成清理计划"; +"smartclean.status.cached" = "当前计划尚未重新验证"; +"smartclean.status.revalidationFailed" = "未能更新当前计划"; +"smartclean.cached.title" = "这份计划来自上一次结果"; +"smartclean.revalidationFailed.title" = "重新验证失败,以下仍是上一次计划"; +"smartclean.cached.detail" = "请先重新运行扫描或更新计划,再执行。当前显示的只是上一次保存的计划,不能直接视为当前可执行结果。"; +"smartclean.status.empty.detail" = "Atlas 会先把扫描结果变成清理计划,再执行任何操作,方便你先复核风险和恢复方式。"; +"smartclean.status.ready" = "先复核计划,再执行"; +"smartclean.status.ready.detail" = "当前已有 %d 个发现项。建议先从“安全”分区开始,再复核“复核”和“高级”分区中的项目。"; +"smartclean.support.removeCache" = "适合作为可恢复的清理步骤。"; +"smartclean.support.removeApp" = "卸载前先复核占用和残留,更容易形成可预期结果。"; +"smartclean.support.archiveFile" = "在支持的情况下,Atlas 会将此类项目保留在可恢复路径中。"; +"smartclean.support.inspectPermission" = "在 Atlas 能安全继续之前,需要额外确认。"; +"smartclean.section.safe" = "高置信度的清理候选项,通常也更容易恢复。"; +"smartclean.section.review" = "建议删除前快速复核一次。"; +"smartclean.section.advanced" = "可能较敏感的项目,值得做更审慎的决定。"; +"smartclean.expectation.safe" = "通常可恢复"; +"smartclean.expectation.review" = "删除前先复核"; +"smartclean.expectation.advanced" = "建议谨慎检查"; + +"apps.screen.title" = "应用"; +"apps.screen.subtitle" = "先检查每个应用的占用、残留和恢复路径,再决定是否卸载。"; +"apps.callout.default.title" = "先复核卸载计划,再决定是否移除"; +"apps.callout.default.detail" = "Atlas 会先列出占用较大的应用,再生成卸载计划,方便你检查残留和恢复预期。"; +"apps.callout.preview.title" = "卸载计划已准备好"; +"apps.callout.preview.detail" = "请先复核下面的步骤,确认会删除什么、哪些项目可恢复、哪些残留需要额外注意。"; +"apps.maintenance.title" = "应用维护"; +"apps.maintenance.subtitle" = "刷新本地应用清单,并优先复核占用较大的卸载候选项。"; +"apps.inventory.title" = "应用清单"; +"apps.inventory.subtitle" = "先刷新清单,再一次只处理一个应用,而不是在长列表里直接做卸载决定。"; +"apps.loading.title" = "正在刷新应用占用"; +"apps.metric.listed.title" = "已列出应用"; +"apps.metric.listed.detail" = "Atlas 当前可以检查并生成卸载计划的已安装应用数量。"; +"apps.metric.footprint.title" = "总应用占用"; +"apps.metric.footprint.detail" = "当前应用清单的总体空间占用。"; +"apps.metric.leftovers.title" = "残留文件"; +"apps.metric.leftovers.detail" = "Atlas 会在卸载前先把这些附加文件纳入计划,方便你逐项确认。"; +"apps.refresh.action" = "刷新应用占用"; +"apps.refresh.running" = "正在刷新…"; +"apps.refresh.hint" = "刷新已安装应用清单,并重新计算占用。"; +"apps.preview.title" = "卸载计划"; +"apps.preview.metric.size.title" = "预计释放空间"; +"apps.preview.metric.size.detail" = "执行这份卸载计划后,预计可释放的空间,包含残留文件。"; +"apps.preview.metric.actions.title" = "计划步骤"; +"apps.preview.metric.actions.detail" = "Atlas 会在真正移除前先列出每一个卸载步骤。"; +"apps.preview.metric.recoverable.title" = "可恢复步骤"; +"apps.preview.metric.recoverable.detail" = "支持恢复的步骤会在历史和恢复中保留。"; +"apps.preview.callout.safe.title" = "这份卸载计划大多可恢复"; +"apps.preview.callout.safe.detail" = "在支持的情况下,Atlas 会为所选应用及相关文件保留恢复路径。"; +"apps.preview.callout.review.title" = "这份卸载计划中仍有步骤需要复核"; +"apps.preview.callout.review.detail" = "建议逐项查看计划,确保卸载结果和残留清理方式符合你的预期。"; +"apps.preview.row.recoverable" = "支持时可通过历史恢复。"; +"apps.preview.row.review" = "执行前需复核。"; +"apps.preview.action" = "查看计划"; +"apps.preview.running" = "正在生成计划…"; +"apps.preview.hint" = "先为这个应用生成卸载计划,再决定是否真正移除。"; +"apps.uninstall.action" = "执行卸载"; +"apps.uninstall.running" = "正在卸载…"; +"apps.uninstall.hint" = "执行已复核的卸载计划,并在支持时保留可恢复项目。"; +"apps.list.title" = "已安装应用占用"; +"apps.list.subtitle" = "在一个列表中查看应用占用、标识符、残留数量和卸载操作。"; +"apps.list.empty.title" = "没有匹配的应用"; +"apps.list.empty.detail" = "试试新的搜索词,或者刷新应用列表以检查最新的候选项。"; +"apps.list.row.footnote" = "%@ • 残留 %d 项"; +"apps.list.row.leftovers" = "残留 %d 项"; +"apps.browser.title" = "逐个复核应用"; +"apps.browser.subtitle" = "从分组列表里选择一个应用,先检查占用,再生成卸载计划并决定是否执行。"; +"apps.group.large" = "大体积应用"; +"apps.group.leftovers" = "有残留"; +"apps.group.other" = "其他应用"; +"apps.detail.title" = "应用详情"; +"apps.detail.empty.title" = "选择一个应用"; +"apps.detail.empty.detail" = "从左侧列表选择一个应用,以检查占用并生成卸载计划。"; +"apps.detail.size" = "应用占用"; +"apps.detail.leftovers" = "残留文件"; +"apps.detail.path" = "Bundle 路径"; +"apps.detail.callout.preview.title" = "先生成卸载计划"; +"apps.detail.callout.preview.detail" = "为了让卸载更可控,Atlas 会先展示准确步骤,再决定是否真正移除。"; +"apps.detail.callout.ready.title" = "这个应用已经可以在复核后卸载"; +"apps.detail.callout.ready.detail" = "下面的计划会说明将删除什么,以及哪些内容仍可恢复。"; + +"history.screen.title" = "历史"; +"history.screen.subtitle" = "查看执行过的任务、发生的变更,以及在恢复窗口关闭前仍可找回的项目。"; +"history.callout.empty.title" = "即使恢复区为空,历史仍然可用"; +"history.callout.empty.detail" = "审计轨迹仍会记录已完成的工作,下一次可恢复操作也会自动出现在这里。"; +"history.callout.expiring.title" = "有恢复项即将到期"; +"history.callout.expiring.detail" = "先查看恢复列表,确认是否有仍需找回的项目,避免在保留窗口关闭后再处理。"; +"history.callout.running.title" = "最近有任务仍在进行中"; +"history.callout.running.detail" = "保留时间线可帮助你确认任务何时完成,以及是否会产生新的可恢复项目。"; +"history.callout.recovery.title" = "恢复优先的清理策略已启用"; +"history.callout.recovery.detail" = "每个可恢复操作都会在保留期结束前保持可见,便于你有把握地撤销决定。"; +"history.metric.activity.title" = "当前记录"; +"history.metric.activity.detail.empty" = "运行一次扫描或清理操作后,这里会开始形成审计轨迹。"; +"history.metric.activity.detail.latest" = "最近更新 %@"; +"history.metric.running.title" = "进行中"; +"history.metric.running.detail.none" = "当前没有仍在运行的任务。"; +"history.metric.running.detail.active" = "正在运行的任务会固定显示在时间线中,直到完成。"; +"history.metric.recovery.title" = "可恢复项"; +"history.metric.recovery.detail.none" = "当前没有等待恢复的项目。"; +"history.metric.recovery.detail.available" = "共 %@,仍可恢复。"; +"history.browser.title" = "浏览历史"; +"history.browser.subtitle" = "先看仍可恢复的内容,需要回溯旧任务时再打开归档,避免信息混在一起。"; +"history.browser.section.recovery" = "可恢复"; +"history.browser.section.archive" = "归档"; +"history.browser.summary.archive.one" = "1 条归档任务记录"; +"history.browser.summary.archive.other" = "%d 条归档任务记录"; +"history.browser.summary.recovery.one" = "1 个可恢复项目"; +"history.browser.summary.recovery.other" = "%d 个可恢复项目"; +"history.runs.title" = "归档"; +"history.runs.subtitle" = "把当前和过往任务分组展示,避免旧记录把页面拖得太长。"; +"history.runs.empty.title" = "没有匹配的任务"; +"history.runs.empty.detail" = "运行一次扫描,或者清空当前搜索,以查看最新任务时间线。"; +"history.timeline.latest" = "最新"; +"history.timeline.meta.started" = "开始于 %@"; +"history.timeline.meta.finished" = "结束于 %@"; +"history.timeline.meta.running" = "仍在进行中"; +"history.recovery.title" = "可恢复项目"; +"history.recovery.subtitle" = "恢复前先确认原始位置和保留窗口。"; +"history.recovery.empty.title" = "没有匹配的恢复项"; +"history.recovery.empty.detail" = "恢复优先的删除策略已启用,但当前搜索下没有匹配的项目。"; +"history.recovery.badge.available" = "可恢复"; +"history.recovery.badge.expiring" = "即将到期"; +"history.recovery.filter.all" = "全部"; +"history.recovery.filter.expiring" = "即将到期"; +"history.recovery.filtered.empty.title" = "这个筛选下没有项目"; +"history.recovery.filtered.empty.detail" = "试试其他恢复分类,或清空当前筛选来查看所有仍可恢复的项目。"; +"history.recovery.group.expiring" = "即将到期"; +"history.recovery.group.apps" = "应用"; +"history.recovery.group.developer" = "开发者"; +"history.recovery.group.browsers" = "浏览器"; +"history.recovery.group.system" = "系统"; +"history.recovery.group.other" = "其他"; +"history.recovery.meta.deleted" = "删除于 %@"; +"history.recovery.meta.expires" = "可恢复至 %@"; +"history.recovery.meta.noexpiry" = "保留窗口仍然开放"; +"history.recovery.path.label" = "原始位置"; +"history.archive.group.active" = "仍在运行"; +"history.archive.group.recent" = "近期记录"; +"history.archive.group.older" = "更早归档"; +"history.detail.title" = "详情"; +"history.detail.empty.title" = "选择一个项目"; +"history.detail.empty.detail" = "在左侧选择一条时间线记录或一个恢复项,即可在这里查看详情。"; +"history.detail.task.status" = "状态"; +"history.detail.task.started" = "开始时间"; +"history.detail.task.finished" = "结束时间"; +"history.detail.task.finished.running" = "仍在进行中"; +"history.detail.task.callout.queued.title" = "任务已进入队列"; +"history.detail.task.callout.queued.detail" = "系统已经接受这项任务,但执行尚未真正开始。"; +"history.detail.task.callout.running.title" = "任务仍在运行中"; +"history.detail.task.callout.running.detail" = "如果你想确认任务何时完成,可以先保留这个详情视图。"; +"history.detail.task.callout.completed.title" = "任务已成功完成"; +"history.detail.task.callout.completed.detail" = "可通过下面的摘要和时间信息确认这次变更实际做了什么。"; +"history.detail.task.callout.failed.title" = "任务未能完整结束"; +"history.detail.task.callout.failed.detail" = "建议先检查摘要和最近活动,再决定是否重新执行。"; +"history.detail.recovery.size" = "大小"; +"history.detail.recovery.deleted" = "删除时间"; +"history.detail.recovery.window" = "保留窗口"; +"history.detail.recovery.window.open" = "仍可恢复"; +"history.detail.recovery.callout.available.title" = "这个项目仍可恢复"; +"history.detail.recovery.callout.available.detail" = "只要保留窗口还在,你可以在准备好后随时恢复。"; +"history.detail.recovery.callout.expiring.title" = "如果还需要它,请尽快恢复"; +"history.detail.recovery.callout.expiring.detail" = "这个恢复项已经接近保留窗口的结束时间。"; +"history.restore.action" = "恢复"; +"history.restore.running" = "正在恢复…"; +"history.restore.hint" = "只要恢复窗口仍然开放,就可以把这个项目恢复回工作区。"; +"history.run.footnote.finished" = "开始于 %@ • 结束于 %@"; +"history.run.footnote.running" = "开始于 %@ • 仍在进行中"; +"history.recovery.footnote.deleted" = "删除于 %@"; +"history.recovery.footnote.expires" = "可恢复至 %@"; + +"permissions.screen.title" = "权限"; +"permissions.screen.subtitle" = "先解释为什么需要访问,保持受限模式可用,并且只在具体流程真正需要时再打开系统设置。"; +"permissions.callout.ready.title" = "核心访问能力已就绪"; +"permissions.callout.ready.detail" = "当前主流程所需的权限已经齐备。其余权限即使暂未授予,也只会在相关流程真正需要时再提示。"; +"permissions.callout.limited.title" = "当前仍处于受限模式"; +"permissions.callout.limited.detail" = "至少还有一项主流程必需权限未就绪。你仍可继续浏览和部分扫描,但更深层操作会保持受限,直到权限状态刷新为止。"; +"permissions.next.title" = "下一步"; +"permissions.next.subtitle" = "只突出当前最重要的权限决策,其余状态留在上下文中查看。"; +"permissions.next.missing.title" = "下一步:%@"; +"permissions.next.ready.title" = "当前没有紧急权限事项"; +"permissions.next.ready.detail" = "当前已具备 %d/%d 项跟踪权限。"; +"permissions.controls.title" = "权限概览"; +"permissions.controls.subtitle" = "检查当前权限快照,并在打开系统设置前理解每项权限的用途。"; +"permissions.loading.title" = "正在刷新权限状态"; +"permissions.metric.granted.title" = "已授予"; +"permissions.metric.granted.detail" = "Atlas 当前已可依赖的权限数量。"; +"permissions.metric.required.title" = "当前必需"; +"permissions.metric.required.detail" = "决定是否退出受限模式的核心权限。"; +"permissions.metric.later.title" = "暂不需要"; +"permissions.metric.later.detail" = "这些权限暂未授予也不会让 Atlas 进入受限模式,只有相关流程需要时才会用到。"; +"permissions.metric.tracked.title" = "跟踪权限"; +"permissions.metric.tracked.detail" = "冻结 MVP 流程当前展示的最小权限集合。"; +"permissions.refresh" = "检查权限状态"; +"permissions.refresh.hint" = "检查当前权限快照,而不会直接打开系统设置。"; +"permissions.requiredSection.title" = "当前必需"; +"permissions.requiredSection.subtitle" = "这些权限决定了 Atlas 是否能完整支持当前主流程。"; +"permissions.optionalSection.title" = "可稍后处理"; +"permissions.optionalSection.subtitle" = "先折叠这些权限,等相关流程真正需要时再展开。"; +"permissions.optionalSection.disclosure" = "可选权限"; +"permissions.optionalSection.count.one" = "%d 项待处理"; +"permissions.optionalSection.count.other" = "%d 项待处理"; +"permissions.status.title" = "权限详情"; +"permissions.status.subtitle" = "每张卡片都会说明这项权限能解锁什么、什么时候需要,以及是否可以稍后再处理。"; +"permissions.empty.title" = "没有匹配的权限状态"; +"permissions.empty.detail" = "刷新权限,或清空当前搜索,以查看当前访问模型。"; +"permissions.row.ready" = "依赖这项权限的流程现在已经可以运行。"; +"permissions.row.required" = "当前主流程需要这项权限。"; +"permissions.row.optional" = "这项权限可以等相关流程真正需要时再授予。"; +"permissions.status.required" = "需要授权"; +"permissions.status.optional" = "可稍后授权"; +"permissions.grant.action" = "打开系统设置"; +"permissions.grant.notifications" = "请求通知权限"; +"permissions.support.fullDiskAccess" = "只有在你想扫描更深层的受保护 Library 位置时才需要。若刚开启,请完全退出并重新打开 Atlas,再回来检查。"; +"permissions.support.accessibility" = "只有在 Atlas 需要先安全关闭正在运行的应用再卸载时才需要。"; +"permissions.support.notifications" = "不是必须项,但对长任务提醒和恢复提醒很有帮助。"; + +"settings.screen.title" = "设置"; +"settings.screen.subtitle" = "在这里调整语言、恢复保留期、通知、排除项,并查看致谢与说明。"; +"settings.callout.title" = "Atlas 会将状态保存在本地,并让破坏性操作保持可审计"; +"settings.callout.detail" = "恢复保留策略、通知、排除项、致谢和说明都集中在这里,方便你在真正依赖产品前先理解其行为。"; +"settings.panel.title" = "偏好中心"; +"settings.panel.subtitle" = "在活跃设置、恢复行为和信任信息之间切换,而不是在一个长页面里反复滚动。"; +"settings.panel.general" = "通用"; +"settings.panel.recovery" = "恢复"; +"settings.panel.trust" = "信任"; +"settings.general.title" = "通用"; +"settings.general.subtitle" = "设置日常维护会用到的语言、恢复保留期和通知。"; +"settings.language.title" = "界面语言"; +"settings.language.detail" = "当前优先支持简体中文和英文切换。"; +"settings.language.picker" = "选择界面语言"; +"settings.language.hint" = "在简体中文和英文之间切换,默认语言为简体中文。"; +"settings.retention.title" = "恢复保留时长"; +"settings.retention.value" = "%d 天"; +"settings.retention.detail" = "可恢复的清理结果会在这个时间窗口内保留。"; +"settings.retention.adjust" = "调整保留时长"; +"settings.retention.hint" = "更改可恢复项目的保留天数。"; +"settings.recoveryPanel.title" = "恢复行为"; +"settings.recoveryPanel.subtitle" = "控制可恢复结果保留多久,以及哪些路径不进入计划。"; +"settings.notifications.title" = "任务通知"; +"settings.notifications.detail" = "适合在扫描、更新计划或清理需要较长时间时提醒你。"; +"settings.notifications.toggle" = "启用任务通知"; +"settings.notifications.hint" = "打开或关闭任务和恢复通知。"; +"settings.distribution.title" = "分发方式"; +"settings.distribution.value" = "Developer ID + Notarization"; +"settings.distribution.detail" = "冻结 MVP 假设采用直接分发,而不是 Mac App Store。"; +"settings.exclusions.title" = "规则与排除项"; +"settings.exclusions.subtitle" = "这些路径不会出现在扫描结果和清理计划里。"; +"settings.exclusions.empty.title" = "还没有配置排除项"; +"settings.exclusions.empty.detail" = "在后续迭代前,Atlas 会先使用默认覆盖范围进行扫描。"; +"settings.exclusions.row.subtitle" = "不会纳入清理建议和计划。"; +"settings.trust.title" = "信任"; +"settings.trust.subtitle" = "这些承诺帮助你理解 Atlas 如何安全地处理清理与恢复。"; +"settings.trust.ack.title" = "开源致谢"; +"settings.trust.ack.subtitle" = "在应用内可见,帮助用户理解产品来源。"; +"settings.trust.notices.title" = "第三方说明"; +"settings.trust.notices.subtitle" = "与致谢内容一起在应用内展示。"; +"settings.trust.destructive.title" = "破坏性操作"; +"settings.trust.destructive.subtitle" = "Atlas 会尽量把具有破坏性的操作做成可恢复、可复核、可追溯。"; +"settings.trust.destructive.badge" = "恢复优先"; +"settings.trust.documents.title" = "参考文档"; +"settings.trust.documents.subtitle" = "只有在需要完整文本时再打开致谢和第三方说明。"; +"settings.trust.documents.ack" = "查看致谢"; +"settings.trust.documents.notices" = "查看第三方说明"; +"settings.legal.title" = "法律信息"; +"settings.legal.subtitle" = "致谢与第三方声明"; +"settings.acknowledgement.title" = "致谢"; +"settings.acknowledgement.subtitle" = "用户无需离开产品,就能查看应用内致谢文本。"; +"settings.notices.title" = "第三方说明"; +"settings.notices.subtitle" = "用于展示随产品一起分发的第三方说明信息。"; diff --git a/Packages/AtlasDomain/Tests/AtlasDomainTests/AtlasDomainTests.swift b/Packages/AtlasDomain/Tests/AtlasDomainTests/AtlasDomainTests.swift new file mode 100644 index 0000000..f31fd19 --- /dev/null +++ b/Packages/AtlasDomain/Tests/AtlasDomainTests/AtlasDomainTests.swift @@ -0,0 +1,44 @@ +import XCTest +@testable import AtlasDomain + +final class AtlasDomainTests: XCTestCase { + override func setUp() { + super.setUp() + AtlasL10n.setCurrentLanguage(.zhHans) + } + + func testPrimaryRoutesMatchFrozenMVP() { + XCTAssertEqual( + AtlasRoute.allCases.map(\.title), + ["概览", "智能清理", "应用", "历史", "权限", "设置"] + ) + } + + func testScaffoldFixturesExposeRecoveryItems() { + XCTAssertFalse(AtlasScaffoldFixtures.recoveryItems.isEmpty) + XCTAssertGreaterThan(AtlasScaffoldFixtures.findings.map(\.bytes).reduce(0, +), 0) + } + + + func testSettingsDecodeDefaultsLanguageToChineseWhenMissing() throws { + let data = Data(""" + { + "recoveryRetentionDays": 7, + "notificationsEnabled": true, + "excludedPaths": [] + } + """.utf8) + + let settings = try JSONDecoder().decode(AtlasSettings.self, from: data) + + XCTAssertEqual(settings.language, .zhHans) + XCTAssertEqual(settings.acknowledgementText, AtlasL10n.acknowledgement(language: .zhHans)) + } + + func testOnlyFullDiskAccessIsRequiredForCurrentWorkflows() { + XCTAssertTrue(PermissionKind.fullDiskAccess.isRequiredForCurrentWorkflows) + XCTAssertFalse(PermissionKind.accessibility.isRequiredForCurrentWorkflows) + XCTAssertFalse(PermissionKind.notifications.isRequiredForCurrentWorkflows) + } + +} diff --git a/Packages/AtlasFeaturesApps/README.md b/Packages/AtlasFeaturesApps/README.md new file mode 100644 index 0000000..f737900 --- /dev/null +++ b/Packages/AtlasFeaturesApps/README.md @@ -0,0 +1,7 @@ +# AtlasFeaturesApps + +## Responsibility + +- Installed app list +- Footprint details +- Uninstall preview and execution UI diff --git a/Packages/AtlasFeaturesApps/Sources/AtlasFeaturesApps/AppsFeatureView.swift b/Packages/AtlasFeaturesApps/Sources/AtlasFeaturesApps/AppsFeatureView.swift new file mode 100644 index 0000000..0db23d7 --- /dev/null +++ b/Packages/AtlasFeaturesApps/Sources/AtlasFeaturesApps/AppsFeatureView.swift @@ -0,0 +1,549 @@ +import AtlasDesignSystem +import AtlasDomain +import SwiftUI + +public struct AppsFeatureView: View { + private let apps: [AppFootprint] + private let previewPlan: ActionPlan? + private let currentPreviewedAppID: UUID? + private let summary: String + private let isRunning: Bool + private let activePreviewAppID: UUID? + private let activeUninstallAppID: UUID? + private let onRefreshApps: () -> Void + private let onPreviewAppUninstall: (UUID) -> Void + private let onExecuteAppUninstall: (UUID) -> Void + + @State private var selectedAppID: UUID? + + public init( + apps: [AppFootprint] = AtlasScaffoldFixtures.apps, + previewPlan: ActionPlan? = nil, + currentPreviewedAppID: UUID? = nil, + summary: String = AtlasL10n.string("model.apps.ready"), + isRunning: Bool = false, + activePreviewAppID: UUID? = nil, + activeUninstallAppID: UUID? = nil, + onRefreshApps: @escaping () -> Void = {}, + onPreviewAppUninstall: @escaping (UUID) -> Void = { _ in }, + onExecuteAppUninstall: @escaping (UUID) -> Void = { _ in } + ) { + self.apps = apps + self.previewPlan = previewPlan + self.currentPreviewedAppID = currentPreviewedAppID + self.summary = summary + self.isRunning = isRunning + self.activePreviewAppID = activePreviewAppID + self.activeUninstallAppID = activeUninstallAppID + self.onRefreshApps = onRefreshApps + self.onPreviewAppUninstall = onPreviewAppUninstall + self.onExecuteAppUninstall = onExecuteAppUninstall + _selectedAppID = State(initialValue: Self.sortedApps(apps).first?.id) + } + + public var body: some View { + AtlasScreen( + title: AtlasL10n.string("apps.screen.title"), + subtitle: AtlasL10n.string("apps.screen.subtitle") + ) { + AtlasCallout( + title: previewPlan == nil ? AtlasL10n.string("apps.callout.default.title") : AtlasL10n.string("apps.callout.preview.title"), + detail: previewPlan == nil + ? AtlasL10n.string("apps.callout.default.detail") + : AtlasL10n.string("apps.callout.preview.detail"), + tone: previewPlan == nil ? .neutral : .warning, + systemImage: previewPlan == nil ? "app.badge.minus" : "list.clipboard.fill" + ) + + AtlasInfoCard( + title: AtlasL10n.string("apps.inventory.title"), + subtitle: AtlasL10n.string("apps.inventory.subtitle") + ) { + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + Text(summary) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + + LazyVGrid(columns: AtlasLayout.metricColumns, spacing: AtlasSpacing.lg) { + AtlasMetricCard( + title: AtlasL10n.string("apps.metric.listed.title"), + value: "\(sortedApps.count)", + detail: AtlasL10n.string("apps.metric.listed.detail"), + tone: .neutral, + systemImage: "square.stack.3d.up" + ) + AtlasMetricCard( + title: AtlasL10n.string("apps.metric.footprint.title"), + value: AtlasFormatters.byteCount(sortedApps.map(\.bytes).reduce(0, +)), + detail: AtlasL10n.string("apps.metric.footprint.detail"), + tone: .warning, + systemImage: "shippingbox" + ) + AtlasMetricCard( + title: AtlasL10n.string("apps.metric.leftovers.title"), + value: "\(sortedApps.map(\.leftoverItems).reduce(0, +))", + detail: AtlasL10n.string("apps.metric.leftovers.detail"), + tone: .warning, + systemImage: "tray.full" + ) + } + + Button(action: onRefreshApps) { + Label(isRunning ? AtlasL10n.string("apps.refresh.running") : AtlasL10n.string("apps.refresh.action"), systemImage: "arrow.clockwise") + } + .buttonStyle(.atlasSecondary) + .disabled(isRunning) + .accessibilityIdentifier("apps.refresh") + .accessibilityHint(AtlasL10n.string("apps.refresh.hint")) + } + } + + AtlasInfoCard( + title: AtlasL10n.string("apps.browser.title"), + subtitle: AtlasL10n.string("apps.browser.subtitle"), + tone: selectedAppMatchingPreview == nil ? .neutral : .warning + ) { + GeometryReader { proxy in + let isWide = proxy.size.width >= 760 + let sidebarWidth = min(max(proxy.size.width * 0.32, 260), 300) + + Group { + if isWide { + HStack(alignment: .top, spacing: AtlasSpacing.xl) { + appsSidebar + .frame(width: sidebarWidth) + .frame(maxHeight: .infinity) + + appDetailPanel + .frame(maxWidth: .infinity, maxHeight: .infinity) + } + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + appsSidebar + .frame(minHeight: 260, maxHeight: 260) + + appDetailPanel + .frame(maxWidth: .infinity, maxHeight: .infinity) + } + } + } + .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) + } + .frame(height: 560) + } + } + .onAppear(perform: syncSelection) + .onChange(of: sortedAppIDs) { _, _ in + syncSelection() + } + } + + private var sortedApps: [AppFootprint] { + Self.sortedApps(apps) + } + + private var sortedAppIDs: [UUID] { + sortedApps.map(\.id) + } + + private var groupedApps: [AppGroup] { + var groups: [AppGroup] = [] + let grouped = Dictionary(grouping: sortedApps, by: \.bucket) + + for bucket in AppBucket.displayOrder { + guard let items = grouped[bucket], !items.isEmpty else { + continue + } + groups.append(AppGroup(id: bucket.rawValue, title: bucket.title, tone: bucket.tone, apps: items)) + } + + return groups + } + + private var selectedApp: AppFootprint? { + guard let selectedAppID else { + return nil + } + return sortedApps.first(where: { $0.id == selectedAppID }) + } + + private var selectedAppMatchingPreview: ActionPlan? { + guard currentPreviewedAppID == selectedApp?.id else { + return nil + } + return previewPlan + } + + private var appsSidebar: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + Text(AtlasL10n.string("apps.list.title")) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + + if sortedApps.isEmpty { + AtlasEmptyState( + title: AtlasL10n.string("apps.list.empty.title"), + detail: AtlasL10n.string("apps.list.empty.detail"), + systemImage: "square.stack.3d.up.slash", + tone: .neutral + ) + } else { + List(selection: $selectedAppID) { + ForEach(groupedApps) { group in + Section { + ForEach(group.apps) { app in + AppSidebarRow(app: app) + .tag(app.id) + .listRowInsets(EdgeInsets(top: 10, leading: 12, bottom: 10, trailing: 12)) + } + } header: { + AppSidebarSectionHeader(title: group.title, count: group.apps.count, tone: group.tone) + } + } + } + .listStyle(.plain) + .scrollContentBackground(.hidden) + } + } + .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) + .padding(AtlasSpacing.lg) + .background( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .fill(AtlasColor.cardRaised) + ) + .overlay( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .strokeBorder(AtlasColor.border, lineWidth: 1) + ) + } + + private var appDetailPanel: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + Text(AtlasL10n.string("apps.detail.title")) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + + ScrollView { + if let selectedApp { + AppDetailView( + app: selectedApp, + previewPlan: selectedAppMatchingPreview, + isBuildingPreview: activePreviewAppID == selectedApp.id, + isUninstalling: activeUninstallAppID == selectedApp.id, + isBusy: isRunning, + onPreview: { onPreviewAppUninstall(selectedApp.id) }, + onUninstall: { onExecuteAppUninstall(selectedApp.id) } + ) + } else { + AtlasEmptyState( + title: AtlasL10n.string("apps.detail.empty.title"), + detail: AtlasL10n.string("apps.detail.empty.detail"), + systemImage: "cursorarrow.click", + tone: .neutral + ) + } + } + } + .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) + .padding(AtlasSpacing.xl) + .background( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .fill(Color.primary.opacity(0.03)) + ) + .overlay( + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .strokeBorder(Color.primary.opacity(0.08), lineWidth: 1) + ) + } + + private func syncSelection() { + if selectedApp == nil { + selectedAppID = sortedApps.first?.id + } + } + + private static func sortedApps(_ apps: [AppFootprint]) -> [AppFootprint] { + apps.sorted { lhs, rhs in + if lhs.bytes == rhs.bytes { + if lhs.leftoverItems == rhs.leftoverItems { + return lhs.name.localizedCaseInsensitiveCompare(rhs.name) == .orderedAscending + } + return lhs.leftoverItems > rhs.leftoverItems + } + return lhs.bytes > rhs.bytes + } + } +} + +private struct AppSidebarRow: View { + let app: AppFootprint + + var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xs) { + HStack(alignment: .center, spacing: AtlasSpacing.sm) { + Image(systemName: "app.fill") + .font(AtlasTypography.caption) + .foregroundStyle(app.leftoverItems > 0 ? AtlasColor.warning : AtlasColor.brand) + .accessibilityHidden(true) + + Text(app.name) + .font(AtlasTypography.rowTitle) + .lineLimit(1) + + Spacer(minLength: AtlasSpacing.sm) + + Text(AtlasFormatters.byteCount(app.bytes)) + .font(AtlasTypography.captionSmall) + .foregroundStyle(.secondary) + } + + Text(app.bundleIdentifier) + .font(AtlasTypography.bodySmall) + .foregroundStyle(.secondary) + .lineLimit(1) + + HStack(alignment: .center, spacing: AtlasSpacing.sm) { + AtlasStatusChip( + AtlasL10n.string("apps.list.row.leftovers", app.leftoverItems), + tone: app.leftoverItems > 0 ? .warning : .success + ) + + Spacer(minLength: AtlasSpacing.sm) + + Text(app.bucket.title) + .font(AtlasTypography.captionSmall) + .foregroundStyle(.secondary) + } + } + .padding(.vertical, AtlasSpacing.xxs) + .accessibilityElement(children: .contain) + } +} + +private struct AppSidebarSectionHeader: View { + let title: String + let count: Int + let tone: AtlasTone + + var body: some View { + HStack(alignment: .center, spacing: AtlasSpacing.sm) { + Text(title) + .font(AtlasTypography.caption) + .foregroundStyle(.secondary) + + Spacer(minLength: AtlasSpacing.sm) + + Text("\(count)") + .font(AtlasTypography.captionSmall) + .foregroundStyle(tone.tint) + } + .textCase(nil) + } +} + +private struct AppDetailView: View { + let app: AppFootprint + let previewPlan: ActionPlan? + let isBuildingPreview: Bool + let isUninstalling: Bool + let isBusy: Bool + let onPreview: () -> Void + let onUninstall: () -> Void + + var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + HStack(alignment: .top, spacing: AtlasSpacing.lg) { + VStack(alignment: .leading, spacing: AtlasSpacing.xs) { + Text(app.name) + .font(AtlasTypography.sectionTitle) + + Text(app.bundleIdentifier) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + } + + Spacer(minLength: AtlasSpacing.lg) + + VStack(alignment: .trailing, spacing: AtlasSpacing.sm) { + Text(AtlasFormatters.byteCount(app.bytes)) + .font(AtlasTypography.cardMetric) + .foregroundStyle(.primary) + + AtlasStatusChip( + AtlasL10n.string("apps.list.row.leftovers", app.leftoverItems), + tone: app.leftoverItems > 0 ? .warning : .success + ) + } + } + + AtlasCallout( + title: previewPlan == nil + ? AtlasL10n.string("apps.detail.callout.preview.title") + : AtlasL10n.string("apps.detail.callout.ready.title"), + detail: previewPlan == nil + ? AtlasL10n.string("apps.detail.callout.preview.detail") + : AtlasL10n.string("apps.detail.callout.ready.detail"), + tone: previewPlan == nil ? .neutral : .warning, + systemImage: previewPlan == nil ? "eye" : "checkmark.shield.fill" + ) + + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + AtlasKeyValueRow( + title: AtlasL10n.string("apps.detail.size"), + value: AtlasFormatters.byteCount(app.bytes), + detail: AtlasL10n.string("apps.metric.footprint.detail") + ) + AtlasKeyValueRow( + title: AtlasL10n.string("apps.detail.leftovers"), + value: "\(app.leftoverItems)", + detail: AtlasL10n.string("apps.metric.leftovers.detail") + ) + AtlasKeyValueRow( + title: AtlasL10n.string("apps.detail.path"), + value: app.bundlePath, + detail: app.bucket.title + ) + } + + if let previewPlan { + AtlasInfoCard( + title: AtlasL10n.string("apps.preview.title"), + subtitle: previewPlan.title, + tone: .warning + ) { + LazyVGrid(columns: AtlasLayout.metricColumns, spacing: AtlasSpacing.lg) { + AtlasMetricCard( + title: AtlasL10n.string("apps.preview.metric.size.title"), + value: AtlasFormatters.byteCount(previewPlan.estimatedBytes), + detail: AtlasL10n.string("apps.preview.metric.size.detail"), + tone: .warning, + systemImage: "shippingbox" + ) + AtlasMetricCard( + title: AtlasL10n.string("apps.preview.metric.actions.title"), + value: "\(previewPlan.items.count)", + detail: AtlasL10n.string("apps.preview.metric.actions.detail"), + tone: .neutral, + systemImage: "list.bullet.rectangle" + ) + AtlasMetricCard( + title: AtlasL10n.string("apps.preview.metric.recoverable.title"), + value: "\(previewPlan.items.filter(\.recoverable).count)", + detail: AtlasL10n.string("apps.preview.metric.recoverable.detail"), + tone: .success, + systemImage: "arrow.uturn.backward.circle" + ) + } + + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ForEach(previewPlan.items) { item in + AtlasDetailRow( + title: item.title, + subtitle: item.detail, + footnote: item.recoverable ? AtlasL10n.string("apps.preview.row.recoverable") : AtlasL10n.string("apps.preview.row.review"), + systemImage: icon(for: item.kind), + tone: item.recoverable ? .success : .warning + ) { + AtlasStatusChip( + item.recoverable ? AtlasL10n.string("common.recoverable") : AtlasL10n.string("common.manualReview"), + tone: item.recoverable ? .success : .warning + ) + } + } + } + } + } + + HStack(alignment: .center, spacing: AtlasSpacing.md) { + Group { + if previewPlan == nil { + Button(isBuildingPreview ? AtlasL10n.string("apps.preview.running") : AtlasL10n.string("apps.preview.action")) { + onPreview() + } + .buttonStyle(.atlasPrimary) + } else { + Button(isBuildingPreview ? AtlasL10n.string("apps.preview.running") : AtlasL10n.string("apps.preview.action")) { + onPreview() + } + .buttonStyle(.atlasSecondary) + } + } + .disabled(isBusy) + .accessibilityIdentifier("apps.preview.\(app.id.uuidString)") + .accessibilityHint(AtlasL10n.string("apps.preview.hint")) + + Button(isUninstalling ? AtlasL10n.string("apps.uninstall.running") : AtlasL10n.string("apps.uninstall.action")) { + onUninstall() + } + .buttonStyle(.atlasPrimary) + .disabled(isBusy || previewPlan == nil) + .accessibilityIdentifier("apps.uninstall.\(app.id.uuidString)") + .accessibilityHint(AtlasL10n.string("apps.uninstall.hint")) + } + } + .frame(maxWidth: .infinity, alignment: .leading) + } + + private func icon(for kind: ActionItem.Kind) -> String { + switch kind { + case .removeCache: + return "trash" + case .removeApp: + return "app.badge.minus" + case .archiveFile: + return "archivebox" + case .inspectPermission: + return "lock.shield" + } + } +} + +private struct AppGroup: Identifiable { + let id: String + let title: String + let tone: AtlasTone + let apps: [AppFootprint] +} + +private enum AppBucket: String, CaseIterable { + case large + case leftovers + case other + + static let displayOrder: [AppBucket] = [.large, .leftovers, .other] + + var title: String { + switch self { + case .large: + return AtlasL10n.string("apps.group.large") + case .leftovers: + return AtlasL10n.string("apps.group.leftovers") + case .other: + return AtlasL10n.string("apps.group.other") + } + } + + var tone: AtlasTone { + switch self { + case .large: + return .warning + case .leftovers: + return .neutral + case .other: + return .success + } + } +} + +private extension AppFootprint { + var bucket: AppBucket { + if bytes >= 2_000_000_000 { + return .large + } + if leftoverItems > 0 { + return .leftovers + } + return .other + } +} diff --git a/Packages/AtlasFeaturesHistory/README.md b/Packages/AtlasFeaturesHistory/README.md new file mode 100644 index 0000000..0c2be6a --- /dev/null +++ b/Packages/AtlasFeaturesHistory/README.md @@ -0,0 +1,7 @@ +# AtlasFeaturesHistory + +## Responsibility + +- Task timeline +- Task detail view +- Recovery entry points diff --git a/Packages/AtlasFeaturesHistory/Sources/AtlasFeaturesHistory/HistoryFeatureView.swift b/Packages/AtlasFeaturesHistory/Sources/AtlasFeaturesHistory/HistoryFeatureView.swift new file mode 100644 index 0000000..58bff2c --- /dev/null +++ b/Packages/AtlasFeaturesHistory/Sources/AtlasFeaturesHistory/HistoryFeatureView.swift @@ -0,0 +1,1108 @@ +import AtlasDesignSystem +import AtlasDomain +import SwiftUI + +public struct HistoryFeatureView: View { + private let taskRuns: [TaskRun] + private let recoveryItems: [RecoveryItem] + private let restoringItemID: UUID? + private let onRestoreItem: (UUID) -> Void + + @State private var selectedSection: HistoryBrowserSection + @State private var selectedTaskRunID: UUID? + @State private var selectedRecoveryItemID: UUID? + @State private var selectedRecoveryFilter: HistoryRecoveryFilter = .all + @State private var isOlderArchiveExpanded = false + + public init( + taskRuns: [TaskRun] = AtlasScaffoldFixtures.taskRuns, + recoveryItems: [RecoveryItem] = AtlasScaffoldFixtures.recoveryItems, + restoringItemID: UUID? = nil, + onRestoreItem: @escaping (UUID) -> Void = { _ in } + ) { + self.taskRuns = taskRuns + self.recoveryItems = recoveryItems + self.restoringItemID = restoringItemID + self.onRestoreItem = onRestoreItem + + let sortedTaskRuns = Self.sortTaskRuns(taskRuns) + let sortedRecoveryItems = Self.sortRecoveryItems(recoveryItems) + _selectedSection = State(initialValue: Self.initialSection(taskRuns: sortedTaskRuns, recoveryItems: sortedRecoveryItems)) + _selectedTaskRunID = State(initialValue: sortedTaskRuns.first?.id) + _selectedRecoveryItemID = State(initialValue: sortedRecoveryItems.first?.id) + } + + public var body: some View { + AtlasScreen( + title: AtlasL10n.string("history.screen.title"), + subtitle: AtlasL10n.string("history.screen.subtitle") + ) { + AtlasCallout( + title: screenCalloutTitle, + detail: screenCalloutDetail, + tone: screenCalloutTone, + systemImage: screenCalloutSystemImage + ) + + LazyVGrid(columns: AtlasLayout.metricColumns, spacing: AtlasSpacing.lg) { + AtlasMetricCard( + title: AtlasL10n.string("history.metric.activity.title"), + value: "\(visibleEventCount)", + detail: activityMetricDetail, + tone: .neutral, + systemImage: "clock.arrow.circlepath" + ) + AtlasMetricCard( + title: AtlasL10n.string("history.metric.running.title"), + value: "\(activeTaskCount)", + detail: activeTaskCount == 0 + ? AtlasL10n.string("history.metric.running.detail.none") + : AtlasL10n.string("history.metric.running.detail.active"), + tone: activeTaskCount == 0 ? .success : .warning, + systemImage: activeTaskCount == 0 ? "checkmark.circle" : "play.circle" + ) + AtlasMetricCard( + title: AtlasL10n.string("history.metric.recovery.title"), + value: "\(sortedRecoveryItems.count)", + detail: sortedRecoveryItems.isEmpty + ? AtlasL10n.string("history.metric.recovery.detail.none") + : AtlasL10n.string("history.metric.recovery.detail.available", AtlasFormatters.byteCount(totalRecoveryBytes)), + tone: sortedRecoveryItems.isEmpty ? .neutral : recoverySummaryTone, + systemImage: sortedRecoveryItems.isEmpty ? "lifepreserver" : "arrow.uturn.backward.circle" + ) + } + + AtlasInfoCard( + title: AtlasL10n.string("history.browser.title"), + subtitle: AtlasL10n.string("history.browser.subtitle"), + tone: browserTone + ) { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + HStack(alignment: .center, spacing: AtlasSpacing.lg) { + Picker("", selection: $selectedSection) { + ForEach(HistoryBrowserSection.allCases) { section in + Text(section.title).tag(section) + } + } + .pickerStyle(.segmented) + .frame(maxWidth: 300) + .accessibilityIdentifier("history.sectionPicker") + + Spacer(minLength: AtlasSpacing.lg) + + Text(browserSummary) + .font(AtlasTypography.bodySmall) + .foregroundStyle(.secondary) + } + + GeometryReader { proxy in + let isWide = proxy.size.width >= 760 + let sidebarWidth = min(max(proxy.size.width * 0.32, 250), 290) + + Group { + if isWide { + HStack(alignment: .top, spacing: AtlasSpacing.xl) { + browserSidebar + .frame(width: sidebarWidth) + .frame(maxHeight: .infinity) + detailPanel + .frame(maxWidth: .infinity, maxHeight: .infinity) + } + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + browserSidebar + .frame(minHeight: 260, maxHeight: 260) + detailPanel + .frame(maxWidth: .infinity, maxHeight: .infinity) + } + } + } + .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) + } + .frame(height: 560) + } + } + } + .onAppear(perform: syncSelection) + .onChange(of: sortedTaskRunIDs) { _, _ in + syncSelection() + } + .onChange(of: sortedRecoveryItemIDs) { _, _ in + syncSelection() + } + .onChange(of: selectedSection) { _, _ in + syncSelection() + } + .onChange(of: selectedRecoveryFilter) { _, _ in + syncSelection() + } + } + + private var sortedTaskRuns: [TaskRun] { + Self.sortTaskRuns(taskRuns) + } + + private var sortedRecoveryItems: [RecoveryItem] { + Self.sortRecoveryItems(recoveryItems) + } + + private var sortedTaskRunIDs: [UUID] { + sortedTaskRuns.map(\.id) + } + + private var sortedRecoveryItemIDs: [UUID] { + sortedRecoveryItems.map(\.id) + } + + private var selectedTaskRun: TaskRun? { + guard let selectedTaskRunID else { + return nil + } + return sortedTaskRuns.first(where: { $0.id == selectedTaskRunID }) + } + + private var selectedRecoveryItem: RecoveryItem? { + guard let selectedRecoveryItemID else { + return nil + } + return sortedRecoveryItems.first(where: { $0.id == selectedRecoveryItemID }) + } + + private var visibleEventCount: Int { + sortedTaskRuns.count + sortedRecoveryItems.count + } + + private var totalRecoveryBytes: Int64 { + sortedRecoveryItems.map(\.bytes).reduce(0, +) + } + + private var activeTaskCount: Int { + sortedTaskRuns.filter(\.isActive).count + } + + private var latestVisibleEventDate: Date? { + let taskDates = sortedTaskRuns.map(\.activityDate) + let recoveryDates = sortedRecoveryItems.map(\.deletedAt) + return (taskDates + recoveryDates).max() + } + + private var soonExpiringRecoveryItems: [RecoveryItem] { + sortedRecoveryItems.filter(\.isExpiringSoon) + } + + private var recoveryGroups: [HistoryRecoveryGroup] { + var groups: [HistoryRecoveryGroup] = [] + + let visibleRecoveryItems = sortedRecoveryItems.filter { selectedRecoveryFilter.matches($0) } + + let visibleSoonExpiringRecoveryItems = visibleRecoveryItems.filter(\.isExpiringSoon) + if !visibleSoonExpiringRecoveryItems.isEmpty { + groups.append( + HistoryRecoveryGroup( + id: "expiring", + title: AtlasL10n.string("history.recovery.group.expiring"), + tone: .warning, + items: visibleSoonExpiringRecoveryItems + ) + ) + } + + let remainingItems = visibleRecoveryItems.filter { !$0.isExpiringSoon } + let groupedItems = Dictionary(grouping: remainingItems, by: \.historyCategory) + + for category in HistoryRecoveryCategory.displayOrder { + guard let items = groupedItems[category], !items.isEmpty else { + continue + } + groups.append( + HistoryRecoveryGroup( + id: category.rawValue, + title: category.title, + tone: category.tone, + items: items + ) + ) + } + + return groups + } + + private var taskRunGroups: [HistoryTaskRunGroup] { + var groups: [HistoryTaskRunGroup] = [] + + let activeItems = sortedTaskRuns.filter(\.isActive) + if !activeItems.isEmpty { + groups.append( + HistoryTaskRunGroup( + id: "active", + title: AtlasL10n.string("history.archive.group.active"), + tone: .warning, + items: activeItems + ) + ) + } + + let archivedItems = sortedTaskRuns.filter { !$0.isActive } + let recentItems = archivedItems.filter(\.isRecentArchive) + if !recentItems.isEmpty { + groups.append( + HistoryTaskRunGroup( + id: "recent", + title: AtlasL10n.string("history.archive.group.recent"), + tone: .neutral, + items: recentItems + ) + ) + } + + let olderItems = archivedItems.filter { !$0.isRecentArchive } + if !olderItems.isEmpty { + groups.append( + HistoryTaskRunGroup( + id: "older", + title: AtlasL10n.string("history.archive.group.older"), + tone: .neutral, + items: olderItems + ) + ) + } + + return groups + } + + private var activityMetricDetail: String { + guard let latestVisibleEventDate else { + return AtlasL10n.string("history.metric.activity.detail.empty") + } + return AtlasL10n.string("history.metric.activity.detail.latest", AtlasFormatters.relativeDate(latestVisibleEventDate)) + } + + private var recoverySummaryTone: AtlasTone { + soonExpiringRecoveryItems.isEmpty ? .success : .warning + } + + private var screenCalloutTitle: String { + if !soonExpiringRecoveryItems.isEmpty { + return AtlasL10n.string("history.callout.expiring.title") + } + if activeTaskCount > 0 { + return AtlasL10n.string("history.callout.running.title") + } + if sortedRecoveryItems.isEmpty { + return AtlasL10n.string("history.callout.empty.title") + } + return AtlasL10n.string("history.callout.recovery.title") + } + + private var screenCalloutDetail: String { + if !soonExpiringRecoveryItems.isEmpty { + return AtlasL10n.string("history.callout.expiring.detail") + } + if activeTaskCount > 0 { + return AtlasL10n.string("history.callout.running.detail") + } + if sortedRecoveryItems.isEmpty { + return AtlasL10n.string("history.callout.empty.detail") + } + return AtlasL10n.string("history.callout.recovery.detail") + } + + private var screenCalloutTone: AtlasTone { + if !soonExpiringRecoveryItems.isEmpty || activeTaskCount > 0 { + return .warning + } + if sortedRecoveryItems.isEmpty { + return .neutral + } + return .success + } + + private var screenCalloutSystemImage: String { + if !soonExpiringRecoveryItems.isEmpty { + return "exclamationmark.triangle.fill" + } + if activeTaskCount > 0 { + return "play.circle.fill" + } + if sortedRecoveryItems.isEmpty { + return "clock.arrow.circlepath" + } + return "lifepreserver.fill" + } + + private var browserTone: AtlasTone { + switch selectedSection { + case .archive: + return activeTaskCount > 0 ? .warning : .neutral + case .recovery: + if sortedRecoveryItems.isEmpty { + return .neutral + } + return recoverySummaryTone + } + } + + private var browserSummary: String { + switch selectedSection { + case .archive: + let count = sortedTaskRuns.count + let key = count == 1 ? "history.browser.summary.archive.one" : "history.browser.summary.archive.other" + return AtlasL10n.string(key, count) + case .recovery: + let count = sortedRecoveryItems.count + let key = count == 1 ? "history.browser.summary.recovery.one" : "history.browser.summary.recovery.other" + return AtlasL10n.string(key, count) + } + } + + @ViewBuilder + private var browserSidebar: some View { + switch selectedSection { + case .archive: + browserSidebarContainer { + if sortedTaskRuns.isEmpty { + AtlasEmptyState( + title: AtlasL10n.string("history.runs.empty.title"), + detail: AtlasL10n.string("history.runs.empty.detail"), + systemImage: "clock.arrow.circlepath", + tone: .neutral + ) + } else { + List(selection: $selectedTaskRunID) { + ForEach(taskRunGroups) { group in + Section { + if group.id != "older" || isOlderArchiveExpanded { + ForEach(group.items) { taskRun in + HistoryTaskSidebarRow( + taskRun: taskRun, + isLatest: sortedTaskRuns.first?.id == taskRun.id + ) + .tag(taskRun.id) + .listRowInsets(EdgeInsets(top: 10, leading: 12, bottom: 10, trailing: 12)) + } + } + } header: { + HistorySidebarSectionHeader( + title: group.title, + count: group.items.count, + tone: group.tone, + isCollapsible: group.id == "older", + isExpanded: isOlderArchiveExpanded, + onToggle: group.id == "older" ? { isOlderArchiveExpanded.toggle() } : nil + ) + } + } + } + .listStyle(.plain) + .scrollContentBackground(.hidden) + } + } + case .recovery: + browserSidebarContainer { + if sortedRecoveryItems.isEmpty || recoveryGroups.isEmpty { + AtlasEmptyState( + title: AtlasL10n.string(sortedRecoveryItems.isEmpty ? "history.recovery.empty.title" : "history.recovery.filtered.empty.title"), + detail: AtlasL10n.string(sortedRecoveryItems.isEmpty ? "history.recovery.empty.detail" : "history.recovery.filtered.empty.detail"), + systemImage: "lifepreserver", + tone: .neutral + ) + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ScrollView(.horizontal, showsIndicators: false) { + HStack(spacing: AtlasSpacing.sm) { + ForEach(HistoryRecoveryFilter.allCases) { filter in + Group { + if selectedRecoveryFilter == filter { + Button(filter.title) { + selectedRecoveryFilter = filter + } + .buttonStyle(.atlasSecondary) + } else { + Button(filter.title) { + selectedRecoveryFilter = filter + } + .buttonStyle(.atlasGhost) + } + } + } + } + } + + List(selection: $selectedRecoveryItemID) { + ForEach(recoveryGroups) { group in + Section { + ForEach(group.items) { item in + HistoryRecoverySidebarRow(item: item) + .tag(item.id) + .listRowInsets(EdgeInsets(top: 10, leading: 12, bottom: 10, trailing: 12)) + } + } header: { + HistorySidebarSectionHeader( + title: group.title, + count: group.items.count, + tone: group.tone + ) + } + } + } + .listStyle(.plain) + .scrollContentBackground(.hidden) + } + } + } + } + } + + private func browserSidebarContainer(@ViewBuilder content: () -> Content) -> some View { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + Text(selectedSection.sidebarTitle) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + + content() + .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) + } + .frame(maxWidth: .infinity, maxHeight: .infinity, alignment: .topLeading) + .padding(AtlasSpacing.lg) + .background(sidebarBackground) + .overlay(sidebarBorder) + } + + private var detailPanel: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + Text(AtlasL10n.string("history.detail.title")) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + + ScrollView { + switch selectedSection { + case .archive: + if let taskRun = selectedTaskRun { + HistoryTaskDetailView( + taskRun: taskRun, + isLatest: sortedTaskRuns.first?.id == taskRun.id + ) + } else { + AtlasEmptyState( + title: AtlasL10n.string("history.detail.empty.title"), + detail: AtlasL10n.string("history.detail.empty.detail"), + systemImage: "cursorarrow.click", + tone: .neutral + ) + } + case .recovery: + if let item = selectedRecoveryItem { + HistoryRecoveryDetailView( + item: item, + isRestoring: restoringItemID == item.id, + canRestore: restoringItemID == nil, + onRestore: { onRestoreItem(item.id) } + ) + } else { + AtlasEmptyState( + title: AtlasL10n.string("history.detail.empty.title"), + detail: AtlasL10n.string("history.detail.empty.detail"), + systemImage: "cursorarrow.click", + tone: .neutral + ) + } + } + } + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(AtlasSpacing.xl) + .background(detailBackground) + .overlay(detailBorder) + } + + private var sidebarBackground: some View { + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .fill(AtlasColor.cardRaised) + } + + private var sidebarBorder: some View { + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .strokeBorder(AtlasColor.border, lineWidth: 1) + } + + private var detailBackground: some View { + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .fill(Color.primary.opacity(0.03)) + } + + private var detailBorder: some View { + RoundedRectangle(cornerRadius: AtlasRadius.lg, style: .continuous) + .strokeBorder(AtlasColor.border, lineWidth: 1) + } + + private func syncSelection() { + if !sortedTaskRuns.isEmpty, selectedTaskRun == nil { + selectedTaskRunID = sortedTaskRuns.first?.id + } + + let visibleRecoveryIDs = Set(recoveryGroups.flatMap { $0.items.map(\.id) }) + if !visibleRecoveryIDs.isEmpty, !visibleRecoveryIDs.contains(selectedRecoveryItemID ?? UUID()) { + selectedRecoveryItemID = recoveryGroups.first?.items.first?.id + } + + switch selectedSection { + case .archive: + if sortedTaskRuns.isEmpty, !sortedRecoveryItems.isEmpty { + selectedSection = .recovery + } + case .recovery: + if sortedRecoveryItems.isEmpty, !sortedTaskRuns.isEmpty { + selectedSection = .archive + } + } + } + + private static func initialSection(taskRuns: [TaskRun], recoveryItems: [RecoveryItem]) -> HistoryBrowserSection { + if !recoveryItems.isEmpty { + return .recovery + } + if !taskRuns.isEmpty { + return .archive + } + return .recovery + } + + private static func sortTaskRuns(_ taskRuns: [TaskRun]) -> [TaskRun] { + taskRuns.sorted { lhs, rhs in + if lhs.activityDate == rhs.activityDate { + return lhs.startedAt > rhs.startedAt + } + return lhs.activityDate > rhs.activityDate + } + } + + private static func sortRecoveryItems(_ recoveryItems: [RecoveryItem]) -> [RecoveryItem] { + recoveryItems.sorted { lhs, rhs in + lhs.deletedAt > rhs.deletedAt + } + } +} + +private enum HistoryBrowserSection: String, CaseIterable, Identifiable { + case recovery + case archive + + var id: String { rawValue } + + var title: String { + switch self { + case .recovery: + return AtlasL10n.string("history.browser.section.recovery") + case .archive: + return AtlasL10n.string("history.browser.section.archive") + } + } + + var sidebarTitle: String { + switch self { + case .archive: + return AtlasL10n.string("history.runs.title") + case .recovery: + return AtlasL10n.string("history.recovery.title") + } + } +} + +private enum HistoryRecoveryFilter: String, CaseIterable, Identifiable { + case all + case expiring + case apps + case developer + case browsers + case system + + var id: String { rawValue } + + var title: String { + switch self { + case .all: + return AtlasL10n.string("history.recovery.filter.all") + case .expiring: + return AtlasL10n.string("history.recovery.filter.expiring") + case .apps: + return AtlasL10n.string("history.recovery.group.apps") + case .developer: + return AtlasL10n.string("history.recovery.group.developer") + case .browsers: + return AtlasL10n.string("history.recovery.group.browsers") + case .system: + return AtlasL10n.string("history.recovery.group.system") + } + } + + func matches(_ item: RecoveryItem) -> Bool { + switch self { + case .all: + return true + case .expiring: + return item.isExpiringSoon + case .apps: + return item.historyCategory == .apps + case .developer: + return item.historyCategory == .developer + case .browsers: + return item.historyCategory == .browsers + case .system: + return item.historyCategory == .system + } + } +} + +private struct HistoryRecoveryGroup: Identifiable { + let id: String + let title: String + let tone: AtlasTone + let items: [RecoveryItem] +} + +private struct HistoryTaskRunGroup: Identifiable { + let id: String + let title: String + let tone: AtlasTone + let items: [TaskRun] +} + +private struct HistorySidebarSectionHeader: View { + let title: String + let count: Int + let tone: AtlasTone + var isCollapsible: Bool = false + var isExpanded: Bool = true + var onToggle: (() -> Void)? = nil + + var body: some View { + HStack(alignment: .center, spacing: AtlasSpacing.sm) { + if isCollapsible, let onToggle { + Button(action: onToggle) { + HStack(spacing: AtlasSpacing.xs) { + Image(systemName: isExpanded ? "chevron.down" : "chevron.right") + .font(AtlasTypography.captionSmall) + .foregroundStyle(.secondary) + + Text(title) + .font(AtlasTypography.caption) + .foregroundStyle(.secondary) + } + } + .buttonStyle(.plain) + } else { + Text(title) + .font(AtlasTypography.caption) + .foregroundStyle(.secondary) + } + + Spacer(minLength: AtlasSpacing.sm) + + Text("\(count)") + .font(AtlasTypography.captionSmall) + .foregroundStyle(tone.tint) + } + .textCase(nil) + } +} + +private struct HistoryTaskSidebarRow: View { + let taskRun: TaskRun + let isLatest: Bool + + var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xs) { + HStack(alignment: .center, spacing: AtlasSpacing.sm) { + Image(systemName: taskRun.kind.historySystemImage) + .font(AtlasTypography.caption) + .foregroundStyle(taskRun.status.atlasTone.tint) + .accessibilityHidden(true) + + Text(taskRun.kind.title) + .font(AtlasTypography.rowTitle) + .lineLimit(1) + + Spacer(minLength: AtlasSpacing.sm) + + if isLatest { + Text(AtlasL10n.string("history.timeline.latest")) + .font(AtlasTypography.captionSmall) + .foregroundStyle(AtlasColor.brand) + } + } + + Text(taskRun.summary) + .font(AtlasTypography.bodySmall) + .foregroundStyle(.secondary) + .lineLimit(2) + + HStack(alignment: .center, spacing: AtlasSpacing.sm) { + Text(AtlasFormatters.relativeDate(taskRun.activityDate)) + .font(AtlasTypography.captionSmall) + .foregroundStyle(.secondary) + + Spacer(minLength: AtlasSpacing.sm) + + AtlasStatusChip(taskRun.status.title, tone: taskRun.status.atlasTone) + } + } + .padding(.vertical, AtlasSpacing.xxs) + .accessibilityElement(children: .contain) + } +} + +private struct HistoryRecoverySidebarRow: View { + let item: RecoveryItem + + var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xs) { + HStack(alignment: .center, spacing: AtlasSpacing.sm) { + Image(systemName: "arrow.uturn.backward.circle") + .font(AtlasTypography.caption) + .foregroundStyle(item.isExpiringSoon ? AtlasColor.warning : AtlasColor.success) + .accessibilityHidden(true) + + Text(item.title) + .font(AtlasTypography.rowTitle) + .lineLimit(1) + + Spacer(minLength: AtlasSpacing.sm) + + AtlasStatusChip( + item.isExpiringSoon + ? AtlasL10n.string("history.recovery.badge.expiring") + : AtlasL10n.string("history.recovery.badge.available"), + tone: item.isExpiringSoon ? .warning : .success + ) + } + + Text(item.detail) + .font(AtlasTypography.bodySmall) + .foregroundStyle(.secondary) + .lineLimit(2) + + HStack(alignment: .center, spacing: AtlasSpacing.sm) { + Text(AtlasFormatters.byteCount(item.bytes)) + .font(AtlasTypography.captionSmall) + .foregroundStyle(.secondary) + + Spacer(minLength: AtlasSpacing.sm) + + Text(AtlasFormatters.relativeDate(item.deletedAt)) + .font(AtlasTypography.captionSmall) + .foregroundStyle(.secondary) + } + } + .padding(.vertical, AtlasSpacing.xxs) + .accessibilityElement(children: .contain) + } +} + +private struct HistoryTaskDetailView: View { + let taskRun: TaskRun + let isLatest: Bool + + var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + HStack(alignment: .top, spacing: AtlasSpacing.lg) { + VStack(alignment: .leading, spacing: AtlasSpacing.xs) { + HStack(spacing: AtlasSpacing.sm) { + Text(taskRun.kind.title) + .font(AtlasTypography.sectionTitle) + + if isLatest { + Text(AtlasL10n.string("history.timeline.latest")) + .font(AtlasTypography.caption) + .foregroundStyle(AtlasColor.brand) + } + } + + Text(taskRun.summary) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + + Spacer(minLength: AtlasSpacing.lg) + + AtlasStatusChip(taskRun.status.title, tone: taskRun.status.atlasTone) + } + + AtlasCallout( + title: taskRun.status.historyCalloutTitle, + detail: taskRun.status.historyCalloutDetail, + tone: taskRun.status.atlasTone, + systemImage: taskRun.status.atlasTone.symbol + ) + + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + AtlasKeyValueRow( + title: AtlasL10n.string("history.detail.task.status"), + value: taskRun.status.title, + detail: taskRun.kind.title + ) + AtlasKeyValueRow( + title: AtlasL10n.string("history.detail.task.started"), + value: AtlasFormatters.shortDate(taskRun.startedAt), + detail: AtlasFormatters.relativeDate(taskRun.startedAt) + ) + AtlasKeyValueRow( + title: AtlasL10n.string("history.detail.task.finished"), + value: taskRun.finishedAt.map(AtlasFormatters.shortDate) ?? AtlasL10n.string("history.detail.task.finished.running"), + detail: taskRun.finishedAt.map(AtlasFormatters.relativeDate) ?? AtlasL10n.string("history.timeline.meta.running") + ) + } + } + .frame(maxWidth: .infinity, alignment: .leading) + } +} + +private struct HistoryRecoveryDetailView: View { + let item: RecoveryItem + let isRestoring: Bool + let canRestore: Bool + let onRestore: () -> Void + + var body: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + HStack(alignment: .top, spacing: AtlasSpacing.lg) { + VStack(alignment: .leading, spacing: AtlasSpacing.xs) { + Text(item.title) + .font(AtlasTypography.sectionTitle) + + Text(item.detail) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + } + + Spacer(minLength: AtlasSpacing.lg) + + VStack(alignment: .trailing, spacing: AtlasSpacing.sm) { + AtlasStatusChip( + item.isExpiringSoon + ? AtlasL10n.string("history.recovery.badge.expiring") + : AtlasL10n.string("history.recovery.badge.available"), + tone: item.isExpiringSoon ? .warning : .success + ) + + Text(AtlasFormatters.byteCount(item.bytes)) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + } + } + + AtlasCallout( + title: item.isExpiringSoon + ? AtlasL10n.string("history.detail.recovery.callout.expiring.title") + : AtlasL10n.string("history.detail.recovery.callout.available.title"), + detail: item.isExpiringSoon + ? AtlasL10n.string("history.detail.recovery.callout.expiring.detail") + : AtlasL10n.string("history.detail.recovery.callout.available.detail"), + tone: item.isExpiringSoon ? .warning : .success, + systemImage: item.isExpiringSoon ? "exclamationmark.triangle.fill" : "checkmark.circle.fill" + ) + + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + AtlasKeyValueRow( + title: AtlasL10n.string("history.detail.recovery.size"), + value: AtlasFormatters.byteCount(item.bytes), + detail: item.detail + ) + AtlasKeyValueRow( + title: AtlasL10n.string("history.detail.recovery.deleted"), + value: AtlasFormatters.shortDate(item.deletedAt), + detail: AtlasFormatters.relativeDate(item.deletedAt) + ) + AtlasKeyValueRow( + title: AtlasL10n.string("history.detail.recovery.window"), + value: item.expiresAt.map(AtlasFormatters.shortDate) ?? AtlasL10n.string("history.detail.recovery.window.open"), + detail: item.expiresAt.map(AtlasFormatters.relativeDate) ?? AtlasL10n.string("history.recovery.meta.noexpiry") + ) + } + + VStack(alignment: .leading, spacing: AtlasSpacing.xs) { + Text(AtlasL10n.string("history.recovery.path.label")) + .font(AtlasTypography.caption) + .foregroundStyle(.secondary) + + Text(item.originalPath) + .font(.system(size: 12, weight: .regular, design: .monospaced)) + .foregroundStyle(.secondary) + .textSelection(.enabled) + .fixedSize(horizontal: false, vertical: true) + } + .frame(maxWidth: .infinity, alignment: .leading) + .padding(AtlasSpacing.md) + .background( + RoundedRectangle(cornerRadius: AtlasRadius.md, style: .continuous) + .fill(Color.primary.opacity(0.03)) + ) + .overlay( + RoundedRectangle(cornerRadius: AtlasRadius.md, style: .continuous) + .strokeBorder(AtlasColor.border, lineWidth: 1) + ) + + HStack(alignment: .center, spacing: AtlasSpacing.md) { + Spacer(minLength: 0) + + Button(isRestoring ? AtlasL10n.string("history.restore.running") : AtlasL10n.string("history.restore.action")) { + onRestore() + } + .buttonStyle(.atlasPrimary) + .disabled(!canRestore) + .accessibilityIdentifier("history.restore.\(item.id.uuidString)") + .accessibilityHint(AtlasL10n.string("history.restore.hint")) + } + } + .frame(maxWidth: .infinity, alignment: .leading) + } +} + +private extension TaskRun { + var activityDate: Date { + finishedAt ?? startedAt + } + + var isActive: Bool { + status == .queued || status == .running + } + + var isRecentArchive: Bool { + guard !isActive else { + return false + } + let sevenDaysAgo = Calendar.current.date(byAdding: .day, value: -7, to: Date()) ?? Date() + return activityDate >= sevenDaysAgo + } +} + +private extension RecoveryItem { + var isExpiringSoon: Bool { + guard let expiresAt else { + return false + } + let cutoff = Calendar.current.date(byAdding: .day, value: 3, to: Date()) ?? Date() + return expiresAt <= cutoff + } + + var historyCategory: HistoryRecoveryCategory { + switch payload { + case .app: + return .apps + case let .finding(finding): + switch finding.category.lowercased() { + case "developer": + return .developer + case "browsers": + return .browsers + case "system": + return .system + case "apps": + return .apps + default: + return .other + } + case nil: + return .other + } + } +} + +private enum HistoryRecoveryCategory: String, CaseIterable { + case apps + case developer + case browsers + case system + case other + + static let displayOrder: [HistoryRecoveryCategory] = [.apps, .developer, .browsers, .system, .other] + + var title: String { + switch self { + case .apps: + return AtlasL10n.string("history.recovery.group.apps") + case .developer: + return AtlasL10n.string("history.recovery.group.developer") + case .browsers: + return AtlasL10n.string("history.recovery.group.browsers") + case .system: + return AtlasL10n.string("history.recovery.group.system") + case .other: + return AtlasL10n.string("history.recovery.group.other") + } + } + + var tone: AtlasTone { + switch self { + case .apps: + return .warning + case .developer: + return .neutral + case .browsers: + return .success + case .system: + return .warning + case .other: + return .neutral + } + } +} + +private extension TaskKind { + var historySystemImage: String { + switch self { + case .scan: + return "sparkles" + case .executePlan: + return "play.circle" + case .uninstallApp: + return "trash" + case .restore: + return "arrow.uturn.backward.circle" + case .inspectPermissions: + return "lock.shield" + } + } +} + +private extension TaskStatus { + var atlasTone: AtlasTone { + switch self { + case .queued: + return .neutral + case .running: + return .warning + case .completed: + return .success + case .failed, .cancelled: + return .danger + } + } + + var historyCalloutTitle: String { + switch self { + case .queued: + return AtlasL10n.string("history.detail.task.callout.queued.title") + case .running: + return AtlasL10n.string("history.detail.task.callout.running.title") + case .completed: + return AtlasL10n.string("history.detail.task.callout.completed.title") + case .failed, .cancelled: + return AtlasL10n.string("history.detail.task.callout.failed.title") + } + } + + var historyCalloutDetail: String { + switch self { + case .queued: + return AtlasL10n.string("history.detail.task.callout.queued.detail") + case .running: + return AtlasL10n.string("history.detail.task.callout.running.detail") + case .completed: + return AtlasL10n.string("history.detail.task.callout.completed.detail") + case .failed, .cancelled: + return AtlasL10n.string("history.detail.task.callout.failed.detail") + } + } +} diff --git a/Packages/AtlasFeaturesOverview/README.md b/Packages/AtlasFeaturesOverview/README.md new file mode 100644 index 0000000..ba7e550 --- /dev/null +++ b/Packages/AtlasFeaturesOverview/README.md @@ -0,0 +1,5 @@ +# AtlasFeaturesOverview + +## Responsibility + +- Overview screen state, view models, routes, and components diff --git a/Packages/AtlasFeaturesOverview/Sources/AtlasFeaturesOverview/OverviewFeatureView.swift b/Packages/AtlasFeaturesOverview/Sources/AtlasFeaturesOverview/OverviewFeatureView.swift new file mode 100644 index 0000000..53f2a0c --- /dev/null +++ b/Packages/AtlasFeaturesOverview/Sources/AtlasFeaturesOverview/OverviewFeatureView.swift @@ -0,0 +1,298 @@ +import AtlasApplication +import AtlasDesignSystem +import AtlasDomain +import SwiftUI + +public struct OverviewFeatureView: View { + private let snapshot: AtlasWorkspaceSnapshot + private let isRefreshingHealthSnapshot: Bool + + public init( + snapshot: AtlasWorkspaceSnapshot = AtlasScaffoldWorkspace.snapshot(), + isRefreshingHealthSnapshot: Bool = false + ) { + self.snapshot = snapshot + self.isRefreshingHealthSnapshot = isRefreshingHealthSnapshot + } + + public var body: some View { + AtlasScreen( + title: AtlasL10n.string("overview.screen.title"), + subtitle: AtlasL10n.string("overview.screen.subtitle") + ) { + AtlasCallout( + title: overviewCalloutTitle, + detail: overviewCalloutDetail, + tone: overviewCalloutTone, + systemImage: overviewCalloutTone.symbol + ) + + LazyVGrid(columns: AtlasLayout.metricColumns, spacing: AtlasSpacing.lg) { + AtlasMetricCard( + title: AtlasL10n.string("overview.metric.reclaimable.title"), + value: AtlasFormatters.byteCount(snapshot.reclaimableSpaceBytes), + detail: AtlasL10n.string("overview.metric.reclaimable.detail"), + tone: .success, + systemImage: "sparkles", + elevation: .prominent + ) + AtlasMetricCard( + title: AtlasL10n.string("overview.metric.findings.title"), + value: "\(snapshot.findings.count)", + detail: AtlasL10n.string("overview.metric.findings.detail"), + tone: .neutral, + systemImage: "line.3.horizontal.decrease.circle" + ) + AtlasMetricCard( + title: AtlasL10n.string("overview.metric.permissions.title"), + value: "\(grantedRequiredPermissionCount)/\(max(requiredPermissionCount, 1))", + detail: requiredPermissionsReady + ? AtlasL10n.string("overview.metric.permissions.ready") + : AtlasL10n.string("overview.metric.permissions.limited"), + tone: requiredPermissionsReady ? .success : .warning, + systemImage: "lock.shield" + ) + } + + AtlasInfoCard( + title: AtlasL10n.string("overview.snapshot.title"), + subtitle: AtlasL10n.string("overview.snapshot.subtitle") + ) { + if isRefreshingHealthSnapshot, snapshot.healthSnapshot == nil { + AtlasLoadingState( + title: AtlasL10n.string("overview.snapshot.loading.title"), + detail: AtlasL10n.string("overview.snapshot.loading.detail") + ) + } else if let healthSnapshot = snapshot.healthSnapshot { + LazyVGrid(columns: AtlasLayout.metricColumns, spacing: AtlasSpacing.lg) { + AtlasMetricCard( + title: AtlasL10n.string("overview.snapshot.memory.title"), + value: "\(formatted(healthSnapshot.memoryUsedGB))/\(formatted(healthSnapshot.memoryTotalGB)) GB", + detail: AtlasL10n.string("overview.snapshot.memory.detail"), + tone: healthSnapshot.memoryUsedGB / max(healthSnapshot.memoryTotalGB, 1) > 0.75 ? .warning : .neutral, + systemImage: "memorychip" + ) + AtlasMetricCard( + title: AtlasL10n.string("overview.snapshot.disk.title"), + value: "\(formatted(healthSnapshot.diskUsedPercent))%", + detail: AtlasL10n.string("overview.snapshot.disk.detail", formatted(healthSnapshot.diskUsedGB), formatted(healthSnapshot.diskTotalGB)), + tone: healthSnapshot.diskUsedPercent > 80 ? .warning : .success, + systemImage: "internaldrive" + ) + AtlasMetricCard( + title: AtlasL10n.string("overview.snapshot.uptime.title"), + value: "\(formatted(healthSnapshot.uptimeDays)) \(AtlasL10n.string("common.days"))", + detail: AtlasL10n.string("overview.snapshot.uptime.detail"), + tone: .neutral, + systemImage: "clock" + ) + } + + AtlasCallout( + title: healthSnapshot.diskUsedPercent > 80 ? AtlasL10n.string("overview.snapshot.callout.warning.title") : AtlasL10n.string("overview.snapshot.callout.ok.title"), + detail: healthSnapshot.diskUsedPercent > 80 + ? AtlasL10n.string("overview.snapshot.callout.warning.detail") + : AtlasL10n.string("overview.snapshot.callout.ok.detail"), + tone: healthSnapshot.diskUsedPercent > 80 ? .warning : .success, + systemImage: healthSnapshot.diskUsedPercent > 80 ? "exclamationmark.triangle.fill" : "checkmark.circle.fill" + ) + + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ForEach(Array(healthSnapshot.optimizations.prefix(4))) { optimization in + AtlasDetailRow( + title: optimization.name, + subtitle: optimization.detail, + footnote: AtlasL10n.localizedCategory(optimization.category).capitalized, + systemImage: optimization.isSafe ? "checkmark.shield" : "slider.horizontal.3", + tone: optimization.isSafe ? .success : .warning + ) { + AtlasStatusChip(optimization.isSafe ? AtlasL10n.string("risk.safe") : AtlasL10n.string("risk.review"), tone: optimization.isSafe ? .success : .warning) + } + } + } + } else { + AtlasEmptyState( + title: AtlasL10n.string("overview.snapshot.empty.title"), + detail: AtlasL10n.string("overview.snapshot.empty.detail"), + systemImage: "waveform.path.ecg", + tone: .warning + ) + } + } + + AtlasInfoCard( + title: AtlasL10n.string("overview.actions.title"), + subtitle: AtlasL10n.string("overview.actions.subtitle") + ) { + if snapshot.findings.isEmpty { + AtlasEmptyState( + title: AtlasL10n.string("overview.actions.empty.title"), + detail: AtlasL10n.string("overview.actions.empty.detail"), + systemImage: "sparkles.slash", + tone: .neutral + ) + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ForEach(Array(snapshot.findings.prefix(4))) { finding in + AtlasDetailRow( + title: finding.title, + subtitle: finding.detail, + footnote: "\(AtlasL10n.localizedCategory(finding.category)) • \(riskSupport(for: finding.risk))", + systemImage: icon(for: finding.category), + tone: finding.risk.atlasTone + ) { + VStack(alignment: .trailing, spacing: AtlasSpacing.sm) { + AtlasStatusChip(finding.risk.title, tone: finding.risk.atlasTone) + Text(AtlasFormatters.byteCount(finding.bytes)) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + } + } + } + } + } + } + + AtlasInfoCard( + title: AtlasL10n.string("overview.activity.title"), + subtitle: AtlasL10n.string("overview.activity.subtitle") + ) { + if snapshot.taskRuns.isEmpty { + AtlasEmptyState( + title: AtlasL10n.string("overview.activity.empty.title"), + detail: AtlasL10n.string("overview.activity.empty.detail"), + systemImage: "clock.badge.questionmark", + tone: .neutral + ) + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ForEach(snapshot.taskRuns.prefix(3)) { taskRun in + AtlasDetailRow( + title: taskRun.kind.title, + subtitle: taskRun.summary, + footnote: timelineFootnote(for: taskRun), + systemImage: icon(for: taskRun.kind), + tone: taskRun.status.atlasTone + ) { + AtlasStatusChip(taskRun.status.title, tone: taskRun.status.atlasTone) + } + } + } + } + } + } + } + + private var requiredPermissionStates: [PermissionState] { + snapshot.permissions.filter { $0.kind.isRequiredForCurrentWorkflows } + } + + private var requiredPermissionCount: Int { + requiredPermissionStates.count + } + + private var grantedRequiredPermissionCount: Int { + requiredPermissionStates.filter(\.isGranted).count + } + + private var requiredPermissionsReady: Bool { + requiredPermissionCount > 0 && grantedRequiredPermissionCount == requiredPermissionCount + } + + private var overviewCalloutTitle: String { + requiredPermissionsReady + ? AtlasL10n.string("overview.callout.ready.title") + : AtlasL10n.string("overview.callout.limited.title") + } + + private var overviewCalloutDetail: String { + requiredPermissionsReady + ? AtlasL10n.string("overview.callout.ready.detail") + : AtlasL10n.string("overview.callout.limited.detail") + } + + private var overviewCalloutTone: AtlasTone { + requiredPermissionsReady ? .success : .warning + } + + private func formatted(_ value: Double) -> String { + value.formatted(.number.precision(.fractionLength(1))) + } + + private func riskSupport(for risk: RiskLevel) -> String { + switch risk { + case .safe: + return AtlasL10n.string("overview.risk.safe") + case .review: + return AtlasL10n.string("overview.risk.review") + case .advanced: + return AtlasL10n.string("overview.risk.advanced") + } + } + + private func timelineFootnote(for taskRun: TaskRun) -> String { + let start = AtlasFormatters.relativeDate(taskRun.startedAt) + if let finishedAt = taskRun.finishedAt { + return AtlasL10n.string("overview.activity.timeline.finished", start, AtlasFormatters.relativeDate(finishedAt)) + } + return AtlasL10n.string("overview.activity.timeline.running", start) + } + + private func icon(for category: String) -> String { + switch category.lowercased() { + case "developer": + return "hammer" + case "system": + return "gearshape.2" + case "apps": + return "square.stack.3d.up" + case "browsers": + return "globe" + default: + return "sparkles" + } + } + + private func icon(for kind: TaskKind) -> String { + switch kind { + case .scan: + return "sparkles" + case .executePlan: + return "play.circle" + case .uninstallApp: + return "trash" + case .restore: + return "arrow.uturn.backward.circle" + case .inspectPermissions: + return "lock.shield" + } + } +} + +private extension RiskLevel { + var atlasTone: AtlasTone { + switch self { + case .safe: + return .success + case .review: + return .warning + case .advanced: + return .danger + } + } +} + +private extension TaskStatus { + var atlasTone: AtlasTone { + switch self { + case .queued: + return .neutral + case .running: + return .warning + case .completed: + return .success + case .failed, .cancelled: + return .danger + } + } +} diff --git a/Packages/AtlasFeaturesPermissions/README.md b/Packages/AtlasFeaturesPermissions/README.md new file mode 100644 index 0000000..e7a3ab0 --- /dev/null +++ b/Packages/AtlasFeaturesPermissions/README.md @@ -0,0 +1,7 @@ +# AtlasFeaturesPermissions + +## Responsibility + +- Permission cards +- Explainer flows +- Permission status refresh logic diff --git a/Packages/AtlasFeaturesPermissions/Sources/AtlasFeaturesPermissions/PermissionsFeatureView.swift b/Packages/AtlasFeaturesPermissions/Sources/AtlasFeaturesPermissions/PermissionsFeatureView.swift new file mode 100644 index 0000000..9c87c42 --- /dev/null +++ b/Packages/AtlasFeaturesPermissions/Sources/AtlasFeaturesPermissions/PermissionsFeatureView.swift @@ -0,0 +1,321 @@ +import AppKit +import AtlasDesignSystem +import AtlasDomain +import SwiftUI + +public struct PermissionsFeatureView: View { + @Environment(\.scenePhase) private var scenePhase + @State private var isOptionalExpanded = false + + private let permissionStates: [PermissionState] + private let summary: String + private let isRefreshing: Bool + private let onRefresh: () -> Void + private let onRequestNotificationPermission: () -> Void + + public init( + permissionStates: [PermissionState] = AtlasScaffoldFixtures.permissions, + summary: String = AtlasL10n.string("model.permissions.ready"), + isRefreshing: Bool = false, + onRefresh: @escaping () -> Void = {}, + onRequestNotificationPermission: @escaping () -> Void = {} + ) { + self.permissionStates = permissionStates + self.summary = summary + self.isRefreshing = isRefreshing + self.onRefresh = onRefresh + self.onRequestNotificationPermission = onRequestNotificationPermission + } + + public var body: some View { + AtlasScreen( + title: AtlasL10n.string("permissions.screen.title"), + subtitle: AtlasL10n.string("permissions.screen.subtitle") + ) { + AtlasCallout( + title: corePermissionsReady ? AtlasL10n.string("permissions.callout.ready.title") : AtlasL10n.string("permissions.callout.limited.title"), + detail: corePermissionsReady + ? AtlasL10n.string("permissions.callout.ready.detail") + : AtlasL10n.string("permissions.callout.limited.detail"), + tone: corePermissionsReady ? .success : .warning, + systemImage: corePermissionsReady ? "checkmark.shield.fill" : "lock.shield" + ) + + AtlasInfoCard( + title: AtlasL10n.string("permissions.next.title"), + subtitle: AtlasL10n.string("permissions.next.subtitle"), + tone: nextStepTone + ) { + if isRefreshing { + AtlasLoadingState( + title: AtlasL10n.string("permissions.loading.title"), + detail: summary + ) + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + Text(summary) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + + AtlasCallout( + title: nextStepTitle, + detail: nextStepDetail, + tone: nextStepTone, + systemImage: nextStepSystemImage + ) + + LazyVGrid(columns: AtlasLayout.wideColumns, spacing: AtlasSpacing.lg) { + AtlasMetricCard( + title: AtlasL10n.string("permissions.metric.required.title"), + value: "\(grantedRequiredCount)/\(max(requiredCount, 1))", + detail: AtlasL10n.string("permissions.metric.required.detail"), + tone: corePermissionsReady ? .success : .warning, + systemImage: "exclamationmark.shield" + ) + AtlasMetricCard( + title: AtlasL10n.string("permissions.metric.later.title"), + value: "\(optionalMissingCount)", + detail: AtlasL10n.string("permissions.metric.later.detail"), + tone: optionalMissingCount == 0 ? .success : .neutral, + systemImage: "hourglass" + ) + } + + HStack(alignment: .center, spacing: AtlasSpacing.md) { + if let nextActionKind { + Button(buttonTitle(for: nextActionKind)) { + performAction(for: nextActionKind) + } + .buttonStyle(.atlasPrimary) + } + + Button(action: onRefresh) { + Label(AtlasL10n.string("permissions.refresh"), systemImage: "arrow.clockwise") + } + .buttonStyle(.atlasSecondary) + .accessibilityIdentifier("permissions.refresh") + .accessibilityHint(AtlasL10n.string("permissions.refresh.hint")) + } + } + } + } + + AtlasInfoCard( + title: AtlasL10n.string("permissions.requiredSection.title"), + subtitle: AtlasL10n.string("permissions.requiredSection.subtitle"), + tone: corePermissionsReady ? .success : .warning + ) { + if requiredPermissionStates.isEmpty { + AtlasEmptyState( + title: AtlasL10n.string("permissions.empty.title"), + detail: AtlasL10n.string("permissions.empty.detail"), + systemImage: "lock.slash", + tone: .neutral + ) + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ForEach(requiredPermissionStates) { state in + permissionRow(state) + } + } + } + } + + if !optionalPermissionStates.isEmpty { + AtlasInfoCard( + title: AtlasL10n.string("permissions.optionalSection.title"), + subtitle: AtlasL10n.string("permissions.optionalSection.subtitle") + ) { + DisclosureGroup(isExpanded: $isOptionalExpanded) { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ForEach(optionalPermissionStates) { state in + permissionRow(state) + } + } + .padding(.top, AtlasSpacing.md) + } label: { + HStack(alignment: .center, spacing: AtlasSpacing.sm) { + Text(AtlasL10n.string("permissions.optionalSection.disclosure")) + .font(AtlasTypography.rowTitle) + + Spacer(minLength: AtlasSpacing.sm) + + AtlasStatusChip( + AtlasL10n.string( + optionalMissingCount == 1 + ? "permissions.optionalSection.count.one" + : "permissions.optionalSection.count.other", + optionalMissingCount + ), + tone: optionalMissingCount == 0 ? .success : .neutral + ) + } + } + } + } + } + .onChange(of: scenePhase, initial: false) { _, newPhase in + guard newPhase == .active, !isRefreshing else { + return + } + onRefresh() + } + } + + private var grantedCount: Int { + permissionStates.filter(\.isGranted).count + } + + private var requiredPermissionStates: [PermissionState] { + permissionStates.filter { $0.kind.isRequiredForCurrentWorkflows } + } + + private var optionalPermissionStates: [PermissionState] { + permissionStates.filter { !$0.kind.isRequiredForCurrentWorkflows } + } + + private var requiredCount: Int { + requiredPermissionStates.count + } + + private var grantedRequiredCount: Int { + requiredPermissionStates.filter(\.isGranted).count + } + + private var optionalMissingCount: Int { + optionalPermissionStates.filter { !$0.isGranted }.count + } + + private var corePermissionsReady: Bool { + requiredCount > 0 && grantedRequiredCount == requiredCount + } + + private var nextActionKind: PermissionKind? { + requiredPermissionStates.first(where: { !$0.isGranted })?.kind + ?? optionalPermissionStates.first(where: { !$0.isGranted })?.kind + } + + private var nextStepTitle: String { + guard let nextActionKind else { + return AtlasL10n.string("permissions.next.ready.title") + } + return AtlasL10n.string("permissions.next.missing.title", nextActionKind.title) + } + + private var nextStepDetail: String { + guard let nextActionKind else { + return AtlasL10n.string("permissions.next.ready.detail", grantedCount, permissionStates.count) + } + return supportText(for: nextActionKind) + } + + private var nextStepTone: AtlasTone { + guard let nextActionKind else { + return .success + } + return nextActionKind.isRequiredForCurrentWorkflows ? .warning : .neutral + } + + private var nextStepSystemImage: String { + guard let nextActionKind else { + return "checkmark.circle.fill" + } + return nextActionKind.systemImage + } + + @ViewBuilder + private func permissionRow(_ state: PermissionState) -> some View { + AtlasDetailRow( + title: state.kind.title, + subtitle: state.rationale, + footnote: rowFootnote(for: state), + systemImage: state.kind.systemImage, + tone: state.isGranted ? .success : statusTone(for: state) + ) { + VStack(alignment: .trailing, spacing: AtlasSpacing.sm) { + AtlasStatusChip( + statusText(for: state), + tone: statusTone(for: state) + ) + + if !state.isGranted { + Button(buttonTitle(for: state.kind)) { + performAction(for: state.kind) + } + .buttonStyle(.atlasSecondary) + } + } + } + } + + private func rowFootnote(for state: PermissionState) -> String { + if state.isGranted { + return AtlasL10n.string("permissions.row.ready") + } + return state.kind.isRequiredForCurrentWorkflows + ? AtlasL10n.string("permissions.row.required") + : AtlasL10n.string("permissions.row.optional") + } + + private func statusText(for state: PermissionState) -> String { + if state.isGranted { + return AtlasL10n.string("common.granted") + } + return state.kind.isRequiredForCurrentWorkflows + ? AtlasL10n.string("permissions.status.required") + : AtlasL10n.string("permissions.status.optional") + } + + private func statusTone(for state: PermissionState) -> AtlasTone { + if state.isGranted { + return .success + } + return state.kind.isRequiredForCurrentWorkflows ? .warning : .neutral + } + + private func buttonTitle(for kind: PermissionKind) -> String { + switch kind { + case .notifications: + return AtlasL10n.string("permissions.grant.notifications") + case .fullDiskAccess, .accessibility: + return AtlasL10n.string("permissions.grant.action") + } + } + + private func supportText(for kind: PermissionKind) -> String { + switch kind { + case .fullDiskAccess: + return AtlasL10n.string("permissions.support.fullDiskAccess") + case .accessibility: + return AtlasL10n.string("permissions.support.accessibility") + case .notifications: + return AtlasL10n.string("permissions.support.notifications") + } + } + + private func performAction(for kind: PermissionKind) { + switch kind { + case .notifications: + onRequestNotificationPermission() + case .fullDiskAccess, .accessibility: + openSystemPreferences(for: kind) + } + } + + private func openSystemPreferences(for kind: PermissionKind) { + let urlString: String + switch kind { + case .fullDiskAccess: + urlString = "x-apple.systempreferences:com.apple.preference.security?Privacy_AllFiles" + case .accessibility: + urlString = "x-apple.systempreferences:com.apple.preference.security?Privacy_Accessibility" + case .notifications: + urlString = "x-apple.systempreferences:com.apple.preference.security?Privacy_Notifications" + } + + if let url = URL(string: urlString) { + NSWorkspace.shared.open(url) + } + } +} diff --git a/Packages/AtlasFeaturesSettings/README.md b/Packages/AtlasFeaturesSettings/README.md new file mode 100644 index 0000000..7169404 --- /dev/null +++ b/Packages/AtlasFeaturesSettings/README.md @@ -0,0 +1,8 @@ +# AtlasFeaturesSettings + +## Responsibility + +- Preferences +- Rules and exclusions +- Recovery retention +- Acknowledgements and notices diff --git a/Packages/AtlasFeaturesSettings/Sources/AtlasFeaturesSettings/SettingsFeatureView.swift b/Packages/AtlasFeaturesSettings/Sources/AtlasFeaturesSettings/SettingsFeatureView.swift new file mode 100644 index 0000000..e330ed4 --- /dev/null +++ b/Packages/AtlasFeaturesSettings/Sources/AtlasFeaturesSettings/SettingsFeatureView.swift @@ -0,0 +1,323 @@ +import AtlasDesignSystem +import AtlasDomain +import SwiftUI + +public struct SettingsFeatureView: View { + @State private var selectedPanel: SettingsPanel = .general + @State private var presentedDocument: SettingsDocument? + + private let settings: AtlasSettings + private let onSetLanguage: (AtlasLanguage) -> Void + private let onSetRecoveryRetention: (Int) -> Void + private let onToggleNotifications: (Bool) -> Void + + public init( + settings: AtlasSettings = AtlasScaffoldFixtures.settings, + onSetLanguage: @escaping (AtlasLanguage) -> Void = { _ in }, + onSetRecoveryRetention: @escaping (Int) -> Void = { _ in }, + onToggleNotifications: @escaping (Bool) -> Void = { _ in } + ) { + self.settings = settings + self.onSetLanguage = onSetLanguage + self.onSetRecoveryRetention = onSetRecoveryRetention + self.onToggleNotifications = onToggleNotifications + } + + public var body: some View { + AtlasScreen( + title: AtlasL10n.string("settings.screen.title"), + subtitle: AtlasL10n.string("settings.screen.subtitle") + ) { + AtlasCallout( + title: AtlasL10n.string("settings.callout.title"), + detail: AtlasL10n.string("settings.callout.detail"), + tone: .neutral, + systemImage: "gearshape.2.fill" + ) + + AtlasInfoCard( + title: AtlasL10n.string("settings.panel.title"), + subtitle: AtlasL10n.string("settings.panel.subtitle") + ) { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + HStack(spacing: AtlasSpacing.sm) { + ForEach(SettingsPanel.allCases) { panel in + Group { + if selectedPanel == panel { + Button(panel.title) { + selectedPanel = panel + } + .buttonStyle(.atlasSecondary) + } else { + Button(panel.title) { + selectedPanel = panel + } + .buttonStyle(.atlasGhost) + } + } + .accessibilityIdentifier("settings.panel.\(panel.id)") + } + } + + ScrollView { + switch selectedPanel { + case .general: + generalPanel + case .recovery: + recoveryPanel + case .trust: + trustPanel + } + } + } + } + } + .sheet(item: $presentedDocument) { document in + SettingsDocumentSheet(document: document) + } + } + + private var generalPanel: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + AtlasCallout( + title: AtlasL10n.string("settings.general.title"), + detail: AtlasL10n.string("settings.general.subtitle"), + tone: .neutral, + systemImage: "slider.horizontal.3" + ) + + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + AtlasKeyValueRow( + title: AtlasL10n.string("settings.language.title"), + value: settings.language.displayName, + detail: AtlasL10n.string("settings.language.detail") + ) + + Picker(AtlasL10n.string("settings.language.picker"), selection: Binding(get: { + settings.language + }, set: { newValue in + onSetLanguage(newValue) + })) { + ForEach(AtlasLanguage.allCases) { language in + Text(language.displayName) + .tag(language) + } + } + .pickerStyle(.segmented) + .accessibilityIdentifier("settings.language") + .accessibilityHint(AtlasL10n.string("settings.language.hint")) + } + + Divider() + + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + AtlasKeyValueRow( + title: AtlasL10n.string("settings.notifications.title"), + value: settings.notificationsEnabled ? AtlasL10n.string("common.enabled") : AtlasL10n.string("common.disabled"), + detail: AtlasL10n.string("settings.notifications.detail") + ) + + Toggle( + isOn: Binding( + get: { settings.notificationsEnabled }, + set: onToggleNotifications + ) + ) { + Text(AtlasL10n.string("settings.notifications.toggle")) + .font(AtlasTypography.body.weight(.medium)) + } + .toggleStyle(.switch) + .accessibilityIdentifier("settings.notifications") + .accessibilityHint(AtlasL10n.string("settings.notifications.hint")) + } + } + .frame(maxWidth: .infinity, alignment: .leading) + } + + private var recoveryPanel: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + AtlasCallout( + title: AtlasL10n.string("settings.recoveryPanel.title"), + detail: AtlasL10n.string("settings.recoveryPanel.subtitle"), + tone: .warning, + systemImage: "arrow.uturn.backward.circle.fill" + ) + + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + AtlasKeyValueRow( + title: AtlasL10n.string("settings.retention.title"), + value: AtlasL10n.string("settings.retention.value", settings.recoveryRetentionDays), + detail: AtlasL10n.string("settings.retention.detail") + ) + + Stepper( + value: Binding( + get: { settings.recoveryRetentionDays }, + set: onSetRecoveryRetention + ), + in: 1 ... 30 + ) { + Text(AtlasL10n.string("settings.retention.adjust")) + .font(AtlasTypography.body.weight(.medium)) + } + .accessibilityIdentifier("settings.recoveryRetention") + .accessibilityHint(AtlasL10n.string("settings.retention.hint")) + } + + Divider() + + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + Text(AtlasL10n.string("settings.exclusions.title")) + .font(AtlasTypography.sectionTitle) + + Text(AtlasL10n.string("settings.exclusions.subtitle")) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + + if settings.excludedPaths.isEmpty { + AtlasEmptyState( + title: AtlasL10n.string("settings.exclusions.empty.title"), + detail: AtlasL10n.string("settings.exclusions.empty.detail"), + systemImage: "folder.badge.minus", + tone: .neutral + ) + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ForEach(settings.excludedPaths, id: \.self) { path in + AtlasDetailRow( + title: path, + subtitle: AtlasL10n.string("settings.exclusions.row.subtitle"), + systemImage: "folder.badge.minus", + tone: .warning + ) + } + } + } + } + } + .frame(maxWidth: .infinity, alignment: .leading) + } + + private var trustPanel: some View { + VStack(alignment: .leading, spacing: AtlasSpacing.xl) { + AtlasCallout( + title: AtlasL10n.string("settings.trust.title"), + detail: AtlasL10n.string("settings.trust.subtitle"), + tone: .success, + systemImage: "checkmark.shield.fill" + ) + + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + AtlasDetailRow( + title: AtlasL10n.string("settings.distribution.title"), + subtitle: AtlasL10n.string("settings.distribution.detail"), + systemImage: "shippingbox", + tone: .neutral + ) { + AtlasStatusChip(AtlasL10n.string("settings.distribution.value"), tone: .neutral) + } + + AtlasDetailRow( + title: AtlasL10n.string("settings.trust.destructive.title"), + subtitle: AtlasL10n.string("settings.trust.destructive.subtitle"), + systemImage: "checkmark.shield", + tone: .success + ) { + AtlasStatusChip(AtlasL10n.string("settings.trust.destructive.badge"), tone: .success) + } + } + + Divider() + + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + Text(AtlasL10n.string("settings.trust.documents.title")) + .font(AtlasTypography.sectionTitle) + + Text(AtlasL10n.string("settings.trust.documents.subtitle")) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + + HStack(alignment: .center, spacing: AtlasSpacing.md) { + Button(AtlasL10n.string("settings.trust.documents.ack")) { + presentedDocument = .acknowledgement(settings.acknowledgementText) + } + .buttonStyle(.atlasSecondary) + + Button(AtlasL10n.string("settings.trust.documents.notices")) { + presentedDocument = .notices(settings.thirdPartyNoticesText) + } + .buttonStyle(.atlasSecondary) + } + } + } + .frame(maxWidth: .infinity, alignment: .leading) + } +} + +private enum SettingsPanel: String, CaseIterable, Identifiable { + case general + case recovery + case trust + + var id: String { rawValue } + + var title: String { + switch self { + case .general: + return AtlasL10n.string("settings.panel.general") + case .recovery: + return AtlasL10n.string("settings.panel.recovery") + case .trust: + return AtlasL10n.string("settings.panel.trust") + } + } +} + +private enum SettingsDocument: Identifiable { + case acknowledgement(String) + case notices(String) + + var id: String { + switch self { + case .acknowledgement: + return "acknowledgement" + case .notices: + return "notices" + } + } + + var title: String { + switch self { + case .acknowledgement: + return AtlasL10n.string("settings.acknowledgement.title") + case .notices: + return AtlasL10n.string("settings.notices.title") + } + } + + var bodyText: String { + switch self { + case let .acknowledgement(text), let .notices(text): + return text + } + } +} + +private struct SettingsDocumentSheet: View { + let document: SettingsDocument + + var body: some View { + NavigationStack { + ScrollView { + Text(document.bodyText) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .textSelection(.enabled) + .frame(maxWidth: .infinity, alignment: .leading) + .padding(AtlasSpacing.xl) + } + .navigationTitle(document.title) + } + .frame(minWidth: 560, minHeight: 420) + } +} diff --git a/Packages/AtlasFeaturesSmartClean/README.md b/Packages/AtlasFeaturesSmartClean/README.md new file mode 100644 index 0000000..83be0fe --- /dev/null +++ b/Packages/AtlasFeaturesSmartClean/README.md @@ -0,0 +1,7 @@ +# AtlasFeaturesSmartClean + +## Responsibility + +- Scan flow +- Findings presentation +- Action plan selection and execution UI diff --git a/Packages/AtlasFeaturesSmartClean/Sources/AtlasFeaturesSmartClean/SmartCleanFeatureView.swift b/Packages/AtlasFeaturesSmartClean/Sources/AtlasFeaturesSmartClean/SmartCleanFeatureView.swift new file mode 100644 index 0000000..0747869 --- /dev/null +++ b/Packages/AtlasFeaturesSmartClean/Sources/AtlasFeaturesSmartClean/SmartCleanFeatureView.swift @@ -0,0 +1,593 @@ +import AtlasDesignSystem +import AtlasDomain +import SwiftUI + +public struct SmartCleanFeatureView: View { + private let findings: [Finding] + private let plan: ActionPlan + private let scanSummary: String + private let scanProgress: Double + private let isScanning: Bool + private let isExecutingPlan: Bool + private let isCurrentPlanFresh: Bool + private let canExecutePlan: Bool + private let planIssue: String? + private let onStartScan: () -> Void + private let onRefreshPreview: () -> Void + private let onExecutePlan: () -> Void + + public init( + findings: [Finding] = AtlasScaffoldFixtures.findings, + plan: ActionPlan = AtlasScaffoldFixtures.actionPlan, + scanSummary: String = AtlasL10n.string("model.scan.ready"), + scanProgress: Double = 0, + isScanning: Bool = false, + isExecutingPlan: Bool = false, + isCurrentPlanFresh: Bool = false, + canExecutePlan: Bool = false, + planIssue: String? = nil, + onStartScan: @escaping () -> Void = {}, + onRefreshPreview: @escaping () -> Void = {}, + onExecutePlan: @escaping () -> Void = {} + ) { + self.findings = findings + self.plan = plan + self.scanSummary = scanSummary + self.scanProgress = scanProgress + self.isScanning = isScanning + self.isExecutingPlan = isExecutingPlan + self.isCurrentPlanFresh = isCurrentPlanFresh + self.canExecutePlan = canExecutePlan + self.planIssue = planIssue + self.onStartScan = onStartScan + self.onRefreshPreview = onRefreshPreview + self.onExecutePlan = onExecutePlan + } + + public var body: some View { + AtlasScreen( + title: AtlasL10n.string("smartclean.screen.title"), + subtitle: AtlasL10n.string("smartclean.screen.subtitle") + ) { + AtlasCallout( + title: statusTitle, + detail: statusDetail, + tone: statusTone, + systemImage: statusSymbol + ) + + AtlasInfoCard( + title: AtlasL10n.string("smartclean.controls.title"), + subtitle: AtlasL10n.string("smartclean.controls.subtitle"), + tone: statusTone + ) { + if isScanning || isExecutingPlan { + AtlasLoadingState( + title: isScanning ? AtlasL10n.string("smartclean.loading.scan") : AtlasL10n.string("smartclean.loading.execute"), + detail: scanSummary, + progress: scanProgress == 0 ? nil : scanProgress + ) + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.lg) { + Text(scanSummary) + .font(AtlasTypography.body) + .foregroundStyle(.secondary) + .fixedSize(horizontal: false, vertical: true) + + if scanProgress > 0 { + ProgressView(value: max(scanProgress, 0), total: 1) + .controlSize(.large) + } + + AtlasCallout( + title: primaryAction.title, + detail: primaryAction.detail, + tone: primaryAction.tone, + systemImage: primaryAction.systemImage + ) + + HStack(alignment: .center, spacing: AtlasSpacing.md) { + Button(action: primaryAction.handler(startScan: onStartScan, refreshPreview: onRefreshPreview, executePlan: onExecutePlan)) { + Label(primaryAction.buttonTitle, systemImage: primaryAction.buttonSystemImage) + } + .buttonStyle(.atlasPrimary) + .keyboardShortcut(.defaultAction) + .disabled(primaryAction.isDisabled(canExecutePlan: canExecutePlan)) + .accessibilityIdentifier(primaryAction.accessibilityIdentifier) + .accessibilityHint(primaryAction.accessibilityHint) + + if primaryAction != .scan { + Button(action: onStartScan) { + Label(AtlasL10n.string("smartclean.action.runScan"), systemImage: "sparkles") + } + .buttonStyle(.atlasSecondary) + .keyboardShortcut("s", modifiers: [.command, .option]) + .disabled(isScanning || isExecutingPlan) + .accessibilityIdentifier("smartclean.runScan") + .accessibilityHint(AtlasL10n.string("smartclean.action.runScan.hint")) + } + + if primaryAction != .refresh, !findings.isEmpty { + Button(action: onRefreshPreview) { + Label(AtlasL10n.string("smartclean.action.refreshPreview"), systemImage: "arrow.clockwise") + } + .buttonStyle(.atlasGhost) + .disabled(isScanning || isExecutingPlan) + .accessibilityIdentifier("smartclean.refreshPreview") + .accessibilityHint(AtlasL10n.string("smartclean.action.refreshPreview.hint")) + } + + Spacer(minLength: 0) + } + } + } + } + + LazyVGrid(columns: AtlasLayout.metricColumns, spacing: AtlasSpacing.lg) { + AtlasMetricCard( + title: AtlasL10n.string("smartclean.metric.previewSize.title"), + value: AtlasFormatters.byteCount(resolvedPlanEstimatedBytes), + detail: AtlasL10n.string("smartclean.metric.previewSize.detail"), + tone: .success, + systemImage: "internaldrive" + ) + AtlasMetricCard( + title: AtlasL10n.string("smartclean.metric.actions.title"), + value: "\(plan.items.count)", + detail: AtlasL10n.string("smartclean.metric.actions.detail"), + tone: .neutral, + systemImage: "checklist" + ) + AtlasMetricCard( + title: AtlasL10n.string("smartclean.metric.review.title"), + value: "\(manualReviewCount)", + detail: manualReviewCount == 0 ? AtlasL10n.string("smartclean.metric.review.none") : AtlasL10n.string("smartclean.metric.review.some"), + tone: manualReviewCount == 0 ? .success : .warning, + systemImage: "exclamationmark.bubble" + ) + } + + AtlasInfoCard( + title: AtlasL10n.string("smartclean.preview.title"), + subtitle: plan.title, + tone: manualReviewCount == 0 ? .success : .warning + ) { + if !plan.items.isEmpty { + AtlasMetricCard( + title: AtlasL10n.string("smartclean.preview.metric.space.title"), + value: AtlasFormatters.byteCount(resolvedPlanEstimatedBytes), + detail: AtlasL10n.string( + plan.items.count == 1 + ? "smartclean.preview.metric.space.detail.one" + : "smartclean.preview.metric.space.detail.other", + plan.items.count + ), + tone: .success, + systemImage: "internaldrive", + elevation: .prominent + ) + } + + if !plan.items.isEmpty { + AtlasCallout( + title: planValidationCalloutTitle, + detail: planValidationCalloutDetail, + tone: planValidationCalloutTone, + systemImage: planValidationCalloutSymbol + ) + } + + AtlasCallout( + title: manualReviewCount == 0 ? AtlasL10n.string("smartclean.preview.callout.safe.title") : AtlasL10n.string("smartclean.preview.callout.review.title"), + detail: manualReviewCount == 0 + ? AtlasL10n.string("smartclean.preview.callout.safe.detail") + : AtlasL10n.string("smartclean.preview.callout.review.detail"), + tone: manualReviewCount == 0 ? .success : .warning, + systemImage: manualReviewCount == 0 ? "checkmark.shield.fill" : "exclamationmark.triangle.fill" + ) + + if plan.items.isEmpty { + AtlasEmptyState( + title: AtlasL10n.string("smartclean.preview.empty.title"), + detail: AtlasL10n.string("smartclean.preview.empty.detail"), + systemImage: "list.bullet.clipboard", + tone: .neutral + ) + } else { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ForEach(plan.items) { item in + AtlasDetailRow( + title: item.title, + subtitle: item.detail, + footnote: supportText(for: item.kind), + systemImage: icon(for: item.kind), + tone: item.recoverable ? .success : .warning + ) { + VStack(alignment: .trailing, spacing: AtlasSpacing.xs) { + AtlasStatusChip( + isPhysicallyExecutable(item) + ? AtlasL10n.string("smartclean.execution.real") + : AtlasL10n.string("smartclean.execution.reviewOnly"), + tone: isPhysicallyExecutable(item) ? .success : .warning + ) + AtlasStatusChip(item.recoverable ? AtlasL10n.string("common.recoverable") : AtlasL10n.string("common.manualReview"), tone: item.recoverable ? .success : .warning) + } + } + } + } + } + } + + if findings.isEmpty { + AtlasEmptyState( + title: AtlasL10n.string("smartclean.empty.title"), + detail: AtlasL10n.string("smartclean.empty.detail"), + systemImage: "sparkles.tv", + tone: .neutral + ) + } else { + ForEach(RiskLevel.allCases, id: \.self) { risk in + riskSection(risk) + } + } + } + } + + @ViewBuilder + private func riskSection(_ risk: RiskLevel) -> some View { + let items = findings.filter { $0.risk == risk } + + if !items.isEmpty { + AtlasInfoCard( + title: risk.title, + subtitle: sectionDetail(for: risk), + tone: risk.atlasTone + ) { + VStack(alignment: .leading, spacing: AtlasSpacing.md) { + ForEach(items) { finding in + AtlasDetailRow( + title: finding.title, + subtitle: finding.detail, + footnote: "\(AtlasL10n.localizedCategory(finding.category)) • \(actionExpectation(for: finding.risk))", + systemImage: icon(for: finding.category), + tone: risk.atlasTone + ) { + VStack(alignment: .trailing, spacing: AtlasSpacing.sm) { + AtlasStatusChip(AtlasL10n.localizedCategory(finding.category), tone: risk.atlasTone) + Text(AtlasFormatters.byteCount(finding.bytes)) + .font(AtlasTypography.label) + .foregroundStyle(.secondary) + } + } + } + } + } + } + } + + private var resolvedPlanEstimatedBytes: Int64 { + if plan.estimatedBytes > 0 { + return plan.estimatedBytes + } + + let planItemIDs = Set(plan.items.map(\.id)) + if !planItemIDs.isEmpty { + let matchingFindings = findings.filter { planItemIDs.contains($0.id) } + if !matchingFindings.isEmpty { + return matchingFindings.map(\.bytes).reduce(0, +) + } + } + + return findings.map(\.bytes).reduce(0, +) + } + + private var executablePlanItemCount: Int { + plan.items.filter(isPhysicallyExecutable).count + } + + private var reviewOnlyPlanItemCount: Int { + max(plan.items.count - executablePlanItemCount, 0) + } + + private var executionCoverageTitle: String { + if reviewOnlyPlanItemCount == 0 { + return AtlasL10n.string("smartclean.execution.coverage.full", executablePlanItemCount) + } + return AtlasL10n.string("smartclean.execution.coverage.partial", executablePlanItemCount, plan.items.count) + } + + private var executionCoverageDetail: String { + if reviewOnlyPlanItemCount == 0 { + return AtlasL10n.string("smartclean.execution.coverage.full.detail") + } + return AtlasL10n.string("smartclean.execution.coverage.partial.detail", reviewOnlyPlanItemCount) + } + + private func isPhysicallyExecutable(_ item: ActionItem) -> Bool { + guard item.kind != .inspectPermission else { + return false + } + guard let finding = findings.first(where: { $0.id == item.id }) else { + return false + } + return !((finding.targetPaths ?? []).isEmpty) + } + + private var manualReviewCount: Int { + plan.items.filter { !$0.recoverable }.count + } + + private var hasPlanRevalidationFailure: Bool { + !isCurrentPlanFresh && planIssue != nil + } + + private var isShowingCachedPlanState: Bool { + !isCurrentPlanFresh && !plan.items.isEmpty + } + + private var planValidationCalloutTitle: String { + if isCurrentPlanFresh { + return executionCoverageTitle + } + if hasPlanRevalidationFailure { + return AtlasL10n.string("smartclean.revalidationFailed.title") + } + return AtlasL10n.string("smartclean.cached.title") + } + + private var planValidationCalloutDetail: String { + if isCurrentPlanFresh { + return executionCoverageDetail + } + return planIssue ?? AtlasL10n.string("smartclean.cached.detail") + } + + private var planValidationCalloutTone: AtlasTone { + if hasPlanRevalidationFailure { + return .danger + } + return isCurrentPlanFresh && reviewOnlyPlanItemCount == 0 ? .success : .warning + } + + private var planValidationCalloutSymbol: String { + if hasPlanRevalidationFailure { + return "xmark.octagon.fill" + } + return isCurrentPlanFresh && reviewOnlyPlanItemCount == 0 ? "play.circle.fill" : "externaldrive.badge.exclamationmark" + } + + private var statusTitle: String { + if isScanning { return AtlasL10n.string("smartclean.status.scanning") } + if isExecutingPlan { return AtlasL10n.string("smartclean.status.executing") } + if hasPlanRevalidationFailure { return AtlasL10n.string("smartclean.status.revalidationFailed") } + if isShowingCachedPlanState { return AtlasL10n.string("smartclean.status.cached") } + if findings.isEmpty { return AtlasL10n.string("smartclean.status.empty") } + return AtlasL10n.string("smartclean.status.ready") + } + + private var statusDetail: String { + if isScanning || isExecutingPlan { return scanSummary } + if hasPlanRevalidationFailure { return planIssue ?? AtlasL10n.string("smartclean.cached.detail") } + if isShowingCachedPlanState { return AtlasL10n.string("smartclean.cached.detail") } + if findings.isEmpty { return AtlasL10n.string("smartclean.status.empty.detail") } + return AtlasL10n.string("smartclean.status.ready.detail", findings.count) + } + + private var statusTone: AtlasTone { + if isExecutingPlan { return .warning } + if isScanning { return .neutral } + if hasPlanRevalidationFailure { return .danger } + if isShowingCachedPlanState { return .warning } + return manualReviewCount == 0 ? .success : .warning + } + + private var statusSymbol: String { + if isScanning { return "sparkles" } + if isExecutingPlan { return "play.circle.fill" } + if hasPlanRevalidationFailure { return "xmark.octagon.fill" } + if isShowingCachedPlanState { return "externaldrive.badge.exclamationmark" } + return manualReviewCount == 0 ? "checkmark.shield.fill" : "exclamationmark.triangle.fill" + } + + private var primaryAction: SmartCleanPrimaryAction { + if plan.items.isEmpty { + return findings.isEmpty ? .scan : .refresh + } + if isCurrentPlanFresh && canExecutePlan { + return .execute + } + return .refresh + } + + private func supportText(for kind: ActionItem.Kind) -> String { + switch kind { + case .removeCache: + return AtlasL10n.string("smartclean.support.removeCache") + case .removeApp: + return AtlasL10n.string("smartclean.support.removeApp") + case .archiveFile: + return AtlasL10n.string("smartclean.support.archiveFile") + case .inspectPermission: + return AtlasL10n.string("smartclean.support.inspectPermission") + } + } + + private func sectionDetail(for risk: RiskLevel) -> String { + switch risk { + case .safe: + return AtlasL10n.string("smartclean.section.safe") + case .review: + return AtlasL10n.string("smartclean.section.review") + case .advanced: + return AtlasL10n.string("smartclean.section.advanced") + } + } + + private func actionExpectation(for risk: RiskLevel) -> String { + switch risk { + case .safe: + return AtlasL10n.string("smartclean.expectation.safe") + case .review: + return AtlasL10n.string("smartclean.expectation.review") + case .advanced: + return AtlasL10n.string("smartclean.expectation.advanced") + } + } + + private func icon(for kind: ActionItem.Kind) -> String { + switch kind { + case .removeCache: + return "trash" + case .removeApp: + return "app.badge.minus" + case .archiveFile: + return "archivebox" + case .inspectPermission: + return "lock.shield" + } + } + + private func icon(for category: String) -> String { + switch category.lowercased() { + case "developer": + return "hammer" + case "system": + return "gearshape.2" + case "apps": + return "square.stack.3d.up" + case "browsers": + return "globe" + default: + return "sparkles" + } + } +} + +private enum SmartCleanPrimaryAction: Equatable { + case scan + case refresh + case execute + + var title: String { + switch self { + case .scan: + return AtlasL10n.string("smartclean.primary.scan.title") + case .refresh: + return AtlasL10n.string("smartclean.primary.refresh.title") + case .execute: + return AtlasL10n.string("smartclean.primary.execute.title") + } + } + + var detail: String { + switch self { + case .scan: + return AtlasL10n.string("smartclean.primary.scan.detail") + case .refresh: + return AtlasL10n.string("smartclean.primary.refresh.detail") + case .execute: + return AtlasL10n.string("smartclean.primary.execute.detail") + } + } + + var tone: AtlasTone { + switch self { + case .scan, .refresh: + return .neutral + case .execute: + return .warning + } + } + + var systemImage: String { + switch self { + case .scan: + return "sparkles" + case .refresh: + return "arrow.clockwise" + case .execute: + return "play.circle.fill" + } + } + + var buttonTitle: String { + switch self { + case .scan: + return AtlasL10n.string("smartclean.action.runScan") + case .refresh: + return AtlasL10n.string("smartclean.action.refreshPreview") + case .execute: + return AtlasL10n.string("smartclean.action.execute") + } + } + + var buttonSystemImage: String { + switch self { + case .scan: + return "sparkles" + case .refresh: + return "arrow.clockwise" + case .execute: + return "play.fill" + } + } + + var accessibilityIdentifier: String { + switch self { + case .scan: + return "smartclean.runScan" + case .refresh: + return "smartclean.refreshPreview" + case .execute: + return "smartclean.executePreview" + } + } + + var accessibilityHint: String { + switch self { + case .scan: + return AtlasL10n.string("smartclean.action.runScan.hint") + case .refresh: + return AtlasL10n.string("smartclean.action.refreshPreview.hint") + case .execute: + return AtlasL10n.string("smartclean.action.execute.hint") + } + } + + func isDisabled(canExecutePlan: Bool) -> Bool { + switch self { + case .execute: + return !canExecutePlan + case .scan, .refresh: + return false + } + } + + func handler( + startScan: @escaping () -> Void, + refreshPreview: @escaping () -> Void, + executePlan: @escaping () -> Void + ) -> () -> Void { + switch self { + case .scan: + return startScan + case .refresh: + return refreshPreview + case .execute: + return executePlan + } + } +} + +private extension RiskLevel { + var atlasTone: AtlasTone { + switch self { + case .safe: + return .success + case .review: + return .warning + case .advanced: + return .danger + } + } +} diff --git a/Packages/AtlasFeaturesStorage/README.md b/Packages/AtlasFeaturesStorage/README.md new file mode 100644 index 0000000..12571eb --- /dev/null +++ b/Packages/AtlasFeaturesStorage/README.md @@ -0,0 +1,10 @@ +# AtlasFeaturesStorage + +## Responsibility + +- Large folders, large files, and unused-item views + +## Note + +- The full storage module is deferred beyond the frozen MVP shell. +- This directory remains scaffolded so list-based storage work can resume without changing top-level layout. diff --git a/Packages/AtlasFeaturesStorage/Sources/AtlasFeaturesStorage/StorageFeatureView.swift b/Packages/AtlasFeaturesStorage/Sources/AtlasFeaturesStorage/StorageFeatureView.swift new file mode 100644 index 0000000..dd9841c --- /dev/null +++ b/Packages/AtlasFeaturesStorage/Sources/AtlasFeaturesStorage/StorageFeatureView.swift @@ -0,0 +1,42 @@ +import AtlasDesignSystem +import AtlasDomain +import SwiftUI + +public struct StorageFeatureView: View { + private let insights: [StorageInsight] + + public init(insights: [StorageInsight] = AtlasScaffoldFixtures.storageInsights) { + self.insights = insights + } + + public var body: some View { + AtlasScreen( + title: "Storage", + subtitle: "Reserved list-based storage views for a future scope decision beyond the frozen MVP shell." + ) { + AtlasInfoCard(title: "Large Items") { + VStack(alignment: .leading, spacing: 14) { + ForEach(insights) { insight in + HStack(alignment: .top, spacing: 12) { + VStack(alignment: .leading, spacing: 6) { + Text(insight.title) + .font(.headline) + Text(insight.path) + .font(.subheadline) + .foregroundStyle(.secondary) + } + + Spacer() + + VStack(alignment: .trailing, spacing: 8) { + AtlasStatusChip(insight.ageDescription, tone: .neutral) + Text(AtlasFormatters.byteCount(insight.bytes)) + .font(.subheadline.weight(.medium)) + } + } + } + } + } + } + } +} diff --git a/Packages/AtlasInfrastructure/README.md b/Packages/AtlasInfrastructure/README.md new file mode 100644 index 0000000..fa0145f --- /dev/null +++ b/Packages/AtlasInfrastructure/README.md @@ -0,0 +1,19 @@ +# AtlasInfrastructure + +## Responsibility + +- Persistence +- Logging and audit events +- Permission inspection +- Process, helper, and XPC transport support +- Worker-boundary orchestration for MVP flows + +## Current Implementation + +- `AtlasWorkspaceRepository` persists the workspace snapshot, current plan, and settings as local JSON state. +- `AtlasScaffoldWorkerService` now backs scan, preview, execute, restore, apps, uninstall, and settings flows through structured protocol requests. +- `AtlasPermissionInspector` performs best-effort macOS permission checks for Full Disk Access, Accessibility, and Notifications. +- `AtlasPrivilegedHelperClient` invokes the allowlisted helper executable using structured JSON payloads. +- `AtlasXPCWorkerClient` and `AtlasXPCListenerDelegate` provide the real app-to-worker transport boundary using `NSXPCConnection` with structured `Data` payloads. +- `AtlasPreferredWorkerService` prefers the bundled XPC service and falls back to the in-process worker when needed. +- `AtlasAuditStore` records audit-friendly task events. diff --git a/Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift b/Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift new file mode 100644 index 0000000..5a9e4d8 --- /dev/null +++ b/Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasInfrastructure.swift @@ -0,0 +1,1227 @@ +import AtlasApplication +import AtlasDomain +import AtlasProtocol +import Foundation +import ApplicationServices +import UserNotifications + +public struct AuditEntry: Identifiable, Hashable, Sendable { + public var id: UUID + public var createdAt: Date + public var message: String + + public init(id: UUID = UUID(), createdAt: Date = Date(), message: String) { + self.id = id + self.createdAt = createdAt + self.message = message + } +} + +public actor AtlasAuditStore { + private var entries: [AuditEntry] + + public init(entries: [AuditEntry] = []) { + self.entries = entries + } + + public func append(_ message: String) { + entries.insert(AuditEntry(message: message), at: 0) + } + + public func allEntries() -> [AuditEntry] { + entries + } +} + +public struct AtlasCapabilityStatus: Hashable, Sendable { + public var workerConnected: Bool + public var helperInstalled: Bool + public var protocolVersion: String + + public init( + workerConnected: Bool = false, + helperInstalled: Bool = false, + protocolVersion: String = AtlasProtocolVersion.current + ) { + self.workerConnected = workerConnected + self.helperInstalled = helperInstalled + self.protocolVersion = protocolVersion + } +} + +public struct AtlasPermissionInspector: Sendable { + private let fullDiskAccessProbeURLs: [URL] + private let protectedLocationReader: @Sendable (URL) -> Bool + private let accessibilityStatusProvider: @Sendable () -> Bool + private let notificationsAuthorizationProvider: @Sendable () async -> Bool + + public init( + homeDirectoryURL: URL = FileManager.default.homeDirectoryForCurrentUser, + fullDiskAccessProbeURLs: [URL]? = nil, + protectedLocationReader: (@Sendable (URL) -> Bool)? = nil, + accessibilityStatusProvider: (@Sendable () -> Bool)? = nil, + notificationsAuthorizationProvider: (@Sendable () async -> Bool)? = nil + ) { + self.fullDiskAccessProbeURLs = fullDiskAccessProbeURLs ?? Self.defaultFullDiskAccessProbeURLs(homeDirectoryURL: homeDirectoryURL) + self.protectedLocationReader = protectedLocationReader ?? { url in + Self.defaultProtectedLocationReader(url) + } + self.accessibilityStatusProvider = accessibilityStatusProvider ?? { + Self.defaultAccessibilityStatusProvider() + } + self.notificationsAuthorizationProvider = notificationsAuthorizationProvider ?? { + await Self.defaultNotificationsAuthorizationProvider() + } + } + + public func snapshot() async -> [PermissionState] { + [ + fullDiskAccessState(), + accessibilityState(), + await notificationsState(), + ] + } + + private static func defaultFullDiskAccessProbeURLs(homeDirectoryURL: URL) -> [URL] { + [ + homeDirectoryURL.appendingPathComponent("Library/Application Support/com.apple.TCC/TCC.db"), + homeDirectoryURL.appendingPathComponent("Library/Mail", isDirectory: true), + homeDirectoryURL.appendingPathComponent("Library/Safari", isDirectory: true), + homeDirectoryURL.appendingPathComponent("Library/Messages", isDirectory: true), + homeDirectoryURL.appendingPathComponent("Library/Calendars", isDirectory: true), + ] + } + + private static func defaultProtectedLocationReader(_ url: URL) -> Bool { + var isDirectory: ObjCBool = false + guard FileManager.default.fileExists(atPath: url.path, isDirectory: &isDirectory) else { + return false + } + + if isDirectory.boolValue { + return (try? FileManager.default.contentsOfDirectory(at: url, includingPropertiesForKeys: nil)) != nil + } + + guard FileManager.default.isReadableFile(atPath: url.path) else { + return false + } + + do { + let handle = try FileHandle(forReadingFrom: url) + try handle.close() + return true + } catch { + return false + } + } + + private static func defaultAccessibilityStatusProvider() -> Bool { + AXIsProcessTrusted() + } + + private static func defaultNotificationsAuthorizationProvider() async -> Bool { + let settings = await withCheckedContinuation { continuation in + UNUserNotificationCenter.current().getNotificationSettings { settings in + continuation.resume(returning: settings) + } + } + + return settings.authorizationStatus == .authorized || settings.authorizationStatus == .provisional + } + + private func fullDiskAccessState() -> PermissionState { + let accessibleProtectedPath = fullDiskAccessProbeURLs.first(where: protectedLocationReader) + let isGranted = accessibleProtectedPath != nil + let rationale = isGranted + ? AtlasL10n.string("infrastructure.permission.fullDiskAccess.granted") + : AtlasL10n.string("infrastructure.permission.fullDiskAccess.needed") + + return PermissionState(kind: .fullDiskAccess, isGranted: isGranted, rationale: rationale) + } + + private func accessibilityState() -> PermissionState { + let isGranted = accessibilityStatusProvider() + let rationale = isGranted + ? AtlasL10n.string("infrastructure.permission.accessibility.granted") + : AtlasL10n.string("infrastructure.permission.accessibility.needed") + return PermissionState(kind: .accessibility, isGranted: isGranted, rationale: rationale) + } + + private func notificationsState() async -> PermissionState { + let isGranted = await notificationsAuthorizationProvider() + let rationale = isGranted + ? AtlasL10n.string("infrastructure.permission.notifications.granted") + : AtlasL10n.string("infrastructure.permission.notifications.needed") + return PermissionState(kind: .notifications, isGranted: isGranted, rationale: rationale) + } +} + +public enum AtlasWorkspaceRepositoryError: LocalizedError, Sendable, Equatable { + case readFailed(String) + case decodeFailed(String) + case createDirectoryFailed(String) + case encodeFailed(String) + case writeFailed(String) + + public var errorDescription: String? { + switch self { + case let .readFailed(reason): + return "Failed to read workspace state: \(reason)" + case let .decodeFailed(reason): + return "Failed to decode workspace state: \(reason)" + case let .createDirectoryFailed(reason): + return "Failed to prepare workspace state directory: \(reason)" + case let .encodeFailed(reason): + return "Failed to encode workspace state: \(reason)" + case let .writeFailed(reason): + return "Failed to write workspace state: \(reason)" + } + } +} + +public enum AtlasSmartCleanExecutionSupport { + public static func requiresHelper(for targetURL: URL, homeDirectoryURL: URL = FileManager.default.homeDirectoryForCurrentUser) -> Bool { + let helperRoots = [ + "/Applications", + homeDirectoryURL.appendingPathComponent("Applications", isDirectory: true).path, + homeDirectoryURL.appendingPathComponent("Library/LaunchAgents", isDirectory: true).path, + "/Library/LaunchAgents", + "/Library/LaunchDaemons", + ] + let path = targetURL.path + return helperRoots.contains { root in + path == root || path.hasPrefix(root + "/") + } + } + + public static func isDirectlyTrashable(_ targetURL: URL, homeDirectoryURL: URL = FileManager.default.homeDirectoryForCurrentUser) -> Bool { + let path = targetURL.path + let home = homeDirectoryURL.path + guard path.hasPrefix(home + "/") else { return false } + if path == home || path == home + "/Library" { return false } + + let safePrefixes = [ + home + "/Library/Caches", + home + "/Library/Logs", + home + "/Library/Suggestions", + home + "/Library/Messages/Caches", + home + "/Library/Developer/Xcode/DerivedData", + home + "/.npm", + home + "/.npm_cache", + home + "/.oh-my-zsh/cache", + home + "/.cache", + home + "/.pytest_cache", + home + "/.jupyter/runtime", + home + "/.tnpm/_cacache", + home + "/.tnpm/_logs", + home + "/.yarn/cache", + home + "/.bun/install/cache", + home + "/.pyenv/cache", + home + "/.conda/pkgs", + home + "/anaconda3/pkgs", + home + "/.cargo/registry/cache", + home + "/.cargo/git", + home + "/.rustup/downloads", + home + "/.docker/buildx/cache", + home + "/.kube/cache", + home + "/.local/share/containers/storage/tmp", + home + "/.aws/cli/cache", + home + "/.config/gcloud/logs", + home + "/.azure/logs", + home + "/.node-gyp", + home + "/.turbo/cache", + home + "/.vite/cache", + home + "/.parcel-cache", + home + "/.android/build-cache", + home + "/.android/cache", + home + "/.cache/swift-package-manager", + home + "/.expo/expo-go", + home + "/.expo/android-apk-cache", + home + "/.expo/ios-simulator-app-cache", + home + "/.expo/native-modules-cache", + home + "/.expo/schema-cache", + home + "/.expo/template-cache", + home + "/.expo/versions-cache", + home + "/.vagrant.d/tmp", + ] + if safePrefixes.contains(where: { path == $0 || path.hasPrefix($0 + "/") }) { + return true + } + + let safeFragments = [ + "/__pycache__", + "/.next/cache", + "/component_crx_cache", + "/GoogleUpdater", + "/CoreSimulator.log", + ] + if safeFragments.contains(where: { path.contains($0) }) { + return true + } + + let basename = targetURL.lastPathComponent.lowercased() + if basename.hasSuffix(".pyc") { + return true + } + + let safeBasenamePrefixes = [ + ".zcompdump", + ".zsh_history.bak", + ] + return safeBasenamePrefixes.contains(where: { basename.hasPrefix($0) }) + } + + public static func isSupportedExecutionTarget(_ targetURL: URL, homeDirectoryURL: URL = FileManager.default.homeDirectoryForCurrentUser) -> Bool { + requiresHelper(for: targetURL, homeDirectoryURL: homeDirectoryURL) + || isDirectlyTrashable(targetURL, homeDirectoryURL: homeDirectoryURL) + } + + public static func isFindingExecutionSupported(_ finding: Finding, homeDirectoryURL: URL = FileManager.default.homeDirectoryForCurrentUser) -> Bool { + guard let targetPaths = finding.targetPaths, !targetPaths.isEmpty else { + return false + } + return targetPaths.allSatisfy { rawPath in + let url = URL(fileURLWithPath: rawPath).resolvingSymlinksInPath() + return isSupportedExecutionTarget(url, homeDirectoryURL: homeDirectoryURL) + } + } +} + +public struct AtlasWorkspaceRepository: Sendable { + private let stateFileURL: URL + + public init(stateFileURL: URL? = nil) { + self.stateFileURL = stateFileURL ?? Self.defaultStateFileURL + } + + public func loadState() -> AtlasWorkspaceState { + let decoder = JSONDecoder() + + if FileManager.default.fileExists(atPath: stateFileURL.path) { + do { + let data = try Data(contentsOf: stateFileURL) + return try decoder.decode(AtlasWorkspaceState.self, from: data) + } catch let repositoryError as AtlasWorkspaceRepositoryError { + reportFailure(repositoryError, operation: "load existing workspace state from \(stateFileURL.path)") + } catch { + reportFailure( + AtlasWorkspaceRepositoryError.decodeFailed(error.localizedDescription), + operation: "decode workspace state from \(stateFileURL.path)" + ) + } + } + + let state = AtlasScaffoldWorkspace.state() + do { + _ = try saveState(state) + } catch { + reportFailure(error, operation: "seed initial workspace state at \(stateFileURL.path)") + } + return state + } + + @discardableResult + public func saveState(_ state: AtlasWorkspaceState) throws -> AtlasWorkspaceState { + let encoder = JSONEncoder() + encoder.outputFormatting = [.prettyPrinted, .sortedKeys] + + do { + try FileManager.default.createDirectory( + at: stateFileURL.deletingLastPathComponent(), + withIntermediateDirectories: true + ) + } catch { + throw AtlasWorkspaceRepositoryError.createDirectoryFailed(error.localizedDescription) + } + + let data: Data + do { + data = try encoder.encode(state) + } catch { + throw AtlasWorkspaceRepositoryError.encodeFailed(error.localizedDescription) + } + + do { + try data.write(to: stateFileURL, options: .atomic) + } catch { + throw AtlasWorkspaceRepositoryError.writeFailed(error.localizedDescription) + } + + return state + } + + public func loadScaffoldSnapshot() -> AtlasWorkspaceSnapshot { + loadState().snapshot + } + + public func loadCurrentPlan() -> ActionPlan { + loadState().currentPlan + } + + public func loadSettings() -> AtlasSettings { + loadState().settings + } + + private func reportFailure(_ error: Error, operation: String) { + let message = "[AtlasWorkspaceRepository] Failed to \(operation): \(error.localizedDescription)\n" + if let data = message.data(using: .utf8) { + try? FileHandle.standardError.write(contentsOf: data) + } + } + + private static var defaultStateFileURL: URL { + if let explicit = ProcessInfo.processInfo.environment["ATLAS_STATE_FILE"], !explicit.isEmpty { + return URL(fileURLWithPath: explicit) + } + + let baseDirectory: URL + if let explicitDirectory = ProcessInfo.processInfo.environment["ATLAS_STATE_DIR"], !explicitDirectory.isEmpty { + baseDirectory = URL(fileURLWithPath: explicitDirectory, isDirectory: true) + } else { + let applicationSupport = FileManager.default.urls(for: .applicationSupportDirectory, in: .userDomainMask).first + ?? URL(fileURLWithPath: NSHomeDirectory()).appendingPathComponent("Library/Application Support", isDirectory: true) + baseDirectory = applicationSupport.appendingPathComponent("AtlasForMac", isDirectory: true) + } + + return baseDirectory.appendingPathComponent("workspace-state.json") + } +} + +public actor AtlasScaffoldWorkerService: AtlasWorkerServing { + private let repository: AtlasWorkspaceRepository + private let auditStore: AtlasAuditStore + private let permissionInspector: AtlasPermissionInspector + private let healthSnapshotProvider: (any AtlasHealthSnapshotProviding)? + private let smartCleanScanProvider: (any AtlasSmartCleanScanProviding)? + private let appsInventoryProvider: (any AtlasAppInventoryProviding)? + private let helperExecutor: (any AtlasPrivilegedActionExecuting)? + private let allowProviderFailureFallback: Bool + private let allowStateOnlyCleanExecution: Bool + private var state: AtlasWorkspaceState + + public init( + repository: AtlasWorkspaceRepository = AtlasWorkspaceRepository(), + permissionInspector: AtlasPermissionInspector = AtlasPermissionInspector(), + healthSnapshotProvider: (any AtlasHealthSnapshotProviding)? = nil, + smartCleanScanProvider: (any AtlasSmartCleanScanProviding)? = nil, + appsInventoryProvider: (any AtlasAppInventoryProviding)? = nil, + helperExecutor: (any AtlasPrivilegedActionExecuting)? = nil, + auditStore: AtlasAuditStore = AtlasAuditStore(), + allowProviderFailureFallback: Bool = ProcessInfo.processInfo.environment["ATLAS_ALLOW_PROVIDER_FAILURE_FALLBACK"] == "1", + allowStateOnlyCleanExecution: Bool = ProcessInfo.processInfo.environment["ATLAS_ALLOW_STATE_ONLY_CLEAN_EXECUTION"] == "1" + ) { + self.repository = repository + self.auditStore = auditStore + self.permissionInspector = permissionInspector + self.healthSnapshotProvider = healthSnapshotProvider + self.smartCleanScanProvider = smartCleanScanProvider + self.appsInventoryProvider = appsInventoryProvider + self.helperExecutor = helperExecutor + self.allowProviderFailureFallback = allowProviderFailureFallback + self.allowStateOnlyCleanExecution = allowStateOnlyCleanExecution + self.state = repository.loadState() + AtlasL10n.setCurrentLanguage(self.state.settings.language) + } + + public func submit(_ request: AtlasRequestEnvelope) async throws -> AtlasWorkerCommandResult { + AtlasL10n.setCurrentLanguage(state.settings.language) + switch request.command { + case .healthSnapshot: + return try await healthSnapshot(using: request) + case .inspectPermissions: + return await inspectPermissions(using: request) + case let .startScan(taskID): + return await startScan(using: request, taskID: taskID) + case let .previewPlan(_, findingIDs): + return await previewPlan(using: request, findingIDs: findingIDs) + case let .executePlan(planID): + return await executePlan(using: request, planID: planID) + case let .restoreItems(taskID, itemIDs): + return await restoreItems(using: request, taskID: taskID, itemIDs: itemIDs) + case .appsList: + return await listApps(using: request) + case let .previewAppUninstall(appID): + return await previewAppUninstall(using: request, appID: appID) + case let .executeAppUninstall(appID): + return await executeAppUninstall(using: request, appID: appID) + case .settingsGet: + return await settingsGet(using: request) + case let .settingsSet(settings): + return await settingsSet(using: request, settings: settings) + } + } + + private func healthSnapshot(using request: AtlasRequestEnvelope) async throws -> AtlasWorkerCommandResult { + let healthSnapshot: AtlasHealthSnapshot + + if let healthSnapshotProvider { + healthSnapshot = try await healthSnapshotProvider.collectHealthSnapshot() + } else { + healthSnapshot = AtlasScaffoldFixtures.healthSnapshot + } + + state.snapshot.healthSnapshot = healthSnapshot + await persistState(context: "health snapshot") + let response = AtlasResponseEnvelope(requestID: request.id, response: .health(healthSnapshot)) + await auditStore.append("Collected health snapshot for request \(request.id.uuidString)") + return AtlasWorkerCommandResult(request: request, response: response, events: [], snapshot: state.snapshot) + } + + private func inspectPermissions(using request: AtlasRequestEnvelope) async -> AtlasWorkerCommandResult { + let permissions = await permissionInspector.snapshot() + state.snapshot.permissions = permissions + await persistState(context: "permission inspection") + let events = permissions.map { permission in + AtlasEventEnvelope(event: .permissionUpdated(permission)) + } + let response = AtlasResponseEnvelope(requestID: request.id, response: .permissions(permissions)) + await auditStore.append("Inspected permissions for request \(request.id.uuidString)") + return AtlasWorkerCommandResult(request: request, response: response, events: events, snapshot: state.snapshot) + } + + private func startScan(using request: AtlasRequestEnvelope, taskID: UUID) async -> AtlasWorkerCommandResult { + let scanSummary: String + if let smartCleanScanProvider { + do { + let scanResult = try await smartCleanScanProvider.collectSmartCleanScan() + state.snapshot.findings = scanResult.findings + scanSummary = scanResult.summary + } catch { + guard allowProviderFailureFallback else { + return rejectedResult( + for: request, + code: .executionUnavailable, + reason: "Smart Clean scan is unavailable because the upstream clean workflow could not complete: \(error.localizedDescription)" + ) + } + state.snapshot.findings = AtlasScaffoldFixtures.findings(language: state.settings.language) + scanSummary = AtlasL10n.string( + state.snapshot.findings.count == 1 ? "infrastructure.scan.completed.one" : "infrastructure.scan.completed.other", + language: state.settings.language, + state.snapshot.findings.count + ) + } + } else { + state.snapshot.findings = AtlasScaffoldFixtures.findings(language: state.settings.language) + scanSummary = AtlasL10n.string( + state.snapshot.findings.count == 1 ? "infrastructure.scan.completed.one" : "infrastructure.scan.completed.other", + language: state.settings.language, + state.snapshot.findings.count + ) + } + recalculateReclaimableSpace() + + let response = AtlasResponseEnvelope( + requestID: request.id, + response: .accepted(task: AtlasTaskDescriptor(taskID: taskID, kind: .scan)) + ) + + let progressEvents = (1 ... 4).map { step in + AtlasEventEnvelope(event: .taskProgress(taskID: taskID, completed: step, total: 4)) + } + + let completedRun = TaskRun( + id: taskID, + kind: .scan, + status: .completed, + summary: scanSummary, + startedAt: request.issuedAt, + finishedAt: Date() + ) + + state.snapshot.taskRuns.removeAll { $0.id == taskID } + state.snapshot.taskRuns.insert(completedRun, at: 0) + let previewPlan = makePreviewPlan(findingIDs: state.snapshot.findings.map(\.id)) + state.currentPlan = previewPlan + await persistState(context: "smart clean scan") + let events = progressEvents + [AtlasEventEnvelope(event: .taskFinished(completedRun))] + await auditStore.append("Completed Smart Clean scan \(taskID.uuidString)") + return AtlasWorkerCommandResult( + request: request, + response: response, + events: events, + snapshot: state.snapshot, + previewPlan: previewPlan + ) + } + + private func previewPlan(using request: AtlasRequestEnvelope, findingIDs: [UUID]) async -> AtlasWorkerCommandResult { + let plan = makePreviewPlan(findingIDs: findingIDs) + state.currentPlan = plan + await persistState(context: "preview plan refresh") + let response = AtlasResponseEnvelope(requestID: request.id, response: .preview(plan)) + await auditStore.append("Prepared preview plan \(plan.id.uuidString) for request \(request.id.uuidString)") + return AtlasWorkerCommandResult( + request: request, + response: response, + events: [], + snapshot: state.snapshot, + previewPlan: plan + ) + } + + private func executePlan(using request: AtlasRequestEnvelope, planID: UUID) async -> AtlasWorkerCommandResult { + guard state.currentPlan.id == planID else { + return rejectedResult( + for: request, + code: .invalidSelection, + reason: "The requested Smart Clean plan is no longer current. Refresh the preview and try again." + ) + } + + let selectedIDs = Set(state.currentPlan.items.map(\.id)) + let selectedFindings = state.snapshot.findings.filter { selectedIDs.contains($0.id) } + + guard !selectedFindings.isEmpty else { + return rejectedResult( + for: request, + code: .invalidSelection, + reason: "The requested Smart Clean items are no longer available. Refresh the preview and try again." + ) + } + + let executableFindings = selectedFindings.filter { actionKind(for: $0) != .inspectPermission } + let missingExecutableTargets = executableFindings.filter { ($0.targetPaths ?? []).isEmpty } + + if !missingExecutableTargets.isEmpty && !allowStateOnlyCleanExecution { + return rejectedResult( + for: request, + code: .executionUnavailable, + reason: "Smart Clean execution is unavailable because one or more plan items do not include executable cleanup targets in this build." + ) + } + let skippedCount = selectedFindings.count - executableFindings.count + let taskID = UUID() + + let response = AtlasResponseEnvelope( + requestID: request.id, + response: .accepted(task: AtlasTaskDescriptor(taskID: taskID, kind: .executePlan)) + ) + + var restoreMappingsByFindingID: [UUID: [RecoveryPathMapping]] = [:] + if !executableFindings.isEmpty { + do { + restoreMappingsByFindingID = try await executeSmartCleanFindings(executableFindings) + } catch { + if !allowStateOnlyCleanExecution { + return rejectedResult( + for: request, + code: .executionUnavailable, + reason: error.localizedDescription + ) + } + } + } + + let recoveryItems = executableFindings.map { makeRecoveryItem(for: $0, deletedAt: Date(), restoreMappings: restoreMappingsByFindingID[$0.id]) } + let executedIDs = Set(executableFindings.map(\.id)) + state.snapshot.findings.removeAll { executedIDs.contains($0.id) } + state.snapshot.recoveryItems.insert(contentsOf: recoveryItems, at: 0) + recalculateReclaimableSpace() + state.currentPlan = makePreviewPlan(findingIDs: state.snapshot.findings.map(\.id)) + + let executedCount = executableFindings.count + let summary = skippedCount == 0 + ? AtlasL10n.string(executedCount == 1 ? "infrastructure.execute.summary.clean.one" : "infrastructure.execute.summary.clean.other", language: state.settings.language, executedCount) + : AtlasL10n.string( + "infrastructure.execute.summary.clean.mixed", + language: state.settings.language, + executedCount, + skippedCount + ) + + let completedRun = TaskRun( + id: taskID, + kind: .executePlan, + status: .completed, + summary: summary, + startedAt: request.issuedAt, + finishedAt: Date() + ) + state.snapshot.taskRuns.removeAll { $0.id == taskID } + state.snapshot.taskRuns.insert(completedRun, at: 0) + await persistState(context: "execute Smart Clean plan") + let events = progressEvents(taskID: taskID, total: 3) + [AtlasEventEnvelope(event: .taskFinished(completedRun))] + await auditStore.append("Executed Smart Clean plan \(planID.uuidString)") + return AtlasWorkerCommandResult(request: request, response: response, events: events, snapshot: state.snapshot) + } + + private func restoreItems(using request: AtlasRequestEnvelope, taskID: UUID, itemIDs: [UUID]) async -> AtlasWorkerCommandResult { + let itemsToRestore = state.snapshot.recoveryItems.filter { itemIDs.contains($0.id) } + + guard !itemsToRestore.isEmpty else { + return rejectedResult( + for: request, + code: .invalidSelection, + reason: "The selected recovery item is no longer available." + ) + } + + for item in itemsToRestore { + if let restoreMappings = item.restoreMappings, !restoreMappings.isEmpty { + do { + try await restoreRecoveryMappings(restoreMappings) + } catch { + return rejectedResult( + for: request, + code: .executionUnavailable, + reason: error.localizedDescription + ) + } + } + + switch item.payload { + case let .finding(finding): + if !state.snapshot.findings.contains(where: { $0.id == finding.id }) { + state.snapshot.findings.insert(finding, at: 0) + } + case let .app(app): + if !state.snapshot.apps.contains(where: { $0.id == app.id }) { + state.snapshot.apps.insert(app, at: 0) + } + case nil: + break + } + } + + state.snapshot.recoveryItems.removeAll { itemIDs.contains($0.id) } + recalculateReclaimableSpace() + state.currentPlan = makePreviewPlan(findingIDs: state.snapshot.findings.map(\.id)) + + let completedRun = TaskRun( + id: taskID, + kind: .restore, + status: .completed, + summary: "Restored \(itemsToRestore.count) recovery item\(itemsToRestore.count == 1 ? "" : "s").", + startedAt: request.issuedAt, + finishedAt: Date() + ) + state.snapshot.taskRuns.removeAll { $0.id == taskID } + state.snapshot.taskRuns.insert(completedRun, at: 0) + await persistState(context: "restore recovery items") + let response = AtlasResponseEnvelope( + requestID: request.id, + response: .accepted(task: AtlasTaskDescriptor(taskID: taskID, kind: .restore)) + ) + let events = progressEvents(taskID: taskID, total: 2) + [AtlasEventEnvelope(event: .taskFinished(completedRun))] + await auditStore.append("Restored \(itemsToRestore.count) item(s) for task \(taskID.uuidString)") + return AtlasWorkerCommandResult(request: request, response: response, events: events, snapshot: state.snapshot) + } + + private func listApps(using request: AtlasRequestEnvelope) async -> AtlasWorkerCommandResult { + if let appsInventoryProvider, let apps = try? await appsInventoryProvider.collectInstalledApps(), !apps.isEmpty { + state.snapshot.apps = apps + await persistState(context: "refresh app inventory") + } + + let apps = state.snapshot.apps.sorted { lhs, rhs in + if lhs.bytes == rhs.bytes { + return lhs.name < rhs.name + } + return lhs.bytes > rhs.bytes + } + let response = AtlasResponseEnvelope(requestID: request.id, response: .apps(apps)) + await auditStore.append("Listed \(apps.count) apps for request \(request.id.uuidString)") + return AtlasWorkerCommandResult(request: request, response: response, events: [], snapshot: state.snapshot) + } + + private func previewAppUninstall(using request: AtlasRequestEnvelope, appID: UUID) async -> AtlasWorkerCommandResult { + guard let app = state.snapshot.apps.first(where: { $0.id == appID }) else { + return rejectedResult( + for: request, + code: .invalidSelection, + reason: "The selected app is no longer available for uninstall preview." + ) + } + + let preview = makeAppUninstallPreview(for: app) + let response = AtlasResponseEnvelope(requestID: request.id, response: .preview(preview)) + await auditStore.append("Prepared uninstall preview for \(app.name)") + return AtlasWorkerCommandResult( + request: request, + response: response, + events: [], + snapshot: state.snapshot, + previewPlan: preview + ) + } + + private func executeAppUninstall(using request: AtlasRequestEnvelope, appID: UUID) async -> AtlasWorkerCommandResult { + guard let app = state.snapshot.apps.first(where: { $0.id == appID }) else { + return rejectedResult( + for: request, + code: .invalidSelection, + reason: "The selected app is no longer available to uninstall." + ) + } + + var appRestoreMappings: [RecoveryPathMapping]? + if !app.bundlePath.isEmpty, FileManager.default.fileExists(atPath: app.bundlePath) { + guard let helperExecutor else { + return rejectedResult( + for: request, + code: .helperUnavailable, + reason: "Bundled helper unavailable for app uninstall. Build or package the helper and try again." + ) + } + + do { + let result = try await helperExecutor.perform( + AtlasHelperAction(kind: .trashItems, targetPath: app.bundlePath) + ) + guard result.success else { + return rejectedResult(for: request, code: .helperUnavailable, reason: result.message) + } + if let trashedPath = result.resolvedPath { + appRestoreMappings = [RecoveryPathMapping(originalPath: app.bundlePath, trashedPath: trashedPath)] + } + } catch { + return rejectedResult(for: request, code: .helperUnavailable, reason: error.localizedDescription) + } + } + + let taskID = UUID() + state.snapshot.apps.removeAll { $0.id == appID } + state.snapshot.recoveryItems.insert(makeRecoveryItem(for: app, deletedAt: Date(), restoreMappings: appRestoreMappings), at: 0) + + let completedRun = TaskRun( + id: taskID, + kind: .uninstallApp, + status: .completed, + summary: AtlasL10n.string("infrastructure.apps.uninstall.summary", language: state.settings.language, app.name), + startedAt: request.issuedAt, + finishedAt: Date() + ) + state.snapshot.taskRuns.removeAll { $0.id == taskID } + state.snapshot.taskRuns.insert(completedRun, at: 0) + await persistState(context: "execute app uninstall") + + let response = AtlasResponseEnvelope( + requestID: request.id, + response: .accepted(task: AtlasTaskDescriptor(taskID: taskID, kind: .uninstallApp)) + ) + let events = progressEvents(taskID: taskID, total: 3) + [AtlasEventEnvelope(event: .taskFinished(completedRun))] + await auditStore.append("Executed uninstall preview for \(app.name)") + return AtlasWorkerCommandResult(request: request, response: response, events: events, snapshot: state.snapshot) + } + + private func settingsGet(using request: AtlasRequestEnvelope) async -> AtlasWorkerCommandResult { + let response = AtlasResponseEnvelope(requestID: request.id, response: .settings(state.settings)) + return AtlasWorkerCommandResult(request: request, response: response, events: [], snapshot: state.snapshot) + } + + private func settingsSet(using request: AtlasRequestEnvelope, settings: AtlasSettings) async -> AtlasWorkerCommandResult { + state.settings = sanitized(settings: settings) + await persistState(context: "restore recovery items") + let response = AtlasResponseEnvelope(requestID: request.id, response: .settings(state.settings)) + await auditStore.append("Updated settings for request \(request.id.uuidString)") + return AtlasWorkerCommandResult(request: request, response: response, events: [], snapshot: state.snapshot) + } + + private func rejectedResult( + for request: AtlasRequestEnvelope, + code: AtlasProtocolErrorCode, + reason: String + ) -> AtlasWorkerCommandResult { + AtlasWorkerCommandResult( + request: request, + response: AtlasResponseEnvelope(requestID: request.id, response: .rejected(code: code, reason: reason)), + events: [], + snapshot: state.snapshot + ) + } + + private func persistState(context: String) async { + do { + _ = try repository.saveState(state) + } catch { + await auditStore.append("Failed to persist state after \(context): \(error.localizedDescription)") + let message = "[AtlasScaffoldWorkerService] Failed to persist state after \(context): \(error.localizedDescription)\n" + if let data = message.data(using: .utf8) { + try? FileHandle.standardError.write(contentsOf: data) + } + } + } + + private func recalculateReclaimableSpace() { + state.snapshot.reclaimableSpaceBytes = state.snapshot.findings.map(\.bytes).reduce(0, +) + } + + private func progressEvents(taskID: UUID, total: Int) -> [AtlasEventEnvelope] { + (1 ... total).map { step in + AtlasEventEnvelope(event: .taskProgress(taskID: taskID, completed: step, total: total)) + } + } + + private func executeSmartCleanFindings(_ findings: [Finding]) async throws -> [UUID: [RecoveryPathMapping]] { + var mappingsByFindingID: [UUID: [RecoveryPathMapping]] = [:] + for finding in findings { + let targetPaths = Array(Set(finding.targetPaths ?? [])).sorted() + guard !targetPaths.isEmpty else { + throw AtlasWorkspaceRepositoryError.writeFailed("Smart Clean finding is missing executable targets.") + } + var mappings: [RecoveryPathMapping] = [] + for targetPath in targetPaths { + if let mapping = try await trashSmartCleanTarget(at: targetPath) { + mappings.append(mapping) + } + } + mappingsByFindingID[finding.id] = mappings + } + return mappingsByFindingID + } + + private func trashSmartCleanTarget(at targetPath: String) async throws -> RecoveryPathMapping? { + let targetURL = URL(fileURLWithPath: targetPath).resolvingSymlinksInPath() + guard FileManager.default.fileExists(atPath: targetURL.path) else { + return nil + } + + if shouldUseHelperForSmartCleanTarget(targetURL) { + guard let helperExecutor else { + throw AtlasWorkspaceRepositoryError.writeFailed("Bundled helper unavailable for Smart Clean target: \(targetURL.path)") + } + let result = try await helperExecutor.perform(AtlasHelperAction(kind: .trashItems, targetPath: targetURL.path)) + guard result.success else { + throw AtlasWorkspaceRepositoryError.writeFailed(result.message) + } + guard let trashedPath = result.resolvedPath else { + throw AtlasWorkspaceRepositoryError.writeFailed("Smart Clean target was trashed but no recovery path was returned.") + } + return RecoveryPathMapping(originalPath: targetURL.path, trashedPath: trashedPath) + } + + guard isDirectlyTrashableSmartCleanTarget(targetURL) else { + throw AtlasWorkspaceRepositoryError.writeFailed("Smart Clean target is outside the supported execution allowlist: \(targetURL.path)") + } + + var trashedURL: NSURL? + try FileManager.default.trashItem(at: targetURL, resultingItemURL: &trashedURL) + guard let trashedPath = (trashedURL as URL?)?.path else { + throw AtlasWorkspaceRepositoryError.writeFailed("Smart Clean target was trashed but no recovery path was returned.") + } + return RecoveryPathMapping(originalPath: targetURL.path, trashedPath: trashedPath) + } + + private func restoreRecoveryMappings(_ restoreMappings: [RecoveryPathMapping]) async throws { + for mapping in restoreMappings { + try await restoreRecoveryTarget(mapping) + } + } + + private func restoreRecoveryTarget(_ mapping: RecoveryPathMapping) async throws { + let sourceURL = URL(fileURLWithPath: mapping.trashedPath).resolvingSymlinksInPath() + let destinationURL = URL(fileURLWithPath: mapping.originalPath).resolvingSymlinksInPath() + guard FileManager.default.fileExists(atPath: sourceURL.path) else { + throw AtlasWorkspaceRepositoryError.writeFailed("Recovery source is no longer available on disk: \(sourceURL.path)") + } + if shouldUseHelperForSmartCleanTarget(destinationURL) { + guard let helperExecutor else { + throw AtlasWorkspaceRepositoryError.writeFailed("Bundled helper unavailable for recovery target: \(destinationURL.path)") + } + let result = try await helperExecutor.perform(AtlasHelperAction(kind: .restoreItem, targetPath: sourceURL.path, destinationPath: destinationURL.path)) + guard result.success else { + throw AtlasWorkspaceRepositoryError.writeFailed(result.message) + } + return + } + guard isDirectlyTrashableSmartCleanTarget(destinationURL) else { + throw AtlasWorkspaceRepositoryError.writeFailed("Recovery target is outside the supported execution allowlist: \(destinationURL.path)") + } + if FileManager.default.fileExists(atPath: destinationURL.path) { + throw AtlasWorkspaceRepositoryError.writeFailed("Recovery target already exists: \(destinationURL.path)") + } + try FileManager.default.createDirectory(at: destinationURL.deletingLastPathComponent(), withIntermediateDirectories: true) + try FileManager.default.moveItem(at: sourceURL, to: destinationURL) + } + + private func shouldUseHelperForSmartCleanTarget(_ targetURL: URL) -> Bool { + AtlasSmartCleanExecutionSupport.requiresHelper(for: targetURL) + } + + private func isDirectlyTrashableSmartCleanTarget(_ targetURL: URL) -> Bool { + AtlasSmartCleanExecutionSupport.isDirectlyTrashable(targetURL) + } + + + private func recoveryExpiryDate(from deletedAt: Date) -> Date { + deletedAt.addingTimeInterval(TimeInterval(state.settings.recoveryRetentionDays * 86_400)) + } + + private func makeRecoveryItem(for finding: Finding, deletedAt: Date, restoreMappings: [RecoveryPathMapping]? = nil) -> RecoveryItem { + RecoveryItem( + title: finding.title, + detail: finding.detail, + originalPath: inferredPath(for: finding), + bytes: finding.bytes, + deletedAt: deletedAt, + expiresAt: recoveryExpiryDate(from: deletedAt), + payload: .finding(finding), + restoreMappings: restoreMappings + ) + } + + private func makeRecoveryItem(for app: AppFootprint, deletedAt: Date, restoreMappings: [RecoveryPathMapping]? = nil) -> RecoveryItem { + RecoveryItem( + title: app.name, + detail: AtlasL10n.string(app.leftoverItems == 1 ? "infrastructure.recovery.app.detail.one" : "infrastructure.recovery.app.detail.other", language: state.settings.language, app.leftoverItems), + originalPath: app.bundlePath, + bytes: app.bytes, + deletedAt: deletedAt, + expiresAt: recoveryExpiryDate(from: deletedAt), + payload: .app(app), + restoreMappings: restoreMappings + ) + } + + private func inferredPath(for finding: Finding) -> String { + if let firstTargetPath = finding.targetPaths?.first { + return firstTargetPath + } + switch finding.category.lowercased() { + case "developer": + return "~/Library/Developer" + case "system": + return "~/Library/Caches" + case "apps": + return "~/Library/Application Support" + case "browsers": + return "~/Library/Caches" + default: + return "~/Library" + } + } + + private func makePreviewPlan(findingIDs: [UUID]) -> ActionPlan { + let selectedFindings: [Finding] + + if findingIDs.isEmpty { + selectedFindings = state.snapshot.findings + } else { + let selected = state.snapshot.findings.filter { findingIDs.contains($0.id) } + selectedFindings = selected.isEmpty ? state.snapshot.findings : selected + } + + let items = selectedFindings.map { finding in + ActionItem( + id: finding.id, + title: actionTitle(for: finding), + detail: finding.detail, + kind: actionKind(for: finding), + recoverable: finding.risk != .advanced + ) + } + + let estimatedBytes = selectedFindings.map(\.bytes).reduce(0, +) + + return ActionPlan( + title: AtlasL10n.string(selectedFindings.count == 1 ? "infrastructure.plan.review.one" : "infrastructure.plan.review.other", language: state.settings.language, selectedFindings.count), + items: items, + estimatedBytes: estimatedBytes + ) + } + + private func makeAppUninstallPreview(for app: AppFootprint) -> ActionPlan { + ActionPlan( + title: AtlasL10n.string("infrastructure.plan.uninstall.title", language: state.settings.language, app.name), + items: [ + ActionItem( + id: app.id, + title: AtlasL10n.string("infrastructure.plan.uninstall.moveBundle.title", language: state.settings.language, app.name), + detail: AtlasL10n.string("infrastructure.plan.uninstall.moveBundle.detail", language: state.settings.language, app.bundlePath), + kind: .removeApp, + recoverable: true + ), + ActionItem( + title: AtlasL10n.string(app.leftoverItems == 1 ? "infrastructure.plan.uninstall.archive.one" : "infrastructure.plan.uninstall.archive.other", language: state.settings.language, app.leftoverItems), + detail: AtlasL10n.string("infrastructure.plan.uninstall.archive.detail", language: state.settings.language), + kind: .removeCache, + recoverable: true + ), + ], + estimatedBytes: app.bytes + ) + } + + private func actionTitle(for finding: Finding) -> String { + switch actionKind(for: finding) { + case .removeApp: + return AtlasL10n.string("infrastructure.action.reviewUninstall", language: state.settings.language, finding.title) + case .inspectPermission: + return AtlasL10n.string("infrastructure.action.inspectPrivileged", language: state.settings.language, finding.title) + case .archiveFile: + return AtlasL10n.string("infrastructure.action.archiveRecovery", language: state.settings.language, finding.title) + case .removeCache: + return AtlasL10n.string("infrastructure.action.moveToTrash", language: state.settings.language, finding.title) + } + } + + private func actionKind(for finding: Finding) -> ActionItem.Kind { + if finding.risk == .advanced { + return .inspectPermission + } + + if !AtlasSmartCleanExecutionSupport.isFindingExecutionSupported(finding) { + return .inspectPermission + } + + if finding.category == "Apps" { + return .removeApp + } + + if finding.risk == .review { + return .archiveFile + } + + return .removeCache + } + + private func sanitized(settings: AtlasSettings) -> AtlasSettings { + AtlasSettings( + recoveryRetentionDays: min(max(settings.recoveryRetentionDays, 1), 30), + notificationsEnabled: settings.notificationsEnabled, + excludedPaths: Array(Set(settings.excludedPaths.filter { !$0.trimmingCharacters(in: .whitespacesAndNewlines).isEmpty })).sorted(), + language: settings.language, + acknowledgementText: AtlasL10n.acknowledgement(language: settings.language), + thirdPartyNoticesText: AtlasL10n.thirdPartyNotices(language: settings.language) + ) + } +} + +import AtlasProtocol +import Foundation + +public protocol AtlasPrivilegedActionExecuting: Sendable { + func perform(_ action: AtlasHelperAction) async throws -> AtlasHelperActionResult +} + +public enum AtlasHelperClientError: LocalizedError, Sendable { + case helperUnavailable(attemptedPaths: [String]) + case encodingFailed(String) + case decodingFailed(String) + case invocationFailed(String) + + public var errorDescription: String? { + switch self { + case let .helperUnavailable(attemptedPaths): + let joined = attemptedPaths.isEmpty ? "" : attemptedPaths.joined(separator: ", ") + return "Bundled privileged helper is unavailable. Attempted: \(joined)" + case let .encodingFailed(reason): + return "Failed to encode helper action: \(reason)" + case let .decodingFailed(reason): + return "Failed to decode helper response: \(reason)" + case let .invocationFailed(reason): + return "Privileged helper failed: \(reason)" + } + } +} + +public actor AtlasPrivilegedHelperClient: AtlasPrivilegedActionExecuting { + private let explicitExecutableURL: URL? + private let encoder = JSONEncoder() + private let decoder = JSONDecoder() + + public init(executableURL: URL? = nil) { + self.explicitExecutableURL = executableURL + } + + public func perform(_ action: AtlasHelperAction) async throws -> AtlasHelperActionResult { + let resolution = resolveExecutableURL() + guard let executableURL = resolution.url else { + throw AtlasHelperClientError.helperUnavailable(attemptedPaths: resolution.attemptedPaths) + } + + let requestData: Data + do { + requestData = try encoder.encode(action) + } catch { + throw AtlasHelperClientError.encodingFailed(error.localizedDescription) + } + + let process = Process() + process.executableURL = executableURL + process.arguments = ["--action-json"] + + let stdin = Pipe() + let stdout = Pipe() + let stderr = Pipe() + process.standardInput = stdin + process.standardOutput = stdout + process.standardError = stderr + + try process.run() + stdin.fileHandleForWriting.write(requestData) + stdin.fileHandleForWriting.closeFile() + process.waitUntilExit() + + let outputData = stdout.fileHandleForReading.readDataToEndOfFile() + let errorData = stderr.fileHandleForReading.readDataToEndOfFile() + + guard process.terminationStatus == 0 else { + let errorMessage = String(data: errorData, encoding: .utf8) ?? "unknown helper error" + throw AtlasHelperClientError.invocationFailed(errorMessage) + } + + do { + return try decoder.decode(AtlasHelperActionResult.self, from: outputData) + } catch { + let raw = String(data: outputData, encoding: .utf8) ?? "" + throw AtlasHelperClientError.decodingFailed("\(error.localizedDescription). Output: \(raw)") + } + } + + private func resolveExecutableURL() -> (url: URL?, attemptedPaths: [String]) { + var attemptedPaths: [String] = [] + + if let explicitExecutableURL { + attemptedPaths.append(explicitExecutableURL.path) + if FileManager.default.isExecutableFile(atPath: explicitExecutableURL.path) { + return (explicitExecutableURL, attemptedPaths) + } + } + + if let environmentPath = ProcessInfo.processInfo.environment["ATLAS_HELPER_EXECUTABLE"] { + attemptedPaths.append(environmentPath) + if FileManager.default.isExecutableFile(atPath: environmentPath) { + return (URL(fileURLWithPath: environmentPath), attemptedPaths) + } + } + + let bundleCandidates = bundledHelperCandidates() + attemptedPaths.append(contentsOf: bundleCandidates.map(\.path)) + for candidate in bundleCandidates where FileManager.default.isExecutableFile(atPath: candidate.path) { + return (candidate, attemptedPaths) + } + + let sourceURL = URL(fileURLWithPath: #filePath) + let repoRoot = sourceURL + .deletingLastPathComponent() + .deletingLastPathComponent() + .deletingLastPathComponent() + .deletingLastPathComponent() + .deletingLastPathComponent() + + let devCandidates = [ + repoRoot.appendingPathComponent("Helpers/.build/debug/AtlasPrivilegedHelper"), + repoRoot.appendingPathComponent("Helpers/.build/release/AtlasPrivilegedHelper"), + ] + attemptedPaths.append(contentsOf: devCandidates.map(\.path)) + + for candidate in devCandidates where FileManager.default.isExecutableFile(atPath: candidate.path) { + return (candidate, attemptedPaths) + } + + return (nil, attemptedPaths) + } + + private func bundledHelperCandidates() -> [URL] { + let mainBundleURL = Bundle.main.bundleURL + let appHelper = mainBundleURL.appendingPathComponent("Contents/Helpers/AtlasPrivilegedHelper") + let xpcHelper = mainBundleURL + .deletingLastPathComponent() + .deletingLastPathComponent() + .appendingPathComponent("Helpers/AtlasPrivilegedHelper") + return [appHelper, xpcHelper] + } +} + diff --git a/Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasXPCTransport.swift b/Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasXPCTransport.swift new file mode 100644 index 0000000..0f85bea --- /dev/null +++ b/Packages/AtlasInfrastructure/Sources/AtlasInfrastructure/AtlasXPCTransport.swift @@ -0,0 +1,324 @@ +import AtlasApplication +import AtlasDomain +import AtlasProtocol +import Foundation + +public enum AtlasXPCWorkerConstants { + public static let serviceName = "com.atlasformac.app.worker" +} + +@objc public protocol AtlasXPCWorkerServiceProtocol: NSObjectProtocol { + func sendRequestData(_ requestData: Data, withReply reply: @escaping (Data?, NSError?) -> Void) +} + +public struct AtlasXPCRequestConfiguration: Sendable { + public var timeout: TimeInterval + public var retryCount: Int + public var retryDelay: TimeInterval + + public init(timeout: TimeInterval = 30, retryCount: Int = 1, retryDelay: TimeInterval = 0.25) { + self.timeout = timeout + self.retryCount = retryCount + self.retryDelay = retryDelay + } +} + +public enum AtlasXPCTransportError: LocalizedError, Sendable, Equatable { + case encodingFailed(String) + case decodingFailed(String) + case invalidResponse + case connectionUnavailable(String) + case timedOut(TimeInterval) + + public var errorDescription: String? { + switch self { + case let .encodingFailed(reason): + return AtlasL10n.string("xpc.error.encodingFailed", reason) + case let .decodingFailed(reason): + return AtlasL10n.string("xpc.error.decodingFailed", reason) + case .invalidResponse: + return AtlasL10n.string("xpc.error.invalidResponse") + case let .connectionUnavailable(reason): + return AtlasL10n.string("xpc.error.connectionUnavailable", reason) + case let .timedOut(seconds): + return AtlasL10n.string("xpc.error.timedOut", seconds) + } + } +} + +public final class AtlasXPCWorkerServiceHost: NSObject, AtlasXPCWorkerServiceProtocol { + private let worker: AtlasScaffoldWorkerService + private let encoder: JSONEncoder + private let decoder: JSONDecoder + + public init(worker: AtlasScaffoldWorkerService = AtlasScaffoldWorkerService()) { + self.worker = worker + self.encoder = JSONEncoder() + self.decoder = JSONDecoder() + super.init() + } + + public func sendRequestData(_ requestData: Data, withReply reply: @escaping (Data?, NSError?) -> Void) { + Task { + do { + let request = try decoder.decode(AtlasRequestEnvelope.self, from: requestData) + let result = try await worker.submit(request) + let payload = try encoder.encode(result) + reply(payload, nil) + } catch { + reply(nil, error as NSError) + } + } + } +} + +public final class AtlasXPCListenerDelegate: NSObject, NSXPCListenerDelegate { + private let host: AtlasXPCWorkerServiceHost + + public init(host: AtlasXPCWorkerServiceHost = AtlasXPCWorkerServiceHost()) { + self.host = host + super.init() + } + + public func listener(_ listener: NSXPCListener, shouldAcceptNewConnection newConnection: NSXPCConnection) -> Bool { + newConnection.exportedInterface = NSXPCInterface(with: AtlasXPCWorkerServiceProtocol.self) + newConnection.exportedObject = host + newConnection.resume() + return true + } +} + +public typealias AtlasXPCDataRequestExecutor = @Sendable (Data) async throws -> Data + +private final class AtlasXPCConnectionBox: @unchecked Sendable { + let connection: NSXPCConnection + + init(_ connection: NSXPCConnection) { + self.connection = connection + } +} + +public actor AtlasXPCWorkerClient: AtlasWorkerServing { + private let serviceName: String + private let requestConfiguration: AtlasXPCRequestConfiguration + private let requestExecutor: AtlasXPCDataRequestExecutor? + private var connection: NSXPCConnection? + private let encoder = JSONEncoder() + private let decoder = JSONDecoder() + + public init( + serviceName: String = AtlasXPCWorkerConstants.serviceName, + requestConfiguration: AtlasXPCRequestConfiguration = AtlasXPCRequestConfiguration(), + requestExecutor: AtlasXPCDataRequestExecutor? = nil + ) { + self.serviceName = serviceName + self.requestConfiguration = requestConfiguration + self.requestExecutor = requestExecutor + } + + public func submit(_ request: AtlasRequestEnvelope) async throws -> AtlasWorkerCommandResult { + let requestData: Data + + do { + requestData = try encoder.encode(request) + } catch { + throw AtlasXPCTransportError.encodingFailed(error.localizedDescription) + } + + let responseData = try await submitRequestDataWithRetry(requestData) + + do { + return try decoder.decode(AtlasWorkerCommandResult.self, from: responseData) + } catch { + throw AtlasXPCTransportError.decodingFailed(error.localizedDescription) + } + } + + private func submitRequestDataWithRetry(_ requestData: Data) async throws -> Data { + var attempt = 0 + var lastError: Error = AtlasXPCTransportError.invalidResponse + + while attempt <= requestConfiguration.retryCount { + do { + return try await submitRequestDataOnce(requestData) + } catch { + lastError = error + + guard attempt < requestConfiguration.retryCount, + shouldRetry(after: error) else { + throw error + } + + resetConnection() + + if requestConfiguration.retryDelay > 0 { + let delay = UInt64(requestConfiguration.retryDelay * 1_000_000_000) + try? await Task.sleep(nanoseconds: delay) + } + + attempt += 1 + } + } + + throw lastError + } + + private func submitRequestDataOnce(_ requestData: Data) async throws -> Data { + if let requestExecutor { + return try await withTimeout { + try await requestExecutor(requestData) + } + } + + let connectionBox = AtlasXPCConnectionBox(ensureConnection()) + return try await withTimeout { + try await self.sendRequestData(requestData, over: connectionBox.connection) + } + } + + private func withTimeout(operation: @escaping @Sendable () async throws -> Data) async throws -> Data { + guard requestConfiguration.timeout > 0 else { + return try await operation() + } + + return try await withThrowingTaskGroup(of: Data.self) { group in + group.addTask { + try await operation() + } + group.addTask { + let timeoutNanoseconds = UInt64(self.requestConfiguration.timeout * 1_000_000_000) + try await Task.sleep(nanoseconds: timeoutNanoseconds) + throw AtlasXPCTransportError.timedOut(self.requestConfiguration.timeout) + } + + let result = try await group.next() ?? { throw AtlasXPCTransportError.invalidResponse }() + group.cancelAll() + return result + } + } + + private func sendRequestData(_ requestData: Data, over connection: NSXPCConnection) async throws -> Data { + try await withCheckedThrowingContinuation { continuation in + guard let proxy = connection.remoteObjectProxyWithErrorHandler({ error in + continuation.resume(throwing: AtlasXPCTransportError.connectionUnavailable(error.localizedDescription)) + }) as? AtlasXPCWorkerServiceProtocol else { + continuation.resume(throwing: AtlasXPCTransportError.connectionUnavailable("Remote object proxy is unavailable.")) + return + } + + proxy.sendRequestData(requestData) { responseData, responseError in + if let responseError { + continuation.resume(throwing: AtlasXPCTransportError.connectionUnavailable(responseError.localizedDescription)) + return + } + + guard let responseData else { + continuation.resume(throwing: AtlasXPCTransportError.invalidResponse) + return + } + + continuation.resume(returning: responseData) + } + } + } + + private func shouldRetry(after error: Error) -> Bool { + guard let transportError = error as? AtlasXPCTransportError else { + return false + } + + switch transportError { + case .connectionUnavailable, .invalidResponse, .timedOut: + return true + case .encodingFailed, .decodingFailed: + return false + } + } + + private func ensureConnection() -> NSXPCConnection { + if let connection { + return connection + } + + let connection = NSXPCConnection(serviceName: serviceName) + connection.remoteObjectInterface = NSXPCInterface(with: AtlasXPCWorkerServiceProtocol.self) + connection.invalidationHandler = { [weak connection] in + Task { self.clearConnection(ifMatching: connection) } + } + connection.interruptionHandler = { [weak connection] in + Task { self.clearConnection(ifMatching: connection) } + } + connection.resume() + self.connection = connection + return connection + } + + private func clearConnection(ifMatching disconnectedConnection: NSXPCConnection?) { + guard let disconnectedConnection else { + connection = nil + return + } + + if connection === disconnectedConnection { + connection = nil + } + } + + private func resetConnection() { + connection?.invalidate() + connection = nil + } +} + +public actor AtlasPreferredWorkerService: AtlasWorkerServing { + private let xpcClient: AtlasXPCWorkerClient + private let fallbackWorker: AtlasScaffoldWorkerService + private let allowFallback: Bool + + public init( + serviceName: String = AtlasXPCWorkerConstants.serviceName, + requestConfiguration: AtlasXPCRequestConfiguration = AtlasXPCRequestConfiguration(), + requestExecutor: AtlasXPCDataRequestExecutor? = nil, + fallbackWorker: AtlasScaffoldWorkerService = AtlasScaffoldWorkerService(), + allowFallback: Bool = ProcessInfo.processInfo.environment["ATLAS_ALLOW_SCAFFOLD_FALLBACK"] == "1" + ) { + self.xpcClient = AtlasXPCWorkerClient( + serviceName: serviceName, + requestConfiguration: requestConfiguration, + requestExecutor: requestExecutor + ) + self.fallbackWorker = fallbackWorker + self.allowFallback = allowFallback + } + + public func submit(_ request: AtlasRequestEnvelope) async throws -> AtlasWorkerCommandResult { + do { + let result = try await xpcClient.submit(request) + if shouldFallback(from: result) { + guard allowFallback else { + return result + } + return try await fallbackWorker.submit(request) + } + return result + } catch { + guard allowFallback else { + throw error + } + return try await fallbackWorker.submit(request) + } + } + + private func shouldFallback(from result: AtlasWorkerCommandResult) -> Bool { + guard case let .rejected(code, _) = result.response.response else { + return false + } + + switch code { + case .executionUnavailable, .helperUnavailable: + return true + default: + return false + } + } +} diff --git a/Packages/AtlasInfrastructure/Tests/AtlasInfrastructureTests/AtlasHelperClientTests.swift b/Packages/AtlasInfrastructure/Tests/AtlasInfrastructureTests/AtlasHelperClientTests.swift new file mode 100644 index 0000000..5e27b9d --- /dev/null +++ b/Packages/AtlasInfrastructure/Tests/AtlasInfrastructureTests/AtlasHelperClientTests.swift @@ -0,0 +1,19 @@ +import XCTest +@testable import AtlasInfrastructure +import AtlasProtocol + +final class AtlasHelperClientTests: XCTestCase { + func testHelperClientDecodesStructuredJSONResponse() async throws { + let scriptURL = FileManager.default.temporaryDirectory.appendingPathComponent(UUID().uuidString) + let script = "#!/bin/sh\ncat >/dev/null\nprintf '%s' '{\"action\":{\"id\":\"00000000-0000-0000-0000-000000000111\",\"kind\":\"trashItems\",\"targetPath\":\"/Applications/Sample.app\"},\"message\":\"ok\",\"resolvedPath\":\"/Trash/Sample.app\",\"success\":true}'\n" + try script.write(to: scriptURL, atomically: true, encoding: .utf8) + try FileManager.default.setAttributes([.posixPermissions: 0o755], ofItemAtPath: scriptURL.path) + + let client = AtlasPrivilegedHelperClient(executableURL: scriptURL) + let result = try await client.perform(AtlasHelperAction(kind: .trashItems, targetPath: "/Applications/Sample.app")) + + XCTAssertTrue(result.success) + XCTAssertEqual(result.message, "ok") + XCTAssertEqual(result.resolvedPath, "/Trash/Sample.app") + } +} diff --git a/Packages/AtlasInfrastructure/Tests/AtlasInfrastructureTests/AtlasInfrastructureTests.swift b/Packages/AtlasInfrastructure/Tests/AtlasInfrastructureTests/AtlasInfrastructureTests.swift new file mode 100644 index 0000000..38a2cea --- /dev/null +++ b/Packages/AtlasInfrastructure/Tests/AtlasInfrastructureTests/AtlasInfrastructureTests.swift @@ -0,0 +1,414 @@ +import XCTest +@testable import AtlasInfrastructure +import AtlasApplication +import AtlasDomain +import AtlasProtocol + +final class AtlasInfrastructureTests: XCTestCase { + func testRepositoryPersistsWorkspaceState() { + let fileURL = temporaryStateFileURL() + let repository = AtlasWorkspaceRepository(stateFileURL: fileURL) + var state = AtlasScaffoldWorkspace.state() + state.settings.recoveryRetentionDays = 21 + + XCTAssertNoThrow(try repository.saveState(state)) + let loaded = repository.loadState() + + XCTAssertEqual(loaded.settings.recoveryRetentionDays, 21) + XCTAssertEqual(loaded.snapshot.apps.count, state.snapshot.apps.count) + } + + + func testRepositorySaveStateThrowsForInvalidParentURL() { + let repository = AtlasWorkspaceRepository( + stateFileURL: URL(fileURLWithPath: "/dev/null/workspace-state.json") + ) + + XCTAssertThrowsError(try repository.saveState(AtlasScaffoldWorkspace.state())) + } + + func testExecutePlanMovesSupportedFindingsIntoRecoveryWhileKeepingInspectionOnlyItems() async throws { + let repository = AtlasWorkspaceRepository(stateFileURL: temporaryStateFileURL()) + let home = FileManager.default.homeDirectoryForCurrentUser + let targetDirectory = home.appendingPathComponent("Library/Caches/AtlasExecutionTests/" + UUID().uuidString, isDirectory: true) + try FileManager.default.createDirectory(at: targetDirectory, withIntermediateDirectories: true) + let targetFile = targetDirectory.appendingPathComponent("sample.cache") + try Data("cache".utf8).write(to: targetFile) + + let supportedFinding = Finding( + id: UUID(), + title: "Sample cache", + detail: targetFile.path, + bytes: 5, + risk: .safe, + category: "Developer tools", + targetPaths: [targetFile.path] + ) + let unsupportedPath = home.appendingPathComponent("Documents/AtlasUnsupported/" + UUID().uuidString).path + let unsupportedFinding = Finding( + id: UUID(), + title: "Unsupported cache", + detail: unsupportedPath, + bytes: 7, + risk: .safe, + category: "Developer tools", + targetPaths: [unsupportedPath] + ) + let state = AtlasWorkspaceState( + snapshot: AtlasWorkspaceSnapshot( + reclaimableSpaceBytes: 12, + findings: [supportedFinding, unsupportedFinding], + apps: [], + taskRuns: [], + recoveryItems: [], + permissions: [], + healthSnapshot: nil + ), + currentPlan: ActionPlan( + title: "Review 2 selected findings", + items: [ + ActionItem(id: supportedFinding.id, title: "Move Sample cache to Trash", detail: supportedFinding.detail, kind: .removeCache, recoverable: true), + ActionItem(id: unsupportedFinding.id, title: "Inspect Unsupported cache", detail: unsupportedFinding.detail, kind: .inspectPermission, recoverable: false), + ], + estimatedBytes: 12 + ), + settings: AtlasScaffoldWorkspace.state().settings + ) + _ = try repository.saveState(state) + + let worker = AtlasScaffoldWorkerService(repository: repository, allowStateOnlyCleanExecution: false) + let executeResult = try await worker.submit( + AtlasRequestEnvelope(command: .executePlan(planID: state.currentPlan.id)) + ) + + if case let .accepted(task) = executeResult.response.response { + XCTAssertEqual(task.kind, .executePlan) + } else { + XCTFail("Expected accepted execute-plan response") + } + XCTAssertFalse(FileManager.default.fileExists(atPath: targetFile.path)) + XCTAssertEqual(executeResult.snapshot.findings.count, 1) + XCTAssertEqual(executeResult.snapshot.findings.first?.id, unsupportedFinding.id) + XCTAssertEqual(executeResult.snapshot.recoveryItems.count, 1) + + let restoredItemID = try XCTUnwrap(executeResult.snapshot.recoveryItems.first?.id) + let restoreTaskID = UUID() + let restoreResult = try await worker.submit( + AtlasRequestEnvelope(command: .restoreItems(taskID: restoreTaskID, itemIDs: [restoredItemID])) + ) + + XCTAssertTrue(FileManager.default.fileExists(atPath: targetFile.path)) + XCTAssertEqual(Set(restoreResult.snapshot.findings.map(\.id)), Set([supportedFinding.id, unsupportedFinding.id])) + XCTAssertEqual(restoreResult.snapshot.recoveryItems.count, 0) + } + + func testStartScanRejectsWhenProviderFailsWithoutFallback() async throws { + let repository = AtlasWorkspaceRepository(stateFileURL: temporaryStateFileURL()) + let worker = AtlasScaffoldWorkerService( + repository: repository, + smartCleanScanProvider: FailingSmartCleanProvider(), + allowProviderFailureFallback: false + ) + + let result = try await worker.submit( + AtlasRequestEnvelope(command: .startScan(taskID: UUID())) + ) + + guard case let .rejected(code, reason) = result.response.response else { + return XCTFail("Expected rejected scan response") + } + XCTAssertEqual(code, .executionUnavailable) + XCTAssertTrue(reason.contains("Smart Clean scan is unavailable")) + } + + func testPermissionInspectorMarksFullDiskAccessGrantedWhenAnyProbeIsReadable() async { + let probeURLs = [ + URL(fileURLWithPath: "/tmp/unreadable"), + URL(fileURLWithPath: "/tmp/readable"), + ] + let inspector = AtlasPermissionInspector( + homeDirectoryURL: URL(fileURLWithPath: "/tmp"), + fullDiskAccessProbeURLs: probeURLs, + protectedLocationReader: { url in url.path == "/tmp/readable" }, + accessibilityStatusProvider: { false }, + notificationsAuthorizationProvider: { false } + ) + + let permissions = await inspector.snapshot() + let fullDiskAccess = permissions.first(where: { $0.kind == .fullDiskAccess }) + + XCTAssertEqual(fullDiskAccess?.isGranted, true) + } + + func testPermissionInspectorMarksFullDiskAccessMissingWhenAllProbesFail() async { + let probeURLs = [ + URL(fileURLWithPath: "/tmp/probe-a"), + URL(fileURLWithPath: "/tmp/probe-b"), + ] + let inspector = AtlasPermissionInspector( + homeDirectoryURL: URL(fileURLWithPath: "/tmp"), + fullDiskAccessProbeURLs: probeURLs, + protectedLocationReader: { _ in false }, + accessibilityStatusProvider: { false }, + notificationsAuthorizationProvider: { false } + ) + + let permissions = await inspector.snapshot() + let fullDiskAccess = permissions.first(where: { $0.kind == .fullDiskAccess }) + + XCTAssertEqual(fullDiskAccess?.isGranted, false) + XCTAssertTrue(fullDiskAccess?.rationale.contains("重新打开") == true || fullDiskAccess?.rationale.contains("reopen Atlas") == true) + } + + func testUnsupportedTargetIsDowngradedToInspectionAndDoesNotFailExecution() async throws { + let repository = AtlasWorkspaceRepository(stateFileURL: temporaryStateFileURL()) + let unsupportedPath = FileManager.default.homeDirectoryForCurrentUser.appendingPathComponent("Documents/AtlasUnsupported/" + UUID().uuidString).path + let finding = Finding( + id: UUID(), + title: "Unsupported cache", + detail: unsupportedPath, + bytes: 5, + risk: .safe, + category: "Developer tools", + targetPaths: [unsupportedPath] + ) + XCTAssertFalse(AtlasSmartCleanExecutionSupport.isFindingExecutionSupported(finding)) + let state = AtlasWorkspaceState( + snapshot: AtlasWorkspaceSnapshot( + reclaimableSpaceBytes: 5, + findings: [finding], + apps: [], + taskRuns: [], + recoveryItems: [], + permissions: [], + healthSnapshot: nil + ), + currentPlan: ActionPlan( + title: "Review 1 selected finding", + items: [ActionItem(id: finding.id, title: "Inspect Unsupported cache", detail: finding.detail, kind: .inspectPermission, recoverable: false)], + estimatedBytes: 5 + ), + settings: AtlasScaffoldWorkspace.state().settings + ) + _ = try repository.saveState(state) + + let worker = AtlasScaffoldWorkerService(repository: repository, allowStateOnlyCleanExecution: false) + let result = try await worker.submit(AtlasRequestEnvelope(command: .executePlan(planID: state.currentPlan.id))) + + if case let .accepted(task) = result.response.response { + XCTAssertEqual(task.kind, .executePlan) + } else { + XCTFail("Expected accepted execute-plan response") + } + XCTAssertEqual(result.snapshot.findings.count, 1) + XCTAssertEqual(result.snapshot.recoveryItems.count, 0) + } + + func testInspectionOnlyPlanIsAcceptedWithoutMutatingState() async throws { + let repository = AtlasWorkspaceRepository(stateFileURL: temporaryStateFileURL()) + let worker = AtlasScaffoldWorkerService(repository: repository, allowStateOnlyCleanExecution: false) + let initialState = repository.loadState() + + let result = try await worker.submit( + AtlasRequestEnvelope(command: .executePlan(planID: initialState.currentPlan.id)) + ) + + if case let .accepted(task) = result.response.response { + XCTAssertEqual(task.kind, .executePlan) + } else { + XCTFail("Expected accepted execute-plan response") + } + XCTAssertEqual(result.snapshot.findings.count, initialState.snapshot.findings.count) + XCTAssertEqual(result.snapshot.recoveryItems.count, initialState.snapshot.recoveryItems.count) + } + + func testZcompdumpTargetIsSupportedExecutionTarget() { + let targetURL = FileManager.default.homeDirectoryForCurrentUser.appendingPathComponent(".zcompdump") + let finding = Finding( + id: UUID(), + title: "Zsh completion cache", + detail: targetURL.path, + bytes: 1, + risk: .safe, + category: "Developer tools", + targetPaths: [targetURL.path] + ) + + XCTAssertTrue(AtlasSmartCleanExecutionSupport.isSupportedExecutionTarget(targetURL)) + XCTAssertTrue(AtlasSmartCleanExecutionSupport.isFindingExecutionSupported(finding)) + } + + func testExecutePlanTrashesRealTargetsWhenAvailable() async throws { + let repository = AtlasWorkspaceRepository(stateFileURL: temporaryStateFileURL()) + let home = FileManager.default.homeDirectoryForCurrentUser + let targetDirectory = home.appendingPathComponent("Library/Caches/AtlasExecutionTests/" + UUID().uuidString, isDirectory: true) + try FileManager.default.createDirectory(at: targetDirectory, withIntermediateDirectories: true) + let targetFile = targetDirectory.appendingPathComponent("sample.cache") + try Data("cache".utf8).write(to: targetFile) + + let finding = Finding( + id: UUID(), + title: "Sample cache", + detail: targetFile.path, + bytes: 5, + risk: .safe, + category: "Developer tools", + targetPaths: [targetFile.path] + ) + let state = AtlasWorkspaceState( + snapshot: AtlasWorkspaceSnapshot( + reclaimableSpaceBytes: 5, + findings: [finding], + apps: [], + taskRuns: [], + recoveryItems: [], + permissions: [], + healthSnapshot: nil + ), + currentPlan: ActionPlan(title: "Review 1 selected finding", items: [ActionItem(id: finding.id, title: "Move Sample cache to Trash", detail: finding.detail, kind: .removeCache, recoverable: true)], estimatedBytes: 5), + settings: AtlasScaffoldWorkspace.state().settings + ) + _ = try repository.saveState(state) + + let worker = AtlasScaffoldWorkerService(repository: repository, allowStateOnlyCleanExecution: false) + let result = try await worker.submit(AtlasRequestEnvelope(command: .executePlan(planID: state.currentPlan.id))) + + if case let .accepted(task) = result.response.response { + XCTAssertEqual(task.kind, .executePlan) + } else { + XCTFail("Expected accepted execute-plan response") + } + XCTAssertFalse(FileManager.default.fileExists(atPath: targetFile.path)) + XCTAssertEqual(result.snapshot.findings.count, 0) + } + + func testScanExecuteRescanRemovesExecutedTargetFromRealResults() async throws { + let repository = AtlasWorkspaceRepository(stateFileURL: temporaryStateFileURL()) + let home = FileManager.default.homeDirectoryForCurrentUser + let targetDirectory = home.appendingPathComponent("Library/Caches/AtlasExecutionTests/" + UUID().uuidString, isDirectory: true) + try FileManager.default.createDirectory(at: targetDirectory, withIntermediateDirectories: true) + let targetFile = targetDirectory.appendingPathComponent("sample.cache") + try Data("cache".utf8).write(to: targetFile) + + let provider = FileBackedSmartCleanProvider(targetFileURL: targetFile) + let worker = AtlasScaffoldWorkerService( + repository: repository, + smartCleanScanProvider: provider, + allowProviderFailureFallback: false, + allowStateOnlyCleanExecution: false + ) + + let firstScan = try await worker.submit(AtlasRequestEnvelope(command: .startScan(taskID: UUID()))) + XCTAssertEqual(firstScan.snapshot.findings.count, 1) + let planID = try XCTUnwrap(firstScan.previewPlan?.id) + + let execute = try await worker.submit(AtlasRequestEnvelope(command: .executePlan(planID: planID))) + if case let .accepted(task) = execute.response.response { + XCTAssertEqual(task.kind, .executePlan) + } else { + XCTFail("Expected accepted execute-plan response") + } + XCTAssertFalse(FileManager.default.fileExists(atPath: targetFile.path)) + + let secondScan = try await worker.submit(AtlasRequestEnvelope(command: .startScan(taskID: UUID()))) + XCTAssertEqual(secondScan.snapshot.findings.count, 0) + XCTAssertEqual(secondScan.snapshot.reclaimableSpaceBytes, 0) + } + + func testRestoreRecoveryItemPhysicallyRestoresRealTargets() async throws { + let repository = AtlasWorkspaceRepository(stateFileURL: temporaryStateFileURL()) + let home = FileManager.default.homeDirectoryForCurrentUser + let targetDirectory = home.appendingPathComponent("Library/Caches/AtlasExecutionTests/" + UUID().uuidString, isDirectory: true) + try FileManager.default.createDirectory(at: targetDirectory, withIntermediateDirectories: true) + let targetFile = targetDirectory.appendingPathComponent("sample.cache") + try Data("cache".utf8).write(to: targetFile) + + let finding = Finding( + id: UUID(), + title: "Sample cache", + detail: targetFile.path, + bytes: 5, + risk: .safe, + category: "Developer tools", + targetPaths: [targetFile.path] + ) + let state = AtlasWorkspaceState( + snapshot: AtlasWorkspaceSnapshot( + reclaimableSpaceBytes: 5, + findings: [finding], + apps: [], + taskRuns: [], + recoveryItems: [], + permissions: [], + healthSnapshot: nil + ), + currentPlan: ActionPlan(title: "Review 1 selected finding", items: [ActionItem(id: finding.id, title: "Move Sample cache to Trash", detail: finding.detail, kind: .removeCache, recoverable: true)], estimatedBytes: 5), + settings: AtlasScaffoldWorkspace.state().settings + ) + _ = try repository.saveState(state) + + let worker = AtlasScaffoldWorkerService(repository: repository, allowStateOnlyCleanExecution: false) + let execute = try await worker.submit(AtlasRequestEnvelope(command: .executePlan(planID: state.currentPlan.id))) + let recoveryItemID = try XCTUnwrap(execute.snapshot.recoveryItems.first?.id) + XCTAssertFalse(FileManager.default.fileExists(atPath: targetFile.path)) + + let restore = try await worker.submit(AtlasRequestEnvelope(command: .restoreItems(taskID: UUID(), itemIDs: [recoveryItemID]))) + + if case let .accepted(task) = restore.response.response { + XCTAssertEqual(task.kind, .restore) + } else { + XCTFail("Expected accepted restore response") + } + XCTAssertTrue(FileManager.default.fileExists(atPath: targetFile.path)) + } + + func testExecuteAppUninstallRemovesAppAndCreatesRecoveryEntry() async throws { + let repository = AtlasWorkspaceRepository(stateFileURL: temporaryStateFileURL()) + let worker = AtlasScaffoldWorkerService(repository: repository, allowStateOnlyCleanExecution: true) + let initialState = repository.loadState() + let app = try XCTUnwrap(initialState.snapshot.apps.first) + + let result = try await worker.submit( + AtlasRequestEnvelope(command: .executeAppUninstall(appID: app.id)) + ) + + XCTAssertFalse(result.snapshot.apps.contains(where: { $0.id == app.id })) + XCTAssertTrue(result.snapshot.recoveryItems.contains(where: { $0.title == app.name })) + XCTAssertEqual(result.snapshot.taskRuns.first?.kind, .uninstallApp) + } + + private func temporaryStateFileURL() -> URL { + FileManager.default.temporaryDirectory + .appendingPathComponent(UUID().uuidString, isDirectory: true) + .appendingPathComponent("workspace-state.json") + } + +} + +private struct FailingSmartCleanProvider: AtlasSmartCleanScanProviding { + func collectSmartCleanScan() async throws -> AtlasSmartCleanScanResult { + struct SampleError: LocalizedError { var errorDescription: String? { "simulated scan failure" } } + throw SampleError() + } +} + +private struct FileBackedSmartCleanProvider: AtlasSmartCleanScanProviding { + let targetFileURL: URL + + func collectSmartCleanScan() async throws -> AtlasSmartCleanScanResult { + guard FileManager.default.fileExists(atPath: targetFileURL.path) else { + return AtlasSmartCleanScanResult(findings: [], summary: "No reclaimable items remain.") + } + let size = Int64((try? FileManager.default.attributesOfItem(atPath: targetFileURL.path)[.size] as? NSNumber)?.int64Value ?? 0) + let finding = Finding( + id: UUID(uuidString: "30000000-0000-0000-0000-000000000001") ?? UUID(), + title: "Sample cache", + detail: targetFileURL.path, + bytes: size, + risk: .safe, + category: "Developer tools", + targetPaths: [targetFileURL.path] + ) + return AtlasSmartCleanScanResult(findings: [finding], summary: "Found 1 reclaimable item.") + } +} diff --git a/Packages/AtlasInfrastructure/Tests/AtlasInfrastructureTests/AtlasXPCTransportTests.swift b/Packages/AtlasInfrastructure/Tests/AtlasInfrastructureTests/AtlasXPCTransportTests.swift new file mode 100644 index 0000000..1607fdb --- /dev/null +++ b/Packages/AtlasInfrastructure/Tests/AtlasInfrastructureTests/AtlasXPCTransportTests.swift @@ -0,0 +1,126 @@ +import XCTest +@testable import AtlasInfrastructure +import AtlasApplication +import AtlasDomain +import AtlasProtocol + +final class AtlasXPCTransportTests: XCTestCase { + func testXPCClientRetriesRecoverableFailureAndSucceeds() async throws { + let attemptCounter = AttemptCounter() + let request = AtlasRequestEnvelope(command: .healthSnapshot) + let expected = AtlasWorkerCommandResult( + request: request, + response: AtlasResponseEnvelope( + requestID: request.id, + response: .health(AtlasScaffoldWorkspace.state().snapshot.healthSnapshot ?? AtlasScaffoldWorkspace.snapshot().healthSnapshot!) + ), + events: [], + snapshot: AtlasScaffoldWorkspace.snapshot(), + previewPlan: nil + ) + let responseData = try JSONEncoder().encode(expected) + + let client = AtlasXPCWorkerClient( + requestConfiguration: AtlasXPCRequestConfiguration(timeout: 1, retryCount: 1, retryDelay: 0), + requestExecutor: { _ in + let attempt = await attemptCounter.next() + if attempt == 1 { + throw AtlasXPCTransportError.connectionUnavailable("simulated drop") + } + return responseData + } + ) + + let result = try await client.submit(request) + + let attempts = await attemptCounter.current() + XCTAssertEqual(attempts, 2) + XCTAssertEqual(result.snapshot.findings.count, expected.snapshot.findings.count) + } + + func testPreferredWorkerServiceDoesNotFallbackByDefault() async { + let service = AtlasPreferredWorkerService( + requestConfiguration: AtlasXPCRequestConfiguration(timeout: 0.01, retryCount: 0, retryDelay: 0), + fallbackWorker: AtlasScaffoldWorkerService(allowStateOnlyCleanExecution: true), + allowFallback: false + ) + + do { + _ = try await service.submit(AtlasRequestEnvelope(command: .healthSnapshot)) + XCTFail("Expected XPC failure without fallback") + } catch let error as AtlasXPCTransportError { + switch error { + case .connectionUnavailable, .timedOut: + XCTAssertFalse(error.localizedDescription.isEmpty) + default: + XCTFail("Expected connectionUnavailable or timedOut, got \(error)") + } + } catch { + XCTFail("Unexpected error: \(error)") + } + } + + + func testPreferredWorkerServiceFallsBackWhenXPCWorkerRejectsExecutionUnavailable() async throws { + let request = AtlasRequestEnvelope(command: .healthSnapshot) + let rejected = AtlasWorkerCommandResult( + request: request, + response: AtlasResponseEnvelope( + requestID: request.id, + response: .rejected(code: .executionUnavailable, reason: "simulated packaged worker failure") + ), + events: [], + snapshot: AtlasScaffoldWorkspace.snapshot(), + previewPlan: nil + ) + let responseData = try JSONEncoder().encode(rejected) + let service = AtlasPreferredWorkerService( + requestConfiguration: AtlasXPCRequestConfiguration(timeout: 1, retryCount: 0, retryDelay: 0), + requestExecutor: { _ in responseData }, + fallbackWorker: AtlasScaffoldWorkerService(allowStateOnlyCleanExecution: true), + allowFallback: true + ) + + let result = try await service.submit(request) + + XCTAssertEqual(result.response.requestID, request.id) + guard case .health = result.response.response else { + return XCTFail("Expected fallback health response, got \(result.response.response)") + } + } + + func testXPCClientTimesOutSlowRequest() async { + let client = AtlasXPCWorkerClient( + requestConfiguration: AtlasXPCRequestConfiguration(timeout: 0.05, retryCount: 0, retryDelay: 0), + requestExecutor: { _ in + try await Task.sleep(nanoseconds: 300_000_000) + return Data() + } + ) + + do { + _ = try await client.submit(AtlasRequestEnvelope(command: .healthSnapshot)) + XCTFail("Expected timeout") + } catch let error as AtlasXPCTransportError { + guard case let .timedOut(timeout) = error else { + return XCTFail("Expected timedOut error, got \(error)") + } + XCTAssertEqual(timeout, 0.05, accuracy: 0.001) + } catch { + XCTFail("Unexpected error: \(error)") + } + } +} + +private actor AttemptCounter { + private var attempts = 0 + + func next() -> Int { + attempts += 1 + return attempts + } + + func current() -> Int { + attempts + } +} diff --git a/Packages/AtlasProtocol/README.md b/Packages/AtlasProtocol/README.md new file mode 100644 index 0000000..edd7ee8 --- /dev/null +++ b/Packages/AtlasProtocol/README.md @@ -0,0 +1,9 @@ +# AtlasProtocol + +## Responsibility + +- Local JSON protocol models +- Request and response envelopes +- Event payloads +- Error-code mapping +- Protocol versioning diff --git a/Packages/AtlasProtocol/Sources/AtlasProtocol/AtlasProtocol.swift b/Packages/AtlasProtocol/Sources/AtlasProtocol/AtlasProtocol.swift new file mode 100644 index 0000000..03beeed --- /dev/null +++ b/Packages/AtlasProtocol/Sources/AtlasProtocol/AtlasProtocol.swift @@ -0,0 +1,126 @@ +import AtlasDomain +import Foundation + +public enum AtlasProtocolVersion { + public static let current = "0.2.0" +} + +public enum AtlasCommand: Codable, Hashable, Sendable { + case healthSnapshot + case inspectPermissions + case startScan(taskID: UUID) + case previewPlan(taskID: UUID, findingIDs: [UUID]) + case executePlan(planID: UUID) + case restoreItems(taskID: UUID, itemIDs: [UUID]) + case appsList + case previewAppUninstall(appID: UUID) + case executeAppUninstall(appID: UUID) + case settingsGet + case settingsSet(AtlasSettings) +} + +public struct AtlasRequestEnvelope: Codable, Hashable, Sendable, Identifiable { + public var id: UUID + public var issuedAt: Date + public var command: AtlasCommand + + public init(id: UUID = UUID(), issuedAt: Date = Date(), command: AtlasCommand) { + self.id = id + self.issuedAt = issuedAt + self.command = command + } +} + +public struct AtlasTaskDescriptor: Codable, Hashable, Sendable { + public var taskID: UUID + public var kind: TaskKind + + public init(taskID: UUID, kind: TaskKind) { + self.taskID = taskID + self.kind = kind + } +} + +public enum AtlasProtocolErrorCode: String, Codable, CaseIterable, Hashable, Sendable { + case unsupportedCommand + case permissionRequired + case helperUnavailable + case executionUnavailable + case invalidSelection +} + +public enum AtlasResponse: Codable, Hashable, Sendable { + case accepted(task: AtlasTaskDescriptor) + case health(AtlasHealthSnapshot) + case permissions([PermissionState]) + case apps([AppFootprint]) + case preview(ActionPlan) + case settings(AtlasSettings) + case rejected(code: AtlasProtocolErrorCode, reason: String) +} + +public struct AtlasResponseEnvelope: Codable, Hashable, Sendable { + public var requestID: UUID + public var sentAt: Date + public var response: AtlasResponse + + public init(requestID: UUID, sentAt: Date = Date(), response: AtlasResponse) { + self.requestID = requestID + self.sentAt = sentAt + self.response = response + } +} + +public enum AtlasEvent: Codable, Hashable, Sendable { + case taskProgress(taskID: UUID, completed: Int, total: Int) + case taskFinished(TaskRun) + case permissionUpdated(PermissionState) +} + +public struct AtlasEventEnvelope: Codable, Hashable, Sendable, Identifiable { + public var id: UUID + public var emittedAt: Date + public var event: AtlasEvent + + public init(id: UUID = UUID(), emittedAt: Date = Date(), event: AtlasEvent) { + self.id = id + self.emittedAt = emittedAt + self.event = event + } +} + +public enum AtlasPrivilegedActionKind: String, Codable, CaseIterable, Hashable, Sendable { + case trashItems + case restoreItem + case removeLaunchService + case repairOwnership +} + +public struct AtlasHelperAction: Codable, Hashable, Sendable, Identifiable { + public var id: UUID + public var kind: AtlasPrivilegedActionKind + public var targetPath: String + public var destinationPath: String? + + public init(id: UUID = UUID(), kind: AtlasPrivilegedActionKind, targetPath: String, destinationPath: String? = nil) { + self.id = id + self.kind = kind + self.targetPath = targetPath + self.destinationPath = destinationPath + } +} + + +public struct AtlasHelperActionResult: Codable, Hashable, Sendable { + public var action: AtlasHelperAction + public var success: Bool + public var message: String + public var resolvedPath: String? + + public init(action: AtlasHelperAction, success: Bool, message: String, resolvedPath: String? = nil) { + self.action = action + self.success = success + self.message = message + self.resolvedPath = resolvedPath + } +} diff --git a/Packages/AtlasProtocol/Tests/AtlasProtocolTests/AtlasProtocolTests.swift b/Packages/AtlasProtocol/Tests/AtlasProtocolTests/AtlasProtocolTests.swift new file mode 100644 index 0000000..72b158f --- /dev/null +++ b/Packages/AtlasProtocol/Tests/AtlasProtocolTests/AtlasProtocolTests.swift @@ -0,0 +1,34 @@ +import XCTest +@testable import AtlasProtocol +import AtlasDomain + +final class AtlasProtocolTests: XCTestCase { + func testRequestEnvelopeRoundTripsJSON() throws { + let taskID = UUID(uuidString: "10000000-0000-0000-0000-000000000001") ?? UUID() + let envelope = AtlasRequestEnvelope(command: .startScan(taskID: taskID)) + let data = try JSONEncoder().encode(envelope) + let decoded = try JSONDecoder().decode(AtlasRequestEnvelope.self, from: data) + + XCTAssertEqual(decoded.id, envelope.id) + XCTAssertEqual(decoded.command, envelope.command) + } + + func testSettingsRequestRoundTripsJSON() throws { + let envelope = AtlasRequestEnvelope(command: .settingsSet(AtlasScaffoldFixtures.settings)) + let data = try JSONEncoder().encode(envelope) + let decoded = try JSONDecoder().decode(AtlasRequestEnvelope.self, from: data) + + XCTAssertEqual(decoded.command, envelope.command) + } + + func testAppsResponseRoundTripsJSON() throws { + let envelope = AtlasResponseEnvelope( + requestID: UUID(), + response: .apps(AtlasScaffoldFixtures.apps) + ) + let data = try JSONEncoder().encode(envelope) + let decoded = try JSONDecoder().decode(AtlasResponseEnvelope.self, from: data) + + XCTAssertEqual(decoded.response, envelope.response) + } +} diff --git a/Packages/Package.swift b/Packages/Package.swift new file mode 100644 index 0000000..ff5e6e8 --- /dev/null +++ b/Packages/Package.swift @@ -0,0 +1,116 @@ +// swift-tools-version: 5.10 +import PackageDescription + +let package = Package( + name: "AtlasPackages", + defaultLocalization: "zh-Hans", + platforms: [.macOS(.v14)], + products: [ + .library(name: "AtlasApplication", targets: ["AtlasApplication"]), + .library(name: "AtlasCoreAdapters", targets: ["AtlasCoreAdapters"]), + .library(name: "AtlasDesignSystem", targets: ["AtlasDesignSystem"]), + .library(name: "AtlasDomain", targets: ["AtlasDomain"]), + .library(name: "AtlasFeaturesApps", targets: ["AtlasFeaturesApps"]), + .library(name: "AtlasFeaturesHistory", targets: ["AtlasFeaturesHistory"]), + .library(name: "AtlasFeaturesOverview", targets: ["AtlasFeaturesOverview"]), + .library(name: "AtlasFeaturesPermissions", targets: ["AtlasFeaturesPermissions"]), + .library(name: "AtlasFeaturesSettings", targets: ["AtlasFeaturesSettings"]), + .library(name: "AtlasFeaturesSmartClean", targets: ["AtlasFeaturesSmartClean"]), + .library(name: "AtlasFeaturesStorage", targets: ["AtlasFeaturesStorage"]), + .library(name: "AtlasInfrastructure", targets: ["AtlasInfrastructure"]), + .library(name: "AtlasProtocol", targets: ["AtlasProtocol"]), + ], + targets: [ + .target( + name: "AtlasDesignSystem", + path: "AtlasDesignSystem/Sources/AtlasDesignSystem", + resources: [.process("Resources")] + ), + .target( + name: "AtlasDomain", + path: "AtlasDomain/Sources/AtlasDomain", + resources: [.process("Resources")] + ), + .target( + name: "AtlasProtocol", + dependencies: ["AtlasDomain"], + path: "AtlasProtocol/Sources/AtlasProtocol" + ), + .target( + name: "AtlasApplication", + dependencies: ["AtlasDomain", "AtlasProtocol"], + path: "AtlasApplication/Sources/AtlasApplication" + ), + .target( + name: "AtlasInfrastructure", + dependencies: ["AtlasApplication", "AtlasDomain", "AtlasProtocol"], + path: "AtlasInfrastructure/Sources/AtlasInfrastructure" + ), + .target( + name: "AtlasCoreAdapters", + dependencies: ["AtlasApplication", "AtlasDomain", "AtlasInfrastructure", "AtlasProtocol"], + path: "AtlasCoreAdapters/Sources/AtlasCoreAdapters", + resources: [.copy("Resources/MoleRuntime")] + ), + .target( + name: "AtlasFeaturesOverview", + dependencies: ["AtlasApplication", "AtlasDesignSystem", "AtlasDomain"], + path: "AtlasFeaturesOverview/Sources/AtlasFeaturesOverview" + ), + .target( + name: "AtlasFeaturesSmartClean", + dependencies: ["AtlasApplication", "AtlasDesignSystem", "AtlasDomain"], + path: "AtlasFeaturesSmartClean/Sources/AtlasFeaturesSmartClean" + ), + .target( + name: "AtlasFeaturesApps", + dependencies: ["AtlasDesignSystem", "AtlasDomain"], + path: "AtlasFeaturesApps/Sources/AtlasFeaturesApps" + ), + .target( + name: "AtlasFeaturesStorage", + dependencies: ["AtlasDesignSystem", "AtlasDomain"], + path: "AtlasFeaturesStorage/Sources/AtlasFeaturesStorage" + ), + .target( + name: "AtlasFeaturesHistory", + dependencies: ["AtlasDesignSystem", "AtlasDomain"], + path: "AtlasFeaturesHistory/Sources/AtlasFeaturesHistory" + ), + .target( + name: "AtlasFeaturesPermissions", + dependencies: ["AtlasDesignSystem", "AtlasDomain"], + path: "AtlasFeaturesPermissions/Sources/AtlasFeaturesPermissions" + ), + .target( + name: "AtlasFeaturesSettings", + dependencies: ["AtlasDesignSystem", "AtlasDomain"], + path: "AtlasFeaturesSettings/Sources/AtlasFeaturesSettings" + ), + .testTarget( + name: "AtlasApplicationTests", + dependencies: ["AtlasApplication", "AtlasDomain", "AtlasProtocol"], + path: "AtlasApplication/Tests/AtlasApplicationTests" + ), + .testTarget( + name: "AtlasCoreAdaptersTests", + dependencies: ["AtlasCoreAdapters", "AtlasDomain", "AtlasApplication"], + path: "AtlasCoreAdapters/Tests/AtlasCoreAdaptersTests" + ), + .testTarget( + name: "AtlasDomainTests", + dependencies: ["AtlasDomain"], + path: "AtlasDomain/Tests/AtlasDomainTests" + ), + .testTarget( + name: "AtlasProtocolTests", + dependencies: ["AtlasProtocol"], + path: "AtlasProtocol/Tests/AtlasProtocolTests" + ), + .testTarget( + name: "AtlasInfrastructureTests", + dependencies: ["AtlasInfrastructure", "AtlasApplication", "AtlasDomain", "AtlasProtocol"], + path: "AtlasInfrastructure/Tests/AtlasInfrastructureTests" + ), + ] +) diff --git a/Packages/README.md b/Packages/README.md new file mode 100644 index 0000000..bfef821 --- /dev/null +++ b/Packages/README.md @@ -0,0 +1,9 @@ +# Packages + +This directory contains shared Swift packages planned for Atlas for Mac. + +## Current Layout + +- `Package.swift` wires the shared domain, protocol, application, infrastructure, adapters, and feature libraries. +- Each package keeps sources under `Sources//`. +- Contract-style tests live next to the modules they validate. diff --git a/Testing/AtlasTestingSupport/README.md b/Testing/AtlasTestingSupport/README.md new file mode 100644 index 0000000..71487e3 --- /dev/null +++ b/Testing/AtlasTestingSupport/README.md @@ -0,0 +1,8 @@ +# AtlasTestingSupport + +## Responsibility + +- Shared fixtures +- Mock services +- Preview data +- Contract test helpers diff --git a/Testing/AtlasTestingSupport/Sources/AtlasTestingSupport/AtlasTestingSupport.swift b/Testing/AtlasTestingSupport/Sources/AtlasTestingSupport/AtlasTestingSupport.swift new file mode 100644 index 0000000..d1bd13e --- /dev/null +++ b/Testing/AtlasTestingSupport/Sources/AtlasTestingSupport/AtlasTestingSupport.swift @@ -0,0 +1,10 @@ +import AtlasApplication +import AtlasDomain +import AtlasProtocol +import Foundation + +public enum AtlasTestingFixtures { + public static let workspace = AtlasScaffoldWorkspace.snapshot() + public static let request = AtlasRequestEnvelope(command: .inspectPermissions) + public static let firstFinding = AtlasScaffoldFixtures.findings.first +} diff --git a/Testing/Package.swift b/Testing/Package.swift new file mode 100644 index 0000000..3818b3f --- /dev/null +++ b/Testing/Package.swift @@ -0,0 +1,24 @@ +// swift-tools-version: 5.10 +import PackageDescription + +let package = Package( + name: "AtlasTesting", + platforms: [.macOS(.v14)], + products: [ + .library(name: "AtlasTestingSupport", targets: ["AtlasTestingSupport"]), + ], + dependencies: [ + .package(path: "../Packages"), + ], + targets: [ + .target( + name: "AtlasTestingSupport", + dependencies: [ + .product(name: "AtlasApplication", package: "Packages"), + .product(name: "AtlasDomain", package: "Packages"), + .product(name: "AtlasProtocol", package: "Packages"), + ], + path: "AtlasTestingSupport/Sources/AtlasTestingSupport" + ), + ] +) diff --git a/Testing/README.md b/Testing/README.md new file mode 100644 index 0000000..b784232 --- /dev/null +++ b/Testing/README.md @@ -0,0 +1,8 @@ +# Testing + +This directory contains planned test support and app test targets. + +## Current Entry + +- `AtlasTestingSupport/` hosts reusable fixtures and protocol test helpers for the scaffolded app layers. +- `Package.swift` keeps test support separate from the production package graph. diff --git a/Testing/XCUITestRepro/App/XCUITestReproApp.swift b/Testing/XCUITestRepro/App/XCUITestReproApp.swift new file mode 100644 index 0000000..66df022 --- /dev/null +++ b/Testing/XCUITestRepro/App/XCUITestReproApp.swift @@ -0,0 +1,16 @@ +import SwiftUI + +@main +struct XCUITestReproApp: App { + var body: some Scene { + WindowGroup("XCUITestRepro") { + VStack(spacing: 16) { + Text("Hello UI Test") + .accessibilityIdentifier("repro.hello") + Button("Tap Me") {} + .accessibilityIdentifier("repro.tap") + } + .frame(width: 480, height: 320) + } + } +} diff --git a/Testing/XCUITestRepro/UITests/XCUITestReproUITests.swift b/Testing/XCUITestRepro/UITests/XCUITestReproUITests.swift new file mode 100644 index 0000000..7a7a964 --- /dev/null +++ b/Testing/XCUITestRepro/UITests/XCUITestReproUITests.swift @@ -0,0 +1,16 @@ +import XCTest + +final class XCUITestReproUITests: XCTestCase { + override func setUpWithError() throws { + continueAfterFailure = false + } + + func testHelloLabelExists() { + let app = XCUIApplication() + app.launch() + + XCTAssertTrue(app.windows.firstMatch.waitForExistence(timeout: 5)) + XCTAssertTrue(app.staticTexts["repro.hello"].waitForExistence(timeout: 5)) + XCTAssertTrue(app.buttons["repro.tap"].waitForExistence(timeout: 5)) + } +} diff --git a/Testing/XCUITestRepro/XCUITestRepro.xcodeproj/project.pbxproj b/Testing/XCUITestRepro/XCUITestRepro.xcodeproj/project.pbxproj new file mode 100644 index 0000000..8e54163 --- /dev/null +++ b/Testing/XCUITestRepro/XCUITestRepro.xcodeproj/project.pbxproj @@ -0,0 +1,396 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 77; + objects = { + +/* Begin PBXBuildFile section */ + 7BA50A9C5C8694B9E5504F9D /* XCUITestReproUITests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 452F850CD0054D09154359AE /* XCUITestReproUITests.swift */; }; + B68D23AA633AF857A23DDCC2 /* XCUITestReproApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = D808E86C873B6F7B69D85FBA /* XCUITestReproApp.swift */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 41EC7040FF0B2762E01A2B64 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 5F98457B850575BD5F65226F /* Project object */; + proxyType = 1; + remoteGlobalIDString = F8A1B5F7B10A4092E0020A83; + remoteInfo = XCUITestRepro; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 29B22F80E4362C6206703247 /* XCUITestReproUITests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = XCUITestReproUITests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + 452F850CD0054D09154359AE /* XCUITestReproUITests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = XCUITestReproUITests.swift; sourceTree = ""; }; + A07771768DA85256964178AE /* XCUITestRepro.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = XCUITestRepro.app; sourceTree = BUILT_PRODUCTS_DIR; }; + D808E86C873B6F7B69D85FBA /* XCUITestReproApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = XCUITestReproApp.swift; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXGroup section */ + 0A270D1482D091015A9DC47D /* Products */ = { + isa = PBXGroup; + children = ( + A07771768DA85256964178AE /* XCUITestRepro.app */, + 29B22F80E4362C6206703247 /* XCUITestReproUITests.xctest */, + ); + name = Products; + sourceTree = ""; + }; + 9A62A18FF86F36C91B974E0E /* App */ = { + isa = PBXGroup; + children = ( + D808E86C873B6F7B69D85FBA /* XCUITestReproApp.swift */, + ); + path = App; + sourceTree = ""; + }; + AC7FDB3B9CD15021BF775A86 = { + isa = PBXGroup; + children = ( + 9A62A18FF86F36C91B974E0E /* App */, + E40350E8BB4733EFCE8271BE /* UITests */, + 0A270D1482D091015A9DC47D /* Products */, + ); + sourceTree = ""; + }; + E40350E8BB4733EFCE8271BE /* UITests */ = { + isa = PBXGroup; + children = ( + 452F850CD0054D09154359AE /* XCUITestReproUITests.swift */, + ); + path = UITests; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 7FFB8BC3199A798A39A86F04 /* XCUITestReproUITests */ = { + isa = PBXNativeTarget; + buildConfigurationList = 1D4EF9727426B1794441102A /* Build configuration list for PBXNativeTarget "XCUITestReproUITests" */; + buildPhases = ( + A87D49EF038A094F07E6153E /* Sources */, + ); + buildRules = ( + ); + dependencies = ( + C1CFD8C2D3ADED2A89689317 /* PBXTargetDependency */, + ); + name = XCUITestReproUITests; + packageProductDependencies = ( + ); + productName = XCUITestReproUITests; + productReference = 29B22F80E4362C6206703247 /* XCUITestReproUITests.xctest */; + productType = "com.apple.product-type.bundle.ui-testing"; + }; + F8A1B5F7B10A4092E0020A83 /* XCUITestRepro */ = { + isa = PBXNativeTarget; + buildConfigurationList = B0CA2CF5441599CAC46742AD /* Build configuration list for PBXNativeTarget "XCUITestRepro" */; + buildPhases = ( + 27FBCC1E75FE5777A4878927 /* Sources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = XCUITestRepro; + packageProductDependencies = ( + ); + productName = XCUITestRepro; + productReference = A07771768DA85256964178AE /* XCUITestRepro.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 5F98457B850575BD5F65226F /* Project object */ = { + isa = PBXProject; + attributes = { + BuildIndependentTargetsInParallel = YES; + LastUpgradeCheck = 1430; + TargetAttributes = { + 7FFB8BC3199A798A39A86F04 = { + TestTargetID = F8A1B5F7B10A4092E0020A83; + }; + }; + }; + buildConfigurationList = F68EDE8B8FADD11455458177 /* Build configuration list for PBXProject "XCUITestRepro" */; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + Base, + en, + ); + mainGroup = AC7FDB3B9CD15021BF775A86; + minimizedProjectReferenceProxies = 1; + preferredProjectObjectVersion = 77; + productRefGroup = 0A270D1482D091015A9DC47D /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + F8A1B5F7B10A4092E0020A83 /* XCUITestRepro */, + 7FFB8BC3199A798A39A86F04 /* XCUITestReproUITests */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXSourcesBuildPhase section */ + 27FBCC1E75FE5777A4878927 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + B68D23AA633AF857A23DDCC2 /* XCUITestReproApp.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + A87D49EF038A094F07E6153E /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 7BA50A9C5C8694B9E5504F9D /* XCUITestReproUITests.swift in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + C1CFD8C2D3ADED2A89689317 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = F8A1B5F7B10A4092E0020A83 /* XCUITestRepro */; + targetProxy = 41EC7040FF0B2762E01A2B64 /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin XCBuildConfiguration section */ + 24186B5C6DDEA754EB61E29C /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + AD_HOC_CODE_SIGNING_ALLOWED = YES; + BUNDLE_LOADER = "$(TEST_HOST)"; + COMBINE_HIDPI_IMAGES = YES; + GENERATE_INFOPLIST_FILE = YES; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + "@loader_path/Frameworks", + ); + MACOSX_DEPLOYMENT_TARGET = 14.0; + PRODUCT_BUNDLE_IDENTIFIER = com.atlasformac.xcuitestrepro.uitests; + SDKROOT = macosx; + TEST_TARGET_NAME = XCUITestRepro; + }; + name = Release; + }; + 2FB7B63933176A078FF2D60D /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + AD_HOC_CODE_SIGNING_ALLOWED = YES; + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + COMBINE_HIDPI_IMAGES = YES; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_CFBundleDisplayName = XCUITestRepro; + INFOPLIST_KEY_NSPrincipalClass = NSApplication; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/../Frameworks", + ); + MACOSX_DEPLOYMENT_TARGET = 14.0; + PRODUCT_BUNDLE_IDENTIFIER = com.atlasformac.xcuitestrepro; + PRODUCT_NAME = XCUITestRepro; + SDKROOT = macosx; + }; + name = Release; + }; + 683E531A917E47D861B32883 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + AD_HOC_CODE_SIGNING_ALLOWED = YES; + BUNDLE_LOADER = "$(TEST_HOST)"; + COMBINE_HIDPI_IMAGES = YES; + GENERATE_INFOPLIST_FILE = YES; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + "@loader_path/Frameworks", + ); + MACOSX_DEPLOYMENT_TARGET = 14.0; + PRODUCT_BUNDLE_IDENTIFIER = com.atlasformac.xcuitestrepro.uitests; + SDKROOT = macosx; + TEST_TARGET_NAME = XCUITestRepro; + }; + name = Debug; + }; + 82FC2DB66FFAFE4F98FE4EBB /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + AD_HOC_CODE_SIGNING_ALLOWED = YES; + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + COMBINE_HIDPI_IMAGES = YES; + GENERATE_INFOPLIST_FILE = YES; + INFOPLIST_KEY_CFBundleDisplayName = XCUITestRepro; + INFOPLIST_KEY_NSPrincipalClass = NSApplication; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/../Frameworks", + ); + MACOSX_DEPLOYMENT_TARGET = 14.0; + PRODUCT_BUNDLE_IDENTIFIER = com.atlasformac.xcuitestrepro; + PRODUCT_NAME = XCUITestRepro; + SDKROOT = macosx; + }; + name = Debug; + }; + B0D2AE36344F31D2C722D271 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + MACOSX_DEPLOYMENT_TARGET = 14.0; + MTL_ENABLE_DEBUG_INFO = NO; + MTL_FAST_MATH = YES; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx; + SWIFT_COMPILATION_MODE = wholemodule; + SWIFT_OPTIMIZATION_LEVEL = "-O"; + SWIFT_VERSION = 6.0; + }; + name = Release; + }; + DCB2CA322641E6290ABC9973 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "$(inherited)", + "DEBUG=1", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + MACOSX_DEPLOYMENT_TARGET = 14.0; + MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; + MTL_FAST_MATH = YES; + ONLY_ACTIVE_ARCH = YES; + PRODUCT_NAME = "$(TARGET_NAME)"; + SDKROOT = macosx; + SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + SWIFT_VERSION = 6.0; + }; + name = Debug; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 1D4EF9727426B1794441102A /* Build configuration list for PBXNativeTarget "XCUITestReproUITests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 683E531A917E47D861B32883 /* Debug */, + 24186B5C6DDEA754EB61E29C /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Debug; + }; + B0CA2CF5441599CAC46742AD /* Build configuration list for PBXNativeTarget "XCUITestRepro" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 82FC2DB66FFAFE4F98FE4EBB /* Debug */, + 2FB7B63933176A078FF2D60D /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Debug; + }; + F68EDE8B8FADD11455458177 /* Build configuration list for PBXProject "XCUITestRepro" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + DCB2CA322641E6290ABC9973 /* Debug */, + B0D2AE36344F31D2C722D271 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Debug; + }; +/* End XCConfigurationList section */ + }; + rootObject = 5F98457B850575BD5F65226F /* Project object */; +} diff --git a/Testing/XCUITestRepro/XCUITestRepro.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/Testing/XCUITestRepro/XCUITestRepro.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 0000000..919434a --- /dev/null +++ b/Testing/XCUITestRepro/XCUITestRepro.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ + + + + + diff --git a/Testing/XCUITestRepro/XCUITestRepro.xcodeproj/xcshareddata/xcschemes/XCUITestRepro.xcscheme b/Testing/XCUITestRepro/XCUITestRepro.xcodeproj/xcshareddata/xcschemes/XCUITestRepro.xcscheme new file mode 100644 index 0000000..9465be8 --- /dev/null +++ b/Testing/XCUITestRepro/XCUITestRepro.xcodeproj/xcshareddata/xcschemes/XCUITestRepro.xcscheme @@ -0,0 +1,102 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/Testing/XCUITestRepro/project.yml b/Testing/XCUITestRepro/project.yml new file mode 100644 index 0000000..e13e16a --- /dev/null +++ b/Testing/XCUITestRepro/project.yml @@ -0,0 +1,49 @@ +name: XCUITestRepro +options: + createIntermediateGroups: true + developmentLanguage: en + deploymentTarget: + macOS: "14.0" +settings: + base: + SWIFT_VERSION: 6.0 + MACOSX_DEPLOYMENT_TARGET: "14.0" +schemes: + XCUITestRepro: + build: + targets: + XCUITestRepro: all + test: + config: Debug + gatherCoverageData: false + targets: + - name: XCUITestReproUITests +targets: + XCUITestRepro: + type: application + platform: macOS + deploymentTarget: "14.0" + sources: + - path: App + settings: + base: + PRODUCT_BUNDLE_IDENTIFIER: com.atlasformac.xcuitestrepro + PRODUCT_NAME: XCUITestRepro + GENERATE_INFOPLIST_FILE: YES + INFOPLIST_KEY_CFBundleDisplayName: XCUITestRepro + INFOPLIST_KEY_NSPrincipalClass: NSApplication + AD_HOC_CODE_SIGNING_ALLOWED: YES + XCUITestReproUITests: + type: bundle.ui-testing + platform: macOS + deploymentTarget: "14.0" + sources: + - path: UITests + settings: + base: + PRODUCT_BUNDLE_IDENTIFIER: com.atlasformac.xcuitestrepro.uitests + GENERATE_INFOPLIST_FILE: YES + TEST_TARGET_NAME: XCUITestRepro + AD_HOC_CODE_SIGNING_ALLOWED: YES + dependencies: + - target: XCUITestRepro diff --git a/XPC/AtlasWorkerXPC/README.md b/XPC/AtlasWorkerXPC/README.md new file mode 100644 index 0000000..2554221 --- /dev/null +++ b/XPC/AtlasWorkerXPC/README.md @@ -0,0 +1,18 @@ +# AtlasWorkerXPC + +## Responsibility + +- Non-privileged task orchestration +- Progress streaming +- Adapter invocation +- Helper-client handoff for allowlisted actions +- Result aggregation and persistence + +## Current Implementation + +- The XPC service hosts a real `NSXPCListener.service()` entry point. +- Requests cross the worker boundary as structured `Data` payloads that encode Atlas protocol envelopes and results. +- The service injects health, Smart Clean, and local app-inventory adapters. +- Release builds rely on bundled `MoleRuntime` resources for upstream shell-based health and clean workflows. +- The service can invoke the packaged or development helper executable through `AtlasPrivilegedHelperClient`. +- Direct-distribution app runs default to the same real worker implementation in-process; use `ATLAS_PREFER_XPC_WORKER=1` to force the bundled XPC path for runtime validation. diff --git a/XPC/AtlasWorkerXPC/Sources/AtlasWorkerXPC/main.swift b/XPC/AtlasWorkerXPC/Sources/AtlasWorkerXPC/main.swift new file mode 100644 index 0000000..e8f8725 --- /dev/null +++ b/XPC/AtlasWorkerXPC/Sources/AtlasWorkerXPC/main.swift @@ -0,0 +1,15 @@ +import AtlasCoreAdapters +import AtlasInfrastructure +import Foundation + +let worker = AtlasScaffoldWorkerService( + healthSnapshotProvider: MoleHealthAdapter(), + smartCleanScanProvider: MoleSmartCleanAdapter(), + appsInventoryProvider: MacAppsInventoryAdapter(), + helperExecutor: AtlasPrivilegedHelperClient() +) +let listener = NSXPCListener.service() +let delegate = AtlasXPCListenerDelegate(host: AtlasXPCWorkerServiceHost(worker: worker)) +listener.delegate = delegate +listener.resume() +RunLoop.current.run() diff --git a/XPC/Package.swift b/XPC/Package.swift new file mode 100644 index 0000000..795a67a --- /dev/null +++ b/XPC/Package.swift @@ -0,0 +1,24 @@ +// swift-tools-version: 5.10 +import PackageDescription + +let package = Package( + name: "AtlasXPC", + platforms: [.macOS(.v14)], + products: [ + .executable(name: "AtlasWorkerXPC", targets: ["AtlasWorkerXPC"]), + ], + dependencies: [ + .package(path: "../Packages"), + ], + targets: [ + .executableTarget( + name: "AtlasWorkerXPC", + dependencies: [ + .product(name: "AtlasCoreAdapters", package: "Packages"), + .product(name: "AtlasInfrastructure", package: "Packages"), + .product(name: "AtlasProtocol", package: "Packages"), + ], + path: "AtlasWorkerXPC/Sources/AtlasWorkerXPC" + ), + ] +) diff --git a/XPC/README.md b/XPC/README.md new file mode 100644 index 0000000..b292854 --- /dev/null +++ b/XPC/README.md @@ -0,0 +1,9 @@ +# XPC + +This directory contains XPC service targets for Atlas for Mac. + +## Current Entry + +- `AtlasWorkerXPC/` hosts the non-privileged worker service. +- The service wires in health, Smart Clean, app inventory, persistence, and helper-client integrations. +- `Package.swift` exposes the worker target for local builds and early integration verification. diff --git a/project.yml b/project.yml new file mode 100644 index 0000000..c84085c --- /dev/null +++ b/project.yml @@ -0,0 +1,105 @@ +name: Atlas +options: + createIntermediateGroups: true + developmentLanguage: zh-Hans + deploymentTarget: + macOS: "14.0" +settings: + base: + SWIFT_VERSION: 6.0 + MACOSX_DEPLOYMENT_TARGET: "14.0" +packages: + Packages: + path: Packages +schemes: + AtlasApp: + build: + targets: + AtlasWorkerXPC: [running] + AtlasApp: all + test: + config: Debug + gatherCoverageData: false + targets: + - name: AtlasAppUITests + run: + config: Debug + archive: + config: Release +targets: + AtlasWorkerXPC: + type: xpc-service + platform: macOS + deploymentTarget: "14.0" + sources: + - path: XPC/AtlasWorkerXPC/Sources/AtlasWorkerXPC + settings: + base: + PRODUCT_BUNDLE_IDENTIFIER: com.atlasformac.app.worker + PRODUCT_NAME: AtlasWorkerXPC + GENERATE_INFOPLIST_FILE: YES + AD_HOC_CODE_SIGNING_ALLOWED: YES + dependencies: + - package: Packages + product: AtlasApplication + - package: Packages + product: AtlasCoreAdapters + - package: Packages + product: AtlasInfrastructure + - package: Packages + product: AtlasProtocol + AtlasApp: + type: application + platform: macOS + deploymentTarget: "14.0" + sources: + - path: Apps/AtlasApp/Sources/AtlasApp + settings: + base: + PRODUCT_BUNDLE_IDENTIFIER: com.atlasformac.app + PRODUCT_NAME: Atlas for Mac + GENERATE_INFOPLIST_FILE: YES + AD_HOC_CODE_SIGNING_ALLOWED: YES + INFOPLIST_KEY_CFBundleDisplayName: Atlas for Mac + INFOPLIST_KEY_LSApplicationCategoryType: public.app-category.utilities + INFOPLIST_KEY_NSPrincipalClass: NSApplication + dependencies: + - target: AtlasWorkerXPC + embed: true + codeSign: false + - package: Packages + product: AtlasApplication + - package: Packages + product: AtlasCoreAdapters + - package: Packages + product: AtlasDesignSystem + - package: Packages + product: AtlasDomain + - package: Packages + product: AtlasFeaturesApps + - package: Packages + product: AtlasFeaturesHistory + - package: Packages + product: AtlasFeaturesOverview + - package: Packages + product: AtlasFeaturesPermissions + - package: Packages + product: AtlasFeaturesSettings + - package: Packages + product: AtlasFeaturesSmartClean + - package: Packages + product: AtlasInfrastructure + AtlasAppUITests: + type: bundle.ui-testing + platform: macOS + deploymentTarget: "14.0" + sources: + - path: Apps/AtlasAppUITests + settings: + base: + PRODUCT_BUNDLE_IDENTIFIER: com.atlasformac.app.uitests + GENERATE_INFOPLIST_FILE: YES + TEST_TARGET_NAME: AtlasApp + AD_HOC_CODE_SIGNING_ALLOWED: YES + dependencies: + - target: AtlasApp diff --git a/scripts/atlas/build-native.sh b/scripts/atlas/build-native.sh new file mode 100755 index 0000000..305b54b --- /dev/null +++ b/scripts/atlas/build-native.sh @@ -0,0 +1,24 @@ +#!/bin/bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)" +PROJECT_PATH="$ROOT_DIR/Atlas.xcodeproj" +SCHEME="AtlasApp" +CONFIGURATION="${CONFIGURATION:-Release}" +DERIVED_DATA_PATH="${DERIVED_DATA_PATH:-$ROOT_DIR/.build/atlas-native/DerivedData}" + +if [[ -f "$ROOT_DIR/project.yml" ]]; then + if command -v xcodegen >/dev/null 2>&1; then + (cd "$ROOT_DIR" && xcodegen generate) + elif [[ ! -d "$PROJECT_PATH" || "$ROOT_DIR/project.yml" -nt "$PROJECT_PATH/project.pbxproj" ]]; then + echo "Atlas.xcodeproj is missing or stale, but xcodegen is not installed." >&2 + exit 1 + fi +fi + +xcodebuild \ + -project "$PROJECT_PATH" \ + -scheme "$SCHEME" \ + -configuration "$CONFIGURATION" \ + -derivedDataPath "$DERIVED_DATA_PATH" \ + build diff --git a/scripts/atlas/ensure-local-signing-identity.sh b/scripts/atlas/ensure-local-signing-identity.sh new file mode 100755 index 0000000..b7a70d6 --- /dev/null +++ b/scripts/atlas/ensure-local-signing-identity.sh @@ -0,0 +1,89 @@ +#!/bin/bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)" +source "$ROOT_DIR/scripts/atlas/signing-common.sh" + +KEYCHAIN_PATH="$(atlas_local_signing_keychain_path)" +KEYCHAIN_PASSWORD="$(atlas_local_signing_keychain_password)" +IDENTITY_NAME="$(atlas_local_signing_identity_name)" +VALID_DAYS="${ATLAS_LOCAL_SIGNING_VALID_DAYS:-3650}" +P12_PASSWORD="${ATLAS_LOCAL_SIGNING_P12_PASSWORD:-atlas-local-signing-p12}" + +if atlas_local_identity_usable; then + atlas_unlock_local_signing_keychain + printf 'Atlas local signing identity ready\n' + printf 'Identity: %s\n' "$IDENTITY_NAME" + printf 'Keychain: %s\n' "$KEYCHAIN_PATH" + exit 0 +fi + +if [[ -f "$KEYCHAIN_PATH" ]]; then + rm -f "$KEYCHAIN_PATH" +fi + +mkdir -p "$(dirname "$KEYCHAIN_PATH")" + +tmpdir="$(mktemp -d "${TMPDIR:-/tmp}/atlas-local-signing.XXXXXX")" +cleanup() { + rm -rf "$tmpdir" +} +trap cleanup EXIT + +cat > "$tmpdir/openssl.cnf" </dev/null 2>&1 + +/usr/bin/openssl pkcs12 \ + -export \ + -inkey "$tmpdir/identity.key" \ + -in "$tmpdir/identity.crt" \ + -out "$tmpdir/identity.p12" \ + -passout "pass:$P12_PASSWORD" >/dev/null 2>&1 + +if [[ ! -f "$KEYCHAIN_PATH" ]]; then + security create-keychain -p "$KEYCHAIN_PASSWORD" "$KEYCHAIN_PATH" >/dev/null +fi + +security unlock-keychain -p "$KEYCHAIN_PASSWORD" "$KEYCHAIN_PATH" +security set-keychain-settings -lut 21600 "$KEYCHAIN_PATH" +security import "$tmpdir/identity.p12" \ + -k "$KEYCHAIN_PATH" \ + -P "$P12_PASSWORD" \ + -f pkcs12 \ + -A \ + -T /usr/bin/codesign \ + -T /usr/bin/security >/dev/null +security set-key-partition-list -S apple-tool:,apple:,codesign: -s -k "$KEYCHAIN_PASSWORD" "$KEYCHAIN_PATH" >/dev/null +atlas_unlock_local_signing_keychain + +if ! atlas_local_identity_usable; then + echo "Failed to provision local Atlas signing identity." >&2 + exit 1 +fi + +printf 'Created Atlas local signing identity\n' +printf 'Identity: %s\n' "$IDENTITY_NAME" +printf 'Keychain: %s\n' "$KEYCHAIN_PATH" +printf 'Use: ./scripts/atlas/package-native.sh\n' diff --git a/scripts/atlas/full-acceptance.sh b/scripts/atlas/full-acceptance.sh new file mode 100755 index 0000000..bb5ae13 --- /dev/null +++ b/scripts/atlas/full-acceptance.sh @@ -0,0 +1,68 @@ +#!/bin/bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)" + +cd "$ROOT_DIR" + +run_ui_acceptance() { + local atlas_log repro_log + atlas_log="$(mktemp -t atlas-ui-acceptance.XXXXXX.log)" + repro_log="$(mktemp -t atlas-ui-repro.XXXXXX.log)" + trap 'rm -f "$atlas_log" "$repro_log"' RETURN + + if ./scripts/atlas/run-ui-automation.sh 2>&1 | tee "$atlas_log"; then + return 0 + fi + + echo "Atlas UI automation failed; checking standalone repro to classify the failure..." + + if xcodebuild test \ + -project Testing/XCUITestRepro/XCUITestRepro.xcodeproj \ + -scheme XCUITestRepro \ + -destination 'platform=macOS' 2>&1 | tee "$repro_log"; then + echo "Standalone repro passed while Atlas UI automation failed; treating this as an Atlas-specific blocker." + return 1 + fi + + if grep -q 'Timed out while enabling automation mode' "$atlas_log" && grep -q 'Timed out while enabling automation mode' "$repro_log"; then + echo "UI automation is blocked by the current macOS automation environment; continuing acceptance with a documented environment condition." + return 0 + fi + + echo "UI automation failed for a reason that was not classified as a shared environment blocker." + return 1 +} + +echo "[1/10] Shared package tests" +swift test --package-path Packages + +echo "[2/10] App package tests" +swift test --package-path Apps + +echo "[3/10] Worker and helper builds" +swift build --package-path XPC +swift test --package-path Helpers +swift build --package-path Testing + +echo "[4/10] Native packaging" +./scripts/atlas/package-native.sh + +echo "[5/10] Bundle structure verification" +./scripts/atlas/verify-bundle-contents.sh + +echo "[6/10] DMG install verification" +KEEP_INSTALLED_APP=1 ./scripts/atlas/verify-dmg-install.sh + +echo "[7/10] Installed app launch smoke" +./scripts/atlas/verify-app-launch.sh + +echo "[8/10] Native UI automation" +run_ui_acceptance + +echo "[9/10] Signing preflight" +./scripts/atlas/signing-preflight.sh || true + +echo "[10/10] Acceptance summary" +echo "Artifacts available in dist/native" +ls -lah dist/native diff --git a/scripts/atlas/package-native.sh b/scripts/atlas/package-native.sh new file mode 100755 index 0000000..5629366 --- /dev/null +++ b/scripts/atlas/package-native.sh @@ -0,0 +1,139 @@ +#!/bin/bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)" +source "$ROOT_DIR/scripts/atlas/signing-common.sh" + +DIST_DIR="${DIST_DIR:-$ROOT_DIR/dist/native}" +DERIVED_DATA_PATH="${DERIVED_DATA_PATH:-$ROOT_DIR/.build/atlas-native/DerivedData}" +APP_NAME="Atlas for Mac.app" +APP_PATH="$DERIVED_DATA_PATH/Build/Products/Release/$APP_NAME" +HELPER_BINARY="$ROOT_DIR/Helpers/.build/release/AtlasPrivilegedHelper" +ZIP_PATH="$DIST_DIR/Atlas-for-Mac.zip" +DMG_PATH="$DIST_DIR/Atlas-for-Mac.dmg" +PKG_PATH="$DIST_DIR/Atlas-for-Mac.pkg" +SHA_PATH="$DIST_DIR/Atlas-for-Mac.sha256" +PACKAGED_APP_PATH="$DIST_DIR/$APP_NAME" +DMG_STAGING_DIR="$DIST_DIR/dmg-root" +REQUESTED_APP_SIGN_IDENTITY="${ATLAS_CODESIGN_IDENTITY:-}" +APP_SIGN_IDENTITY="$(atlas_resolve_app_signing_identity)" +APP_SIGNING_KEYCHAIN="$(atlas_resolve_app_signing_keychain "$APP_SIGN_IDENTITY")" +APP_SIGNING_MODE="$(atlas_signing_mode_for_identity "$APP_SIGN_IDENTITY")" +INSTALLER_SIGN_IDENTITY="$(atlas_resolve_installer_signing_identity)" +NOTARY_PROFILE="${ATLAS_NOTARY_PROFILE:-}" + +mkdir -p "$DIST_DIR" + +sign_app_component() { + local path="$1" + local args=(--force --sign "$APP_SIGN_IDENTITY") + local entitlements_file="" + + if [[ -n "$APP_SIGNING_KEYCHAIN" ]]; then + args+=(--keychain "$APP_SIGNING_KEYCHAIN") + fi + + if [[ "$APP_SIGNING_MODE" == "developer-id" ]]; then + args+=(--options runtime --timestamp) + fi + + entitlements_file="$(mktemp "${TMPDIR:-/tmp}/atlas-entitlements.XXXXXX")" + if /usr/bin/codesign -d --entitlements :- "$path" > "$entitlements_file" 2>/dev/null && /usr/bin/grep -q '&2 + exit 1 +fi + +python3 - "$PACKAGED_APP_PATH" "$DMG_STAGING_DIR" "$DMG_PATH" <<'PY' +from pathlib import Path +import shutil, sys +for raw in sys.argv[1:]: + path = Path(raw) + if path.exists(): + if path.is_dir(): + shutil.rmtree(path) + else: + path.unlink() +PY + +cp -R "$APP_PATH" "$PACKAGED_APP_PATH" +mkdir -p "$PACKAGED_APP_PATH/Contents/Helpers" +cp "$HELPER_BINARY" "$PACKAGED_APP_PATH/Contents/Helpers/AtlasPrivilegedHelper" +chmod +x "$PACKAGED_APP_PATH/Contents/Helpers/AtlasPrivilegedHelper" + +while IFS= read -r xpc; do + sign_app_component "$xpc" +done < <(find "$PACKAGED_APP_PATH/Contents/XPCServices" -maxdepth 1 -name '*.xpc' -type d 2>/dev/null | sort) +sign_app_component "$PACKAGED_APP_PATH/Contents/Helpers/AtlasPrivilegedHelper" +sign_app_component "$PACKAGED_APP_PATH" +codesign --verify --deep --strict --verbose=2 "$PACKAGED_APP_PATH" + +/usr/bin/ditto -c -k --sequesterRsrc --keepParent "$PACKAGED_APP_PATH" "$ZIP_PATH" + +mkdir -p "$DMG_STAGING_DIR" +cp -R "$PACKAGED_APP_PATH" "$DMG_STAGING_DIR/$APP_NAME" +ln -s /Applications "$DMG_STAGING_DIR/Applications" +hdiutil create -volname "Atlas for Mac" -srcfolder "$DMG_STAGING_DIR" -ov -format UDZO "$DMG_PATH" >/dev/null + +productbuild_args=(--component "$PACKAGED_APP_PATH" /Applications "$PKG_PATH") +if [[ -n "$INSTALLER_SIGN_IDENTITY" ]]; then + productbuild_args=(--sign "$INSTALLER_SIGN_IDENTITY" --component "$PACKAGED_APP_PATH" /Applications "$PKG_PATH") +fi +/usr/bin/productbuild "${productbuild_args[@]}" + +( + cd "$DIST_DIR" + /usr/bin/shasum -a 256 \ + "$(basename "$ZIP_PATH")" \ + "$(basename "$DMG_PATH")" \ + "$(basename "$PKG_PATH")" > "$SHA_PATH" +) + +echo "Packaged app: $PACKAGED_APP_PATH" +echo "Zip artifact: $ZIP_PATH" +echo "DMG artifact: $DMG_PATH" +echo "Installer package: $PKG_PATH" +echo "Checksums: $SHA_PATH" + +if [[ -n "$NOTARY_PROFILE" && "$APP_SIGNING_MODE" == "developer-id" && -n "$INSTALLER_SIGN_IDENTITY" ]]; then + xcrun notarytool submit "$PKG_PATH" --keychain-profile "$NOTARY_PROFILE" --wait + xcrun stapler staple "$PKG_PATH" + xcrun notarytool submit "$DMG_PATH" --keychain-profile "$NOTARY_PROFILE" --wait + xcrun notarytool submit "$ZIP_PATH" --keychain-profile "$NOTARY_PROFILE" --wait + xcrun stapler staple "$PACKAGED_APP_PATH" + /usr/bin/ditto -c -k --sequesterRsrc --keepParent "$PACKAGED_APP_PATH" "$ZIP_PATH" + ( + cd "$DIST_DIR" + /usr/bin/shasum -a 256 \ + "$(basename "$ZIP_PATH")" \ + "$(basename "$DMG_PATH")" \ + "$(basename "$PKG_PATH")" > "$SHA_PATH" + ) + echo "Notarization complete" +fi diff --git a/scripts/atlas/run-ui-automation.sh b/scripts/atlas/run-ui-automation.sh new file mode 100755 index 0000000..1ef103e --- /dev/null +++ b/scripts/atlas/run-ui-automation.sh @@ -0,0 +1,42 @@ +#!/bin/bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)" + +cd "$ROOT_DIR" + +if ! ./scripts/atlas/ui-automation-preflight.sh >/dev/null; then + echo "Skipping native UI automation: Accessibility / automation permissions are not ready." + exit 0 +fi + +run_once() { + pkill -f 'Atlas for Mac.app/Contents/MacOS/Atlas for Mac' >/dev/null 2>&1 || true + pkill -f 'AtlasAppUITests-Runner|XCTRunner|xcodebuild test -project Atlas.xcodeproj -scheme AtlasApp' >/dev/null 2>&1 || true + sleep 2 + + xcodegen generate >/dev/null + xcodebuild test \ + -project Atlas.xcodeproj \ + -scheme AtlasApp \ + -destination 'platform=macOS' \ + -only-testing:AtlasAppUITests +} + +LOG_FILE="$(mktemp -t atlas-ui-automation.XXXXXX.log)" +trap 'rm -f "$LOG_FILE"' EXIT + +for attempt in 1 2; do + echo "UI automation attempt $attempt/2" + if run_once 2>&1 | tee "$LOG_FILE"; then + exit 0 + fi + + if grep -q 'Timed out while enabling automation mode' "$LOG_FILE" && [[ "$attempt" -lt 2 ]]; then + echo "UI automation timed out while enabling automation mode; retrying after cleanup..." + sleep 3 + continue + fi + + exit 1 +done diff --git a/scripts/atlas/signing-common.sh b/scripts/atlas/signing-common.sh new file mode 100644 index 0000000..b598fd5 --- /dev/null +++ b/scripts/atlas/signing-common.sh @@ -0,0 +1,168 @@ +#!/bin/bash + +atlas_local_signing_keychain_path() { + printf '%s\n' "${ATLAS_LOCAL_SIGNING_KEYCHAIN_PATH:-$HOME/Library/Keychains/AtlasLocalSigning.keychain-db}" +} + +atlas_local_signing_keychain_password() { + printf '%s\n' "${ATLAS_LOCAL_SIGNING_KEYCHAIN_PASSWORD:-atlas-local-signing}" +} + +atlas_local_signing_identity_name() { + printf '%s\n' "${ATLAS_LOCAL_SIGNING_IDENTITY_NAME:-Atlas Local Development}" +} + +atlas_detect_release_app_identity() { + security find-identity -v -p codesigning 2>/dev/null \ + | sed -n 's/.*"\(Developer ID Application:.*\)"/\1/p' \ + | head -1 +} + +atlas_detect_development_app_identity() { + local output + output="$(security find-identity -v -p codesigning 2>/dev/null || true)" + + local identity + identity="$(printf '%s\n' "$output" | sed -n 's/.*"\(Apple Development:.*\)"/\1/p' | head -1)" + if [[ -n "$identity" ]]; then + printf '%s\n' "$identity" + return 0 + fi + + printf '%s\n' "$output" | sed -n 's/.*"\(Mac Developer:.*\)"/\1/p' | head -1 +} + +atlas_detect_installer_identity() { + security find-identity -v -p basic 2>/dev/null \ + | sed -n 's/.*"\(Developer ID Installer:.*\)"/\1/p' \ + | head -1 +} + +atlas_local_identity_exists() { + local keychain_path identity_name + keychain_path="$(atlas_local_signing_keychain_path)" + identity_name="$(atlas_local_signing_identity_name)" + + [[ -f "$keychain_path" ]] || return 1 + security find-certificate -a -c "$identity_name" "$keychain_path" >/dev/null 2>&1 +} + +atlas_unlock_local_signing_keychain() { + local keychain_path keychain_password + keychain_path="$(atlas_local_signing_keychain_path)" + keychain_password="$(atlas_local_signing_keychain_password)" + + [[ -f "$keychain_path" ]] || return 0 + security unlock-keychain -p "$keychain_password" "$keychain_path" >/dev/null 2>&1 || true + security set-keychain-settings -lut 21600 "$keychain_path" >/dev/null 2>&1 || true + atlas_add_local_signing_keychain_to_search_list +} + +atlas_add_local_signing_keychain_to_search_list() { + local keychain_path + keychain_path="$(atlas_local_signing_keychain_path)" + + [[ -f "$keychain_path" ]] || return 0 + + local current_keychains=() + while IFS= read -r line; do + line="${line#"${line%%[![:space:]]*}"}" + line="${line%\"}" + line="${line#\"}" + [[ -n "$line" ]] && current_keychains+=("$line") + done < <(security list-keychains -d user 2>/dev/null || true) + + if printf '%s\n' "${current_keychains[@]}" | grep -Fx "$keychain_path" >/dev/null 2>&1; then + return 0 + fi + + security list-keychains -d user -s "$keychain_path" "${current_keychains[@]}" >/dev/null 2>&1 || true +} + +atlas_local_identity_usable() { + atlas_local_identity_exists || return 1 + + local keychain_path identity_name sample_file + keychain_path="$(atlas_local_signing_keychain_path)" + identity_name="$(atlas_local_signing_identity_name)" + + atlas_unlock_local_signing_keychain + + sample_file="$(mktemp "${TMPDIR:-/tmp}/atlas-local-signing-check.XXXXXX")" + printf 'atlas local signing check\n' > "$sample_file" + if ! /usr/bin/codesign --force --sign "$identity_name" --keychain "$keychain_path" "$sample_file" >/dev/null 2>&1; then + rm -f "$sample_file" + return 1 + fi + + rm -f "$sample_file" + return 0 +} + +atlas_signing_mode_for_identity() { + local identity="${1:-}" + local local_identity_name + local_identity_name="$(atlas_local_signing_identity_name)" + + if [[ -z "$identity" || "$identity" == "-" ]]; then + printf '%s\n' "adhoc" + elif [[ "$identity" == Developer\ ID\ Application:* ]]; then + printf '%s\n' "developer-id" + elif [[ "$identity" == "$local_identity_name" ]]; then + printf '%s\n' "local-stable" + else + printf '%s\n' "local-stable" + fi +} + +atlas_resolve_app_signing_identity() { + if [[ -n "${ATLAS_CODESIGN_IDENTITY:-}" ]]; then + printf '%s\n' "$ATLAS_CODESIGN_IDENTITY" + return 0 + fi + + local identity + identity="$(atlas_detect_release_app_identity)" + if [[ -n "$identity" ]]; then + printf '%s\n' "$identity" + return 0 + fi + + identity="$(atlas_detect_development_app_identity)" + if [[ -n "$identity" ]]; then + printf '%s\n' "$identity" + return 0 + fi + + if atlas_local_identity_usable; then + printf '%s\n' "$(atlas_local_signing_identity_name)" + return 0 + fi + + printf '%s\n' "-" +} + +atlas_resolve_app_signing_keychain() { + local identity="${1:-}" + + if [[ -n "${ATLAS_CODESIGN_KEYCHAIN:-}" ]]; then + printf '%s\n' "$ATLAS_CODESIGN_KEYCHAIN" + return 0 + fi + + if [[ "$identity" == "$(atlas_local_signing_identity_name)" ]] && atlas_local_identity_exists; then + printf '%s\n' "$(atlas_local_signing_keychain_path)" + return 0 + fi + + printf '%s\n' "" +} + +atlas_resolve_installer_signing_identity() { + if [[ -n "${ATLAS_INSTALLER_SIGN_IDENTITY:-}" ]]; then + printf '%s\n' "$ATLAS_INSTALLER_SIGN_IDENTITY" + return 0 + fi + + atlas_detect_installer_identity +} diff --git a/scripts/atlas/signing-preflight.sh b/scripts/atlas/signing-preflight.sh new file mode 100755 index 0000000..2af9d0d --- /dev/null +++ b/scripts/atlas/signing-preflight.sh @@ -0,0 +1,76 @@ +#!/bin/bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)" +source "$ROOT_DIR/scripts/atlas/signing-common.sh" + +APP_IDENTITY_OVERRIDE="${ATLAS_CODESIGN_IDENTITY:-}" +INSTALLER_IDENTITY_OVERRIDE="${ATLAS_INSTALLER_SIGN_IDENTITY:-}" +NOTARY_PROFILE_OVERRIDE="${ATLAS_NOTARY_PROFILE:-}" + +codesign_output="$(security find-identity -v -p codesigning 2>/dev/null || true)" +basic_output="$(security find-identity -v -p basic 2>/dev/null || true)" + +app_identity_detected="$(printf '%s\n' "$codesign_output" | sed -n 's/.*"\(Developer ID Application:.*\)"/\1/p' | head -1)" +installer_identity_detected="$(printf '%s\n' "$basic_output" | sed -n 's/.*"\(Developer ID Installer:.*\)"/\1/p' | head -1)" + +app_identity="${APP_IDENTITY_OVERRIDE:-$app_identity_detected}" +installer_identity="${INSTALLER_IDENTITY_OVERRIDE:-$installer_identity_detected}" +local_identity="" +if atlas_local_identity_exists; then + local_identity="$(atlas_local_signing_identity_name)" +fi + +printf 'Atlas signing preflight\n' +printf '======================\n' +printf 'Developer ID Application: %s\n' "${app_identity:-MISSING}" +printf 'Developer ID Installer: %s\n' "${installer_identity:-MISSING}" +printf 'Notary profile: %s\n' "${NOTARY_PROFILE_OVERRIDE:-MISSING}" +if [[ -n "$local_identity" ]]; then + printf 'Stable local app identity: %s\n' "$local_identity" +else + printf 'Stable local app identity: MISSING\n' +fi + +status=0 +if [[ -z "$app_identity" ]]; then + echo '✗ Missing Developer ID Application identity' + status=1 +fi +if [[ -z "$installer_identity" ]]; then + echo '✗ Missing Developer ID Installer identity' + status=1 +fi +if [[ -z "$NOTARY_PROFILE_OVERRIDE" ]]; then + echo '✗ Missing notarytool keychain profile name in ATLAS_NOTARY_PROFILE' + status=1 +fi + +if [[ -n "$NOTARY_PROFILE_OVERRIDE" ]]; then + if xcrun notarytool history --keychain-profile "$NOTARY_PROFILE_OVERRIDE" >/dev/null 2>&1; then + echo '✓ notarytool profile is usable' + else + echo '✗ notarytool profile could not be validated' + status=1 + fi +fi + +if [[ $status -eq 0 ]]; then + echo '✓ Release signing prerequisites are present' + echo "export ATLAS_CODESIGN_IDENTITY='$app_identity'" + echo "export ATLAS_INSTALLER_SIGN_IDENTITY='$installer_identity'" + echo "export ATLAS_NOTARY_PROFILE='$NOTARY_PROFILE_OVERRIDE'" +else + echo + echo 'To unblock signed/notarized release packaging, provide or install:' + echo ' 1. Developer ID Application certificate' + echo ' 2. Developer ID Installer certificate' + echo ' 3. notarytool keychain profile name via ATLAS_NOTARY_PROFILE' + if [[ -z "$local_identity" ]]; then + echo + echo 'For stable local TCC-friendly builds without Apple release credentials, run:' + echo ' ./scripts/atlas/ensure-local-signing-identity.sh' + fi +fi + +exit $status diff --git a/scripts/atlas/smart-clean-manual-fixtures.sh b/scripts/atlas/smart-clean-manual-fixtures.sh new file mode 100755 index 0000000..71ac220 --- /dev/null +++ b/scripts/atlas/smart-clean-manual-fixtures.sh @@ -0,0 +1,69 @@ +#!/bin/bash +set -euo pipefail + +CACHE_ROOT="$HOME/Library/Caches/AtlasExecutionFixturesCache" +LOG_ROOT="$HOME/Library/Logs/AtlasExecutionFixturesLogs" +DERIVED_ROOT="$HOME/Library/Developer/Xcode/DerivedData/AtlasExecutionFixturesDerivedData" +PYCACHE_ROOT="$HOME/Library/Caches/AtlasExecutionFixturesPycache" + +create_blob() { + local path="$1" + local size_mb="$2" + mkdir -p "$(dirname "$path")" + if command -v mkfile >/dev/null 2>&1; then + mkfile -n "${size_mb}m" "$path" + else + dd if=/dev/zero of="$path" bs=1m count="$size_mb" status=none + fi +} + +print_status() { + local existing=false + for path in "$CACHE_ROOT" "$LOG_ROOT" "$DERIVED_ROOT" "$PYCACHE_ROOT"; do + if [[ -e "$path" ]]; then + existing=true + du -sh "$path" + find "$path" -maxdepth 3 -type f | sort + fi + done + if [[ "$existing" == false ]]; then + echo "No Smart Clean manual fixtures found." + fi +} + +create_fixtures() { + cleanup_fixtures >/dev/null 2>&1 || true + + create_blob "$CACHE_ROOT/cache-a.bin" 24 + create_blob "$CACHE_ROOT/cache-b.bin" 12 + create_blob "$LOG_ROOT/app.log" 8 + create_blob "$DERIVED_ROOT/Build/Logs/build-products.bin" 16 + mkdir -p "$PYCACHE_ROOT/project/__pycache__" + create_blob "$PYCACHE_ROOT/project/__pycache__/sample.cpython-312.pyc" 4 + + echo "Created Smart Clean manual fixtures:" + print_status + echo "" + echo "Note: bin/clean.sh --dry-run may aggregate these fixtures into higher-level roots such as ~/Library/Caches, ~/Library/Logs, or ~/Library/Developer/Xcode/DerivedData." +} + +cleanup_fixtures() { + rm -rf "$CACHE_ROOT" "$LOG_ROOT" "$DERIVED_ROOT" "$PYCACHE_ROOT" + echo "Removed Smart Clean manual fixtures." +} + +case "${1:-create}" in + create) + create_fixtures + ;; + status) + print_status + ;; + cleanup) + cleanup_fixtures + ;; + *) + echo "Usage: $0 [create|status|cleanup]" >&2 + exit 1 + ;; +esac diff --git a/scripts/atlas/ui-automation-preflight.sh b/scripts/atlas/ui-automation-preflight.sh new file mode 100755 index 0000000..a39aaff --- /dev/null +++ b/scripts/atlas/ui-automation-preflight.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -euo pipefail + +trusted=$(swift -e 'import ApplicationServices; print(AXIsProcessTrusted())' 2>/dev/null || echo false) + +echo "Atlas UI automation preflight" +echo "============================" +echo "Accessibility trusted for current process: $trusted" + +if [[ "$trusted" != "true" ]]; then + cat <<'MSG' +✗ UI automation is currently blocked by macOS Accessibility / automation permissions. + +To unblock local XCUITest on this machine: +1. Open System Settings +2. Privacy & Security -> Accessibility +3. Allow the terminal app you use to run `xcodebuild` (Terminal / iTerm / Warp / etc.) +4. Also allow Xcode if you run tests from Xcode directly +5. Re-run the minimal repro: + xcodebuild test -project Testing/XCUITestRepro/XCUITestRepro.xcodeproj -scheme XCUITestRepro -destination 'platform=macOS' +MSG + exit 1 +fi + +echo "✓ Current process is trusted for Accessibility APIs" diff --git a/scripts/atlas/verify-app-launch.sh b/scripts/atlas/verify-app-launch.sh new file mode 100755 index 0000000..cacaf92 --- /dev/null +++ b/scripts/atlas/verify-app-launch.sh @@ -0,0 +1,35 @@ +#!/bin/bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)" +APP_PATH="${APP_PATH:-$HOME/Applications/Atlas for Mac.app}" +BIN_PATH="$APP_PATH/Contents/MacOS/Atlas for Mac" +STATE_DIR="${STATE_DIR:-$ROOT_DIR/.build/atlas-launch-state}" + +if [[ ! -x "$BIN_PATH" ]]; then + echo "App binary not found: $BIN_PATH" >&2 + exit 1 +fi + +mkdir -p "$STATE_DIR" +ATLAS_STATE_DIR="$STATE_DIR" "$BIN_PATH" >/tmp/atlas-launch.log 2>&1 & +pid=$! + +cleanup() { + if kill -0 "$pid" >/dev/null 2>&1; then + kill "$pid" >/dev/null 2>&1 || true + wait "$pid" >/dev/null 2>&1 || true + fi +} +trap cleanup EXIT + +sleep 3 + +if ! kill -0 "$pid" >/dev/null 2>&1; then + echo "Atlas app exited immediately; see /tmp/atlas-launch.log" >&2 + cat /tmp/atlas-launch.log >&2 || true + exit 1 +fi + +echo "App launch smoke test succeeded" +echo "PID: $pid" diff --git a/scripts/atlas/verify-bundle-contents.sh b/scripts/atlas/verify-bundle-contents.sh new file mode 100755 index 0000000..064168d --- /dev/null +++ b/scripts/atlas/verify-bundle-contents.sh @@ -0,0 +1,56 @@ +#!/bin/bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)" +APP_PATH="${APP_PATH:-$ROOT_DIR/dist/native/Atlas for Mac.app}" +HELPER_PATH="$APP_PATH/Contents/Helpers/AtlasPrivilegedHelper" +XPC_PATH="$APP_PATH/Contents/XPCServices/AtlasWorkerXPC.xpc" +PLIST_PATH="$APP_PATH/Contents/Info.plist" +XPC_PLIST_PATH="$XPC_PATH/Contents/Info.plist" + +if [[ ! -d "$APP_PATH" ]]; then + echo "App bundle not found: $APP_PATH" >&2 + exit 1 +fi +if [[ ! -x "$HELPER_PATH" ]]; then + echo "Helper not found or not executable: $HELPER_PATH" >&2 + exit 1 +fi +if [[ ! -d "$XPC_PATH" ]]; then + echo "Embedded XPC service missing: $XPC_PATH" >&2 + exit 1 +fi +if [[ ! -f "$PLIST_PATH" ]]; then + echo "Missing Info.plist: $PLIST_PATH" >&2 + exit 1 +fi +if [[ ! -f "$XPC_PLIST_PATH" ]]; then + echo "Missing XPC Info.plist: $XPC_PLIST_PATH" >&2 + exit 1 +fi +if ! /usr/bin/codesign --verify --deep --strict "$APP_PATH" >/dev/null 2>&1; then + echo "App bundle failed codesign verification: $APP_PATH" >&2 + exit 1 +fi + +bundle_id=$(/usr/bin/defaults read "$PLIST_PATH" CFBundleIdentifier 2>/dev/null || true) +display_name=$(/usr/bin/defaults read "$PLIST_PATH" CFBundleDisplayName 2>/dev/null || true) +xpc_bundle_id=$(/usr/bin/defaults read "$XPC_PLIST_PATH" CFBundleIdentifier 2>/dev/null || true) + +if [[ "$bundle_id" != "com.atlasformac.app" ]]; then + echo "Unexpected bundle identifier: ${bundle_id:-}" >&2 + exit 1 +fi +if [[ "$display_name" != "Atlas for Mac" ]]; then + echo "Unexpected display name: ${display_name:-}" >&2 + exit 1 +fi +if [[ "$xpc_bundle_id" != "com.atlasformac.app.worker" ]]; then + echo "Unexpected XPC bundle identifier: ${xpc_bundle_id:-}" >&2 + exit 1 +fi + +echo "Bundle verification succeeded" +echo "App: $APP_PATH" +echo "Helper: $HELPER_PATH" +echo "XPC: $XPC_PATH" diff --git a/scripts/atlas/verify-dmg-install.sh b/scripts/atlas/verify-dmg-install.sh new file mode 100755 index 0000000..5cefcab --- /dev/null +++ b/scripts/atlas/verify-dmg-install.sh @@ -0,0 +1,69 @@ +#!/bin/bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)" +DMG_PATH="${DMG_PATH:-$ROOT_DIR/dist/native/Atlas-for-Mac.dmg}" +MOUNT_POINT="${MOUNT_POINT:-$ROOT_DIR/.build/atlas-dmg-verify/mount}" +INSTALL_ROOT="${INSTALL_ROOT:-$HOME}" +APP_NAME="Atlas for Mac.app" +SOURCE_APP_PATH="$MOUNT_POINT/$APP_NAME" +INSTALLED_APP_PATH="$INSTALL_ROOT/Applications/$APP_NAME" +INFO_PLIST="$INSTALLED_APP_PATH/Contents/Info.plist" +KEEP_INSTALLED_APP="${KEEP_INSTALLED_APP:-0}" + +cleanup() { + if mount | grep -q "on $MOUNT_POINT "; then + hdiutil detach "$MOUNT_POINT" -quiet || true + fi + if [[ "$KEEP_INSTALLED_APP" != "1" && -d "$INSTALLED_APP_PATH" ]]; then + python3 - "$INSTALLED_APP_PATH" <<'PY' +from pathlib import Path +import shutil, sys +app = Path(sys.argv[1]) +if app.exists(): + shutil.rmtree(app) +PY + fi +} +trap cleanup EXIT + +if [[ ! -f "$DMG_PATH" ]]; then + echo "DMG not found: $DMG_PATH" >&2 + exit 1 +fi + +python3 - "$MOUNT_POINT" <<'PY' +from pathlib import Path +import shutil, sys +mount_path = Path(sys.argv[1]) +if mount_path.exists(): + shutil.rmtree(mount_path) +mount_path.mkdir(parents=True, exist_ok=True) +PY + +mkdir -p "$INSTALL_ROOT/Applications" +hdiutil attach "$DMG_PATH" -mountpoint "$MOUNT_POINT" -nobrowse -quiet + +if [[ ! -d "$SOURCE_APP_PATH" ]]; then + echo "Mounted app not found at $SOURCE_APP_PATH" >&2 + exit 1 +fi + +python3 - "$SOURCE_APP_PATH" "$INSTALLED_APP_PATH" <<'PY' +from pathlib import Path +import shutil, sys +src = Path(sys.argv[1]) +dst = Path(sys.argv[2]) +if dst.exists(): + shutil.rmtree(dst) +shutil.copytree(src, dst, symlinks=True) +PY + +APP_DISPLAY_NAME=$(/usr/bin/defaults read "$INFO_PLIST" CFBundleDisplayName 2>/dev/null || echo "") +if [[ "$APP_DISPLAY_NAME" != "Atlas for Mac" ]]; then + echo "Unexpected installed app display name: ${APP_DISPLAY_NAME:-}" >&2 + exit 1 +fi + +echo "DMG install validation succeeded" +echo "Installed app path: $INSTALLED_APP_PATH" diff --git a/scripts/atlas/verify-installer.sh b/scripts/atlas/verify-installer.sh new file mode 100755 index 0000000..e751e5a --- /dev/null +++ b/scripts/atlas/verify-installer.sh @@ -0,0 +1,44 @@ +#!/bin/bash +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "$0")/../.." && pwd)" +PKG_PATH="${PKG_PATH:-$ROOT_DIR/dist/native/Atlas-for-Mac.pkg}" +INSTALL_ROOT="${INSTALL_ROOT:-$HOME}" +APP_PATH="$INSTALL_ROOT/Applications/Atlas for Mac.app" +INFO_PLIST="$APP_PATH/Contents/Info.plist" +KEEP_INSTALLED_APP="${KEEP_INSTALLED_APP:-0}" + +cleanup() { + if [[ "$KEEP_INSTALLED_APP" != "1" && -d "$APP_PATH" ]]; then + python3 - <<'PY' +from pathlib import Path +import shutil, os +app = Path(os.environ['APP_PATH']) +if app.exists(): + shutil.rmtree(app) +PY + fi +} +trap cleanup EXIT + +if [[ ! -f "$PKG_PATH" ]]; then + echo "Installer package not found: $PKG_PATH" >&2 + exit 1 +fi + +mkdir -p "$INSTALL_ROOT/Applications" +installer -allowUntrusted -pkg "$PKG_PATH" -target CurrentUserHomeDirectory >/dev/null + +if [[ ! -d "$APP_PATH" ]]; then + echo "Installed app not found at $APP_PATH" >&2 + exit 1 +fi + +APP_DISPLAY_NAME=$(/usr/bin/defaults read "$INFO_PLIST" CFBundleDisplayName 2>/dev/null || echo "") +if [[ "$APP_DISPLAY_NAME" != "Atlas for Mac" ]]; then + echo "Unexpected installed app display name: ${APP_DISPLAY_NAME:-}" >&2 + exit 1 +fi + +echo "Installer validation succeeded" +echo "Installed app path: $APP_PATH"