[analyze-memory] Add RAM symbol analysis by component (#13040)

J. Nick Koston
2026-01-07 08:24:44 -10:00
committed by GitHub
parent 8e40a55d5d
commit 39526e5360
7 changed files with 514 additions and 48 deletions

View File

@@ -1017,6 +1017,7 @@ def command_analyze_memory(args: ArgsProtocol, config: ConfigType) -> int:
idedata.objdump_path,
idedata.readelf_path,
external_components,
idedata=idedata,
)
analyzer.analyze()
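For orientation, a rough sketch of this call site with the new keyword threaded through; the class name, module path, leading ELF-path argument, and the report call are assumptions drawn from the rest of this diff, not lines in this hunk:

# Hypothetical wiring of the analyzer entry point; idedata and
# external_components come from the surrounding command_analyze_memory() scope.
from esphome.analyze_memory.cli import MemoryAnalyzerCLI  # assumed module path

analyzer = MemoryAnalyzerCLI(
    firmware_elf_path,        # assumed: path to the linked firmware ELF
    idedata.objdump_path,
    idedata.readelf_path,
    external_components,
    idedata=idedata,          # new in this commit: enables SDK .a analysis
)
analyzer.analyze()
print(analyzer.generate_report(detailed=True))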

View File

@@ -22,6 +22,7 @@ from .helpers import (
map_section_name,
parse_symbol_line,
)
from .toolchain import find_tool, run_tool
if TYPE_CHECKING:
from esphome.platformio_api import IDEData
@@ -53,6 +54,9 @@ _NAMESPACE_STD = "std::"
# Type alias for symbol information: (symbol_name, size, component)
SymbolInfoType = tuple[str, int, str]
# RAM sections - symbols in these sections consume RAM
RAM_SECTIONS = frozenset([".data", ".bss"])
@dataclass
class MemorySection:
@@ -60,7 +64,20 @@ class MemorySection:
name: str
symbols: list[SymbolInfoType] = field(default_factory=list)
total_size: int = 0
total_size: int = 0 # Actual section size from ELF headers
symbol_size: int = 0 # Sum of symbol sizes (may be less than total_size)
@dataclass
class SDKSymbol:
"""Represents a symbol from an SDK library that's not in the ELF symbol table."""
name: str
size: int
library: str # Name of the .a file (e.g., "libpp.a")
section: str # ".bss" or ".data"
is_local: bool # True if static/local symbol (lowercase in nm output)
demangled: str = "" # Demangled name (populated after analysis)
@dataclass
@@ -118,6 +135,10 @@ class MemoryAnalyzer:
self.objdump_path = objdump_path or "objdump"
self.readelf_path = readelf_path or "readelf"
self.external_components = external_components or set()
self._idedata = idedata
# Derive nm path from objdump path using shared toolchain utility
self.nm_path = find_tool("nm", self.objdump_path)
self.sections: dict[str, MemorySection] = {}
self.components: dict[str, ComponentMemory] = defaultdict(
@@ -128,15 +149,25 @@ class MemoryAnalyzer:
self._esphome_core_symbols: list[
tuple[str, str, int]
] = [] # Track core symbols
self._component_symbols: dict[str, list[tuple[str, str, int]]] = defaultdict(
# Track symbols for all components: (symbol_name, demangled, size, section)
self._component_symbols: dict[str, list[tuple[str, str, int, str]]] = (
defaultdict(list)
)
# Track RAM symbols separately for detailed analysis: (symbol_name, demangled, size, section)
self._ram_symbols: dict[str, list[tuple[str, str, int, str]]] = defaultdict(
list
) # Track symbols for all components
)
# Track ELF symbol names for SDK cross-reference
self._elf_symbol_names: set[str] = set()
# SDK symbols not in ELF (static/local symbols from closed-source libs)
self._sdk_symbols: list[SDKSymbol] = []
def analyze(self) -> dict[str, ComponentMemory]:
"""Analyze the ELF file and return component memory usage."""
self._parse_sections()
self._parse_symbols()
self._categorize_symbols()
self._analyze_sdk_libraries()
return dict(self.components)
def _parse_sections(self) -> None:
@@ -190,6 +221,8 @@ class MemoryAnalyzer:
continue
self.sections[section].symbols.append((name, size, ""))
self.sections[section].symbol_size += size
self._elf_symbol_names.add(name)
seen_addresses.add(address)
def _categorize_symbols(self) -> None:
@@ -233,8 +266,13 @@ class MemoryAnalyzer:
if size > 0:
demangled = self._demangle_symbol(symbol_name)
self._component_symbols[component].append(
(symbol_name, demangled, size)
(symbol_name, demangled, size, section_name)
)
# Track RAM symbols separately for detailed RAM analysis
if section_name in RAM_SECTIONS:
self._ram_symbols[component].append(
(symbol_name, demangled, size, section_name)
)
def _identify_component(self, symbol_name: str) -> str:
"""Identify which component a symbol belongs to."""
@@ -328,6 +366,247 @@ class MemoryAnalyzer:
return "Other Core"
def get_unattributed_ram(self) -> tuple[int, int, int]:
"""Get unattributed RAM sizes (SDK/framework overhead).
Returns:
Tuple of (unattributed_bss, unattributed_data, total_unattributed)
These are bytes in RAM sections that have no corresponding symbols.
"""
bss_section = self.sections.get(".bss")
data_section = self.sections.get(".data")
unattributed_bss = 0
unattributed_data = 0
if bss_section:
unattributed_bss = max(0, bss_section.total_size - bss_section.symbol_size)
if data_section:
unattributed_data = max(
0, data_section.total_size - data_section.symbol_size
)
return unattributed_bss, unattributed_data, unattributed_bss + unattributed_data
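Concretely, a worked example of the subtraction above, with invented figures:

# Invented numbers for illustration only: total_size comes from the ELF section
# headers, symbol_size is the sum of symbols attributed to that section.
bss_total, bss_symbol_sum = 27_648, 24_400
data_total, data_symbol_sum = 1_496, 1_496
unattributed_bss = max(0, bss_total - bss_symbol_sum)      # 3,248 B of anonymous .bss
unattributed_data = max(0, data_total - data_symbol_sum)   # 0 B
total_unattributed = unattributed_bss + unattributed_data  # 3,248 B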
def _find_sdk_library_dirs(self) -> list[Path]:
"""Find SDK library directories based on platform.
Returns:
List of paths to SDK library directories containing .a files.
"""
sdk_dirs: list[Path] = []
if self._idedata is None:
return sdk_dirs
# Get the CC path to determine the framework location
cc_path = getattr(self._idedata, "cc_path", None)
if not cc_path:
return sdk_dirs
cc_path = Path(cc_path)
# For ESP8266 Arduino framework
# CC is like: ~/.platformio/packages/toolchain-xtensa/bin/xtensa-lx106-elf-gcc
# SDK libs are in: ~/.platformio/packages/framework-arduinoespressif8266/tools/sdk/lib/
if "xtensa-lx106" in str(cc_path):
platformio_dir = cc_path.parent.parent.parent
esp8266_sdk = (
platformio_dir
/ "framework-arduinoespressif8266"
/ "tools"
/ "sdk"
/ "lib"
)
if esp8266_sdk.exists():
sdk_dirs.append(esp8266_sdk)
# Also check for NONOSDK subdirectories (closed-source libs)
sdk_dirs.extend(
subdir
for subdir in esp8266_sdk.iterdir()
if subdir.is_dir() and subdir.name.startswith("NONOSDK")
)
# For ESP32 IDF framework
# CC is like: ~/.platformio/packages/toolchain-xtensa-esp-elf/bin/xtensa-esp32-elf-gcc
# or: ~/.platformio/packages/toolchain-riscv32-esp/bin/riscv32-esp-elf-gcc
elif "xtensa-esp" in str(cc_path) or "riscv32-esp" in str(cc_path):
# Detect ESP32 variant from CC path or defines
variant = self._detect_esp32_variant()
if variant:
platformio_dir = cc_path.parent.parent.parent
espidf_dir = platformio_dir / "framework-espidf" / "components"
if espidf_dir.exists():
# Find all directories named after the variant that contain .a files
# This handles various ESP-IDF library layouts:
# - components/*/lib/<variant>/
# - components/*/<variant>/
# - components/*/lib/lib/<variant>/
# - components/*/*/lib_*/<variant>/
sdk_dirs.extend(
variant_dir
for variant_dir in espidf_dir.rglob(variant)
if variant_dir.is_dir() and any(variant_dir.glob("*.a"))
)
return sdk_dirs
def _detect_esp32_variant(self) -> str | None:
"""Detect ESP32 variant from idedata defines.
Returns:
Variant string like 'esp32', 'esp32s2', 'esp32c3', etc. or None.
"""
if self._idedata is None:
return None
defines = getattr(self._idedata, "defines", [])
if not defines:
return None
# ESPHome always adds USE_ESP32_VARIANT_xxx defines
variant_prefix = "USE_ESP32_VARIANT_"
for define in defines:
if define.startswith(variant_prefix):
# Extract variant name and convert to lowercase
# USE_ESP32_VARIANT_ESP32 -> esp32
# USE_ESP32_VARIANT_ESP32S3 -> esp32s3
return define[len(variant_prefix) :].lower()
return None
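A minimal sketch of the prefix stripping above, using a made-up defines list:

# Invented defines list; only the USE_ESP32_VARIANT_ prefix matters here.
defines = ["F_CPU=240000000L", "USE_ESP32_VARIANT_ESP32S3", "ESP_PLATFORM"]
variant_prefix = "USE_ESP32_VARIANT_"
variant = next(
    (d[len(variant_prefix):].lower() for d in defines if d.startswith(variant_prefix)),
    None,
)
assert variant == "esp32s3"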
def _parse_sdk_library(
self, lib_path: Path
) -> tuple[list[tuple[str, int, str, bool]], set[str]]:
"""Parse a single SDK library for symbols.
Args:
lib_path: Path to the .a library file
Returns:
Tuple of:
- List of BSS/DATA symbols: (symbol_name, size, section, is_local)
- Set of global BSS/DATA symbol names (for checking if RAM is linked)
"""
ram_symbols: list[tuple[str, int, str, bool]] = []
global_ram_symbols: set[str] = set()
result = run_tool([self.nm_path, "--size-sort", str(lib_path)], timeout=10)
if result is None:
return ram_symbols, global_ram_symbols
for line in result.stdout.splitlines():
parts = line.split()
if len(parts) < 3:
continue
try:
size = int(parts[0], 16)
sym_type = parts[1]
name = parts[2]
# Only collect BSS (b/B) and DATA (d/D) for RAM analysis
if sym_type in ("b", "B"):
section = ".bss"
is_local = sym_type == "b"
ram_symbols.append((name, size, section, is_local))
# Track global RAM symbols (B/D) for linking check
if sym_type == "B":
global_ram_symbols.add(name)
elif sym_type in ("d", "D"):
section = ".data"
is_local = sym_type == "d"
ram_symbols.append((name, size, section, is_local))
if sym_type == "D":
global_ram_symbols.add(name)
except (ValueError, IndexError):
continue
return ram_symbols, global_ram_symbols
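To make the type letters concrete, here is a fabricated nm --size-sort excerpt in the usual "<size> <type> <name>" layout and how the loop above classifies each line (symbol names are invented):

# Fabricated nm --size-sort output; names are illustrative, not real SDK symbols.
sample = """\
00000004 d s_log_level
00000008 D g_phy_config
00000010 b s_timer_lock
00000400 B g_wifi_nvs_buffer
0000002c T some_function
"""
for line in sample.splitlines():
    size_hex, sym_type, name = line.split()[:3]
    size = int(size_hex, 16)
    if sym_type in ("b", "B"):      # .bss: zero-initialized RAM
        print(f".bss  {'local' if sym_type == 'b' else 'global'} {name} ({size} B)")
    elif sym_type in ("d", "D"):    # .data: initialized RAM
        print(f".data {'local' if sym_type == 'd' else 'global'} {name} ({size} B)")
    # 'T'/'t' (code) and other types do not consume RAM and are ignored here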
def _analyze_sdk_libraries(self) -> None:
"""Analyze SDK libraries to find symbols not in the ELF.
This finds static/local symbols from closed-source SDK libraries
that consume RAM but don't appear in the final ELF symbol table.
Only includes symbols from libraries whose RAM is actually linked into the
build (at least one of the library's global BSS/DATA symbols appears in the ELF).
"""
sdk_dirs = self._find_sdk_library_dirs()
if not sdk_dirs:
_LOGGER.debug("No SDK library directories found")
return
_LOGGER.debug("Analyzing SDK libraries in %d directories", len(sdk_dirs))
# Track seen symbols to avoid duplicates from multiple SDK versions
seen_symbols: set[str] = set()
for sdk_dir in sdk_dirs:
for lib_path in sorted(sdk_dir.glob("*.a")):
lib_name = lib_path.name
ram_symbols, global_ram_symbols = self._parse_sdk_library(lib_path)
# Check if this library's RAM is actually linked by seeing if any
# of its global BSS/DATA symbols appear in the ELF
if not global_ram_symbols & self._elf_symbol_names:
# No RAM from this library is in the ELF - skip it
continue
for name, size, section, is_local in ram_symbols:
# Skip if already in ELF or already seen from another lib
if name in self._elf_symbol_names or name in seen_symbols:
continue
# Only track symbols with non-zero size
if size > 0:
self._sdk_symbols.append(
SDKSymbol(
name=name,
size=size,
library=lib_name,
section=section,
is_local=is_local,
)
)
seen_symbols.add(name)
# Demangle SDK symbols for better readability
if self._sdk_symbols:
sdk_names = [sym.name for sym in self._sdk_symbols]
demangled_map = batch_demangle(sdk_names, objdump_path=self.objdump_path)
for sym in self._sdk_symbols:
sym.demangled = demangled_map.get(sym.name, sym.name)
# Sort by size descending for reporting
self._sdk_symbols.sort(key=lambda s: s.size, reverse=True)
total_sdk_ram = sum(s.size for s in self._sdk_symbols)
_LOGGER.debug(
"Found %d SDK symbols not in ELF, totaling %d bytes",
len(self._sdk_symbols),
total_sdk_ram,
)
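The set intersection earlier in this method is the key filter; a tiny illustration with invented symbol names:

# Invented names: globals exported by hypothetical SDK archives vs. the ELF symbol table.
elf_symbol_names = {"app_main", "g_phy_config", "setup", "loop"}
libphy_globals = {"g_phy_config", "g_phy_cal_data"}
libnet80211_globals = {"g_ic_stub"}
print(bool(libphy_globals & elf_symbol_names))       # True  -> include its local symbols
print(bool(libnet80211_globals & elf_symbol_names))  # False -> library not linked, skip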
def get_sdk_ram_symbols(self) -> list[SDKSymbol]:
"""Get SDK symbols that consume RAM but aren't in the ELF symbol table.
Returns:
List of SDKSymbol objects sorted by size descending.
"""
return self._sdk_symbols
def get_sdk_ram_by_library(self) -> dict[str, list[SDKSymbol]]:
"""Get SDK RAM symbols grouped by library.
Returns:
Dictionary mapping library name to list of symbols.
"""
by_lib: dict[str, list[SDKSymbol]] = defaultdict(list)
for sym in self._sdk_symbols:
by_lib[sym.library].append(sym)
return dict(by_lib)
if __name__ == "__main__":
from .cli import main

View File

@@ -1,16 +1,24 @@
"""CLI interface for memory analysis with report generation."""
from __future__ import annotations
from collections import defaultdict
from collections.abc import Callable
import sys
from typing import TYPE_CHECKING
from . import (
_COMPONENT_API,
_COMPONENT_CORE,
_COMPONENT_PREFIX_ESPHOME,
_COMPONENT_PREFIX_EXTERNAL,
RAM_SECTIONS,
MemoryAnalyzer,
)
if TYPE_CHECKING:
from . import ComponentMemory
class MemoryAnalyzerCLI(MemoryAnalyzer):
"""Memory analyzer with CLI-specific report generation."""
@@ -19,6 +27,8 @@ class MemoryAnalyzerCLI(MemoryAnalyzer):
SYMBOL_SIZE_THRESHOLD: int = (
100 # Show symbols larger than this in detailed analysis
)
# Lower threshold for RAM symbols (RAM is more constrained)
RAM_SYMBOL_SIZE_THRESHOLD: int = 24
# Column width constants
COL_COMPONENT: int = 29
@@ -83,6 +93,60 @@ class MemoryAnalyzerCLI(MemoryAnalyzer):
COL_CORE_PERCENT,
)
def _add_section_header(self, lines: list[str], title: str) -> None:
"""Add a section header with title centered between separator lines."""
lines.append("")
lines.append("=" * self.TABLE_WIDTH)
lines.append(title.center(self.TABLE_WIDTH))
lines.append("=" * self.TABLE_WIDTH)
lines.append("")
def _add_top_consumers(
self,
lines: list[str],
title: str,
components: list[tuple[str, ComponentMemory]],
get_size: Callable[[ComponentMemory], int],
total: int,
memory_type: str,
limit: int = 25,
) -> None:
"""Add a formatted list of top memory consumers to the report.
Args:
lines: List of report lines to append the output to.
title: Section title to print before the list.
components: Sequence of (name, ComponentMemory) tuples to analyze.
get_size: Callable that takes a ComponentMemory and returns the
size in bytes to use for ranking and display.
total: Total size in bytes for computing percentage usage.
memory_type: Label for the memory region (e.g., "flash" or "RAM").
limit: Maximum number of components to include in the list.
"""
lines.append("")
lines.append(f"{title}:")
for i, (name, mem) in enumerate(components[:limit]):
size = get_size(mem)
if size > 0:
percentage = (size / total * 100) if total > 0 else 0
lines.append(
f"{i + 1}. {name} ({size:,} B) - {percentage:.1f}% of analyzed {memory_type}"
)
def _format_symbol_with_section(
self, demangled: str, size: int, section: str | None = None
) -> str:
"""Format a symbol entry, optionally adding a RAM section label.
If section is one of the RAM sections (.data or .bss), a label like
" [data]" or " [bss]" is appended. For non-RAM sections or when
section is None, no section label is added.
"""
section_label = ""
if section in RAM_SECTIONS:
section_label = f" [{section[1:]}]" # .data -> [data], .bss -> [bss]
return f"{demangled} ({size:,} B){section_label}"
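For example (symbol names and sizes invented):

# _format_symbol_with_section("esphome::wifi::WiFiComponent::scan_result_", 4096, ".bss")
#   -> "esphome::wifi::WiFiComponent::scan_result_ (4,096 B) [bss]"
# _format_symbol_with_section("esphome::Application::loop", 412)
#   -> "esphome::Application::loop (412 B)"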
def generate_report(self, detailed: bool = False) -> str:
"""Generate a formatted memory report."""
components = sorted(
@@ -123,43 +187,70 @@ class MemoryAnalyzerCLI(MemoryAnalyzer):
f"{total_flash:>{self.COL_TOTAL_FLASH - 2},} B | {total_ram:>{self.COL_TOTAL_RAM - 2},} B"
)
# Top consumers
lines.append("")
lines.append("Top Flash Consumers:")
for i, (name, mem) in enumerate(components[:25]):
if mem.flash_total > 0:
percentage = (
(mem.flash_total / total_flash * 100) if total_flash > 0 else 0
)
lines.append(
f"{i + 1}. {name} ({mem.flash_total:,} B) - {percentage:.1f}% of analyzed flash"
)
lines.append("")
lines.append("Top RAM Consumers:")
ram_components = sorted(components, key=lambda x: x[1].ram_total, reverse=True)
for i, (name, mem) in enumerate(ram_components[:25]):
if mem.ram_total > 0:
percentage = (mem.ram_total / total_ram * 100) if total_ram > 0 else 0
lines.append(
f"{i + 1}. {name} ({mem.ram_total:,} B) - {percentage:.1f}% of analyzed RAM"
)
lines.append("")
lines.append(
"Note: This analysis covers symbols in the ELF file. Some runtime allocations may not be included."
# Show unattributed RAM (SDK/framework overhead)
unattributed_bss, unattributed_data, unattributed_total = (
self.get_unattributed_ram()
)
if unattributed_total > 0:
lines.append("")
lines.append(
f"Unattributed RAM: {unattributed_total:,} B (SDK/framework overhead)"
)
if unattributed_bss > 0 and unattributed_data > 0:
lines.append(
f" .bss: {unattributed_bss:,} B | .data: {unattributed_data:,} B"
)
# Show SDK symbol breakdown if available
sdk_by_lib = self.get_sdk_ram_by_library()
if sdk_by_lib:
lines.append("")
lines.append("SDK library breakdown (static symbols not in ELF):")
# Sort libraries by total size
lib_totals = [
(lib, sum(s.size for s in syms), syms)
for lib, syms in sdk_by_lib.items()
]
lib_totals.sort(key=lambda x: x[1], reverse=True)
for lib_name, lib_total, syms in lib_totals:
if lib_total == 0:
continue
lines.append(f" {lib_name}: {lib_total:,} B")
# Show top symbols from this library
for sym in sorted(syms, key=lambda s: s.size, reverse=True)[:3]:
section_label = sym.section.lstrip(".")
# Use demangled name (falls back to original if not demangled)
display_name = sym.demangled or sym.name
if len(display_name) > 50:
display_name = f"{display_name[:47]}..."
lines.append(
f" {sym.size:>6,} B [{section_label}] {display_name}"
)
# Top consumers
self._add_top_consumers(
lines,
"Top Flash Consumers",
components,
lambda m: m.flash_total,
total_flash,
"flash",
)
ram_components = sorted(components, key=lambda x: x[1].ram_total, reverse=True)
self._add_top_consumers(
lines,
"Top RAM Consumers",
ram_components,
lambda m: m.ram_total,
total_ram,
"RAM",
)
lines.append("=" * self.TABLE_WIDTH)
# Add ESPHome core detailed analysis if there are core symbols
if self._esphome_core_symbols:
lines.append("")
lines.append("=" * self.TABLE_WIDTH)
lines.append(
f"{_COMPONENT_CORE} Detailed Analysis".center(self.TABLE_WIDTH)
)
lines.append("=" * self.TABLE_WIDTH)
lines.append("")
self._add_section_header(lines, f"{_COMPONENT_CORE} Detailed Analysis")
# Group core symbols by subcategory
core_subcategories: dict[str, list[tuple[str, str, int]]] = defaultdict(
@@ -211,7 +302,11 @@ class MemoryAnalyzerCLI(MemoryAnalyzer):
f"{_COMPONENT_CORE} Symbols > {self.SYMBOL_SIZE_THRESHOLD} B ({len(large_core_symbols)} symbols):"
)
for i, (symbol, demangled, size) in enumerate(large_core_symbols):
lines.append(f"{i + 1}. {demangled} ({size:,} B)")
# Core symbols only track (symbol, demangled, size) without section info,
# so we don't show section labels here
lines.append(
f"{i + 1}. {self._format_symbol_with_section(demangled, size)}"
)
lines.append("=" * self.TABLE_WIDTH)
@@ -267,11 +362,7 @@ class MemoryAnalyzerCLI(MemoryAnalyzer):
for comp_name, comp_mem in components_to_analyze:
if not (comp_symbols := self._component_symbols.get(comp_name, [])):
continue
lines.append("")
lines.append("=" * self.TABLE_WIDTH)
lines.append(f"{comp_name} Detailed Analysis".center(self.TABLE_WIDTH))
lines.append("=" * self.TABLE_WIDTH)
lines.append("")
self._add_section_header(lines, f"{comp_name} Detailed Analysis")
# Sort symbols by size
sorted_symbols = sorted(comp_symbols, key=lambda x: x[2], reverse=True)
@@ -282,19 +373,69 @@ class MemoryAnalyzerCLI(MemoryAnalyzer):
# Show all symbols above threshold for better visibility
large_symbols = [
(sym, dem, size)
for sym, dem, size in sorted_symbols
(sym, dem, size, sec)
for sym, dem, size, sec in sorted_symbols
if size > self.SYMBOL_SIZE_THRESHOLD
]
lines.append(
f"{comp_name} Symbols > {self.SYMBOL_SIZE_THRESHOLD} B ({len(large_symbols)} symbols):"
)
for i, (symbol, demangled, size) in enumerate(large_symbols):
lines.append(f"{i + 1}. {demangled} ({size:,} B)")
for i, (symbol, demangled, size, section) in enumerate(large_symbols):
lines.append(
f"{i + 1}. {self._format_symbol_with_section(demangled, size, section)}"
)
lines.append("=" * self.TABLE_WIDTH)
# Detailed RAM analysis by component (at end, before RAM strings analysis)
self._add_section_header(lines, "RAM Symbol Analysis by Component")
# Show top 15 RAM consumers with their large symbols
for name, mem in ram_components[:15]:
if mem.ram_total == 0:
continue
ram_syms = self._ram_symbols.get(name, [])
if not ram_syms:
continue
# Sort by size descending
sorted_ram_syms = sorted(ram_syms, key=lambda x: x[2], reverse=True)
large_ram_syms = [
s for s in sorted_ram_syms if s[2] > self.RAM_SYMBOL_SIZE_THRESHOLD
]
lines.append(f"{name} ({mem.ram_total:,} B total RAM):")
# Show breakdown by section type
data_size = sum(s[2] for s in ram_syms if s[3] == ".data")
bss_size = sum(s[2] for s in ram_syms if s[3] == ".bss")
lines.append(f" .data (initialized): {data_size:,} B")
lines.append(f" .bss (uninitialized): {bss_size:,} B")
if large_ram_syms:
lines.append(
f" Symbols > {self.RAM_SYMBOL_SIZE_THRESHOLD} B ({len(large_ram_syms)}):"
)
for symbol, demangled, size, section in large_ram_syms[:10]:
# Format section label consistently by stripping leading dot
section_label = section.lstrip(".") if section else ""
# Truncate very long demangled names and add an ellipsis
demangled_display = (
f"{demangled[:70]}..." if len(demangled) > 70 else demangled
)
lines.append(
f" {size:>6,} B [{section_label}] {demangled_display}"
)
if len(large_ram_syms) > 10:
lines.append(f" ... and {len(large_ram_syms) - 10} more")
lines.append("")
lines.append(
"Note: This analysis covers symbols in the ELF file. Some runtime allocations may not be included."
)
lines.append("=" * self.TABLE_WIDTH)
return "\n".join(lines)
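Put together, the new section renders roughly as follows; component names, column widths, and byte counts are all invented for illustration and follow the format strings above:

================================================================================
                       RAM Symbol Analysis by Component
================================================================================

wifi (5,432 B total RAM):
  .data (initialized): 312 B
  .bss (uninitialized): 5,120 B
  Symbols > 24 B (3):
     4,096 B [bss] esphome::wifi::WiFiComponent::scan_result_
       768 B [bss] esphome::wifi::WiFiComponent::saved_ap_
       256 B [data] esphome::wifi::default_config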
def dump_uncategorized_symbols(self, output_file: str | None = None) -> None:

View File

@@ -7,11 +7,13 @@ ESPHOME_COMPONENT_PATTERN = re.compile(r"esphome::([a-zA-Z0-9_]+)::")
# Section mapping for ELF file sections
# Maps standard section names to their various platform-specific variants
# Note: Order matters! More specific patterns (.bss) must come before general ones (.dram)
# because ESP-IDF uses names like ".dram0.bss" which would match ".dram" otherwise
SECTION_MAPPING = {
".text": frozenset([".text", ".iram"]),
".rodata": frozenset([".rodata"]),
".bss": frozenset([".bss"]), # Must be before .data to catch ".dram0.bss"
".data": frozenset([".data", ".dram"]),
".bss": frozenset([".bss"]),
}
# Section to ComponentMemory attribute mapping

View File

@@ -5,6 +5,10 @@ from __future__ import annotations
import logging
from pathlib import Path
import subprocess
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from collections.abc import Sequence
_LOGGER = logging.getLogger(__name__)
@@ -55,3 +59,35 @@ def find_tool(
_LOGGER.warning("Could not find %s tool", tool_name)
return None
def run_tool(
cmd: Sequence[str],
timeout: int = 30,
) -> subprocess.CompletedProcess[str] | None:
"""Run a toolchain command and return the result.
Args:
cmd: Command and arguments to run
timeout: Timeout in seconds
Returns:
CompletedProcess on success, None on failure
"""
try:
return subprocess.run(
cmd,
capture_output=True,
text=True,
timeout=timeout,
check=False,
)
except subprocess.TimeoutExpired:
_LOGGER.warning("Command timed out: %s", " ".join(cmd))
return None
except FileNotFoundError:
_LOGGER.warning("Command not found: %s", cmd[0])
return None
except OSError as e:
_LOGGER.warning("Failed to run command %s: %s", cmd[0], e)
return None
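A quick usage example (path invented); note the helper returns None instead of raising on timeout or missing tools:

result = run_tool(["nm", "--size-sort", "/path/to/libpp.a"], timeout=10)
if result is not None and result.returncode == 0:
    print(result.stdout.splitlines()[:5])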

View File

@@ -420,3 +420,8 @@ class IDEData:
if path.endswith(".exe")
else f"{path[:-3]}readelf"
)
@property
def defines(self) -> list[str]:
"""Return the list of preprocessor defines from idedata."""
return self.raw.get("defines", [])
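For reference, a sketch of the raw idedata shape this property reads; the values are invented and the IDEData construction is assumed:

# Invented example of the PlatformIO idedata dict backing this property.
raw = {"defines": ["USE_ESP32_VARIANT_ESP32C3", "ESPHOME_LOG_LEVEL=4"]}
# IDEData(raw).defines -> ["USE_ESP32_VARIANT_ESP32C3", "ESPHOME_LOG_LEVEL=4"]
# IDEData with no "defines" key falls back to an empty list.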

View File

@@ -2478,6 +2478,7 @@ def test_command_analyze_memory_success(
"/path/to/objdump",
"/path/to/readelf",
set(), # No external components
idedata=mock_get_idedata.return_value,
)
# Verify analysis was run
@@ -2547,6 +2548,7 @@ def test_command_analyze_memory_with_external_components(
"/path/to/objdump",
"/path/to/readelf",
{"my_custom_component"}, # External component detected
idedata=mock_get_idedata.return_value,
)