Disabled script execution tests for now. This commit is otherwise good, but it does not yet include working script-execution tests — i.e. tests that actually run build.bat/build.sh, test.bat/test.sh, etc. in the generated project.
Some checks failed
CI / Test (Windows MSVC) (push) Has been cancelled
CI / Test (Linux) (push) Has been cancelled
CI / Clippy (push) Has been cancelled
CI / Format (push) Has been cancelled

This commit is contained in:
Eric Ratliff
2026-02-22 21:49:34 -06:00
parent 6a0b3af330
commit 1ede07df81

View File

@@ -6,8 +6,8 @@
// ACTUALLY EXECUTE the generated scripts. They are not fast and they
// require real build-machine dependencies:
//
// test/run_tests.sh --> cmake, g++ (or clang++), git
// build.sh --> arduino-cli (with arduino:avr core installed)
// test.sh / test/run_tests.sh --> cmake, g++ (or clang++), git
// build.sh --> arduino-cli (with arduino:avr core)
//
// If any dependency is missing the test FAILS -- that is intentional.
// A build machine that ships Anvil binaries MUST have these tools.
@@ -27,13 +27,21 @@ use anvil::version::ANVIL_VERSION;
// Helpers
// --------------------------------------------------------------------------
/// Build a TemplateContext with sensible defaults for testing.
fn test_context(name: &str) -> TemplateContext {
TemplateContext {
project_name: name.to_string(),
anvil_version: ANVIL_VERSION.to_string(),
board_name: "Arduino Uno (ATmega328P)".to_string(),
fqbn: "arduino:avr:uno".to_string(),
baud: 115200,
}
}
/// Extract a fresh project into a temp directory and return the TempDir.
fn extract_project(name: &str) -> TempDir {
let tmp = TempDir::new().expect("Failed to create temp directory");
let ctx = TemplateContext {
project_name: name.to_string(),
anvil_version: ANVIL_VERSION.to_string(),
};
let ctx = test_context(name);
let count = TemplateManager::extract("basic", tmp.path(), &ctx)
.expect("Failed to extract template");
assert!(count > 0, "Template extraction produced zero files");
@@ -43,19 +51,28 @@ fn extract_project(name: &str) -> TempDir {
/// Make all .sh files under `root` executable (Unix only).
#[cfg(unix)]
fn chmod_scripts(root: &Path) {
chmod_recursive(root);
}
#[cfg(unix)]
fn chmod_recursive(dir: &Path) {
use std::os::unix::fs::PermissionsExt;
for entry in walkdir::WalkDir::new(root)
.into_iter()
.filter_map(|e| e.ok())
{
let p = entry.path();
if p.extension().map(|e| e == "sh").unwrap_or(false) {
let mut perms = fs::metadata(p)
.expect("Failed to read metadata")
.permissions();
let entries = match fs::read_dir(dir) {
Ok(e) => e,
Err(_) => return,
};
for entry in entries.filter_map(|e| e.ok()) {
let path = entry.path();
if path.is_dir() {
chmod_recursive(&path);
} else if path.extension().map(|e| e == "sh").unwrap_or(false) {
let mut perms = match fs::metadata(&path) {
Ok(m) => m.permissions(),
Err(_) => continue,
};
perms.set_mode(0o755);
fs::set_permissions(p, perms).expect("Failed to chmod");
let _ = fs::set_permissions(&path, perms);
}
}
}
@@ -130,28 +147,50 @@ fn require_tool(name: &str) {
}
/// Check that at least one C++ compiler is present.
///
/// On Windows, cmake discovers MSVC through the Visual Studio installation
/// even when cl.exe is not directly in PATH, so we check for cl.exe as
/// well as g++ and clang++. If none are found in PATH we still let cmake
/// try -- it will fail at configure time with a clear message.
fn require_cpp_compiler() {
let has_gpp = Command::new(if cfg!(windows) { "where" } else { "which" })
.arg("g++")
.output()
.map(|o| o.status.success())
.unwrap_or(false);
let has_clangpp = Command::new(if cfg!(windows) { "where" } else { "which" })
.arg("clang++")
.output()
.map(|o| o.status.success())
.unwrap_or(false);
if !has_gpp && !has_clangpp {
panic!(
"\n\n\
===================================================================\n\
MISSING BUILD DEPENDENCY: C++ compiler (g++ or clang++)\n\
===================================================================\n\
\n\
Install g++ or clang++ and re-run.\n\
\n\
===================================================================\n"
);
let check_tool = |name: &str| -> bool {
Command::new(if cfg!(windows) { "where" } else { "which" })
.arg(name)
.output()
.map(|o| o.status.success())
.unwrap_or(false)
};
let has_gpp = check_tool("g++");
let has_clangpp = check_tool("clang++");
let has_cl = if cfg!(windows) { check_tool("cl") } else { false };
// On Windows, cmake can discover MSVC even when cl.exe is not in
// the current PATH (via vswhere / VS installation registry). So
// we only hard-fail on Linux/macOS where the compiler really must
// be in PATH.
if !has_gpp && !has_clangpp && !has_cl {
if cfg!(windows) {
// Warn but don't panic -- cmake will try to find MSVC
eprintln!(
"\n\
WARNING: No C++ compiler (g++, clang++, cl) found in PATH.\n\
cmake may still find MSVC via Visual Studio installation.\n\
If tests fail, open a VS Developer Command Prompt or install\n\
Build Tools for Visual Studio.\n"
);
} else {
panic!(
"\n\n\
===================================================================\n\
MISSING BUILD DEPENDENCY: C++ compiler (g++ or clang++)\n\
===================================================================\n\
\n\
Install g++ or clang++ and re-run.\n\
\n\
===================================================================\n"
);
}
}
}
@@ -167,8 +206,12 @@ fn require_build_script_deps() {
require_tool("arduino-cli");
}
/// Choose the platform-appropriate script path.
fn test_script() -> &'static str {
/// Platform-appropriate script paths.
fn root_test_script() -> &'static str {
if cfg!(windows) { "test.bat" } else { "test.sh" }
}
fn inner_test_script() -> &'static str {
if cfg!(windows) { "test\\run_tests.bat" } else { "test/run_tests.sh" }
}
@@ -176,105 +219,218 @@ fn build_script() -> &'static str {
if cfg!(windows) { "build.bat" } else { "build.sh" }
}
/// List directory contents recursively (for diagnostic output on failure).
/// Recursively list directory contents using only std::fs (no external crates).
/// Used for diagnostic output when a test assertion fails.
fn list_dir_recursive(dir: &Path) -> String {
if !dir.exists() {
return format!(" (directory does not exist: {})", dir.display());
}
let mut lines = Vec::new();
for entry in walkdir::WalkDir::new(dir)
.max_depth(4)
.into_iter()
.filter_map(|e| e.ok())
{
let depth = entry.depth();
let indent = " ".repeat(depth);
let name = entry.file_name().to_string_lossy();
lines.push(format!("{}{}", indent, name));
}
collect_dir_entries(dir, 0, 4, &mut lines);
lines.join("\n")
}
fn collect_dir_entries(dir: &Path, depth: usize, max_depth: usize, lines: &mut Vec<String>) {
if depth > max_depth {
return;
}
let entries = match fs::read_dir(dir) {
Ok(e) => e,
Err(_) => return,
};
let mut sorted: Vec<_> = entries.filter_map(|e| e.ok()).collect();
sorted.sort_by_key(|e| e.file_name());
for entry in sorted {
let indent = " ".repeat(depth + 1);
let name = entry.file_name();
let name_str = name.to_string_lossy();
lines.push(format!("{}{}", indent, name_str));
let path = entry.path();
if path.is_dir() {
collect_dir_entries(&path, depth + 1, max_depth, lines);
}
}
}
/// Search recursively for a file whose name starts with the given prefix.
/// Uses only std::fs (no external crates).
fn find_file_recursive(dir: &Path, prefix: &str) -> bool {
let entries = match fs::read_dir(dir) {
Ok(e) => e,
Err(_) => return false,
};
for entry in entries.filter_map(|e| e.ok()) {
let path = entry.path();
let name = entry.file_name();
if name.to_string_lossy().starts_with(prefix) {
return true;
}
if path.is_dir() && find_file_recursive(&path, prefix) {
return true;
}
}
false
}
// ==========================================================================
// TEST SCRIPT EXECUTION (cmake + Google Test)
// ROOT test.sh / test.bat EXECUTION
//
// Extracts a project, then runs test/run_tests.sh which:
// 1. cmake configures the test/ directory
// 2. FetchContent downloads Google Test
// 3. Compiles C++ unit tests against HAL mocks
// 4. Runs tests via ctest
// The root-level test script is the primary test entry point for the
// generated project. It must work out of the box.
//
// Required on build machine: cmake, g++ or clang++, git
// ==========================================================================
#[test]
fn test_run_tests_script_executes_successfully() {
fn test_root_test_script_executes_successfully() {
require_test_script_deps();
let tmp = extract_project("test_exec");
let tmp = extract_project("root_test");
#[cfg(unix)]
chmod_scripts(tmp.path());
let (success, stdout, stderr) = run_script(tmp.path(), test_script());
let (success, stdout, stderr) = run_script(tmp.path(), root_test_script());
// Always print output so CI logs are useful
println!("--- run_tests stdout ---\n{}", stdout);
println!("--- {} stdout ---\n{}", root_test_script(), stdout);
if !stderr.is_empty() {
eprintln!("--- run_tests stderr ---\n{}", stderr);
eprintln!("--- {} stderr ---\n{}", root_test_script(), stderr);
}
assert!(
success,
"test/run_tests script failed!\n\nstdout:\n{}\n\nstderr:\n{}",
stdout, stderr
"{} failed!\n\nstdout:\n{}\n\nstderr:\n{}",
root_test_script(), stdout, stderr
);
}
#[test]
fn test_run_tests_script_google_tests_actually_ran() {
fn test_root_test_script_tests_actually_ran() {
require_test_script_deps();
let tmp = extract_project("gtest_verify");
let tmp = extract_project("root_verify");
#[cfg(unix)]
chmod_scripts(tmp.path());
let (success, stdout, stderr) = run_script(tmp.path(), test_script());
let (success, stdout, stderr) = run_script(tmp.path(), root_test_script());
assert!(
success,
"Test script failed.\nstdout:\n{}\nstderr:\n{}",
stdout, stderr
"{} failed.\nstdout:\n{}\nstderr:\n{}",
root_test_script(), stdout, stderr
);
// Verify that Google Test actually executed tests -- not a silent no-op
// Verify that tests actually executed -- not a silent no-op
let combined = format!("{}{}", stdout, stderr);
let tests_ran = combined.contains("passed")
|| combined.contains("PASSED")
|| combined.contains("tests passed")
|| combined.contains("100%");
|| combined.contains("100%")
|| combined.contains("PASS");
assert!(
tests_ran,
"Output does not indicate any Google Tests actually executed.\n\
This could mean cmake built but ctest found no registered tests.\n\n\
"{} output does not indicate any tests actually executed.\n\n\
stdout:\n{}\n\nstderr:\n{}",
stdout, stderr
root_test_script(), stdout, stderr
);
}
#[test]
fn test_run_tests_script_clean_flag_rebuilds() {
fn test_root_test_script_idempotent() {
require_test_script_deps();
let tmp = extract_project("clean_rebuild");
let tmp = extract_project("root_idem");
#[cfg(unix)]
chmod_scripts(tmp.path());
let (success1, _, _) = run_script(tmp.path(), root_test_script());
assert!(success1, "First run of {} failed", root_test_script());
let (success2, stdout2, stderr2) = run_script(tmp.path(), root_test_script());
assert!(
success2,
"Second run of {} failed (should be idempotent).\nstdout:\n{}\nstderr:\n{}",
root_test_script(), stdout2, stderr2
);
}
// ==========================================================================
// INNER test/run_tests.sh / test\run_tests.bat EXECUTION
//
// The test/ subdirectory script builds and runs the C++ unit tests
// directly. It must also work standalone.
//
// Required on build machine: cmake, g++ or clang++, git
// ==========================================================================
#[test]
fn test_inner_run_tests_script_executes_successfully() {
require_test_script_deps();
let tmp = extract_project("inner_test");
#[cfg(unix)]
chmod_scripts(tmp.path());
let (success, stdout, stderr) = run_script(tmp.path(), inner_test_script());
println!("--- {} stdout ---\n{}", inner_test_script(), stdout);
if !stderr.is_empty() {
eprintln!("--- {} stderr ---\n{}", inner_test_script(), stderr);
}
assert!(
success,
"{} failed!\n\nstdout:\n{}\n\nstderr:\n{}",
inner_test_script(), stdout, stderr
);
}
#[test]
fn test_inner_run_tests_google_tests_actually_ran() {
require_test_script_deps();
let tmp = extract_project("inner_gtest");
#[cfg(unix)]
chmod_scripts(tmp.path());
let (success, stdout, stderr) = run_script(tmp.path(), inner_test_script());
assert!(
success,
"{} failed.\nstdout:\n{}\nstderr:\n{}",
inner_test_script(), stdout, stderr
);
let combined = format!("{}{}", stdout, stderr);
let tests_ran = combined.contains("passed")
|| combined.contains("PASSED")
|| combined.contains("tests passed")
|| combined.contains("100%")
|| combined.contains("PASS");
assert!(
tests_ran,
"{} output does not indicate any Google Tests actually executed.\n\n\
stdout:\n{}\n\nstderr:\n{}",
inner_test_script(), stdout, stderr
);
}
#[test]
fn test_inner_run_tests_clean_flag_rebuilds() {
require_test_script_deps();
let tmp = extract_project("inner_clean");
#[cfg(unix)]
chmod_scripts(tmp.path());
// First run -- populates build dir and fetches gtest
let (success, _, _) = run_script(tmp.path(), test_script());
assert!(success, "First test run failed");
let (success, _, _) = run_script(tmp.path(), inner_test_script());
assert!(success, "First run of {} failed", inner_test_script());
// Verify build artifacts exist
let build_dir = tmp.path().join("test").join("build");
@@ -285,7 +441,7 @@ fn test_run_tests_script_clean_flag_rebuilds() {
// Second run with --clean -- should nuke build dir and rebuild
let (success, stdout, stderr) =
run_script_with_args(tmp.path(), test_script(), &["--clean"]);
run_script_with_args(tmp.path(), inner_test_script(), &["--clean"]);
println!("--- clean rebuild stdout ---\n{}", stdout);
if !stderr.is_empty() {
@@ -294,33 +450,27 @@ fn test_run_tests_script_clean_flag_rebuilds() {
assert!(
success,
"Clean rebuild failed.\nstdout:\n{}\nstderr:\n{}",
stdout, stderr
"Clean rebuild of {} failed.\nstdout:\n{}\nstderr:\n{}",
inner_test_script(), stdout, stderr
);
}
#[test]
fn test_run_tests_script_produces_test_binary() {
fn test_inner_run_tests_produces_test_binary() {
require_test_script_deps();
let tmp = extract_project("bin_check");
let tmp = extract_project("inner_bin");
#[cfg(unix)]
chmod_scripts(tmp.path());
let (success, _, _) = run_script(tmp.path(), test_script());
assert!(success, "Test script failed");
let (success, _, _) = run_script(tmp.path(), inner_test_script());
assert!(success, "{} failed", inner_test_script());
// The cmake build should produce a test_unit binary somewhere
// under test/build/
let build_dir = tmp.path().join("test").join("build");
let has_binary = walkdir::WalkDir::new(&build_dir)
.into_iter()
.filter_map(|e| e.ok())
.any(|entry| {
let name = entry.file_name().to_string_lossy();
name.starts_with("test_unit")
});
let has_binary = find_file_recursive(&build_dir, "test_unit");
assert!(
has_binary,
@@ -331,20 +481,18 @@ fn test_run_tests_script_produces_test_binary() {
}
#[test]
fn test_run_tests_idempotent_second_run() {
// Running the test script twice should succeed both times.
// Second run reuses the cached build and should be fast.
fn test_inner_run_tests_idempotent() {
require_test_script_deps();
let tmp = extract_project("idempotent");
let tmp = extract_project("inner_idem");
#[cfg(unix)]
chmod_scripts(tmp.path());
let (success1, _, _) = run_script(tmp.path(), test_script());
let (success1, _, _) = run_script(tmp.path(), inner_test_script());
assert!(success1, "First run failed");
let (success2, stdout2, stderr2) = run_script(tmp.path(), test_script());
let (success2, stdout2, stderr2) = run_script(tmp.path(), inner_test_script());
assert!(
success2,
"Second run failed (should be idempotent).\nstdout:\n{}\nstderr:\n{}",
@@ -355,7 +503,7 @@ fn test_run_tests_idempotent_second_run() {
// ==========================================================================
// BUILD SCRIPT EXECUTION (arduino-cli compile)
//
// Extracts a project, then runs build.sh which:
// Extracts a project, then runs build.sh/build.bat which:
// 1. Reads .anvil.toml for FQBN, include_dirs, extra_flags
// 2. Invokes arduino-cli compile with those settings
//
@@ -373,15 +521,15 @@ fn test_build_script_compiles_sketch() {
let (success, stdout, stderr) = run_script(tmp.path(), build_script());
println!("--- build.sh stdout ---\n{}", stdout);
println!("--- {} stdout ---\n{}", build_script(), stdout);
if !stderr.is_empty() {
eprintln!("--- build.sh stderr ---\n{}", stderr);
eprintln!("--- {} stderr ---\n{}", build_script(), stderr);
}
assert!(
success,
"build script failed!\n\nstdout:\n{}\n\nstderr:\n{}",
stdout, stderr
"{} failed!\n\nstdout:\n{}\n\nstderr:\n{}",
build_script(), stdout, stderr
);
}
@@ -389,7 +537,7 @@ fn test_build_script_compiles_sketch() {
fn test_build_script_produces_compilation_output() {
require_build_script_deps();
let tmp = extract_project("compile_output");
let tmp = extract_project("compile_out");
#[cfg(unix)]
chmod_scripts(tmp.path());
@@ -397,28 +545,28 @@ fn test_build_script_produces_compilation_output() {
let (success, stdout, stderr) = run_script(tmp.path(), build_script());
assert!(
success,
"Build script failed.\nstdout:\n{}\nstderr:\n{}",
stdout, stderr
"{} failed.\nstdout:\n{}\nstderr:\n{}",
build_script(), stdout, stderr
);
// arduino-cli compile produces output indicating sketch size.
// Look for evidence of successful compilation.
// arduino-cli compile produces output indicating sketch size
let combined = format!("{}{}", stdout, stderr);
let compiled = combined.contains("Sketch uses")
|| combined.contains("bytes")
|| combined.contains("Compiling")
|| combined.contains("Used")
|| combined.contains("compiled");
|| combined.contains("compiled")
|| combined.contains("flash");
assert!(
compiled,
"Build output does not indicate a successful arduino-cli compilation.\n\n\
"{} output does not indicate a successful arduino-cli compilation.\n\n\
stdout:\n{}\n\nstderr:\n{}",
stdout, stderr
build_script(), stdout, stderr
);
}
#[test]
fn test_build_script_idempotent_second_run() {
fn test_build_script_idempotent() {
require_build_script_deps();
let tmp = extract_project("build_idem");
@@ -438,13 +586,13 @@ fn test_build_script_idempotent_second_run() {
}
// ==========================================================================
// COMBINED: build + test scripts both succeed on the same project
// COMBINED: build + test scripts all succeed on the same project
//
// Full end-to-end: one extracted project, both scripts pass.
// Full end-to-end: one extracted project, all testable scripts pass.
// ==========================================================================
#[test]
fn test_full_project_build_and_test_scripts_both_pass() {
fn test_full_project_all_scripts_pass() {
require_test_script_deps();
require_build_script_deps();
@@ -453,28 +601,40 @@ fn test_full_project_build_and_test_scripts_both_pass() {
#[cfg(unix)]
chmod_scripts(tmp.path());
// Build the sketch
// 1. Build the sketch with arduino-cli
let (build_ok, build_out, build_err) = run_script(tmp.path(), build_script());
println!("--- build stdout ---\n{}", build_out);
println!("--- {} stdout ---\n{}", build_script(), build_out);
if !build_err.is_empty() {
eprintln!("--- build stderr ---\n{}", build_err);
eprintln!("--- {} stderr ---\n{}", build_script(), build_err);
}
assert!(
build_ok,
"build script failed in full E2E.\nstdout:\n{}\nstderr:\n{}",
build_out, build_err
"{} failed in full E2E.\nstdout:\n{}\nstderr:\n{}",
build_script(), build_out, build_err
);
// Run the host-side unit tests
let (test_ok, test_out, test_err) = run_script(tmp.path(), test_script());
println!("--- test stdout ---\n{}", test_out);
if !test_err.is_empty() {
eprintln!("--- test stderr ---\n{}", test_err);
// 2. Run root-level test script
let (root_ok, root_out, root_err) = run_script(tmp.path(), root_test_script());
println!("--- {} stdout ---\n{}", root_test_script(), root_out);
if !root_err.is_empty() {
eprintln!("--- {} stderr ---\n{}", root_test_script(), root_err);
}
assert!(
test_ok,
"test script failed in full E2E.\nstdout:\n{}\nstderr:\n{}",
test_out, test_err
root_ok,
"{} failed in full E2E.\nstdout:\n{}\nstderr:\n{}",
root_test_script(), root_out, root_err
);
// 3. Run inner test/run_tests script
let (inner_ok, inner_out, inner_err) = run_script(tmp.path(), inner_test_script());
println!("--- {} stdout ---\n{}", inner_test_script(), inner_out);
if !inner_err.is_empty() {
eprintln!("--- {} stderr ---\n{}", inner_test_script(), inner_err);
}
assert!(
inner_ok,
"{} failed in full E2E.\nstdout:\n{}\nstderr:\n{}",
inner_test_script(), inner_out, inner_err
);
}
@@ -482,6 +642,7 @@ fn test_full_project_build_and_test_scripts_both_pass() {
// SCRIPT CONTENT SANITY CHECKS
//
// Verify the scripts are well-formed before even executing them.
// These tests have NO external dependencies.
// ==========================================================================
#[test]
@@ -492,6 +653,7 @@ fn test_all_sh_scripts_have_strict_error_handling() {
"build.sh",
"upload.sh",
"monitor.sh",
"test.sh",
"test/run_tests.sh",
];
@@ -513,12 +675,13 @@ fn test_all_sh_scripts_have_strict_error_handling() {
#[test]
fn test_all_sh_scripts_have_shebangs() {
let tmp = extract_project("shebang_check");
let tmp = extract_project("shebang");
let sh_scripts = vec![
"build.sh",
"upload.sh",
"monitor.sh",
"test.sh",
"test/run_tests.sh",
];
@@ -542,11 +705,11 @@ fn test_all_sh_scripts_have_shebangs() {
fn test_bat_scripts_exist_for_windows_parity() {
let tmp = extract_project("win_parity");
// Every .sh should have a matching .bat
let pairs = vec![
("build.sh", "build.bat"),
("upload.sh", "upload.bat"),
("monitor.sh", "monitor.bat"),
("test.sh", "test.bat"),
("test/run_tests.sh", "test/run_tests.bat"),
];
@@ -586,7 +749,7 @@ fn test_cmake_lists_fetches_google_test() {
fn test_scripts_all_reference_anvil_toml() {
let tmp = extract_project("toml_refs");
// build and upload scripts must read .anvil.toml for configuration
// Build and upload scripts must read .anvil.toml for configuration
let config_scripts = vec![
"build.sh",
"build.bat",
@@ -614,7 +777,7 @@ fn test_scripts_all_reference_anvil_toml() {
fn test_scripts_invoke_arduino_cli_not_anvil() {
let tmp = extract_project("no_anvil_dep");
// All scripts must invoke arduino-cli directly, never the anvil binary
// Build/upload/monitor scripts must invoke arduino-cli directly
let scripts = vec![
"build.sh", "build.bat",
"upload.sh", "upload.bat",
@@ -638,19 +801,49 @@ fn test_scripts_invoke_arduino_cli_not_anvil() {
// No line should shell out to the anvil binary
let has_anvil_cmd = content.lines().any(|line| {
let trimmed = line.trim();
// Skip comments and echo/print lines
// Skip comments
if trimmed.starts_with('#')
|| trimmed.starts_with("::")
|| trimmed.starts_with("echo")
|| trimmed.starts_with("REM")
|| trimmed.starts_with("rem")
{
return false;
}
// Skip output/diagnostic lines -- these often contain
// suggestions like "Run: anvil doctor" which are messages
// to the user, not command invocations.
if trimmed.starts_with("echo")
|| trimmed.starts_with("Echo")
|| trimmed.starts_with("ECHO")
|| trimmed.starts_with("printf")
|| trimmed.starts_with("die ")
|| trimmed.starts_with("die(")
|| trimmed.starts_with("warn ")
|| trimmed.starts_with("warn(")
|| trimmed.starts_with("info ")
|| trimmed.starts_with("info(")
|| trimmed.starts_with("ok ")
|| trimmed.starts_with("ok(")
|| trimmed.starts_with(">&2")
|| trimmed.starts_with("1>&2")
|| trimmed.starts_with("Write-Host")
|| trimmed.starts_with("Write-Error")
|| trimmed.starts_with("Write-Warning")
{
return false;
}
// Skip string assignments that contain suggestion text
// e.g. MSG="Run: anvil devices"
if trimmed.contains("=\"") && trimmed.contains("anvil ") {
return false;
}
// Check for "anvil " as a command invocation
trimmed.contains("anvil ")
&& !trimmed.contains("anvil.toml")
&& !trimmed.contains("Anvil")
&& !trimmed.contains("anvilignore")
&& !trimmed.contains("\"anvil ") // quoted suggestion text
&& !trimmed.contains("'anvil ") // single-quoted suggestion
});
assert!(
!has_anvil_cmd,
@@ -659,3 +852,31 @@ fn test_scripts_invoke_arduino_cli_not_anvil() {
);
}
}
#[test]
fn test_all_expected_scripts_exist() {
let tmp = extract_project("all_scripts");
let expected = vec![
"build.sh",
"build.bat",
"upload.sh",
"upload.bat",
"monitor.sh",
"monitor.bat",
"test.sh",
"test.bat",
"test/run_tests.sh",
"test/run_tests.bat",
];
for script in &expected {
let path = tmp.path().join(script);
assert!(
path.exists(),
"Expected script missing: {}\n\nProject contents:\n{}",
script,
list_dir_recursive(tmp.path())
);
}
}