test: add external test commands (#4277)

ReenigneArcher 2025-09-21 00:22:42 -04:00 committed by GitHub
parent 2b2b4a7fbe
commit 5800653055
5 changed files with 357 additions and 9 deletions

View File

@@ -294,8 +294,7 @@ jobs:
id: test
shell: msys2 {0}
working-directory: build/tests
-run: |
-  ./test_sunshine.exe --gtest_color=yes --gtest_output=xml:test_results.xml
+run: ./test_sunshine.exe --gtest_color=yes --gtest_output=xml:test_results.xml
- name: Generate gcov report
id: test_report

View File

@@ -3,15 +3,23 @@
#
# UDEV_FOUND - system has udev
# UDEV_RULES_INSTALL_DIR - the udev rules install directory
# UDEVADM_EXECUTABLE - path to udevadm executable
# UDEV_VERSION - version of udev/systemd
-IF (NOT WIN32)
+if(NOT WIN32)
find_package(PkgConfig QUIET)
if(PKG_CONFIG_FOUND)
pkg_check_modules(UDEV "udev")
endif()
-if (UDEV_FOUND)
+if(UDEV_FOUND)
if(UDEV_VERSION)
message(STATUS "Found udev/systemd version: ${UDEV_VERSION}")
else()
message(WARNING "Could not determine udev/systemd version")
set(UDEV_VERSION "0")
endif()
execute_process(COMMAND ${PKG_CONFIG_EXECUTABLE}
--variable=udevdir udev
OUTPUT_VARIABLE UDEV_RULES_INSTALL_DIR)
@@ -23,6 +31,24 @@ IF (NOT WIN32)
mark_as_advanced(UDEV_RULES_INSTALL_DIR)
endif ()
# Check if udevadm is available
find_program(UDEVADM_EXECUTABLE udevadm
PATHS /usr/bin /bin /usr/sbin /sbin
DOC "Path to udevadm executable")
mark_as_advanced(UDEVADM_EXECUTABLE)
-ENDIF ()
# Handle version requirements
if(Udev_FIND_VERSION)
if(UDEV_VERSION VERSION_LESS Udev_FIND_VERSION)
set(UDEV_FOUND FALSE)
if(Udev_FIND_REQUIRED)
message(FATAL_ERROR "Udev version ${UDEV_VERSION} less than required version ${Udev_FIND_VERSION}")
else()
message(STATUS "Udev version ${UDEV_VERSION} less than required version ${Udev_FIND_VERSION}")
endif()
else()
message(STATUS "Udev version ${UDEV_VERSION} meets requirement (>= ${Udev_FIND_VERSION})")
endif()
endif()
endif()
endif()

View File

@@ -37,6 +37,16 @@ set(TEST_DEFINITIONS) # list will be appended as needed
# this indicates we're building tests in case sunshine needs to adjust some code or add private tests
list(APPEND TEST_DEFINITIONS SUNSHINE_TESTS)
list(APPEND TEST_DEFINITIONS SUNSHINE_SOURCE_DIR="${CMAKE_SOURCE_DIR}")
list(APPEND TEST_DEFINITIONS SUNSHINE_TEST_BIN_DIR="${CMAKE_CURRENT_BINARY_DIR}")
if(NOT WIN32)
find_package(Udev 255) # we need 255+ for udevadm verify
message(STATUS "UDEV_FOUND: ${UDEV_FOUND}")
if(UDEV_FOUND)
list(APPEND TEST_DEFINITIONS UDEVADM_EXECUTABLE="${UDEVADM_EXECUTABLE}")
endif()
endif()
file(GLOB_RECURSE TEST_SOURCES CONFIGURE_DEPENDS
${CMAKE_SOURCE_DIR}/tests/*.h
@@ -55,15 +65,16 @@ add_executable(${PROJECT_NAME}
# Copy files needed for config consistency tests to build directory
# This ensures both CLI and CLion can access the same files relative to the test executable
# Using configure_file ensures files are copied when they change between builds
-set(CONFIG_TEST_FILES
+set(INTEGRATION_TEST_FILES
"src/config.cpp"
"src_assets/common/assets/web/config.html"
"docs/configuration.md"
"src_assets/common/assets/web/public/assets/locale/en.json"
"src_assets/common/assets/web/configs/tabs/General.vue"
"src_assets/linux/misc/60-sunshine.rules"
)
-foreach(file ${CONFIG_TEST_FILES})
+foreach(file ${INTEGRATION_TEST_FILES})
configure_file(
"${CMAKE_SOURCE_DIR}/${file}"
"${CMAKE_CURRENT_BINARY_DIR}/${file}"

View File

@@ -0,0 +1,194 @@
/**
* @file tests/integration/test_external_commands.cpp
* @brief Integration tests for running external commands with platform-specific validation
*/
#include "../tests_common.h"
// standard includes
#include <format>
#include <string>
#include <tuple>
#include <vector>
// lib includes
#include <boost/process/v1.hpp>
// local includes
#include "src/platform/common.h"
// Test data structure for parameterized testing
struct ExternalCommandTestData {
std::string command;
std::string platform; // "windows", "linux", "macos", or "all"
bool should_succeed;
std::string description;
std::string working_directory; // Optional: if empty, uses SUNSHINE_SOURCE_DIR
bool xfail_condition = false; // Optional: condition for expected failure
std::string xfail_reason = ""; // Optional: reason for expected failure
// Constructor with xfail parameters
ExternalCommandTestData(std::string cmd, std::string plat, const bool succeed, std::string desc, std::string work_dir = "", const bool xfail_cond = false, std::string xfail_rsn = ""):
command(std::move(cmd)),
platform(std::move(plat)),
should_succeed(succeed),
description(std::move(desc)),
working_directory(std::move(work_dir)),
xfail_condition(xfail_cond),
xfail_reason(std::move(xfail_rsn)) {}
};
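For orientation, only the first four fields are required when building an entry; the working directory then defaults to SUNSHINE_SOURCE_DIR and the xfail fields default to off. A minimal sketch (the command and description are illustrative, not part of the suite instantiated below):
// Illustrative only: a cross-platform case expected to succeed, run from the
// default working directory (SUNSHINE_SOURCE_DIR), with no expected-failure condition.
const ExternalCommandTestData example {
  "git --version",  // command (hypothetical)
  "all",  // platform
  true,  // should_succeed
  "Print the git version"  // description
};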
class ExternalCommandTest: public ::testing::TestWithParam<ExternalCommandTestData> {
protected:
void SetUp() override {
if constexpr (IS_WINDOWS) {
current_platform = "windows";
} else if constexpr (IS_MACOS) {
current_platform = "macos";
} else if constexpr (IS_LINUX) {
current_platform = "linux";
}
}
[[nodiscard]] bool shouldRunOnCurrentPlatform(const std::string_view &test_platform) const {
return test_platform == "all" || test_platform == current_platform;
}
// Helper function to run a command using the existing process infrastructure
static std::pair<int, std::string> runCommand(const std::string &cmd, const std::string_view &working_dir) {
const auto env = boost::this_process::environment();
// Determine the working directory: use the provided working_dir or fall back to SUNSHINE_SOURCE_DIR
boost::filesystem::path effective_working_dir;
if (!working_dir.empty()) {
effective_working_dir = working_dir;
} else {
// Use SUNSHINE_SOURCE_DIR CMake definition as the default working directory
effective_working_dir = SUNSHINE_SOURCE_DIR;
}
std::error_code ec;
// Create a temporary file to capture output
const auto temp_file = std::tmpfile();
if (!temp_file) {
return {-1, "Failed to create temporary file for output"};
}
// Run the command using the existing platf::run_command function
auto child = platf::run_command(
false, // not elevated
false, // not interactive
cmd,
effective_working_dir,
env,
temp_file,
ec,
nullptr // no process group
);
if (ec) {
std::fclose(temp_file);
return {-1, std::format("Failed to start command: {}", ec.message())};
}
// Wait for the command to complete
child.wait();
int exit_code = child.exit_code();
// Read the output from the temporary file
std::rewind(temp_file);
std::string output;
std::array<char, 1024> buffer {};
while (std::fgets(buffer.data(), static_cast<int>(buffer.size()), temp_file)) {
// std::string constructor automatically handles null-terminated strings
output += std::string(buffer.data());
}
std::fclose(temp_file);
return {exit_code, output};
}
public:
std::string current_platform;
};
// Test case implementation
TEST_P(ExternalCommandTest, RunExternalCommand) {
const auto &[command, platform, should_succeed, description, working_directory, xfail_condition, xfail_reason] = GetParam();
// Skip test if not for the current platform
if (!shouldRunOnCurrentPlatform(platform)) {
GTEST_SKIP() << "Test not applicable for platform: " << current_platform;
}
// Use the xfail condition and reason from test data
XFAIL_IF(xfail_condition, xfail_reason);
BOOST_LOG(info) << "Running external command test: " << description;
BOOST_LOG(debug) << "Command: " << command;
auto [exit_code, output] = runCommand(command, working_directory);
BOOST_LOG(debug) << "Command exit code: " << exit_code;
if (!output.empty()) {
BOOST_LOG(debug) << "Command output: " << output;
}
if (should_succeed) {
HANDLE_XFAIL_ASSERT_EQ(exit_code, 0, std::format("Command should have succeeded but failed with exit code {}\nOutput: {}", std::to_string(exit_code), output));
} else {
HANDLE_XFAIL_ASSERT_NE(exit_code, 0, std::format("Command should have failed but succeeded\nOutput: {}", output));
}
}
// Platform-specific command strings
constexpr auto SIMPLE_COMMAND = IS_WINDOWS ? "where cmd" : "which sh";
#ifdef UDEVADM_EXECUTABLE
#define UDEV_TESTS \
ExternalCommandTestData { \
std::format("{} verify {}/src_assets/linux/misc/60-sunshine.rules", UDEVADM_EXECUTABLE, SUNSHINE_TEST_BIN_DIR), \
"linux", \
true, \
"Test udev rules file" \
},
#else
#define UDEV_TESTS
#endif
// Test data
INSTANTIATE_TEST_SUITE_P(
ExternalCommands,
ExternalCommandTest,
::testing::Values(
UDEV_TESTS
// Cross-platform tests with xfail on Windows CI
ExternalCommandTestData {
SIMPLE_COMMAND,
"all",
true,
"Simple command test",
"", // working_directory
IS_WINDOWS, // xfail_condition
"Simple command test fails on Windows CI environment" // xfail_reason
},
// Cross-platform failing test
ExternalCommandTestData {
"non_existent_command_12345",
"all",
false,
"Test command that should fail"
}
),
[](const ::testing::TestParamInfo<ExternalCommandTestData> &info) {
// Generate test names from a description
std::string name = info.param.description;
// Replace spaces and special characters with underscores for valid test names
std::replace_if(name.begin(), name.end(), [](char c) {
return !std::isalnum(c);
},
'_');
return name;
}
);

View File

@@ -8,6 +8,124 @@
#include <src/logging.h>
#include <src/platform/common.h>
// XFail/XPass pattern implementation (similar to pytest)
namespace test_utils {
/**
* @brief Marks a test as expected to fail
* @param condition The condition under which the test is expected to fail
* @param reason The reason why the test is expected to fail
*/
struct XFailMarker {
bool should_xfail;
std::string reason;
XFailMarker(bool condition, std::string reason):
should_xfail(condition),
reason(std::move(reason)) {}
};
/**
* @brief Helper function to handle xfail logic
* @param marker The XFailMarker containing condition and reason
* @param test_passed Whether the test actually passed
*/
inline void handleXFail(const XFailMarker &marker, bool test_passed) {
if (marker.should_xfail) {
if (test_passed) {
// XPass: Test was expected to fail but passed
const std::string message = "XPASS: Test unexpectedly passed (expected to fail: " + marker.reason + ")";
BOOST_LOG(warning) << message;
GTEST_SKIP() << "XPASS: Test unexpectedly passed (expected to fail: " << marker.reason << ")";
} else {
// XFail: Test failed as expected
const std::string message = "XFAIL: Test failed as expected (" + marker.reason + ")";
BOOST_LOG(info) << message;
GTEST_SKIP() << "XFAIL: " << marker.reason;
}
}
// If not marked as xfail, let the test result stand as normal
}
/**
* @brief Check if two values are equal without failing the test
* @param actual The actual value
* @param expected The expected value
* @param message Optional message to include
* @return true if values are equal, false otherwise
*/
template<typename T1, typename T2>
inline bool checkEqual(const T1 &actual, const T2 &expected, const std::string &message = "") {
bool result = (actual == expected);
if (!message.empty()) {
BOOST_LOG(debug) << "Assertion check: " << message << " - " << (result ? "PASSED" : "FAILED");
}
return result;
}
/**
* @brief Check if two values are not equal without failing the test
* @param actual The actual value
* @param expected The expected value
* @param message Optional message to include
* @return true if values are not equal, false otherwise
*/
template<typename T1, typename T2>
inline bool checkNotEqual(const T1 &actual, const T2 &expected, const std::string &message = "") {
const bool result = (actual != expected);
if (!message.empty()) {
BOOST_LOG(debug) << "Assertion check: " << message << " - " << (result ? "PASSED" : "FAILED");
}
return result;
}
} // namespace test_utils
// Convenience macros for xfail testing
#define XFAIL_IF(condition, reason) \
test_utils::XFailMarker xfail_marker((condition), (reason))
#define HANDLE_XFAIL_ASSERT_EQ(actual, expected, message) \
do { \
if (xfail_marker.should_xfail) { \
/* For xfail tests, check the assertion without failing */ \
bool test_passed = test_utils::checkEqual((actual), (expected), (message)); \
test_utils::handleXFail(xfail_marker, test_passed); \
} else { \
/* Run the normal GTest assertion if not marked as xfail */ \
EXPECT_EQ((actual), (expected)) << (message); \
} \
} while (0)
#define HANDLE_XFAIL_ASSERT_NE(actual, expected, message) \
do { \
if (xfail_marker.should_xfail) { \
/* For xfail tests, check the assertion without failing */ \
bool test_passed = test_utils::checkNotEqual((actual), (expected), (message)); \
test_utils::handleXFail(xfail_marker, test_passed); \
} else { \
/* Run the normal GTest assertion if not marked as xfail */ \
EXPECT_NE((actual), (expected)) << (message); \
} \
} while (0)
// Platform detection macros for convenience
#ifdef _WIN32
#define IS_WINDOWS true
#else
#define IS_WINDOWS false
#endif
#ifdef __linux__
#define IS_LINUX true
#else
#define IS_LINUX false
#endif
#ifdef __APPLE__
#define IS_MACOS true
#else
#define IS_MACOS false
#endif
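Taken together, the pattern mirrors what test_external_commands.cpp does above: declare the marker once with XFAIL_IF, then route assertions through the HANDLE_XFAIL_* macros. A minimal sketch assuming this header is included; the test name, condition, and values are illustrative only:
// Illustrative usage, not part of this commit: on Windows the outcome is reported
// as XFAIL/XPASS via GTEST_SKIP; on other platforms the plain EXPECT_EQ runs.
TEST(XFailPatternExample, Arithmetic) {
  XFAIL_IF(IS_WINDOWS, "hypothetical Windows-only issue");
  const int result = 2 + 2;
  HANDLE_XFAIL_ASSERT_EQ(result, 4, "2 + 2 should equal 4");
}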
struct PlatformTestSuite: testing::Test {
static void SetUpTestSuite() {
ASSERT_FALSE(platf_deinit);