Updates unit testing (+ adds some new ones) (#19186)
* Updates our unit testing to be much better than it is now: somewhat more functional tests ported from TG. I also added 2 new unit tests to serve as examples: mapping and job landmarks.
* Revert some minor things
* Fixes the log file
* Update unit_tests.dm
* adds a missing icon
* additional changes
@@ -1,6 +1,9 @@
 {
 	"map_name": "MultiZ Debug",
 	"map_path": "map_files/debug",
 	"map_file": "multiz.dmm",
+	"ignored_unit_tests": [
+		"/datum/unit_test/job_roundstart_spawnpoints"
+	],
 	"traits": [{"Up": 1}, {"Up": 1, "Down": -1}, {"Down": -1}]
 }
@@ -1,8 +1,11 @@
 {
 	"map_name": "Runtime Station",
 	"map_path": "map_files/debug",
 	"map_file": "runtimestation.dmm",
+	"ignored_unit_tests": [
+		"/datum/unit_test/job_roundstart_spawnpoints"
+	],
 	"shuttles": {
 		"cargo": "cargo_delta"
 	}
 }
79	_maps/templates/unit_tests.dmm	Normal file
@@ -0,0 +1,79 @@
+//MAP CONVERTED BY dmm2tgm.py THIS HEADER COMMENT PREVENTS RECONVERSION, DO NOT REMOVE
+"a" = (
+/turf/closed/indestructible,
+/area/testroom)
+"m" = (
+/turf/open/floor/plasteel,
+/area/testroom)
+"r" = (
+/obj/effect/landmark/unit_test_top_right,
+/turf/open/floor/plasteel,
+/area/testroom)
+"L" = (
+/obj/effect/landmark/unit_test_bottom_left,
+/turf/open/floor/plasteel,
+/area/testroom)
+
+(1,1,1) = {"
+a
+a
+a
+a
+a
+a
+a
+"}
+(2,1,1) = {"
+a
+m
+m
+m
+m
+L
+a
+"}
+(3,1,1) = {"
+a
+m
+m
+m
+m
+m
+a
+"}
+(4,1,1) = {"
+a
+m
+m
+m
+m
+m
+a
+"}
+(5,1,1) = {"
+a
+m
+m
+m
+m
+m
+a
+"}
+(6,1,1) = {"
+a
+r
+m
+m
+m
+m
+a
+"}
+(7,1,1) = {"
+a
+a
+a
+a
+a
+a
+a
+"}
20	code/__DEFINES/unit_tests.dm	Normal file
@@ -0,0 +1,20 @@
+/// Are tests enabled with no focus?
+/// Use this when performing test assertions outside of a unit test,
+/// since a focused test means that you're trying to run a test quickly.
+/// If a parameter is provided, will check if the focus is on that test name.
+/// For example, PERFORM_ALL_TESTS(log_mapping) will only run if either
+/// no test is focused, or the focus is log_mapping.
+#ifdef UNIT_TESTS
+// Bit of a trick here: if focus isn't passed in, then it'll check for /datum/unit_test/, which is never the case.
+#define PERFORM_ALL_TESTS(focus...) (isnull(GLOB.focused_tests) || (/datum/unit_test/##focus in GLOB.focused_tests))
+#else
+// UNLINT necessary here so that if (PERFORM_ALL_TESTS()) works
+#define PERFORM_ALL_TESTS(...) UNLINT(FALSE)
+#endif
+
+/// ASSERT(), but it only actually does anything during unit tests
+#ifdef UNIT_TESTS
+#define TEST_ONLY_ASSERT(test, explanation) if(!(test)) {CRASH(explanation)}
+#else
+#define TEST_ONLY_ASSERT(test, explanation)
+#endif
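As a usage sketch (hypothetical caller and focus path, not part of this commit), these two macros combine to guard assertions that live outside of test files:

/atom/movable/proc/example_integrity_check()	// hypothetical proc, for illustration only
	// Runs when tests are unfocused, or when this specific focus test is selected.
	if (PERFORM_ALL_TESTS(focus_only/example_integrity))
		TEST_ONLY_ASSERT(!QDELETED(src), "[src] was integrity-checked after being qdeleted")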
@@ -35,7 +35,6 @@
 	SEND_TEXT(world.log, text)
 #endif
 
-
 /* Items with ADMINPRIVATE prefixed are stripped from public logs. */
 /proc/log_admin(text)
 	GLOB.admin_log.Add(text)
@@ -208,6 +207,9 @@
 	SEND_TEXT(world.log, text)
 
 /proc/log_mapping(text)
+#ifdef UNIT_TESTS
+	GLOB.unit_test_mapping_logs += text
+#endif
 	WRITE_LOG(GLOB.world_map_error_log, text)
 
 
@@ -20,9 +20,11 @@
 //#define VISUALIZE_ACTIVE_TURFS	//Highlights atmos active turfs in green
 #endif
 
+// If this is uncommented, we do a single run through of the game setup and tear down process with unit tests in between
+// #define UNIT_TESTS
+
 // If defined, we will NOT defer asset generation till later in the game, and will instead do it all at once, during initialize
 //#define DO_NOT_DEFER_ASSETS
-//#define UNIT_TESTS //Enables unit tests via TEST_RUN_PARAMETER
 
 #ifndef PRELOAD_RSC //set to:
 #define PRELOAD_RSC 2 // 0 to allow using external resources or on-demand behaviour;
@@ -74,6 +74,9 @@ GLOBAL_PROTECT(adminlog)
 
 GLOBAL_LIST_EMPTY(active_turfs_startlist)
 
+GLOBAL_LIST_EMPTY(test_log)
+GLOBAL_PROTECT(test_log)
+
 /////Picture logging
 GLOBAL_VAR(picture_log_directory)
 GLOBAL_PROTECT(picture_log_directory)
@@ -88,11 +88,11 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
 // Highlander-style: there can only be one! Kill off the old and replace it with the new.
 
 	if(!random_seed)
-#ifdef UNIT_TESTS
+		#ifdef UNIT_TESTS
 		random_seed = 29051994
-#else
+		#else
 		random_seed = rand(1, 1e9)
-#endif
+		#endif
 	rand_seed(random_seed)
 
 	var/list/_subsystems = list()
@@ -549,7 +549,7 @@ SUBSYSTEM_DEF(job)
 		if(S)
 			S.JoinPlayerHere(living_mob, FALSE)
 		if(!S && !spawning_handled) //if there isn't a spawnpoint send them to latejoin, if there's no latejoin go yell at your mapper
-			log_world("Couldn't find a round start spawn point for [rank]")
+			log_mapping("Job [job.title] ([job.type]) couldn't find a round start spawn point.")
 			SendToLateJoin(living_mob)
 
 	var/alt_title = null
@@ -6,9 +6,13 @@ SUBSYSTEM_DEF(minor_mapping)
 	flags = SS_NO_FIRE
 
 /datum/controller/subsystem/minor_mapping/Initialize(timeofday)
-	trigger_migration(CONFIG_GET(number/mice_roundstart), FALSE) //we dont want roundstart special rats
+#ifdef UNIT_TESTS // This whole subsystem just introduces a lot of odd confounding variables into unit test situations, so let's just not bother with doing an initialize here.
+	return SS_INIT_NO_NEED
+#else
+	trigger_migration(CONFIG_GET(number/mice_roundstart))
 	place_satchels()
 	return SS_INIT_SUCCESS
+#endif // the mice are easily the bigger problem, but let's just avoid anything that could cause some bullshit.
 
 /datum/controller/subsystem/minor_mapping/proc/trigger_migration(num_mice = 10, special = TRUE)
 	var/list/exposed_wires = find_exposed_wires()
@@ -29,7 +29,11 @@
 		"cargo" = "cargo_box",
 		"ferry" = "ferry_fancy",
 		"whiteship" = "whiteship_1",
-		"emergency" = "emergency_box")
+		"emergency" = "emergency_box",
+	)
+
+	/// List of unit tests that are skipped when running this map
+	var/list/skipped_tests
 
 /proc/load_map_config(filename = "data/next_map.json", default_to_box, delete_after, error_if_missing = TRUE)
 	var/datum/map_config/config = new
@@ -130,6 +134,16 @@
 
 	allow_custom_shuttles = json["allow_custom_shuttles"] != FALSE
 
+#ifdef UNIT_TESTS
+	// Check for unit tests to skip, no reason to check these if we're not running tests
+	for(var/path_as_text in json["ignored_unit_tests"])
+		var/path_real = text2path(path_as_text)
+		if(!ispath(path_real, /datum/unit_test))
+			stack_trace("Invalid path in mapping config for ignored unit tests: \[[path_as_text]\]")
+			continue
+		LAZYADD(skipped_tests, path_real)
+#endif
+
 	defaulted = FALSE
 	return TRUE
 #undef CHECK_EXISTS
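To trace the flow with the map configs shown earlier: each JSON string in "ignored_unit_tests" round-trips to a typepath before being stored, and the test runner later checks membership in SSmapping.config.skipped_tests. An illustrative conversion of one entry:

// Illustrative only, not part of the diff: how one ignored entry is converted.
var/path_real = text2path("/datum/unit_test/job_roundstart_spawnpoints")
// ispath(path_real, /datum/unit_test) is TRUE, so LAZYADD(skipped_tests, path_real) runs.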
@@ -45,8 +45,9 @@ NOTE: there are two lists of areas in the end of this file: centcom and station
 
 /area/testroom
 	requires_power = FALSE
+	has_gravity = STANDARD_GRAVITY
 	name = "Test Room"
-	icon_state = "storage"
+	icon_state = "test_room"
 
 //EXTRA
 
@@ -38,8 +38,10 @@ INITIALIZE_IMMEDIATE(/obj/effect/landmark)
 	var/used = FALSE
 
 /obj/effect/landmark/start/proc/after_round_start()
+#ifndef UNIT_TESTS // We'd like to keep these around for unit tests, so we can check that they exist.
 	if(delete_after_roundstart)
 		qdel(src)
+#endif
 
 /obj/effect/landmark/start/Initialize()
 	. = ..()
@@ -450,6 +452,16 @@ INITIALIZE_IMMEDIATE(/obj/effect/landmark/start/new_player)
 	ruin_template = null
 	. = ..()
 
+/// Marks the bottom left of the testing zone.
+/// In landmarks.dm and not unit_test.dm so it is always active in the mapping tools.
+/obj/effect/landmark/unit_test_bottom_left
+	name = "unit test zone bottom left"
+
+/// Marks the top right of the testing zone.
+/// In landmarks.dm and not unit_test.dm so it is always active in the mapping tools.
+/obj/effect/landmark/unit_test_top_right
+	name = "unit test zone top right"
+
 /obj/effect/landmark/centcom
 	name = "centcomspawn"
 	icon_state = "x"
@@ -35,8 +35,9 @@
 	vis_contents.Cut() //removes inherited overlays
 	visibilityChanged()
 
-	if(flags_1 & INITIALIZED_1)
-		stack_trace("Warning: [src]([type]) initialized multiple times!")
+	if (PERFORM_ALL_TESTS(focus_only/multiple_space_initialization))
+		if(flags_1 & INITIALIZED_1)
+			stack_trace("Warning: [src]([type]) initialized multiple times!")
 	flags_1 |= INITIALIZED_1
 
 	var/area/A = loc
@@ -73,8 +73,12 @@ GLOBAL_VAR(restart_counter)
 
 	Master.Initialize(10, FALSE, TRUE)
 
-	if(TEST_RUN_PARAMETER in params)
-		HandleTestRun()
+	RunUnattendedFunctions()
+
+/world/proc/RunUnattendedFunctions()
+#ifdef UNIT_TESTS
+	HandleTestRun()
+#endif
 
 /world/proc/HandleTestRun()
 	//trigger things to run the whole process
@@ -83,11 +87,11 @@ GLOBAL_VAR(restart_counter)
 	CONFIG_SET(number/round_end_countdown, 0)
 	var/datum/callback/cb
 #ifdef UNIT_TESTS
-	cb = CALLBACK(GLOBAL_PROC, /proc/RunUnitTests)
+	cb = CALLBACK(GLOBAL_PROC, GLOBAL_PROC_REF(RunUnitTests))
 #else
 	cb = VARSET_CALLBACK(SSticker, force_ending, TRUE)
 #endif
-	SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/_addtimer_here, cb, 10 SECONDS))
+	SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, GLOBAL_PROC_REF(_addtimer), cb, 10 SECONDS))
 
 
 /world/proc/SetupLogs()
@@ -113,6 +117,7 @@ GLOBAL_VAR(restart_counter)
 		GLOB.picture_log_directory = "data/picture_logs/[override_dir]"
 
 	GLOB.world_game_log = "[GLOB.log_directory]/game.log"
+	GLOB.test_log = "[GLOB.log_directory]/unit_tests.log"
 	GLOB.world_mecha_log = "[GLOB.log_directory]/mecha.log"
 	GLOB.world_virus_log = "[GLOB.log_directory]/virus.log"
 	GLOB.world_cloning_log = "[GLOB.log_directory]/cloning.log"
@@ -136,10 +141,7 @@ GLOBAL_VAR(restart_counter)
 
 	GLOB.demo_log = "[GLOB.log_directory]/demo.txt"
 
-#ifdef UNIT_TESTS
-	GLOB.test_log = file("[GLOB.log_directory]/tests.log")
-	start_log(GLOB.test_log)
-#endif
 	start_log(GLOB.world_game_log)
 	start_log(GLOB.world_attack_log)
 	start_log(GLOB.world_pda_log)
@@ -220,8 +222,8 @@ GLOBAL_VAR(restart_counter)
 		text2file("Success!", "[GLOB.log_directory]/clean_run.lk")
 	else
 		log_world("Test run failed!\n[fail_reasons.Join("\n")]")
-		sleep(0)	//yes, 0, this'll let Reboot finish and prevent byond memes
-		qdel(src)	//shut it down
+	sleep(0)	//yes, 0, this'll let Reboot finish and prevent byond memes
+	qdel(src)	//shut it down
 
 /world/Reboot(reason = 0, fast_track = FALSE)
 	if (reason || fast_track) //special reboot, do none of the normal stuff
@@ -1,10 +1,93 @@
 //include unit test files in this module in this ifdef
 //Keep this sorted alphabetically
 
 #if defined(UNIT_TESTS) || defined(SPACEMAN_DMM)
+
+/// For advanced cases, fail unconditionally but don't return (so a test can return multiple results)
+#define TEST_FAIL(reason) (Fail(reason || "No reason", __FILE__, __LINE__))
+
+/// Asserts that a condition is true
+/// If the condition is not true, fails the test
+#define TEST_ASSERT(assertion, reason) if (!(assertion)) { return Fail("Assertion failed: [reason || "No reason"]", __FILE__, __LINE__) }
+
+/// Asserts that a parameter is not null
+#define TEST_ASSERT_NOTNULL(a, reason) if (isnull(a)) { return Fail("Expected non-null value: [reason || "No reason"]", __FILE__, __LINE__) }
+
+/// Asserts that a parameter is null
+#define TEST_ASSERT_NULL(a, reason) if (!isnull(a)) { return Fail("Expected null value but received [a]: [reason || "No reason"]", __FILE__, __LINE__) }
+
+/// Asserts that the two parameters passed are equal, fails otherwise
+/// Optionally allows an additional message in the case of a failure
+#define TEST_ASSERT_EQUAL(a, b, message) do { \
+	var/lhs = ##a; \
+	var/rhs = ##b; \
+	if (lhs != rhs) { \
+		return Fail("Expected [isnull(lhs) ? "null" : lhs] to be equal to [isnull(rhs) ? "null" : rhs].[message ? " [message]" : ""]", __FILE__, __LINE__); \
+	} \
+} while (FALSE)
+
+/// Asserts that the two parameters passed are not equal, fails otherwise
+/// Optionally allows an additional message in the case of a failure
+#define TEST_ASSERT_NOTEQUAL(a, b, message) do { \
+	var/lhs = ##a; \
+	var/rhs = ##b; \
+	if (lhs == rhs) { \
+		return Fail("Expected [isnull(lhs) ? "null" : lhs] to not be equal to [isnull(rhs) ? "null" : rhs].[message ? " [message]" : ""]", __FILE__, __LINE__); \
+	} \
+} while (FALSE)
+
+/// *Only* run the test provided within the parentheses
+/// This is useful for debugging when you want to reduce noise, but should never be pushed
+/// Intended to be used in the manner of `TEST_FOCUS(/datum/unit_test/math)`
+#define TEST_FOCUS(test_path) ##test_path { focus = TRUE; }
+
+/// Logs a noticeable message on GitHub, but will not mark as an error.
+/// Use this when something shouldn't happen and is of note, but shouldn't block CI.
+/// Does not mark the test as failed.
+#define TEST_NOTICE(source, message) source.log_for_test((##message), "notice", __FILE__, __LINE__)
+
+/// Constants indicating unit test completion status
+#define UNIT_TEST_PASSED 0
+#define UNIT_TEST_FAILED 1
+#define UNIT_TEST_SKIPPED 2
+
+#define TEST_PRE 0
+#define TEST_DEFAULT 1
+/// After most test steps, used for tests that run long so shorter issues can be noticed faster
+#define TEST_LONGER 10
+/// This must be the last test to run due to the inherent nature of the test iterating every single tangible atom in the game and qdeleting all of them (while taking long sleeps to make sure the garbage collector fires properly) taking a large amount of time.
+#define TEST_CREATE_AND_DESTROY INFINITY
+
+/// Change color to red on ANSI terminal output, if enabled with -DANSICOLORS.
+#ifdef ANSICOLORS
+#define TEST_OUTPUT_RED(text) "\x1B\x5B1;31m[text]\x1B\x5B0m"
+#else
+#define TEST_OUTPUT_RED(text) (text)
+#endif
+/// Change color to green on ANSI terminal output, if enabled with -DANSICOLORS.
+#ifdef ANSICOLORS
+#define TEST_OUTPUT_GREEN(text) "\x1B\x5B1;32m[text]\x1B\x5B0m"
+#else
+#define TEST_OUTPUT_GREEN(text) (text)
+#endif
+/// Change color to yellow on ANSI terminal output, if enabled with -DANSICOLORS.
+#ifdef ANSICOLORS
+#define TEST_OUTPUT_YELLOW(text) "\x1B\x5B1;33m[text]\x1B\x5B0m"
+#else
+#define TEST_OUTPUT_YELLOW(text) (text)
+#endif
+
+/// A trait source when adding traits through unit tests
+#define TRAIT_SOURCE_UNIT_TESTS "unit_tests"
+
 #include "anchored_mobs.dm"
 #include "component_tests.dm"
 #include "dynamic_ruleset_sanity.dm"
+#include "focus_only_tests.dm"
+#include "map_landmarks.dm"
+#include "mapping.dm"
 #include "reagent_id_typos.dm"
 #include "reagent_recipe_collisions.dm"
 #include "spawn_humans.dm"
@@ -12,4 +95,9 @@
 #include "subsystem_init.dm"
 #include "timer_sanity.dm"
 #include "unit_test.dm"
+
+#undef TEST_ASSERT
+#undef TEST_ASSERT_EQUAL
+#undef TEST_ASSERT_NOTEQUAL
+//#undef TEST_FOCUS - This define is used by vscode unit test extension to pick specific unit tests to run and appended later so needs to be used out of scope here
 #endif
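For illustration (a hypothetical test, not part of this commit), a minimal test written against these macros looks like:

/datum/unit_test/example_arithmetic	// hypothetical test path
/datum/unit_test/example_arithmetic/Run()
	TEST_ASSERT_EQUAL(2 + 2, 4, "Basic addition should hold")
	TEST_ASSERT_NOTNULL(GLOB.landmarks_list, "Landmark list should exist by the time tests run")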
@@ -5,5 +5,5 @@
 		if(initial(M.anchored))
 			L += "[i]"
 	if(!L.len)
 		return //passed!
-	Fail("The following mobs are defined as anchored. This is incompatible with the new move force/resist system and needs to be revised.: [L.Join(" ")]")
+	TEST_FAIL("The following mobs are defined as anchored. This is incompatible with the new move force/resist system and needs to be revised.: [L.Join(" ")]")
@@ -1,12 +1,11 @@
 /datum/unit_test/component_duping/Run()
 	var/list/bad_dms = list()
	var/list/bad_dts = list()
-	for(var/t in typesof(/datum/component))
-		var/datum/component/comp = t
+	for(var/datum/component/comp as anything in typesof(/datum/component))
 		if(!isnum(initial(comp.dupe_mode)))
-			bad_dms += t
+			bad_dms += comp
 		var/dupe_type = initial(comp.dupe_type)
 		if(dupe_type && !ispath(dupe_type))
-			bad_dts += t
+			bad_dts += comp
 	if(length(bad_dms) || length(bad_dts))
-		Fail("Components with invalid dupe modes: ([bad_dms.Join(",")]) ||| Components with invalid dupe types: ([bad_dts.Join(",")])")
+		TEST_FAIL("Components with invalid dupe modes: ([bad_dms.Join(",")]) ||| Components with invalid dupe types: ([bad_dts.Join(",")])")
@@ -2,13 +2,12 @@
 /datum/unit_test/dynamic_roundstart_ruleset_sanity
 
 /datum/unit_test/dynamic_roundstart_ruleset_sanity/Run()
-	for (var/_ruleset in subtypesof(/datum/dynamic_ruleset/roundstart))
-		var/datum/dynamic_ruleset/roundstart/ruleset = _ruleset
+	for (var/datum/dynamic_ruleset/roundstart/ruleset as anything in subtypesof(/datum/dynamic_ruleset/roundstart))
 
 		var/has_scaling_cost = initial(ruleset.scaling_cost)
 		var/is_lone = initial(ruleset.flags) & (LONE_RULESET | HIGH_IMPACT_RULESET)
 
 		if (has_scaling_cost && is_lone)
-			Fail("[ruleset] has a scaling_cost, but is also a lone/highlander ruleset.")
+			TEST_FAIL("[ruleset] has a scaling_cost, but is also a lone/highlander ruleset.")
 		else if (!has_scaling_cost && !is_lone)
-			Fail("[ruleset] has no scaling cost, but is also not a lone/highlander ruleset.")
+			TEST_FAIL("[ruleset] has no scaling cost, but is also not a lone/highlander ruleset.")
10	code/modules/unit_tests/focus_only_tests.dm	Normal file
@@ -0,0 +1,10 @@
+/// These tests perform no behavior of their own, and have their tests offloaded onto other procs.
+/// This is useful in cases like in build_appearance_list where we want to know if any fail,
+/// but is not useful to write a test for.
+/// This file exists so that you can change any of these to TEST_FOCUS and only check for that test.
+/// For example, change /datum/unit_test/focus_only/invalid_overlays to TEST_FOCUS(/datum/unit_test/focus_only/invalid_overlays),
+/// and you will only test the check for invalid overlays in appearance building.
+/datum/unit_test/focus_only
+
+/// Checks that space turfs do not initialize multiple times
+/datum/unit_test/focus_only/multiple_space_initialization
14	code/modules/unit_tests/map_landmarks.dm	Normal file
@@ -0,0 +1,14 @@
+/// Tests that [/datum/job/proc/get_default_roundstart_spawn_point] returns a landmark from all joinable jobs.
+/datum/unit_test/job_roundstart_spawnpoints
+
+/datum/unit_test/job_roundstart_spawnpoints/Run()
+	for(var/datum/job/job as anything in SSjob.joinable_occupations)
+		if(job.spawn_positions <= 0)
+			// Zero spawn positions means we don't need to care if they don't have a roundstart landmark
+			continue
+		for(var/obj/effect/landmark/start/start_landmark in GLOB.start_landmarks_list)
+			if(start_landmark.name != job.title)
+				continue
+			return
+
+		TEST_FAIL("Job [job.title] ([job.type]) has no default roundstart spawn landmark.")
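The loop above matches landmarks to jobs purely by name, so a map passes this test for a job only when some roundstart landmark's name equals the job title exactly. A hypothetical definition, for illustration only:

// Hypothetical subtype; the name string must equal the /datum/job title.
/obj/effect/landmark/start/example_job
	name = "Example Job"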
19	code/modules/unit_tests/mapping.dm	Normal file
@@ -0,0 +1,19 @@
+/// Conveys all log_mapping messages as unit test failures, as they all indicate mapping problems.
+/datum/unit_test/log_mapping
+	// Happens before all other tests, to make sure we only capture normal mapping logs.
+	priority = TEST_PRE
+
+/datum/unit_test/log_mapping/Run()
+	var/static/regex/test_areacoord_regex = regex(@"\(-?\d+,-?\d+,(-?\d+)\)")
+
+	for(var/log_entry in GLOB.unit_test_mapping_logs)
+		// Only fail if AREACOORD was conveyed, and it's a station or mining z-level.
+		// This is because mapping errors without coords are impossible to diagnose as a unit test,
+		// and various ruins frequently do non-standard things intentionally.
+		if(!test_areacoord_regex.Find(log_entry))
+			continue
+		var/z = text2num(test_areacoord_regex.group[1])
+		if(!is_station_level(z) && !is_mining_level(z))
+			continue
+
+		TEST_FAIL(log_entry)
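To illustrate the regex (log text invented for the example), an entry carrying AREACOORD-style coordinates is captured and its z-level extracted, while an entry without coordinates is skipped:

var/static/regex/areacoord = regex(@"\(-?\d+,-?\d+,(-?\d+)\)")
areacoord.Find("example mapping error at (52,112,2)")	// matches; areacoord.group[1] == "2"
areacoord.Find("example mapping note with no coordinates")	// no match; entry is ignored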
@@ -11,4 +11,4 @@
 		var/datum/chemical_reaction/R = V
 		for(var/id in (R.required_reagents + R.required_catalysts))
 			if(!GLOB.chemical_reagents_list[id])
-				Fail("Unknown chemical path \"[id]\" in recipe [R.type]")
+				TEST_FAIL("Unknown chemical path \"[id]\" in recipe [R.type]")
@@ -12,4 +12,4 @@
 			var/datum/chemical_reaction/r1 = reactions[i]
 			var/datum/chemical_reaction/r2 = reactions[i2]
 			if(chem_recipes_do_conflict(r1, r2))
-				Fail("Chemical recipe conflict between [r1.type] and [r2.type]")
+				TEST_FAIL("Chemical recipe conflict between [r1.type] and [r2.type]")
@@ -1,7 +1,7 @@
 /datum/unit_test/spawn_humans/Run()
-	var/locs = block(run_loc_bottom_left, run_loc_top_right)
+	var/locs = block(run_loc_floor_bottom_left, run_loc_floor_top_right)
 
 	for(var/I in 1 to 5)
-		new /mob/living/carbon/human(pick(locs))
+		new /mob/living/carbon/human/consistent(pick(locs))
 
 	sleep(5 SECONDS)
@@ -2,4 +2,4 @@
 	for(var/typepath in subtypesof(/datum/species))
 		var/datum/species/S = typepath
 		if(initial(S.changesource_flags) == NONE)
-			Fail("A species type was detected with no changesource flags: [S]")
+			TEST_FAIL("A species type was detected with no changesource flags: [S]")
@@ -1,7 +1,6 @@
 /datum/unit_test/subsystem_init/Run()
-	for(var/i in Master.subsystems)
-		var/datum/controller/subsystem/ss = i
-		if(ss.flags & SS_NO_INIT)
+	for(var/datum/controller/subsystem/master_subsystem as anything in Master.subsystems)
+		if(master_subsystem.flags & SS_NO_INIT)
 			continue
-		if(!ss.initialized)
-			Fail("[ss]([ss.type]) is a subsystem meant to initialize but doesn't get set as initialized.")
+		if(!master_subsystem.initialized)
+			TEST_FAIL("[master_subsystem]([master_subsystem.type]) is a subsystem meant to initialize but doesn't get set as initialized.")
@@ -1,3 +1,3 @@
 /datum/unit_test/timer_sanity/Run()
 	if(SStimer.bucket_count < 0)
-		Fail("SStimer is going into negative bucket count from something")
+		TEST_FAIL("SStimer is going into negative bucket count from something")
@@ -3,59 +3,140 @@
 Usage:
 Override /Run() to run your test code
 
-Call Fail() to fail the test (You should specify a reason)
+Call TEST_FAIL() to fail the test (You should specify a reason)
 
 You may use /New() and /Destroy() for setup/teardown respectively
 
-You can use the run_loc_bottom_left and run_loc_top_right to get turfs for testing
+You can use the run_loc_floor_bottom_left and run_loc_floor_top_right to get turfs for testing
 
 */
 
 GLOBAL_DATUM(current_test, /datum/unit_test)
 GLOBAL_VAR_INIT(failed_any_test, FALSE)
-GLOBAL_VAR(test_log)
+/// When unit testing, all logs sent to log_mapping are stored here and retrieved in log_mapping unit test.
+GLOBAL_LIST_EMPTY(unit_test_mapping_logs)
+/// Global assoc list of required mapping items, [item typepath] to [required item datum].
+GLOBAL_LIST_EMPTY(required_map_items)
+
+/// A list of every test that is currently focused.
+/// Use the PERFORM_ALL_TESTS macro instead.
+GLOBAL_VAR_INIT(focused_tests, focused_tests())
+
+/proc/focused_tests()
+	var/list/focused_tests = list()
+	for (var/datum/unit_test/unit_test as anything in subtypesof(/datum/unit_test))
+		if (initial(unit_test.focus))
+			focused_tests += unit_test
+
+	return focused_tests.len > 0 ? focused_tests : null
 
 /datum/unit_test
 	//Bit of metadata for the future maybe
 	var/list/procs_tested
 
-	//usable vars
-	var/turf/run_loc_bottom_left
-	var/turf/run_loc_top_right
+	/// The bottom left floor turf of the testing zone
+	var/turf/run_loc_floor_bottom_left
+
+	/// The top right floor turf of the testing zone
+	var/turf/run_loc_floor_top_right
+
+	///The priority of the test, the larger it is the later it fires
+	var/priority = TEST_DEFAULT
+
 	//internal shit
 	var/focus = FALSE
 	var/succeeded = TRUE
 	var/list/allocated
 	var/list/fail_reasons
 
+	/// Do not instantiate if type matches this
+	var/abstract_type = /datum/unit_test
+
+	var/static/datum/space_level/reservation
+
+/proc/cmp_unit_test_priority(datum/unit_test/a, datum/unit_test/b)
+	return initial(a.priority) - initial(b.priority)
+
 /datum/unit_test/New()
-	run_loc_bottom_left = locate(1, 1, 1)
-	run_loc_top_right = locate(5, 5, 1)
+	if (isnull(reservation))
+		var/datum/map_template/unit_tests/template = new
+		reservation = template.load_new_z()
+
 	allocated = new
+	run_loc_floor_bottom_left = get_turf(locate(/obj/effect/landmark/unit_test_bottom_left) in GLOB.landmarks_list)
+	run_loc_floor_top_right = get_turf(locate(/obj/effect/landmark/unit_test_top_right) in GLOB.landmarks_list)
+
+	TEST_ASSERT(isfloorturf(run_loc_floor_bottom_left), "run_loc_floor_bottom_left was not a floor ([run_loc_floor_bottom_left])")
+	TEST_ASSERT(isfloorturf(run_loc_floor_top_right), "run_loc_floor_top_right was not a floor ([run_loc_floor_top_right])")
 
 /datum/unit_test/Destroy()
-	//clear the test area
-	for(var/atom/movable/AM in block(run_loc_bottom_left, run_loc_top_right))
-		qdel(AM)
 	QDEL_LIST(allocated)
+	// clear the test area
+	for (var/turf/turf in Z_TURFS(run_loc_floor_bottom_left.z))
+		for (var/content in turf.contents)
+			if (istype(content, /obj/effect/landmark))
+				continue
+			qdel(content)
 	return ..()
 
 /datum/unit_test/proc/Run()
-	Fail("Run() called parent or not implemented")
+	TEST_FAIL("[type]/Run() called parent or not implemented")
 
-/datum/unit_test/proc/Fail(reason = "No reason")
+/datum/unit_test/proc/Fail(reason = "No reason", file = "OUTDATED_TEST", line = 1)
 	succeeded = FALSE
 
 	if(!istext(reason))
 		reason = "FORMATTED: [reason != null ? reason : "NULL"]"
 
-	LAZYADD(fail_reasons, reason)
+	LAZYADD(fail_reasons, list(list(reason, file, line)))
 
-/proc/RunUnitTests()
-	CHECK_TICK
+/// Allocates an instance of the provided type, and places it somewhere in an available loc
+/// Instances allocated through this proc will be destroyed when the test is over
+/datum/unit_test/proc/allocate(type, ...)
+	var/list/arguments = args.Copy(2)
+	if(ispath(type, /atom))
+		if (!arguments.len)
+			arguments = list(run_loc_floor_bottom_left)
+		else if (arguments[1] == null)
+			arguments[1] = run_loc_floor_bottom_left
+	var/instance
+	// Byond will throw an index out of bounds if arguments is empty in that arglist call. Sigh
+	if(length(arguments))
+		instance = new type(arglist(arguments))
+	else
+		instance = new type()
+	allocated += instance
+	return instance
 
-	for(var/I in subtypesof(/datum/unit_test))
-		var/datum/unit_test/test = new I
+/// Logs a test message. Will use GitHub action syntax found at https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions
+/datum/unit_test/proc/log_for_test(text, priority, file, line)
+	var/map_name = SSmapping.config.map_name
 
-		GLOB.current_test = test
-		var/duration = REALTIMEOFDAY
+	// Need to escape the text to properly support newlines.
+	var/annotation_text = replacetext(text, "%", "%25")
+	annotation_text = replacetext(annotation_text, "\n", "%0A")
+
+	log_world("::[priority] file=[file],line=[line],title=[map_name]: [type]::[annotation_text]")
+
+/proc/RunUnitTest(datum/unit_test/test_path, list/test_results)
+	if(ispath(test_path, /datum/unit_test/focus_only))
+		return
+
+	if(initial(test_path.abstract_type) == test_path)
+		return
+
+	var/datum/unit_test/test = new test_path
+
+	GLOB.current_test = test
+	var/duration = REALTIMEOFDAY
+	var/skip_test = (test_path in SSmapping.config.skipped_tests)
+	var/test_output_desc = "[test_path]"
+	var/message = ""
+
+	log_world("::group::[test_path]")
+
+	if(skip_test)
+		log_world("[TEST_OUTPUT_YELLOW("SKIPPED")] Skipped run on map [SSmapping.config.map_name].")
+
+	else
 
 		test.Run()
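A usage sketch for allocate() (hypothetical test, not part of this diff): an atom allocated with no explicit loc lands on the zone's bottom-left floor tile and is cleaned up with the test:

/datum/unit_test/example_allocation/Run()	// hypothetical
	var/obj/item/example = allocate(/obj/item)
	TEST_ASSERT_EQUAL(example.loc, run_loc_floor_bottom_left, "allocate() should default to the test zone's bottom-left turf")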
@@ -63,15 +144,65 @@ GLOBAL_VAR(test_log)
 	GLOB.current_test = null
 	GLOB.failed_any_test |= !test.succeeded
 
-	var/list/log_entry = list("[test.succeeded ? "PASS" : "FAIL"]: [I] [duration / 10]s")
+	var/list/log_entry = list()
 	var/list/fail_reasons = test.fail_reasons
 
-	qdel(test)
+	for(var/reasonID in 1 to LAZYLEN(fail_reasons))
+		var/text = fail_reasons[reasonID][1]
+		var/file = fail_reasons[reasonID][2]
+		var/line = fail_reasons[reasonID][3]
 
-	for(var/J in 1 to LAZYLEN(fail_reasons))
-		log_entry += "\tREASON #[J]: [fail_reasons[J]]"
-	log_test(log_entry.Join("\n"))
+		test.log_for_test(text, "error", file, line)
 
-	CHECK_TICK
+		// Normal log message
+		log_entry += "\tFAILURE #[reasonID]: [text] at [file]:[line]"
+
+	if(length(log_entry))
+		message = log_entry.Join("\n")
+		log_test(message)
+
+	test_output_desc += " [duration / 10]s"
+	if (test.succeeded)
+		log_world("[TEST_OUTPUT_GREEN("PASS")] [test_output_desc]")
+
+	log_world("::endgroup::")
+
+	if (!test.succeeded && !skip_test)
+		log_world("::error::[TEST_OUTPUT_RED("FAIL")] [test_output_desc]")
+
+	var/final_status = skip_test ? UNIT_TEST_SKIPPED : (test.succeeded ? UNIT_TEST_PASSED : UNIT_TEST_FAILED)
+	test_results[test_path] = list("status" = final_status, "message" = message, "name" = test_path)
+
+	qdel(test)
+
+/proc/RunUnitTests()
+	CHECK_TICK
+
+	var/list/tests_to_run = subtypesof(/datum/unit_test)
+	var/list/focused_tests = list()
+	for (var/_test_to_run in tests_to_run)
+		var/datum/unit_test/test_to_run = _test_to_run
+		if (initial(test_to_run.focus))
+			focused_tests += test_to_run
+	if(length(focused_tests))
+		tests_to_run = focused_tests
+
+	tests_to_run = sortTim(tests_to_run, GLOBAL_PROC_REF(cmp_unit_test_priority))
+
+	var/list/test_results = list()
+
+	for(var/unit_path in tests_to_run)
+		CHECK_TICK //We check tick first because the unit test we run last may be so expensive that checking tick will lock up this loop forever
+		RunUnitTest(unit_path, test_results)
+
+	var/file_name = "data/unit_tests.json"
+	fdel(file_name)
+	file(file_name) << json_encode(test_results)
 
 	SSticker.force_ending = TRUE
 	//We have to call this manually because del_text can precede us, and SSticker doesn't fire in the post game
 	SSticker.declare_completion()
+
+/datum/map_template/unit_tests
+	name = "Unit Tests Zone"
+	mappath = "_maps/templates/unit_tests.dmm"
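For reference, with illustrative test names and results, the data/unit_tests.json written above takes this shape (status 0/1/2 corresponding to UNIT_TEST_PASSED/FAILED/SKIPPED):

{
	"/datum/unit_test/timer_sanity": {"status": 0, "message": "", "name": "/datum/unit_test/timer_sanity"},
	"/datum/unit_test/job_roundstart_spawnpoints": {"status": 2, "message": "", "name": "/datum/unit_test/job_roundstart_spawnpoints"}
}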
Binary file not shown. (Before: 42 KiB | After: 42 KiB)
@@ -127,6 +127,7 @@
 #include "code\__DEFINES\traits.dm"
 #include "code\__DEFINES\turfs.dm"
 #include "code\__DEFINES\typeids.dm"
+#include "code\__DEFINES\unit_tests.dm"
 #include "code\__DEFINES\vehicles.dm"
 #include "code\__DEFINES\vv.dm"
 #include "code\__DEFINES\wall_dents.dm"