mirror of https://github.com/SPLURT-Station/S.P.L.U.R.T-Station-13.git (synced 2025-12-11 10:22:13 +00:00)
subsystem upgrades
.vscode/extensions.json (vendored): 3
@@ -4,6 +4,7 @@
"platymuus.dm-langclient",
"EditorConfig.EditorConfig",
"arcanis.vscode-zipfs",
"dbaeumer.vscode-eslint"
"dbaeumer.vscode-eslint",
"kevinkyang.auto-comment-blocks"
]
}

@@ -1 +0,0 @@
#define EXTOOLS (world.system_type == MS_WINDOWS ? "byond-extools.dll" : "libbyond-extools.so")

@@ -284,11 +284,12 @@ GLOBAL_LIST_INIT(atmos_adjacent_savings, list(0,0))
GLOBAL_VAR(atmos_extools_initialized) // this must be an uninitialized (null) one or init_monstermos will be called twice because reasons
#define ATMOS_EXTOOLS_CHECK if(!GLOB.atmos_extools_initialized){\
GLOB.atmos_extools_initialized=TRUE;\
if(fexists(EXTOOLS)){\
var/result = call(EXTOOLS,"init_monstermos")();\
var/extools = world.GetConfig("env", "EXTOOLS_DLL") || (world.system_type == MS_WINDOWS ? "./byond-extools.dll" : "./libbyond-extools.so");\
if(fexists(extools)){\
var/result = call(extools,"init_monstermos")();\
if(result != "ok") {CRASH(result);}\
} else {\
CRASH("[EXTOOLS] does not exist!");\
CRASH("[extools] does not exist!");\
}\
}

@@ -29,5 +29,6 @@
#endif

/world/proc/enable_debugger()
if (fexists(EXTOOLS))
call(EXTOOLS, "debug_initialize")()
var/dll = world.GetConfig("env", "EXTOOLS_DLL")
if (dll)
call(dll, "debug_initialize")()
@@ -1,40 +1,90 @@
//Update this whenever the db schema changes
//make sure you add an update to the schema_version stable in the db changelog
//! Defines for subsystems and overlays
//!
//! Lots of important stuff in here, make sure you have your brain switched on
//! when editing this file

//! ## DB defines
/**
* DB major schema version
*
* Update this whenever the db schema changes
*
* make sure you add an update to the schema_version table in the db changelog
*/
#define DB_MAJOR_VERSION 4

/**
* DB minor schema version
*
* Update this whenever the db schema changes
*
* make sure you add an update to the schema_version table in the db changelog
*/
#define DB_MINOR_VERSION 7

//Timing subsystem
//Don't run if there is an identical unique timer active
//if the arguments to addtimer are the same as an existing timer, it doesn't create a new timer, and returns the id of the existing timer
//! ## Timing subsystem
/**
* Don't run if there is an identical unique timer active
*
* if the arguments to addtimer are the same as an existing timer, it doesn't create a new timer,
* and returns the id of the existing timer
*/
#define TIMER_UNIQUE (1<<0)
//For unique timers: Replace the old timer rather then not start this one

///For unique timers: Replace the old timer rather than not start this one
#define TIMER_OVERRIDE (1<<1)
//Timing should be based on how timing progresses on clients, not the sever.
// tracking this is more expensive,
// should only be used in conjuction with things that have to progress client side, such as animate() or sound()

/**
* Timing should be based on how timing progresses on clients, not the server.
*
* Tracking this is more expensive,
* should only be used in conjunction with things that have to progress client side, such as
* animate() or sound()
*/
#define TIMER_CLIENT_TIME (1<<2)
//Timer can be stopped using deltimer()

///Timer can be stopped using deltimer()
#define TIMER_STOPPABLE (1<<3)
//To be used with TIMER_UNIQUE
//prevents distinguishing identical timers with the wait variable

///prevents distinguishing identical timers with the wait variable
///
///To be used with TIMER_UNIQUE
#define TIMER_NO_HASH_WAIT (1<<4)
//Loops the timer repeatedly until qdeleted
//In most cases you want a subsystem instead

///Loops the timer repeatedly until qdeleted
///
///In most cases you want a subsystem instead, so don't use this unless you have a good reason
#define TIMER_LOOP (1<<5)

#define TIMER_NO_INVOKE_WARNING 600 //number of byond ticks that are allowed to pass before the timer subsystem thinks it hung on something

///Empty ID define
#define TIMER_ID_NULL -1

#define INITIALIZATION_INSSATOMS 0 //New should not call Initialize
#define INITIALIZATION_INNEW_MAPLOAD 2 //New should call Initialize(TRUE)
#define INITIALIZATION_INNEW_REGULAR 1 //New should call Initialize(FALSE)
//! ## Initialization subsystem

#define INITIALIZE_HINT_NORMAL 0 //Nothing happens
#define INITIALIZE_HINT_LATELOAD 1 //Call LateInitialize
#define INITIALIZE_HINT_QDEL 2 //Call qdel on the atom
///New should not call Initialize
#define INITIALIZATION_INSSATOMS 0
///New should call Initialize(TRUE)
#define INITIALIZATION_INNEW_MAPLOAD 2
///New should call Initialize(FALSE)
#define INITIALIZATION_INNEW_REGULAR 1

//type and all subtypes should always call Initialize in New()
//! ### Initialization hints

///Nothing happens
#define INITIALIZE_HINT_NORMAL 0
/**
* Call LateInitialize at the end of all atom Initialization
*
* The item will be added to the late_loaders list, this is iterated over after
* initialization of subsystems is complete and calls LateInitialize on the atom
* see [this file for the LateInitialize proc](atom.html#proc/LateInitialize)
*/
#define INITIALIZE_HINT_LATELOAD 1

///Call qdel on the atom after initialization
#define INITIALIZE_HINT_QDEL 2
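
// Illustrative sketch, not part of this change: how an atom typically returns the hints above.
// The /obj/machinery/relay_beacon type is hypothetical.
/obj/machinery/relay_beacon/Initialize(mapload)
	. = ..()
	// Defer work that depends on other atoms existing until SSatoms finishes initializing
	return INITIALIZE_HINT_LATELOAD

/obj/machinery/relay_beacon/LateInitialize()
	// Called via the late_loaders list described above, once atom initialization completes
	return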

///type and all subtypes should always immediately call Initialize in New()
#define INITIALIZE_IMMEDIATE(X) ##X/New(loc, ...){\
..();\
if(!(flags_1 & INITIALIZED_1)) {\

@@ -47,35 +97,40 @@
// Subsystems shutdown in the reverse of the order they initialize in
// The numbers just define the ordering, they are meaningless otherwise.

#define INIT_ORDER_PROFILER 100
#define INIT_ORDER_FAIL2TOPIC 99
#define INIT_ORDER_TITLE 98
#define INIT_ORDER_GARBAGE 95
#define INIT_ORDER_DBCORE 94
#define INIT_ORDER_STATPANELS 93
#define INIT_ORDER_BLACKBOX 92
#define INIT_ORDER_SERVER_MAINT 91
#define INIT_ORDER_INPUT 90
#define INIT_ORDER_SOUNDS 85
#define INIT_ORDER_PROFILER 102
#define INIT_ORDER_FAIL2TOPIC 101
#define INIT_ORDER_TITLE 100
#define INIT_ORDER_GARBAGE 99
#define INIT_ORDER_DBCORE 95
#define INIT_ORDER_BLACKBOX 94
#define INIT_ORDER_SERVER_MAINT 93
#define INIT_ORDER_INPUT 85
#define INIT_ORDER_SOUNDS 83
#define INIT_ORDER_INSTRUMENTS 82
#define INIT_ORDER_VIS 80
// #define INIT_ORDER_ACHIEVEMENTS 77
#define INIT_ORDER_RESEARCH 75
#define INIT_ORDER_EVENTS 70
#define INIT_ORDER_JOBS 65
#define INIT_ORDER_QUIRKS 60
#define INIT_ORDER_TICKER 55
#define INIT_ORDER_INSTRUMENTS 53
// #define INIT_ORDER_TCG 55
#define INIT_ORDER_MAPPING 50
#define INIT_ORDER_ECONOMY 45
#define INIT_ORDER_NETWORKS 40
// #define INIT_ORDER_TIMETRACK 47
#define INIT_ORDER_NETWORKS 45
#define INIT_ORDER_ECONOMY 40
#define INIT_ORDER_HOLODECK 35
// #define INIT_ORDER_OUTPUTS 35
#define INIT_ORDER_ATOMS 30
#define INIT_ORDER_LANGUAGE 25
#define INIT_ORDER_MACHINES 20
#define INIT_ORDER_CIRCUIT 15
// #define INIT_ORDER_SKILLS 15
#define INIT_ORDER_TIMER 1
#define INIT_ORDER_DEFAULT 0
#define INIT_ORDER_AIR -1
#define INIT_ORDER_AIR_TURFS -2
#define INIT_ORDER_PERSISTENCE -2 //before assets because some assets take data from SSPersistence
#define INIT_ORDER_MINIMAP -3
#define INIT_ORDER_ASSETS -4
#define INIT_ORDER_ICON_SMOOTHING -5

@@ -86,7 +141,9 @@
#define INIT_ORDER_SHUTTLE -21
#define INIT_ORDER_MINOR_MAPPING -40
#define INIT_ORDER_PATH -50
#define INIT_ORDER_PERSISTENCE -95
// #define INIT_ORDER_DISCORD -60
// #define INIT_ORDER_EXPLOSIONS -69
#define INIT_ORDER_STATPANELS -98
#define INIT_ORDER_DEMO -99 // To avoid a bunch of changes related to initialization being written, do this last
#define INIT_ORDER_CHAT -100 //Should be last to ensure chat remains smooth during init.

@@ -102,6 +159,7 @@
#define FIRE_PRIORITY_GARBAGE 15
#define FIRE_PRIORITY_WET_FLOORS 20
#define FIRE_PRIORITY_AIR 20
#define FIRE_PRIORITY_NPC 20
#define FIRE_PRIORITY_PROCESS 25
#define FIRE_PRIORITY_THROWING 25
#define FIRE_PRIORITY_SPACEDRIFT 30

@@ -116,7 +174,7 @@
#define FIRE_PRIORITY_AIR_TURFS 40
#define FIRE_PRIORITY_DEFAULT 50
#define FIRE_PRIORITY_PARALLAX 65
#define FIRE_PRIORITY_NPC 80
#define FIRE_PRIORITY_INSTRUMENTS 80
#define FIRE_PRIORITY_MOBS 100
#define FIRE_PRIORITY_TGUI 110
#define FIRE_PRIORITY_PROJECTILES 200

@@ -126,6 +184,8 @@
#define FIRE_PRIORITY_CHAT 400
#define FIRE_PRIORITY_RUNECHAT 410
#define FIRE_PRIORITY_OVERLAYS 500
// #define FIRE_PRIORITY_EXPLOSIONS 666
#define FIRE_PRIORITY_TIMER 700
#define FIRE_PRIORITY_INPUT 1000 // This must always be the highest priority. Player input must never be lost.

// SS runlevels
@@ -138,6 +198,37 @@

#define RUNLEVELS_DEFAULT (RUNLEVEL_SETUP | RUNLEVEL_GAME | RUNLEVEL_POSTGAME)

//! ## Overlays subsystem

///Compile all the overlays for an atom from the cache lists
// |= on overlays is not actually guaranteed to not add same appearances but we're optimistically using it anyway.
#define COMPILE_OVERLAYS(A)\
if (TRUE) {\
var/list/ad = A.add_overlays;\
var/list/rm = A.remove_overlays;\
if(LAZYLEN(rm)){\
A.overlays -= rm;\
rm.Cut();\
}\
if(LAZYLEN(ad)){\
A.overlays |= ad;\
ad.Cut();\
}\
A.flags_1 &= ~OVERLAY_QUEUED_1;\
}
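
// Illustrative note, not part of this change: atoms do not expand COMPILE_OVERLAYS themselves.
// Helpers such as add_overlay()/cut_overlay() fill add_overlays/remove_overlays and set
// OVERLAY_QUEUED_1; the overlays subsystem later applies the macro to each queued atom,
// roughly: for(var/atom/queued_atom in queue) COMPILE_OVERLAYS(queued_atom)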

/**
Create a new timer and add it to the queue.
* Arguments:
* * callback the callback to call on timer finish
* * wait deciseconds to run the timer for
* * flags flags for this timer, see: code\__DEFINES\subsystems.dm
*/
#define addtimer(args...) _addtimer(args, file = __FILE__, line = __LINE__)
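
// Usage sketch only, not part of this change; /obj/machinery/reactor_demo and its procs are made up.
/obj/machinery/reactor_demo
	var/alarm_timer = TIMER_ID_NULL

/obj/machinery/reactor_demo/proc/arm_alarm()
	// One pending alarm per reactor: TIMER_UNIQUE ignores duplicate requests, TIMER_OVERRIDE
	// restarts the wait instead, and TIMER_STOPPABLE makes addtimer() return an id deltimer() accepts.
	alarm_timer = addtimer(CALLBACK(src, .proc/sound_alarm), 30 SECONDS, TIMER_UNIQUE | TIMER_OVERRIDE | TIMER_STOPPABLE)

/obj/machinery/reactor_demo/proc/disarm_alarm()
	if(alarm_timer != TIMER_ID_NULL)
		deltimer(alarm_timer)
		alarm_timer = TIMER_ID_NULL

/obj/machinery/reactor_demo/proc/sound_alarm()
	visible_message("[src] blares an alarm!")
	alarm_timer = TIMER_ID_NULL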

// SSair run section
#define SSAIR_PIPENETS 1
#define SSAIR_ATMOSMACHINERY 2
@@ -148,19 +239,3 @@
#define SSAIR_REBUILD_PIPENETS 7
#define SSAIR_EQUALIZE 8
#define SSAIR_ACTIVETURFS 9

// |= on overlays is not actually guaranteed to not add same appearances but we're optimistically using it anyway.
#define COMPILE_OVERLAYS(A)\
if (TRUE) {\
var/list/ad = A.add_overlays;\
var/list/rm = A.remove_overlays;\
if(LAZYLEN(rm)){\
A.overlays -= rm;\
A.remove_overlays = null;\
}\
if(LAZYLEN(ad)){\
A.overlays |= ad;\
A.add_overlays = null;\
}\
A.flags_1 &= ~OVERLAY_QUEUED_1;\
}

@@ -37,7 +37,7 @@
* TYPECONT: The typepath of the contents of the list
* COMPARE: The object to compare against, usually the same as INPUT
* COMPARISON: The variable on the objects to compare
* COMPTYPE: How the current bin item to compare against COMPARE is fetched. By key or value.
* COMPTYPE: How should the values be compared? Either COMPARE_KEY or COMPARE_VALUE.
*/
#define BINARY_INSERT(INPUT, LIST, TYPECONT, COMPARE, COMPARISON, COMPTYPE) \
do {\
@@ -49,7 +49,7 @@
var/__BIN_LEFT = 1;\
var/__BIN_RIGHT = __BIN_CTTL;\
var/__BIN_MID = (__BIN_LEFT + __BIN_RIGHT) >> 1;\
var/##TYPECONT/__BIN_ITEM;\
var ##TYPECONT/__BIN_ITEM;\
while(__BIN_LEFT < __BIN_RIGHT) {\
__BIN_ITEM = COMPTYPE;\
if(__BIN_ITEM.##COMPARISON <= COMPARE.##COMPARISON) {\

@@ -3,7 +3,7 @@
name = "Initializing..."
var/target

INITIALIZE_IMMEDIATE(/obj/effect/statclick) //it's new, but rebranded.
INITIALIZE_IMMEDIATE(/obj/effect/statclick)

/obj/effect/statclick/Initialize(mapload, text, target) //Don't port this to Initialize it's too critical
. = ..()
@@ -33,14 +33,6 @@ INITIALIZE_IMMEDIATE(/obj/effect/statclick) //it's new, but rebranded.
usr.client.debug_variables(target)
message_admins("Admin [key_name_admin(usr)] is debugging the [target] [class].")

/obj/effect/statclick/misc_subsystems/Click()
if(!usr.client.holder)
return
var/subsystem = input(usr, "Debug which subsystem?", "Debug nonprocessing subsystem") as null|anything in (Master.subsystems - Master.statworthy_subsystems)
if(!subsystem)
return
usr.client.debug_variables(subsystem)
message_admins("Admin [key_name_admin(usr)] is debugging the [subsystem] subsystem.")

// Debug verbs.
/client/proc/restart_controller(controller in list("Master", "Failsafe"))

@@ -28,8 +28,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new

// List of subsystems to process().
var/list/subsystems
/// List of subsystems to include in the MC stat panel.
var/list/statworthy_subsystems

// Vars for keeping track of tick drift.
var/init_timeofday
@@ -41,7 +39,7 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
///Only run ticker subsystems for the next n ticks.
var/skip_ticks = 0

var/make_runtime = 0
var/make_runtime = FALSE

var/initializations_finished_with_no_players_logged_in //I wonder what this could be?

@@ -67,9 +65,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
//used by CHECK_TICK as well so that the procs subsystems call can obey that SS's tick limits
var/static/current_ticklimit = TICK_LIMIT_RUNNING

/// Statclick for misc subsystems
var/obj/effect/statclick/misc_subsystems/misc_statclick

/datum/controller/master/New()
if(!config)
config = new
@@ -96,11 +91,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
_subsystems += new I
Master = src

// We want to see all subsystems during init.
statworthy_subsystems = subsystems.Copy()

misc_statclick = new(null, "Debug")

if(!GLOB)
new /datum/controller/global_vars

@@ -217,7 +207,7 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
// Sort subsystems by display setting for easy access.
sortTim(subsystems, /proc/cmp_subsystem_display)
// Set world options.
world.fps = CONFIG_GET(number/fps)
world.change_fps(CONFIG_GET(number/fps))
var/initialized_tod = REALTIMEOFDAY

if(tgs_prime)
@@ -271,14 +261,10 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
var/list/tickersubsystems = list()
var/list/runlevel_sorted_subsystems = list(list()) //ensure we always have at least one runlevel
var/timer = world.time
statworthy_subsystems = list()
for (var/thing in subsystems)
var/datum/controller/subsystem/SS = thing
if (SS.flags & SS_NO_FIRE)
if(SS.flags & SS_ALWAYS_SHOW_STAT)
statworthy_subsystems += SS
continue
statworthy_subsystems += SS
SS.queued_time = 0
SS.queue_next = null
SS.queue_prev = null

@@ -23,7 +23,7 @@
var/priority = FIRE_PRIORITY_DEFAULT

/// [Subsystem Flags][SS_NO_INIT] to control binary behavior. Flags must be set at compile time or before preinit finishes to take full effect. (You can also restart the mc to force them to process again)
var/flags = 0
var/flags = NONE

/// This var is set to TRUE after the subsystem has been initialized.
var/initialized = FALSE
@@ -114,7 +114,7 @@
//previously, this would have been named 'process()' but that name is used everywhere for different things!
//fire() seems more suitable. This is the procedure that gets called every 'wait' deciseconds.
//Sleeping in here prevents future fires until returned.
/datum/controller/subsystem/proc/fire(resumed = 0)
/datum/controller/subsystem/proc/fire(resumed = FALSE)
flags |= SS_NO_FIRE
CRASH("Subsystem [src]([type]) does not fire() but did not set the SS_NO_FIRE flag. Please add the SS_NO_FIRE flag to any subsystem that doesn't fire so it doesn't get added to the processing list and waste cpu.")
@@ -10,25 +10,28 @@ SUBSYSTEM_DEF(atoms)

var/old_initialized

var/list/late_loaders
var/list/late_loaders = list()

var/list/BadInitializeCalls = list()

initialized = INITIALIZATION_INSSATOMS

/datum/controller/subsystem/atoms/Initialize(timeofday)
GLOB.fire_overlay.appearance_flags = RESET_COLOR
setupGenetics()
setupGenetics() //to set the mutations' sequence

initialized = INITIALIZATION_INNEW_MAPLOAD
InitializeAtoms()
initialized = INITIALIZATION_INNEW_REGULAR
return ..()

/datum/controller/subsystem/atoms/proc/InitializeAtoms(list/atoms)
if(initialized == INITIALIZATION_INSSATOMS)
return

old_initialized = initialized
initialized = INITIALIZATION_INNEW_MAPLOAD

LAZYINITLIST(late_loaders)

var/count
var/list/mapload_arg = list(TRUE)
if(atoms)
@@ -49,7 +52,7 @@ SUBSYSTEM_DEF(atoms)
testing("Initialized [count] atoms")
pass(count)

initialized = INITIALIZATION_INNEW_REGULAR
initialized = old_initialized

if(late_loaders.len)
for(var/I in late_loaders)
@@ -58,6 +61,7 @@ SUBSYSTEM_DEF(atoms)
testing("Late initialized [late_loaders.len] atoms")
late_loaders.Cut()

/// Init this specific atom
/datum/controller/subsystem/atoms/proc/InitAtom(atom/A, list/arguments)
var/the_type = A.type
if(QDELING(A))
@@ -150,8 +154,3 @@ SUBSYSTEM_DEF(atoms)
var/initlog = InitLog()
if(initlog)
text2file(initlog, "[GLOB.log_directory]/initialize.log")

#undef BAD_INIT_QDEL_BEFORE
#undef BAD_INIT_DIDNT_INIT
#undef BAD_INIT_SLEPT
#undef BAD_INIT_NO_HINT

@@ -14,12 +14,14 @@ SUBSYSTEM_DEF(blackbox)
"explosion" = 2,
"time_dilation_current" = 3,
"science_techweb_unlock" = 2,
"round_end_stats" = 2) //associative list of any feedback variables that have had their format changed since creation and their current version, remember to update this
"round_end_stats" = 2,
"testmerged_prs" = 2) //associative list of any feedback variables that have had their format changed since creation and their current version, remember to update this

/datum/controller/subsystem/blackbox/Initialize()
triggertime = world.time
record_feedback("amount", "random_seed", Master.random_seed)
record_feedback("amount", "dm_version", DM_VERSION)
record_feedback("amount", "dm_build", DM_BUILD)
record_feedback("amount", "byond_version", world.byond_version)
record_feedback("amount", "byond_build", world.byond_build)
. = ..()
@@ -39,10 +41,7 @@ SUBSYSTEM_DEF(blackbox)

if(!SSdbcore.Connect())
return
var/playercount = 0
for(var/mob/M in GLOB.player_list)
if(M.client)
playercount += 1
var/playercount = LAZYLEN(GLOB.player_list)
var/admincount = GLOB.admins.len
var/datum/DBQuery/query_record_playercount = SSdbcore.NewQuery("INSERT INTO [format_table_name("legacy_population")] (playercount, admincount, time, server_ip, server_port, round_id) VALUES ([playercount], [admincount], '[SQLtime()]', INET_ATON(IF('[world.internet_address]' LIKE '', '0', '[world.internet_address]')), '[world.port]', '[GLOB.round_id]')")
query_record_playercount.Execute()
@@ -88,18 +87,24 @@ SUBSYSTEM_DEF(blackbox)
if (!SSdbcore.Connect())
return

// var/list/special_columns = list(
// "datetime" = "NOW()"
// )
var/list/sqlrowlist = list()

for (var/datum/feedback_variable/FV in feedback)
var/sqlversion = 1
if(FV.key in versions)
sqlversion = versions[FV.key]
sqlrowlist += list(list("datetime" = "Now()", "round_id" = GLOB.round_id, "key_name" = "'[sanitizeSQL(FV.key)]'", "key_type" = "'[FV.key_type]'", "version" = "[sqlversion]", "json" = "'[sanitizeSQL(json_encode(FV.json))]'"))
sqlrowlist += list(list(
"datetime" = "Now()", //legacy
"round_id" = GLOB.round_id,
"key_name" = sanitizeSQL(FV.key),
"key_type" = FV.key_type,
"version" = versions[FV.key] || 1,
"json" = sanitizeSQL(json_encode(FV.json))
))

if (!length(sqlrowlist))
return

SSdbcore.MassInsert(format_table_name("feedback"), sqlrowlist, ignore_errors = TRUE, delayed = TRUE)
SSdbcore.MassInsert(format_table_name("feedback"), sqlrowlist, ignore_errors = TRUE, delayed = TRUE)//, special_columns = special_columns)

/datum/controller/subsystem/blackbox/proc/Seal()
if(sealed)
@@ -270,6 +275,18 @@ Versioning
/datum/feedback_variable/New(new_key, new_key_type)
key = new_key
key_type = new_key_type
/*
/datum/controller/subsystem/blackbox/proc/LogAhelp(ticket, action, message, recipient, sender)
if(!SSdbcore.Connect())
return

var/datum/db_query/query_log_ahelp = SSdbcore.NewQuery({"
INSERT INTO [format_table_name("ticket")] (ticket, action, message, recipient, sender, server_ip, server_port, round_id, timestamp)
VALUES (:ticket, :action, :message, :recipient, :sender, INET_ATON(:server_ip), :server_port, :round_id, :time)
"}, list("ticket" = ticket, "action" = action, "message" = message, "recipient" = recipient, "sender" = sender, "server_ip" = world.internet_address || "0", "server_port" = world.port, "round_id" = GLOB.round_id, "time" = SQLtime()))
query_log_ahelp.Execute()
qdel(query_log_ahelp)
*/

/datum/controller/subsystem/blackbox/proc/ReportDeath(mob/living/L)
set waitfor = FALSE

@@ -1,4 +1,4 @@
/**
/*!
* Copyright (c) 2020 Aleksej Komarov
* SPDX-License-Identifier: MIT
*/

@@ -25,7 +25,7 @@ SUBSYSTEM_DEF(events)
return ..()

/datum/controller/subsystem/events/fire(resumed = 0)
/datum/controller/subsystem/events/fire(resumed = FALSE)
if(!resumed)
checkEvent() //only check these if we aren't resuming a paused fire
src.currentrun = running.Copy()
@@ -37,7 +37,7 @@ SUBSYSTEM_DEF(events)
var/datum/thing = currentrun[currentrun.len]
currentrun.len--
if(thing)
thing.process()
thing.process(wait * 0.1)
else
running.Remove(thing)
if (MC_TICK_CHECK)
@@ -91,13 +91,13 @@ SUBSYSTEM_DEF(events)
if(. == EVENT_CANT_RUN)//we couldn't run this event for some reason, set its max_occurrences to 0
E.max_occurrences = 0
else if(. == EVENT_READY)
E.random = TRUE
E.runEvent(TRUE)
E.runEvent(random = TRUE)

//allows a client to trigger an event
//aka Badmin Central
// > Not in modules/admin
// REEEEEEEEE
// Why the heck is this here! Took me so damn long to find!
/client/proc/forceEvent()
set name = "Trigger Event"
set category = "Admin.Events"

@@ -1,3 +1,26 @@
/*!
## Debugging GC issues

In order to debug `qdel()` failures, there are several tools available.
To enable these tools, define `TESTING` in [_compile_options.dm](https://github.com/tgstation/-tg-station/blob/master/code/_compile_options.dm).

First is a verb called "Find References", which lists **every** reference to an object in the world. This allows you to track down any indirect or obfuscated references that you might have missed.

Complementing this is another verb, "qdel() then Find References".
This does exactly what you'd expect; it calls `qdel()` on the object and then it finds all references remaining.
This is great, because it means that `Destroy()` will have been called before it starts to find references,
so the only references you'll find will be the ones preventing the object from `qdel()`ing gracefully.

If you have a datum or something you are not destroying directly (say via the singulo),
the next tool is `QDEL_HINT_FINDREFERENCE`. You can return this in `Destroy()` (where you would normally `return ..()`),
to print a list of references once it enters the GC queue.

Finally is a verb, "Show qdel() Log", which shows the deletion log that the garbage subsystem keeps. This is helpful if you are having race conditions or need to review the order of deletions.

Note that for any of these tools to work `TESTING` must be defined.
By using these methods of finding references, you can make your life far, far easier when dealing with `qdel()` failures.
*/
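
// A hedged sketch, not part of this change, of the QDEL_HINT_FINDREFERENCE workflow described
// above; /datum/orbital_tracker is a made-up type.
/datum/orbital_tracker/Destroy()
	// Clean up our own references first, then ask the GC queue to print whoever still holds us
	// (only does anything useful with TESTING / reference tracking enabled).
	..()
	return QDEL_HINT_FINDREFERENCE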

SUBSYSTEM_DEF(garbage)
name = "Garbage"
priority = FIRE_PRIORITY_GARBAGE
@@ -6,7 +29,7 @@ SUBSYSTEM_DEF(garbage)
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
init_order = INIT_ORDER_GARBAGE

var/list/collection_timeout = list(15 SECONDS, 30 SECONDS) // deciseconds to wait before moving something up in the queue to the next level
var/list/collection_timeout = list(2 MINUTES, 10 SECONDS) // deciseconds to wait before moving something up in the queue to the next level

//Stat tracking
var/delslasttick = 0 // number of del()'s we've done this tick
@@ -24,10 +47,8 @@ SUBSYSTEM_DEF(garbage)

//Queue
var/list/queues

#ifdef LEGACY_REFERENCE_TRACKING
var/list/reference_find_on_fail = list()
var/list/reference_find_on_fail_types = list()
#endif

@@ -99,6 +120,9 @@ SUBSYSTEM_DEF(garbage)
state = SS_RUNNING
break

/datum/controller/subsystem/garbage/proc/HandleQueue(level = GC_QUEUE_CHECK)
if (level == GC_QUEUE_CHECK)
delslasttick = 0
@@ -149,10 +173,10 @@ SUBSYSTEM_DEF(garbage)
D.find_references()
#elif defined(LEGACY_REFERENCE_TRACKING)
if(reference_find_on_fail[refID])
D.find_references()
D.find_references_legacy()
#ifdef GC_FAILURE_HARD_LOOKUP
else
D.find_references()
D.find_references_legacy()
#endif
reference_find_on_fail -= refID
#endif
@@ -195,11 +219,6 @@ SUBSYSTEM_DEF(garbage)
var/gctime = world.time
var/refid = "\ref[D]"

#ifdef LEGACY_REFERENCE_TRACKING
if(reference_find_on_fail_types[D.type])
reference_find_on_fail["\ref[D]"] = TRUE
#endif

D.gc_destroyed = gctime
var/list/queue = queues[level]
if (queue[refid])
@@ -207,21 +226,6 @@ SUBSYSTEM_DEF(garbage)

queue[refid] = gctime

#ifdef LEGACY_REFERENCE_TRACKING
/datum/controller/subsystem/garbage/proc/add_type_to_findref(type)
if(!ispath(type))
return "NOT A VAILD PATH"
reference_find_on_fail_types |= typecacheof(type)

/datum/controller/subsystem/garbage/proc/remove_type_from_findref(type)
if(!ispath(type))
return "NOT A VALID PATH"
reference_find_on_fail_types -= typesof(type)

/datum/controller/subsystem/garbage/proc/clear_findref_types()
reference_find_on_fail_types = list()
#endif

//this is mainly to separate things profile wise.
/datum/controller/subsystem/garbage/proc/HardDelete(datum/D)
var/time = world.timeofday
@@ -274,8 +278,10 @@ SUBSYSTEM_DEF(garbage)
/datum/qdel_item/New(mytype)
name = "[mytype]"

// Should be treated as a replacement for the 'del' keyword.
// Datums passed to this will be given a chance to clean up references to allow the GC to collect them.

/// Should be treated as a replacement for the 'del' keyword.
///
/// Datums passed to this will be given a chance to clean up references to allow the GC to collect them.
/proc/qdel(datum/D, force=FALSE, ...)
if(!istype(D))
del(D)
@@ -330,9 +336,10 @@ SUBSYSTEM_DEF(garbage)
#ifdef LEGACY_REFERENCE_TRACKING
if (QDEL_HINT_FINDREFERENCE) //qdel will, if LEGACY_REFERENCE_TRACKING is enabled, display all references to this object, then queue the object for deletion.
SSgarbage.Queue(D)
D.find_references_legacy()
if (QDEL_HINT_IFFAIL_FINDREFERENCE)
SSgarbage.Queue(D)
SSgarbage.reference_find_on_fail["\ref[D]"] = TRUE
SSgarbage.reference_find_on_fail[REF(D)] = TRUE
#endif
else
#ifdef TESTING
@@ -343,18 +350,3 @@ SUBSYSTEM_DEF(garbage)
SSgarbage.Queue(D)
else if(D.gc_destroyed == GC_CURRENTLY_BEING_QDELETED)
CRASH("[D.type] destroy proc was called multiple times, likely due to a qdel loop in the Destroy logic")

#ifdef TESTING
/proc/writeDatumCount()
var/list/datums = list()
for(var/datum/D in world)
datums[D.type] += 1
for(var/datum/D)
datums[D.type] += 1
datums = sortTim(datums, /proc/cmp_numeric_dsc, associative = TRUE)
if(fexists("data/DATUMCOUNT.txt"))
fdel("data/DATUMCOUNT.txt")
var/outfile = file("data/DATUMCOUNT.txt")
for(var/path in datums)
outfile << "[datums[path]]\t\t\t\t\t[path]"
#endif

@@ -33,8 +33,9 @@ SUBSYSTEM_DEF(idlenpcpool)
while(currentrun.len)
var/mob/living/simple_animal/SA = currentrun[currentrun.len]
--currentrun.len
if (!SA)
if (QDELETED(SA))
GLOB.simple_animals[AI_IDLE] -= SA
log_world("Found a null in simple_animals list!")
continue

if(!SA.ckey)

@@ -2,13 +2,13 @@ SUBSYSTEM_DEF(ipintel)
name = "XKeyScore"
init_order = INIT_ORDER_XKEYSCORE
flags = SS_NO_FIRE
var/enabled = 0 //disable at round start to avoid checking reconnects
var/enabled = FALSE //disable at round start to avoid checking reconnects
var/throttle = 0
var/errors = 0

var/list/cache = list()

/datum/controller/subsystem/ipintel/Initialize(timeofday, zlevel)
enabled = 1
enabled = TRUE
. = ..()

@@ -1,14 +1,14 @@
GLOBAL_LIST_EMPTY(lighting_update_lights) // List of lighting sources queued for update.
GLOBAL_LIST_EMPTY(lighting_update_corners) // List of lighting corners queued for update.
GLOBAL_LIST_EMPTY(lighting_update_objects) // List of lighting objects queued for update.

SUBSYSTEM_DEF(lighting)
name = "Lighting"
wait = 2
init_order = INIT_ORDER_LIGHTING
flags = SS_TICKER
var/static/list/sources_queue = list() // List of lighting sources queued for update.
var/static/list/corners_queue = list() // List of lighting corners queued for update.
var/static/list/objects_queue = list() // List of lighting objects queued for update.

/datum/controller/subsystem/lighting/stat_entry(msg)
msg = "L:[length(GLOB.lighting_update_lights)]|C:[length(GLOB.lighting_update_corners)]|O:[length(GLOB.lighting_update_objects)]"
msg = "L:[length(sources_queue)]|C:[length(corners_queue)]|O:[length(objects_queue)]"
return ..()

@@ -31,9 +31,10 @@ SUBSYSTEM_DEF(lighting)
MC_SPLIT_TICK_INIT(3)
if(!init_tick_checks)
MC_SPLIT_TICK
var/list/queue = sources_queue
var/i = 0
for (i in 1 to GLOB.lighting_update_lights.len)
var/datum/light_source/L = GLOB.lighting_update_lights[i]
for (i in 1 to length(queue))
var/datum/light_source/L = queue[i]

L.update_corners()

@@ -44,14 +45,15 @@ SUBSYSTEM_DEF(lighting)
else if (MC_TICK_CHECK)
break
if (i)
GLOB.lighting_update_lights.Cut(1, i+1)
queue.Cut(1, i+1)
i = 0

if(!init_tick_checks)
MC_SPLIT_TICK

for (i in 1 to GLOB.lighting_update_corners.len)
var/datum/lighting_corner/C = GLOB.lighting_update_corners[i]
queue = corners_queue
for (i in 1 to length(queue))
var/datum/lighting_corner/C = queue[i]

C.update_objects()
C.needs_update = FALSE
@@ -60,15 +62,16 @@ SUBSYSTEM_DEF(lighting)
else if (MC_TICK_CHECK)
break
if (i)
GLOB.lighting_update_corners.Cut(1, i+1)
queue.Cut(1, i+1)
i = 0

if(!init_tick_checks)
MC_SPLIT_TICK

for (i in 1 to GLOB.lighting_update_objects.len)
var/atom/movable/lighting_object/O = GLOB.lighting_update_objects[i]
queue = objects_queue
for (i in 1 to length(queue))
var/atom/movable/lighting_object/O = queue[i]

if (QDELETED(O))
continue
@@ -80,7 +83,7 @@ SUBSYSTEM_DEF(lighting)
else if (MC_TICK_CHECK)
break
if (i)
GLOB.lighting_update_objects.Cut(1, i+1)
queue.Cut(1, i+1)

/datum/controller/subsystem/lighting/Recover()

@@ -1,6 +1,4 @@
//Fires five times every second.

PROCESSING_SUBSYSTEM_DEF(fastprocess)
name = "Fast Processing"
wait = 2
wait = 0.2 SECONDS
stat_tag = "FP"

@@ -1,7 +1,7 @@
PROCESSING_SUBSYSTEM_DEF(nanites)
name = "Nanites"
flags = SS_BACKGROUND|SS_POST_FIRE_TIMING|SS_NO_INIT
wait = 10
wait = 1 SECONDS

var/list/datum/nanite_cloud_backup/cloud_backups = list()
var/list/mob/living/nanite_monitored_mobs = list()

@@ -2,4 +2,4 @@ PROCESSING_SUBSYSTEM_DEF(obj)
name = "Objects"
priority = FIRE_PRIORITY_OBJ
flags = SS_NO_INIT
wait = 20
wait = 2 SECONDS

@@ -1,10 +1,10 @@
//Used to process objects. Fires once every second.
//Used to process objects.

SUBSYSTEM_DEF(processing)
name = "Processing"
priority = FIRE_PRIORITY_PROCESS
flags = SS_BACKGROUND|SS_POST_FIRE_TIMING|SS_NO_INIT
wait = 10
wait = 1 SECONDS

var/stat_tag = "P" //Used for logging
var/list/processing = list()
@@ -14,7 +14,7 @@ SUBSYSTEM_DEF(processing)
msg = "[stat_tag]:[length(processing)]"
return ..()

/datum/controller/subsystem/processing/fire(resumed = 0)
/datum/controller/subsystem/processing/fire(resumed = FALSE)
if (!resumed)
currentrun = processing.Copy()
//cache for sanic speed (lists are references anyways)
@@ -25,12 +25,26 @@ SUBSYSTEM_DEF(processing)
current_run.len--
if(QDELETED(thing))
processing -= thing
else if(thing.process(wait) == PROCESS_KILL)
else if(thing.process(wait * 0.1) == PROCESS_KILL)
// fully stop so that a future START_PROCESSING will work
STOP_PROCESSING(src, thing)
if (MC_TICK_CHECK)
return

/datum/proc/process()
set waitfor = 0

/**
* This proc is called on a datum on every "cycle" if it is being processed by a subsystem. The time between each cycle is determined by the subsystem's "wait" setting.
* You can start and stop processing a datum using the START_PROCESSING and STOP_PROCESSING defines.
*
* Since the wait setting of a subsystem can be changed at any time, it is important that any rate-of-change that you implement in this proc is multiplied by the delta_time that is sent as a parameter.
* Additionally, any "prob" you use in this proc should instead use the DT_PROB define to make sure that the final probability per second stays the same even if the subsystem's wait is altered.
* Examples where this must be considered:
* - Implementing a cooldown timer, use `mytimer -= delta_time`, not `mytimer -= 1`. This way, `mytimer` will always have the unit of seconds
* - Damaging a mob, do `L.adjustFireLoss(20 * delta_time)`, not `L.adjustFireLoss(20)`. This way, the damage per second stays constant even if the wait of the subsystem is changed
* - Probability of something happening, do `if(DT_PROB(25, delta_time))`, not `if(prob(25))`. This way, if the subsystem wait is e.g. lowered, there won't be a higher chance of this event happening per second
*
* If you override this do not call parent, as it will return PROCESS_KILL. This is done to prevent objects that don't override process() from staying in the processing list
*/
/datum/proc/process(delta_time)
set waitfor = FALSE
return PROCESS_KILL
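
// Illustrative override, not part of this change, of the delta_time pattern documented above;
// /obj/machinery/leaky_pipe and its vars are hypothetical.
/obj/machinery/leaky_pipe
	var/drip_cooldown = 5 // seconds until the next drip

/obj/machinery/leaky_pipe/process(delta_time)
	// Scale per-fire work by delta_time so behaviour is unchanged if the subsystem wait changes
	drip_cooldown -= delta_time
	if(drip_cooldown <= 0)
		drip_cooldown = 5
		visible_message("[src] drips.")
	if(DT_PROB(10, delta_time)) // roughly a 10% chance per second, independent of wait
		visible_message("[src] rattles ominously.")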

@@ -5,8 +5,8 @@ PROCESSING_SUBSYSTEM_DEF(quirks)
name = "Quirks"
init_order = INIT_ORDER_QUIRKS
flags = SS_BACKGROUND
wait = 10
runlevels = RUNLEVEL_GAME
wait = 1 SECONDS

var/list/quirks = list() //Assoc. list of all roundstart quirk datum types; "name" = /path/
var/list/quirk_names_by_path = list()

@@ -169,7 +169,7 @@ SUBSYSTEM_DEF(runechat)

// Handle insertion into the secondary queue if the required time is outside our tracked amounts
if (scheduled_destruction >= BUCKET_LIMIT)
BINARY_INSERT(src, SSrunechat.second_queue, datum/chatmessage, src, scheduled_destruction, COMPARE_KEY)
BINARY_INSERT(src, SSrunechat.second_queue, /datum/chatmessage, src, scheduled_destruction, COMPARE_KEY)
return

// Get bucket position and a local reference to the datum var, it's faster to access this way

@@ -1,31 +1,51 @@
#define BUCKET_LEN (world.fps*1*60) //how many ticks should we keep in the bucket. (1 minutes worth)
/// Controls how many buckets should be kept, each representing a tick. (1 minutes worth)
#define BUCKET_LEN (world.fps*1*60)
/// Helper for getting the correct bucket for a given timer
#define BUCKET_POS(timer) (((round((timer.timeToRun - SStimer.head_offset) / world.tick_lag)+1) % BUCKET_LEN)||BUCKET_LEN)
/// Gets the maximum time at which timers will be invoked from buckets, used for deferring to secondary queue
#define TIMER_MAX (world.time + TICKS2DS(min(BUCKET_LEN-(SStimer.practical_offset-DS2TICKS(world.time - SStimer.head_offset))-1, BUCKET_LEN-1)))
#define TIMER_ID_MAX (2**24) //max float with integer precision
/// Max float with integer precision
#define TIMER_ID_MAX (2**24)
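
// Worked example, not part of this change, assuming world.fps == 20 (so world.tick_lag == 0.5 ds):
// BUCKET_LEN = 20*1*60 = 1200 buckets, i.e. one minute worth of ticks.
// A timer whose timeToRun is 25 ds after SStimer.head_offset lands in bucket
// BUCKET_POS = ((round(25 / 0.5) + 1) % 1200) || 1200 = 51.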

/**
* # Timer Subsystem
*
* Handles creation, callbacks, and destruction of timed events.
*
* It is important to understand the buckets used in the timer subsystem are just a series of circular doubly-linked
* lists. The object at a given index in bucket_list is a /datum/timedevent, the head of a circular list, which has prev
* and next references for the respective elements in that bucket's circular list.
*/
SUBSYSTEM_DEF(timer)
name = "Timer"
wait = 1 // SS_TICKER subsystem, so wait is in ticks
init_order = INIT_ORDER_TIMER

priority = FIRE_PRIORITY_TIMER
flags = SS_TICKER|SS_NO_INIT

var/list/datum/timedevent/second_queue = list() //awe, yes, you've had first queue, but what about second queue?
/// Queue used for storing timers that do not fit into the current buckets
var/list/datum/timedevent/second_queue = list()
/// A hashlist dictionary used for storing unique timers
var/list/hashes = list()

var/head_offset = 0 //world.time of the first entry in the the bucket.
var/practical_offset = 1 //index of the first non-empty item in the bucket.
var/bucket_resolution = 0 //world.tick_lag the bucket was designed for
var/bucket_count = 0 //how many timers are in the buckets

var/list/bucket_list = list() //list of buckets, each bucket holds every timer that has to run that byond tick.

var/list/timer_id_dict = list() //list of all active timers assoicated to their timer id (for easy lookup)

var/list/clienttime_timers = list() //special snowflake timers that run on fancy pansy "client time"

/// world.time of the first entry in the bucket list, effectively the 'start time' of the current buckets
var/head_offset = 0
/// Index of the wrap around pivot for buckets. buckets before this are later running buckets wrapped around from the end of the bucket list.
var/practical_offset = 1
/// world.tick_lag the bucket was designed for
var/bucket_resolution = 0
/// How many timers are in the buckets
var/bucket_count = 0
/// List of buckets, each bucket holds every timer that has to run that byond tick
var/list/bucket_list = list()
/// List of all active timers associated to their timer ID (for easy lookup)
var/list/timer_id_dict = list()
/// Special timers that run in real-time, not BYOND time; these are more expensive to run and maintain
var/list/clienttime_timers = list()
/// Contains the last time that a timer's callback was invoked, or the last tick the SS fired if no timers are being processed
var/last_invoke_tick = 0
/// Contains the last time that a warning was issued for not invoking callbacks
var/static/last_invoke_warning = 0
/// Boolean operator controlling if the timer SS will automatically reset buckets if it fails to invoke callbacks for an extended period of time
var/static/bucket_auto_reset = TRUE

/datum/controller/subsystem/timer/PreInit()
@@ -38,13 +58,17 @@ SUBSYSTEM_DEF(timer)
return ..()

/datum/controller/subsystem/timer/fire(resumed = FALSE)
// Store local references to datum vars as it is faster to access them
var/lit = last_invoke_tick
var/last_check = world.time - TICKS2DS(BUCKET_LEN*1.5)
var/list/bucket_list = src.bucket_list
var/last_check = world.time - TICKS2DS(BUCKET_LEN * 1.5)

// If there are no timers being tracked, then consider now to be the last invoked time
if(!bucket_count)
last_invoke_tick = world.time

// Check that we have invoked a callback in the last 1.5 minutes of BYOND time,
// and throw a warning and reset buckets if this is true
if(lit && lit < last_check && head_offset < last_check && last_invoke_warning < last_check)
last_invoke_warning = world.time
var/msg = "No regular timers processed in the last [BUCKET_LEN * 1.5] ticks[bucket_auto_reset ? ", resetting buckets" : ""]!"
@@ -53,29 +77,34 @@ SUBSYSTEM_DEF(timer)
if(bucket_auto_reset)
bucket_resolution = 0

log_world("Timer bucket reset. world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
var/list/to_log = list("Timer bucket reset. world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
for (var/i in 1 to length(bucket_list))
var/datum/timedevent/bucket_head = bucket_list[i]
if (!bucket_head)
continue

log_world("Active timers at index [i]:")

to_log += "Active timers at index [i]:"
var/datum/timedevent/bucket_node = bucket_head
var/anti_loop_check = 1000
do
log_world(get_timer_debug_string(bucket_node))
to_log += get_timer_debug_string(bucket_node)
bucket_node = bucket_node.next
anti_loop_check--
while(bucket_node && bucket_node != bucket_head && anti_loop_check)
log_world("Active timers in the second_queue queue:")

to_log += "Active timers in the second_queue queue:"
for(var/I in second_queue)
log_world(get_timer_debug_string(I))
to_log += get_timer_debug_string(I)

var/next_clienttime_timer_index = 0
var/len = length(clienttime_timers)
// Dump all the logged data to the world log
log_world(to_log.Join("\n"))

for (next_clienttime_timer_index in 1 to len)
// Process client-time timers
var/static/next_clienttime_timer_index = 0
if (next_clienttime_timer_index)
clienttime_timers.Cut(1, next_clienttime_timer_index+1)
next_clienttime_timer_index = 0
for (next_clienttime_timer_index in 1 to length(clienttime_timers))
if (MC_TICK_CHECK)
next_clienttime_timer_index--
break
@@ -86,8 +115,8 @@ SUBSYSTEM_DEF(timer)

var/datum/callback/callBack = ctime_timer.callBack
if (!callBack)
clienttime_timers.Cut(next_clienttime_timer_index,next_clienttime_timer_index+1)
CRASH("Invalid timer: [get_timer_debug_string(ctime_timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset], REALTIMEOFDAY: [REALTIMEOFDAY]")
CRASH("Invalid timer: [get_timer_debug_string(ctime_timer)] world.time: [world.time], \
head_offset: [head_offset], practical_offset: [practical_offset], REALTIMEOFDAY: [REALTIMEOFDAY]")

ctime_timer.spent = REALTIMEOFDAY
callBack.InvokeAsync()
@@ -95,135 +124,93 @@ SUBSYSTEM_DEF(timer)
if(ctime_timer.flags & TIMER_LOOP)
ctime_timer.spent = 0
ctime_timer.timeToRun = REALTIMEOFDAY + ctime_timer.wait
BINARY_INSERT(ctime_timer, clienttime_timers, datum/timedevent, ctime_timer, timeToRun, COMPARE_KEY)
BINARY_INSERT(ctime_timer, clienttime_timers, /datum/timedevent, ctime_timer, timeToRun, COMPARE_KEY)
else
qdel(ctime_timer)

// Remove invoked client-time timers
if (next_clienttime_timer_index)
clienttime_timers.Cut(1, next_clienttime_timer_index+1)
next_clienttime_timer_index = 0

if (MC_TICK_CHECK)
return

var/static/list/spent = list()
var/static/datum/timedevent/timer
// Check for when we need to loop the buckets, this occurs when
// the head_offset is approaching BUCKET_LEN ticks in the past
if (practical_offset > BUCKET_LEN)
head_offset += TICKS2DS(BUCKET_LEN)
practical_offset = 1
resumed = FALSE

// Check for when we have to reset buckets, typically from auto-reset
if ((length(bucket_list) != BUCKET_LEN) || (world.tick_lag != bucket_resolution))
reset_buckets()
bucket_list = src.bucket_list
resumed = FALSE

if (!resumed)
timer = null

// Iterate through each bucket starting from the practical offset
while (practical_offset <= BUCKET_LEN && head_offset + ((practical_offset - 1) * world.tick_lag) <= world.time)
var/datum/timedevent/head = bucket_list[practical_offset]
if (!timer || !head || timer == head)
head = bucket_list[practical_offset]
timer = head
while (timer)
var/datum/timedevent/timer
while ((timer = bucket_list[practical_offset]))
var/datum/callback/callBack = timer.callBack
if (!callBack)
bucket_resolution = null // force bucket recreation
CRASH("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
CRASH("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], \
head_offset: [head_offset], practical_offset: [practical_offset]")

timer.bucketEject() //pop the timer off of the bucket list.

// Invoke callback if possible
if (!timer.spent)
spent += timer
timer.spent = world.time
callBack.InvokeAsync()
last_invoke_tick = world.time

if (MC_TICK_CHECK)
return
if (timer.flags & TIMER_LOOP) // Prepare looping timers to re-enter the queue
timer.spent = 0
timer.timeToRun = world.time + timer.wait
timer.bucketJoin()
else
qdel(timer)

timer = timer.next
if (timer == head)
if (MC_TICK_CHECK)
break

if (!bucket_list[practical_offset])
// Empty the bucket, check if anything in the secondary queue should be shifted to this bucket
bucket_list[practical_offset++] = null

//we freed up a bucket, lets see if anything in second_queue needs to be shifted to that bucket.
var/i = 0
var/L = length(second_queue)
for (i in 1 to L)
for (i in 1 to length(second_queue))
timer = second_queue[i]
if (timer.timeToRun >= TIMER_MAX)
i--
break

// Check for timers that are scheduled to run in the past
if (timer.timeToRun < head_offset)
bucket_resolution = null // force bucket recreation
stack_trace("[i] Invalid timer state: Timer in long run queue with a time to run less then head_offset. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")

if (timer.callBack && !timer.spent)
timer.callBack.InvokeAsync()
spent += timer
bucket_count++
else if(!QDELETED(timer))
qdel(timer)
continue
stack_trace("[i] Invalid timer state: Timer in long run queue with a time to run less then head_offset. \
[get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
break

// Check for timers that are not capable of being scheduled to run without rebuilding buckets
if (timer.timeToRun < head_offset + TICKS2DS(practical_offset - 1))
bucket_resolution = null // force bucket recreation
stack_trace("[i] Invalid timer state: Timer in long run queue that would require a backtrack to transfer to short run queue. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
if (timer.callBack && !timer.spent)
timer.callBack.InvokeAsync()
spent += timer
bucket_count++
else if(!QDELETED(timer))
qdel(timer)
continue
stack_trace("[i] Invalid timer state: Timer in long run queue that would require a backtrack to transfer to \
short run queue. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
break
|
||||
|
||||
bucket_count++
|
||||
var/bucket_pos = max(1, BUCKET_POS(timer))
|
||||
|
||||
var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
|
||||
if (!bucket_head)
|
||||
bucket_list[bucket_pos] = timer
|
||||
timer.next = null
|
||||
timer.prev = null
|
||||
continue
|
||||
|
||||
if (!bucket_head.prev)
|
||||
bucket_head.prev = bucket_head
|
||||
timer.next = bucket_head
|
||||
timer.prev = bucket_head.prev
|
||||
timer.next.prev = timer
|
||||
timer.prev.next = timer
|
||||
timer.bucketJoin()
|
||||
if (i)
|
||||
second_queue.Cut(1, i+1)
|
||||
if (MC_TICK_CHECK)
|
||||
break
|
||||
|
||||
timer = null
|
||||
|
||||
bucket_count -= length(spent)
|
||||
|
||||
for (var/i in spent)
|
||||
var/datum/timedevent/qtimer = i
|
||||
if(QDELETED(qtimer))
|
||||
bucket_count++
|
||||
continue
|
||||
if(!(qtimer.flags & TIMER_LOOP))
|
||||
qdel(qtimer)
|
||||
else
|
||||
bucket_count++
|
||||
qtimer.spent = 0
|
||||
qtimer.bucketEject()
|
||||
if(qtimer.flags & TIMER_CLIENT_TIME)
|
||||
qtimer.timeToRun = REALTIMEOFDAY + qtimer.wait
|
||||
else
|
||||
qtimer.timeToRun = world.time + qtimer.wait
|
||||
qtimer.bucketJoin()
|
||||
|
||||
spent.len = 0
|
||||
|
||||
//formated this way to be runtime resistant
/**
 * Generates a string with details about the timed event for debugging purposes
 */
/datum/controller/subsystem/timer/proc/get_timer_debug_string(datum/timedevent/TE)
	. = "Timer: [TE]"
	. += "Prev: [TE.prev ? TE.prev : "NULL"], Next: [TE.next ? TE.next : "NULL"]"
@@ -234,12 +221,16 @@ SUBSYSTEM_DEF(timer)
	if(!TE.callBack)
		. += ", NO CALLBACK"

/**
 * Destroys the existing buckets and creates new buckets from the existing timed events
 */
/datum/controller/subsystem/timer/proc/reset_buckets()
	var/list/bucket_list = src.bucket_list
	var/list/bucket_list = src.bucket_list // Store local reference to datum var, this is faster
	var/list/alltimers = list()
	//collect the timers currently in the bucket

	// Get all timers currently in the buckets
	for (var/bucket_head in bucket_list)
		if (!bucket_head)
		if (!bucket_head) // if bucket is empty for this tick
			continue
		var/datum/timedevent/bucket_node = bucket_head
		do
@@ -247,25 +238,38 @@ SUBSYSTEM_DEF(timer)
			bucket_node = bucket_node.next
		while(bucket_node && bucket_node != bucket_head)

	// Empty the list by zeroing and re-assigning the length
	bucket_list.len = 0
	bucket_list.len = BUCKET_LEN

	// Reset values for the subsystem to their initial values
	practical_offset = 1
	bucket_count = 0
	head_offset = world.time
	bucket_resolution = world.tick_lag

	// Add all timed events from the secondary queue as well
	alltimers += second_queue

	// If there are no timers being tracked by the subsystem,
	// there is no need to do any further rebuilding
	if (!length(alltimers))
		return

	// Sort all timers by time to run
	sortTim(alltimers, .proc/cmp_timer)

	// Get the earliest timer, and if the TTR is earlier than the current world.time,
	// then set the head offset appropriately to be the earliest time tracked by the
	// current set of buckets
	var/datum/timedevent/head = alltimers[1]

	if (head.timeToRun < head_offset)
		head_offset = head.timeToRun

	// Iterate through each timed event and insert it into an appropriate bucket,
	// up unto the point that we can no longer insert into buckets as the TTR
	// is outside the range we are tracking, then insert the remainder into the
	// secondary queue
	var/new_bucket_count
	var/i = 1
	for (i in 1 to length(alltimers))
@@ -273,32 +277,36 @@ SUBSYSTEM_DEF(timer)
		if (!timer)
			continue

		var/bucket_pos = BUCKET_POS(timer)
		// Check that the TTR is within the range covered by buckets, when exceeded we've finished
		if (timer.timeToRun >= TIMER_MAX)
			i--
			break


		// Check that timer has a valid callback and hasn't been invoked
		if (!timer.callBack || timer.spent)
			WARNING("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
			WARNING("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], \
				head_offset: [head_offset], practical_offset: [practical_offset]")
			if (timer.callBack)
				qdel(timer)
			continue

		// Insert the timer into the bucket, and perform necessary circular doubly-linked list operations
		new_bucket_count++
		var/bucket_pos = BUCKET_POS(timer)
		var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
		if (!bucket_head)
			bucket_list[bucket_pos] = timer
			timer.next = null
			timer.prev = null
			continue

		if (!bucket_head.prev)
			bucket_head.prev = bucket_head
		timer.next = bucket_head
		timer.prev = bucket_head.prev
		timer.next.prev = timer
		timer.prev.next = timer

	// Cut the timers that are tracked by the buckets from the secondary queue
	if (i)
		alltimers.Cut(1, i + 1)
	second_queue = alltimers
@@ -311,35 +319,56 @@ SUBSYSTEM_DEF(timer)
	timer_id_dict |= SStimer.timer_id_dict
	bucket_list |= SStimer.bucket_list

/**
 * # Timed Event
 *
 * This is the actual timer, it contains the callback and necessary data to maintain
 * the timer.
 *
 * See the documentation for the timer subsystem for an explanation of the buckets referenced
 * below in next and prev
 */
/datum/timedevent
	/// ID used for timers when the TIMER_STOPPABLE flag is present
	var/id
	/// The callback to invoke after the timer completes
	var/datum/callback/callBack
	/// The time at which the callback should be invoked at
	var/timeToRun
	/// The length of the timer
	var/wait
	/// Unique hash generated when TIMER_UNIQUE flag is present
	var/hash
	/// The source of the timedevent, whatever called addtimer
	var/source
	/// Flags associated with the timer, see _DEFINES/subsystems.dm
	var/list/flags
	var/spent = 0 //time we ran the timer.
	var/name //for easy debugging.
	//cicular doublely linked list
	/// Time at which the timer was invoked or destroyed
	var/spent = 0
	/// An informative name generated for the timer as its representation in strings, useful for debugging
	var/name
	/// Next timed event in the bucket
	var/datum/timedevent/next
	/// Previous timed event in the bucket
	var/datum/timedevent/prev

/datum/timedevent/New(datum/callback/callBack, wait, flags, hash)
/datum/timedevent/New(datum/callback/callBack, wait, flags, hash, source)
	var/static/nextid = 1
	id = TIMER_ID_NULL
	src.callBack = callBack
	src.wait = wait
	src.flags = flags
	src.hash = hash
	src.source = source

	if (flags & TIMER_CLIENT_TIME)
		timeToRun = REALTIMEOFDAY + wait
	else
		timeToRun = world.time + wait
	// Determine time at which the timer's callback should be invoked
	timeToRun = (flags & TIMER_CLIENT_TIME ? REALTIMEOFDAY : world.time) + wait

	// Include the timer in the hash table if the timer is unique
	if (flags & TIMER_UNIQUE)
		SStimer.hashes[hash] = src

	// Generate ID for the timer if the timer is stoppable, include in the timer id dictionary
	if (flags & TIMER_STOPPABLE)
		id = num2text(nextid, 100)
		if (nextid >= SHORT_REAL_LIMIT)
@@ -348,8 +377,6 @@ SUBSYSTEM_DEF(timer)
			nextid++
		SStimer.timer_id_dict[id] = src

	name = "Timer: [id] (\ref[src]), TTR: [timeToRun], Flags: [jointext(bitfield2list(flags, list("TIMER_UNIQUE", "TIMER_OVERRIDE", "TIMER_CLIENT_TIME", "TIMER_STOPPABLE", "TIMER_NO_HASH_WAIT", "TIMER_LOOP")), ", ")], callBack: \ref[callBack], callBack.object: [callBack.object]\ref[callBack.object]([getcallingtype()]), callBack.delegate:[callBack.delegate]([callBack.arguments ? callBack.arguments.Join(", ") : ""])"

	if ((timeToRun < world.time || timeToRun < SStimer.head_offset) && !(flags & TIMER_CLIENT_TIME))
		CRASH("Invalid timer state: Timer created that would require a backtrack to run (addtimer would never let this happen): [SStimer.get_timer_debug_string(src)]")

@@ -390,23 +417,39 @@ SUBSYSTEM_DEF(timer)
	prev = null
	return QDEL_HINT_IWILLGC

/**
 * Removes this timed event from any relevant buckets, or the secondary queue
 */
/datum/timedevent/proc/bucketEject()
	// Attempt to find bucket that contains this timed event
	var/bucketpos = BUCKET_POS(src)

	// Store local references for the bucket list and secondary queue
	// This is faster than referencing them from the datum itself
	var/list/bucket_list = SStimer.bucket_list
	var/list/second_queue = SStimer.second_queue

	// Attempt to get the head of the bucket
	var/datum/timedevent/buckethead
	if(bucketpos > 0)
		buckethead = bucket_list[bucketpos]

	// Decrement the number of timers in buckets if the timed event is
	// the head of the bucket, or has a TTR less than TIMER_MAX implying it fits
	// into an existing bucket, or is otherwise not present in the secondary queue
	if(buckethead == src)
		bucket_list[bucketpos] = next
		SStimer.bucket_count--
	else if(timeToRun < TIMER_MAX || next || prev)
	else if(timeToRun < TIMER_MAX)
		SStimer.bucket_count--
	else
		var/l = length(second_queue)
		second_queue -= src
		if(l == length(second_queue))
			SStimer.bucket_count--

	// Remove the timed event from the bucket, ensuring to maintain
	// the integrity of the bucket's list if relevant
	if(prev != next)
		prev.next = next
		next.prev = prev
@@ -415,32 +458,47 @@ SUBSYSTEM_DEF(timer)
		next?.prev = null
	prev = next = null

/**
 * Attempts to add this timed event to a bucket, will enter the secondary queue
 * if there are no appropriate buckets at this time.
 *
 * Secondary queueing of timed events will occur when the timespan covered by the existing
 * buckets is exceeded by the time at which this timed event is scheduled to be invoked.
 * If the timed event is tracking client time, it will be added to a special bucket.
 */
/datum/timedevent/proc/bucketJoin()
	var/list/L
	// Generate debug-friendly name for timer
	var/static/list/bitfield_flags = list("TIMER_UNIQUE", "TIMER_OVERRIDE", "TIMER_CLIENT_TIME", "TIMER_STOPPABLE", "TIMER_NO_HASH_WAIT", "TIMER_LOOP")
	name = "Timer: [id] (\ref[src]), TTR: [timeToRun], wait:[wait] Flags: [jointext(bitfield2list(flags, bitfield_flags), ", ")], \
		callBack: \ref[callBack], callBack.object: [callBack.object]\ref[callBack.object]([getcallingtype()]), \
		callBack.delegate:[callBack.delegate]([callBack.arguments ? callBack.arguments.Join(", ") : ""]), source: [source]"

	// Check if this timed event should be diverted to the client time bucket, or the secondary queue
	var/list/L
	if (flags & TIMER_CLIENT_TIME)
		L = SStimer.clienttime_timers
	else if (timeToRun >= TIMER_MAX)
		L = SStimer.second_queue

	if(L)
		BINARY_INSERT(src, L, datum/timedevent, src, timeToRun, COMPARE_KEY)
		BINARY_INSERT(src, L, /datum/timedevent, src, timeToRun, COMPARE_KEY)
		return

	//get the list of buckets
	// Get a local reference to the bucket list, this is faster than referencing the datum
	var/list/bucket_list = SStimer.bucket_list

	//calculate our place in the bucket list
	// Find the correct bucket for this timed event
	var/bucket_pos = BUCKET_POS(src)

	//get the bucket for our tick
	var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
	SStimer.bucket_count++
	//empty bucket, we will just add ourselves

	// If there is no timed event at this position, then the bucket is 'empty'
	// and we can just set this event to that position
	if (!bucket_head)
		bucket_list[bucket_pos] = src
		return
	//other wise, lets do a simplified linked list add.

	// Otherwise, we merely add this timed event into the bucket, which is a
	// circularly doubly-linked list
	if (!bucket_head.prev)
		bucket_head.prev = bucket_head
	next = bucket_head
@@ -448,7 +506,9 @@ SUBSYSTEM_DEF(timer)
	next.prev = src
	prev.next = src

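// Not part of this diff: BUCKET_POS and BUCKET_LEN are defined (and #undef'd) further down
// in this file. As a rough sketch, assuming the usual /tg/-style macros, they map a timer's
// timeToRun onto a circular array covering about one minute of ticks:
//
//	#define BUCKET_LEN (world.fps * 1 * 60)
//	#define BUCKET_POS(timer) (((round((timer.timeToRun - SStimer.head_offset) / world.tick_lag) + 1) % BUCKET_LEN) || BUCKET_LEN)
//
// With tick_lag 0.5, a timer due 3 deciseconds after head_offset would land in bucket
// (3 / 0.5) + 1 = 7. The exact definitions in this codebase may differ; this is only meant
// to illustrate why a timeToRun at or beyond TIMER_MAX falls back to second_queue above.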
///Returns a string of the type of the callback for this timer
/**
 * Returns a string of the type of the callback for this timer
 */
/datum/timedevent/proc/getcallingtype()
	. = "ERROR"
	if (callBack.object == GLOBAL_PROC)
@@ -457,14 +517,15 @@ SUBSYSTEM_DEF(timer)
		. = "[callBack.object.type]"

/**
 * Create a new timer and insert it in the queue
 * Create a new timer and insert it in the queue.
 * You should not call this directly, and should instead use the addtimer macro, which includes source information.
 *
 * Arguments:
 * * callback the callback to call on timer finish
 * * wait deciseconds to run the timer for
 * * flags flags for this timer, see: code\__DEFINES\subsystems.dm
 */
/proc/addtimer(datum/callback/callback, wait = 0, flags = 0)
/proc/_addtimer(datum/callback/callback, wait = 0, flags = 0, file, line)
	if (!callback)
		CRASH("addtimer called without a callback")

@@ -472,21 +533,20 @@ SUBSYSTEM_DEF(timer)
		stack_trace("addtimer called with a negative wait. Converting to [world.tick_lag]")

	if (callback.object != GLOBAL_PROC && QDELETED(callback.object) && !QDESTROYING(callback.object))
		stack_trace("addtimer called with a callback assigned to a qdeleted object. In the future such timers will not be supported and may refuse to run or run with a 0 wait")
		stack_trace("addtimer called with a callback assigned to a qdeleted object. In the future such timers will not \
			be supported and may refuse to run or run with a 0 wait")

	wait = max(CEILING(wait, world.tick_lag), world.tick_lag)

	if(wait >= INFINITY)
		CRASH("Attempted to create timer with INFINITY delay")

	// Generate hash if relevant for timed events with the TIMER_UNIQUE flag
	var/hash

	if (flags & TIMER_UNIQUE)
		var/list/hashlist
		if(flags & TIMER_NO_HASH_WAIT)
			hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, flags & TIMER_CLIENT_TIME)
		else
			hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, wait, flags & TIMER_CLIENT_TIME)
		var/list/hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, flags & TIMER_CLIENT_TIME)
		if(!(flags & TIMER_NO_HASH_WAIT))
			hashlist += wait
		hashlist += callback.arguments
		hash = hashlist.Join("|||||||")

@@ -505,7 +565,7 @@ SUBSYSTEM_DEF(timer)
	else if(flags & TIMER_OVERRIDE)
		stack_trace("TIMER_OVERRIDE used without TIMER_UNIQUE")

	var/datum/timedevent/timer = new(callback, wait, flags, hash)
	var/datum/timedevent/timer = new(callback, wait, flags, hash, file && "[file]:[line]")
	return timer.id

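// Not part of this diff: the addtimer macro mentioned in the doc comment above is expected
// to live in the defines and wrap _addtimer() with the caller's file and line, roughly:
//
//	#define addtimer(args...) _addtimer(args, file = __FILE__, line = __LINE__)
//
// so an ordinary, hypothetical call site stays unchanged:
//
//	addtimer(CALLBACK(src, .proc/update_icon), 2 SECONDS, TIMER_UNIQUE | TIMER_OVERRIDE)
//
// and it is that wrapper which populates the new `source` var on /datum/timedevent.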
/**
@@ -519,7 +579,6 @@ SUBSYSTEM_DEF(timer)
		return FALSE
	if (id == TIMER_ID_NULL)
		CRASH("Tried to delete a null timerid. Use TIMER_STOPPABLE flag")
	if (!istext(id))
		if (istype(id, /datum/timedevent))
			qdel(id)
			return TRUE
@@ -541,15 +600,12 @@ SUBSYSTEM_DEF(timer)
		return null
	if (id == TIMER_ID_NULL)
		CRASH("Tried to get timeleft of a null timerid. Use TIMER_STOPPABLE flag")
	if (!istext(id))
		if (istype(id, /datum/timedevent))
			var/datum/timedevent/timer = id
			return timer.timeToRun - world.time
	//id is string
	var/datum/timedevent/timer = SStimer.timer_id_dict[id]
	if (timer && !timer.spent)
		return timer.timeToRun - world.time
	return null
	return (timer && !timer.spent) ? timer.timeToRun - world.time : null

#undef BUCKET_LEN
#undef BUCKET_POS

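// A minimal sketch (not part of this diff) tying the stoppable-timer helpers together;
// the fuse datum, var and proc names are hypothetical:
/datum/example_fuse
	var/fuse_timer_id

/datum/example_fuse/proc/light(delay = 10 SECONDS)
	fuse_timer_id = addtimer(CALLBACK(src, .proc/detonate), delay, TIMER_STOPPABLE)

/datum/example_fuse/proc/defuse()
	// timeleft() returns deciseconds remaining, or null if the timer is gone or spent
	if (!isnull(timeleft(fuse_timer_id)))
		deltimer(fuse_timer_id)
	fuse_timer_id = null

/datum/example_fuse/proc/detonate()
	return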
@@ -47,7 +47,7 @@ GLOBAL_LIST_EMPTY(potential_mods_per_skill)
		if(!mod_L)
			mod_L = GLOB.potential_mods_per_skill[target_skills] = list()
		else
			BINARY_INSERT(identifier, mod_L, datum/skill_modifier, src, priority, COMPARE_VALUE)
			BINARY_INSERT(identifier, mod_L, /datum/skill_modifier, src, priority, COMPARE_VALUE)
		mod_L[identifier] = src
		GLOB.potential_skills_per_mod[target_skills_key] = list(target_skills)
	else //Should be a list.
@@ -66,7 +66,7 @@ GLOBAL_LIST_EMPTY(potential_mods_per_skill)
			if(!mod_L)
				mod_L = GLOB.potential_mods_per_skill[path] = list()
			else
				BINARY_INSERT(identifier, mod_L, datum/skill_modifier, src, priority, COMPARE_VALUE)
				BINARY_INSERT(identifier, mod_L, /datum/skill_modifier, src, priority, COMPARE_VALUE)
			mod_L[identifier] = src

/datum/skill_modifier/Destroy()

@@ -15,5 +15,5 @@
	message_admins("An alien egg has been delivered to [ADMIN_VERBOSEJMP(T)].")
	log_game("An alien egg has been delivered to [AREACOORD(T)]")
	var/message = "Attention [station_name()], we have entrusted you with a research specimen in [get_area_name(T, TRUE)]. Remember to follow all safety precautions when dealing with the specimen."
	SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/addtimer, CALLBACK(GLOBAL_PROC, /proc/print_command_report, message), announcement_time))
	SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/_addtimer, CALLBACK(GLOBAL_PROC, /proc/print_command_report, message), announcement_time))
	return INITIALIZE_HINT_QDEL

@@ -1,20 +1,46 @@
#define RESTART_COUNTER_PATH "data/round_counter.txt"

GLOBAL_VAR(restart_counter)

GLOBAL_VAR_INIT(tgs_initialized, FALSE)

GLOBAL_VAR(topic_status_lastcache)
GLOBAL_LIST(topic_status_cache)

//This happens after the Master subsystem new(s) (it's a global datum)
//So subsystems globals exist, but are not initialised
/**
 * World creation
 *
 * Here is where a round itself is actually begun and setup.
 * * db connection setup
 * * config loaded from files
 * * loads admins
 * * Sets up the dynamic menu system
 * * and most importantly, calls initialize on the master subsystem, starting the game loop that causes the rest of the game to begin processing and setting up
 *
 *
 * Nothing happens until something moves. ~Albert Einstein
 *
 * For clarity, this proc gets triggered later in the initialization pipeline, it is not the first thing to happen, as it might seem.
 *
 * Initialization Pipeline:
 * Global vars are new()'ed, (including config, glob, and the master controller will also new and preinit all subsystems when it gets new()ed)
 * Compiled in maps are loaded (mainly centcom). all areas/turfs/objs/mobs(ATOMs) in these maps will be new()ed
 * world/New() (You are here)
 * Once world/New() returns, client's can connect.
 * 1 second sleep
 * Master Controller initialization.
 * Subsystem initialization.
 * Non-compiled-in maps are maploaded, all atoms are new()ed
 * All atoms in both compiled and uncompiled maps are initialized()
 */
/world/New()
	if (fexists(EXTOOLS))
		call(EXTOOLS, "maptick_initialize")()
	var/extools = world.GetConfig("env", "EXTOOLS_DLL") || (world.system_type == MS_WINDOWS ? "./byond-extools.dll" : "./libbyond-extools.so")
	if (fexists(extools))
		call(extools, "maptick_initialize")()
#ifdef EXTOOLS_LOGGING
		call(EXTOOLS, "init_logging")()
		call(extools, "init_logging")()
	else
		CRASH("[EXTOOLS] does not exist!")
		CRASH("[extools] does not exist!")
#endif
	enable_debugger()
#ifdef REFERENCE_TRACKING
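// Not part of this diff: with the hardcoded EXTOOLS define gone, the library path can be
// supplied per host through the EXTOOLS_DLL environment variable read by
// world.GetConfig("env", "EXTOOLS_DLL") above. A hypothetical Linux launch sketch
// (binary name and port are made up):
//
//	export EXTOOLS_DLL=./libbyond-extools.so
//	DreamDaemon splurt.dmb 1337 -trusted
//
// If the variable is unset, the platform-specific fallback path shown above is used instead.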
@@ -25,10 +51,9 @@ GLOBAL_LIST(topic_status_cache)

	log_world("World loaded at [TIME_STAMP("hh:mm:ss", FALSE)]!")

	GLOB.config_error_log = GLOB.world_manifest_log = GLOB.world_pda_log = GLOB.world_job_debug_log = GLOB.sql_error_log = GLOB.world_href_log = GLOB.world_runtime_log = GLOB.world_attack_log = GLOB.world_game_log = "data/logs/config_error.[GUID()].log" //temporary file used to record errors with loading config, moved to log directory once logging is set bl

	make_datum_references_lists() //initialises global lists for referencing frequently used datums (so that we only ever do it once)

	GLOB.config_error_log = GLOB.world_manifest_log = GLOB.world_pda_log = GLOB.world_job_debug_log = GLOB.sql_error_log = GLOB.world_href_log = GLOB.world_runtime_log = GLOB.world_attack_log = GLOB.world_game_log = "data/logs/config_error.[GUID()].log" //temporary file used to record errors with loading config, moved to log directory once logging is set bl

	GLOB.revdata = new

@@ -36,6 +61,9 @@ GLOBAL_LIST(topic_status_cache)

	config.Load(params[OVERRIDE_CONFIG_DIRECTORY_PARAMETER])

	load_admins()
	load_mentors()

	//SetupLogs depends on the RoundID, so lets check
	//DB schema and set RoundID if we can
	SSdbcore.CheckSchemaVersion()
@@ -49,14 +77,9 @@ GLOBAL_LIST(topic_status_cache)
	world.log = file("[GLOB.log_directory]/dd.log") //not all runtimes trigger world/Error, so this is the only way to ensure we can see all of them.
#endif

	load_admins()
	load_mentors()
	LoadVerbs(/datum/verbs/menu)
	if(CONFIG_GET(flag/usewhitelist))
		load_whitelist()
	LoadBans()
	initialize_global_loadout_items()
	reload_custom_roundstart_items_list()//Cit change - loads donator items. Remind me to remove when I port over bay's loadout system

	GLOB.timezoneOffset = text2num(time2text(0,"hh")) * 36000

@@ -67,6 +90,10 @@ GLOBAL_LIST(topic_status_cache)
	if(NO_INIT_PARAMETER in params)
		return

	LoadBans()
	initialize_global_loadout_items()
	reload_custom_roundstart_items_list()//Cit change - loads donator items. Remind me to remove when I port over bay's loadout system

	Master.Initialize(10, FALSE, TRUE)

	if(TEST_RUN_PARAMETER in params)
@@ -88,7 +115,7 @@ GLOBAL_LIST(topic_status_cache)
#else
		cb = VARSET_CALLBACK(SSticker, force_ending, TRUE)
#endif
		SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/addtimer, cb, 10 SECONDS))
		SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/_addtimer, cb, 10 SECONDS))

/world/proc/SetupLogs()
	var/override_dir = params[OVERRIDE_LOG_DIRECTORY_PARAMETER]
@@ -136,7 +163,7 @@ GLOBAL_LIST(topic_status_cache)


#ifdef UNIT_TESTS
	GLOB.test_log = file("[GLOB.log_directory]/tests.log")
	GLOB.test_log = "[GLOB.log_directory]/tests.log"
	start_log(GLOB.test_log)
#endif
	start_log(GLOB.world_game_log)
@@ -170,14 +197,6 @@ GLOBAL_LIST(topic_status_cache)
/world/Topic(T, addr, master, key)
	TGS_TOPIC //redirect to server tools if necessary

	if(!SSfail2topic)
		return "Server not initialized."
	else if(SSfail2topic.IsRateLimited(addr))
		return "Rate limited."

	if(length(T) > CONFIG_GET(number/topic_max_size))
		return "Payload too large!"

	var/static/list/topic_handlers = TopicHandlers()

	var/list/input = params2list(T)
@@ -194,7 +213,7 @@ GLOBAL_LIST(topic_status_cache)
		return

	handler = new handler()
	return handler.TryRun(input, addr)
	return handler.TryRun(input)

/world/proc/AnnouncePR(announcement, list/payload)
	var/static/list/PRcounts = list() //PR id -> number of times announced this round
@@ -280,11 +299,11 @@ GLOBAL_LIST(topic_status_cache)

	var/list/features = list()

	/*if(GLOB.master_mode) CIT CHANGE - hides the gamemode from the hub entry, removes some useless info from the hub entry
		features += GLOB.master_mode
//	if(GLOB.master_mode)
//		features += GLOB.master_mode

	if (!GLOB.enter_allowed)
		features += "closed"*/
//	if (!GLOB.enter_allowed)
//		features += "closed"

	var/s = ""
	var/hostedby
@@ -292,25 +311,22 @@ GLOBAL_LIST(topic_status_cache)
	var/server_name = CONFIG_GET(string/servername)
	if (server_name)
		s += "<b>[server_name]</b> — "
	/*features += "[CONFIG_GET(flag/norespawn) ? "no " : ""]respawn" CIT CHANGE - removes some useless info from the hub entry
	if(CONFIG_GET(flag/allow_vote_mode))
		features += "vote"
	if(CONFIG_GET(flag/allow_ai))
		features += "AI allowed"*/
//	features += "[CONFIG_GET(flag/norespawn) ? "no " : ""]respawn"
//	if(CONFIG_GET(flag/allow_vote_mode))
//		features += "vote"
//	if(CONFIG_GET(flag/allow_ai))
//		features += "AI allowed"
	hostedby = CONFIG_GET(string/hostedby)

	s += "<b>[station_name()]</b>";
	s += " ("
	s += "<a href=\"https://citadel-station.net/home/\">" //Change this to wherever you want the hub to link to. CIT CHANGE - links to cit's website on the hub
	s += "Citadel" //Replace this with something else. Or ever better, delete it and uncomment the game version. CIT CHANGE - modifies the hub entry link
	s += "<a href=\"https://citadel-station.net/home\">" //Change this to wherever you want the hub to link to.
	s += "Citadel" //Replace this with something else. Or ever better, delete it and uncomment the game version.
	s += "</a>"
	s += ")\]" //CIT CHANGE - encloses the server title in brackets to make the hub entry fancier
	s += "<br>[CONFIG_GET(string/servertagline)]<br>" //CIT CHANGE - adds a tagline!

	var/n = 0
	for (var/mob/M in GLOB.player_list)
		if (M.client)
			n++
	var/players = GLOB.clients.len

	if(SSmapping.config) // this just stops the runtime, honk.
		features += "[SSmapping.config.map_name]" //CIT CHANGE - makes the hub entry display the current map
@@ -318,16 +334,23 @@ GLOBAL_LIST(topic_status_cache)
	if(NUM2SECLEVEL(GLOB.security_level))//CIT CHANGE - makes the hub entry show the security level
		features += "[NUM2SECLEVEL(GLOB.security_level)] alert"

	if (n > 1)
		features += "~[n] players"
	else if (n > 0)
		features += "~[n] player"
	var/popcaptext = ""
	var/popcap = max(CONFIG_GET(number/extreme_popcap), CONFIG_GET(number/hard_popcap), CONFIG_GET(number/soft_popcap))
	if (popcap)
		popcaptext = "/[popcap]"

	if (players > 1)
		features += "[players][popcaptext] players"
	else if (players > 0)
		features += "[players][popcaptext] player"

	game_state = (CONFIG_GET(number/extreme_popcap) && players >= CONFIG_GET(number/extreme_popcap)) //tells the hub if we are full

	if (!host && hostedby)
		features += "hosted by <b>[hostedby]</b>"

	if (features)
		s += "\[[jointext(features, ", ")]" //CIT CHANGE - replaces the colon here with a left bracket
		s += "\[[jointext(features, ", ")]"

	status = s

@@ -344,6 +367,26 @@ GLOBAL_LIST(topic_status_cache)
	maxz++
	SSmobs.MaxZChanged()
	SSidlenpcpool.MaxZChanged()
	world.refresh_atmos_grid()

/world/proc/refresh_atmos_grid()
/world/proc/change_fps(new_value = 20)
	if(new_value <= 0)
		CRASH("change_fps() called with [new_value] new_value.")
	if(fps == new_value)
		return //No change required.

	fps = new_value
	on_tickrate_change()


/world/proc/change_tick_lag(new_value = 0.5)
	if(new_value <= 0)
		CRASH("change_tick_lag() called with [new_value] new_value.")
	if(tick_lag == new_value)
		return //No change required.

	tick_lag = new_value
	on_tickrate_change()


/world/proc/on_tickrate_change()
	SStimer?.reset_buckets()

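// Usage note (not part of this diff): in BYOND, world.fps and world.tick_lag are two views
// of the same setting (tick_lag = 10 / fps, in deciseconds), so both procs above end in
// on_tickrate_change() and a timer bucket rebuild. Hypothetical call sites:
//
//	world.change_fps(40)		// tick_lag becomes 0.25
//	world.change_tick_lag(0.5)	// equivalent to 20 fps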
@@ -69,7 +69,7 @@ GLOBAL_LIST_EMPTY(actionspeed_modification_cache)
			return TRUE
		remove_actionspeed_modifier(existing, FALSE)
	if(length(actionspeed_modification))
		BINARY_INSERT(type_or_datum.id, actionspeed_modification, datum/actionspeed_modifier, type_or_datum, priority, COMPARE_VALUE)
		BINARY_INSERT(type_or_datum.id, actionspeed_modification, /datum/actionspeed_modifier, type_or_datum, priority, COMPARE_VALUE)
	LAZYSET(actionspeed_modification, type_or_datum.id, type_or_datum)
	if(update)
		update_actionspeed()

@@ -3,8 +3,9 @@
GLOBAL_LIST_EMPTY(deletion_failures)

/world/proc/enable_reference_tracking()
	if (fexists(EXTOOLS))
		call(EXTOOLS, "ref_tracking_initialize")()
	var/extools = world.GetConfig("env", "EXTOOLS_DLL") || (world.system_type == MS_WINDOWS ? "./byond-extools.dll" : "./libbyond-extools.so")
	if (fexists(extools))
		call(extools, "ref_tracking_initialize")()

/proc/get_back_references(datum/D)
	CRASH("/proc/get_back_references not hooked by extools, reference tracking will not function!")
@@ -109,7 +110,7 @@ GLOBAL_LIST_EMPTY(deletion_failures)
	set name = "Find References"
	set src in world

	find_references(FALSE)
	find_references_legacy(FALSE)


/datum/proc/find_references_legacy(skip_alert)
@@ -164,7 +165,7 @@ GLOBAL_LIST_EMPTY(deletion_failures)

	qdel(src, TRUE) //force a qdel
	if(!running_find_references)
		find_references(TRUE)
		find_references_legacy(TRUE)


/datum/verb/qdel_then_if_fail_find_references()

@@ -1,5 +1,5 @@

/// Process asset cache client topic calls for "asset_cache_confirm_arrival=[INT]"
/// Process asset cache client topic calls for `"asset_cache_confirm_arrival=[INT]"`
/client/proc/asset_cache_confirm_arrival(job_id)
	var/asset_cache_job = round(text2num(job_id))
	//because we skip the limiter, we have to make sure this is a valid arrival and not somebody tricking us into letting them append to a list without limit.
@@ -10,7 +10,7 @@
	return asset_cache_job || TRUE


/// Process asset cache client topic calls for "asset_cache_preload_data=[HTML+JSON_STRING]
/// Process asset cache client topic calls for `"asset_cache_preload_data=[HTML+JSON_STRING]"`
/client/proc/asset_cache_preload_data(data)
	var/json = data
	var/list/preloaded_assets = json_decode(json)

@@ -47,8 +47,9 @@
		"smmon_3.gif" = 'icons/program_icons/smmon_3.gif',
		"smmon_4.gif" = 'icons/program_icons/smmon_4.gif',
		"smmon_5.gif" = 'icons/program_icons/smmon_5.gif',
		"smmon_6.gif" = 'icons/program_icons/smmon_6.gif',
		"borg_mon.gif" = 'icons/program_icons/borg_mon.gif'
		"smmon_6.gif" = 'icons/program_icons/smmon_6.gif'
//		"borg_mon.gif" = 'icons/program_icons/borg_mon.gif',
//		"robotact.gif" = 'icons/program_icons/robotact.gif'
	)

/datum/asset/simple/radar_assets
@@ -156,7 +157,6 @@
	)

/datum/asset/simple/namespaced/fontawesome
	legacy = TRUE
	assets = list(
		"fa-regular-400.eot" = 'html/font-awesome/webfonts/fa-regular-400.eot',
		"fa-regular-400.woff" = 'html/font-awesome/webfonts/fa-regular-400.woff',
@@ -248,6 +248,10 @@
		"rule8" = 'icons/UI_Icons/Achievements/Misc/rule8.png',
		"snail" = 'icons/UI_Icons/Achievements/Misc/snail.png',
		"ascension" = 'icons/UI_Icons/Achievements/Misc/ascension.png',
		"ashascend" = 'icons/UI_Icons/Achievements/Misc/ashascend.png',
		"fleshascend" = 'icons/UI_Icons/Achievements/Misc/fleshascend.png',
		"rustascend" = 'icons/UI_Icons/Achievements/Misc/rustascend.png',
		"voidascend" = 'icons/UI_Icons/Achievements/Misc/voidascend.png',
		"mining" = 'icons/UI_Icons/Achievements/Skills/mining.png',
		"assistant" = 'icons/UI_Icons/Achievements/Mafia/assistant.png',
		"changeling" = 'icons/UI_Icons/Achievements/Mafia/changeling.png',
@@ -313,7 +317,28 @@
		"pill21" = 'icons/UI_Icons/Pills/pill21.png',
		"pill22" = 'icons/UI_Icons/Pills/pill22.png',
	)

/*
/datum/asset/spritesheet/simple/condiments
	name = "condiments"
	assets = list(
		CONDIMASTER_STYLE_FALLBACK = 'icons/UI_Icons/Condiments/emptycondiment.png',
		"enzyme" = 'icons/UI_Icons/Condiments/enzyme.png',
		"flour" = 'icons/UI_Icons/Condiments/flour.png',
		"mayonnaise" = 'icons/UI_Icons/Condiments/mayonnaise.png',
		"milk" = 'icons/UI_Icons/Condiments/milk.png',
		"blackpepper" = 'icons/UI_Icons/Condiments/peppermillsmall.png',
		"rice" = 'icons/UI_Icons/Condiments/rice.png',
		"sodiumchloride" = 'icons/UI_Icons/Condiments/saltshakersmall.png',
		"soymilk" = 'icons/UI_Icons/Condiments/soymilk.png',
		"soysauce" = 'icons/UI_Icons/Condiments/soysauce.png',
		"sugar" = 'icons/UI_Icons/Condiments/sugar.png',
		"ketchup" = 'icons/UI_Icons/Condiments/ketchup.png',
		"capsaicin" = 'icons/UI_Icons/Condiments/hotsauce.png',
		"frostoil" = 'icons/UI_Icons/Condiments/coldsauce.png',
		"bbqsauce" = 'icons/UI_Icons/Condiments/bbqsauce.png',
		"cornoil" = 'icons/UI_Icons/Condiments/oliveoil.png',
	)
*/
//this exists purely to avoid meta by pre-loading all language icons.
/datum/asset/language/register()
	for(var/path in typesof(/datum/language))
@@ -484,3 +509,31 @@
/datum/asset/spritesheet/mafia/register()
	InsertAll("", 'icons/obj/mafia.dmi')
	..()

/datum/asset/simple/portraits
	var/tab = "use subtypes of this please"
	assets = list()

/datum/asset/simple/portraits/New()
	if(!SSpersistence.paintings || !SSpersistence.paintings[tab] || !length(SSpersistence.paintings[tab]))
		return
	for(var/p in SSpersistence.paintings[tab])
		var/list/portrait = p
		var/png = "data/paintings/[tab]/[portrait["md5"]].png"
		if(fexists(png))
			assets[portrait["title"]] = png
	..() //this is where it registers all these assets we added to the list

/datum/asset/simple/portraits/library
	tab = "library"

/datum/asset/simple/portraits/library_secure
	tab = "library_secure"

/datum/asset/simple/portraits/library_private
	tab = "library_private"

/datum/asset/simple/safe
	assets = list(
		"safe_dial.png" = 'html/safe_dial.png'
	)

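// A hypothetical example (not part of this diff) of how another persistent-painting tab
// would hook into the portraits asset datum above; "example_gallery" would also need a
// matching entry in SSpersistence.paintings:
/datum/asset/simple/portraits/example_gallery
	tab = "example_gallery"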
@@ -92,7 +92,7 @@
	log_admin_private("[key_name(usr)] cancelled event [name].")
	SSblackbox.record_feedback("tally", "event_admin_cancelled", 1, typepath)

/datum/round_event_control/proc/runEvent()
/datum/round_event_control/proc/runEvent(random = FALSE)
	var/datum/round_event/E = new typepath()
	E.current_players = get_active_player_count(alive_check = 1, afk_check = 1, human_check = 1)
	E.control = src

@@ -175,7 +175,7 @@
/datum/job/proc/announce_head(var/mob/living/carbon/human/H, var/channels) //tells the given channel that the given mob is the new department head. See communications.dm for valid channels.
	if(H && GLOB.announcement_systems.len)
		//timer because these should come after the captain announcement
		SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, .proc/addtimer, CALLBACK(pick(GLOB.announcement_systems), /obj/machinery/announcement_system/proc/announce, "NEWHEAD", H.real_name, H.job, channels), 1))
		SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, .proc/_addtimer, CALLBACK(pick(GLOB.announcement_systems), /obj/machinery/announcement_system/proc/announce, "NEWHEAD", H.real_name, H.job, channels), 1))

//If the configuration option is set to require players to be logged as old enough to play certain jobs, then this proc checks that they are, otherwise it just returns 1
/datum/job/proc/player_old_enough(client/C)

@@ -305,9 +305,6 @@
	testing("Skipped loading [turfsSkipped] default turfs")
#endif

	if(did_expand)
		world.refresh_atmos_grid()

	return TRUE

/datum/parsed_map/proc/build_cache(no_changeturf, bad_paths=null)

@@ -1,12 +1,22 @@
//This file is just for the necessary /world definition
//Try looking in game/world.dm

/**
 * # World
 *
 * Two possibilities exist: either we are alone in the Universe or we are not. Both are equally terrifying. ~ Arthur C. Clarke
 *
 * The byond world object stores some basic byond level config, and has a few hub specific procs for managing hub visiblity
 *
 * The world /New() is the root of where a round itself begins
 */
/world
	mob = /mob/dead/new_player
	turf = /turf/open/space/basic
	area = /area/space
	view = "15x15"
	hub = "Exadv1.spacestation13"
	hub_password = "kMZy3U5jJHSiBQjr"
	name = "/tg/ Station 13"
	fps = 20
#ifdef FIND_REF_NO_CHECK_TICK

BIN html/safe_dial.png (new file, 131 KiB; binary file not shown)
@@ -17,7 +17,6 @@
#include "_maps\_basemap.dm"
#include "code\_compile_options.dm"
#include "code\world.dm"
#include "code\__DEFINES\_extools.dm"
#include "code\__DEFINES\_globals.dm"
#include "code\__DEFINES\_protect.dm"
#include "code\__DEFINES\_tick.dm"