subsystem upgrades

This commit is contained in:
Letter N
2020-12-14 15:28:59 +08:00
parent 4b52e3c6a3
commit cf692e8426
40 changed files with 734 additions and 496 deletions

View File

@@ -3,7 +3,7 @@
name = "Initializing..."
var/target
INITIALIZE_IMMEDIATE(/obj/effect/statclick) //it's new, but rebranded.
INITIALIZE_IMMEDIATE(/obj/effect/statclick)
/obj/effect/statclick/Initialize(mapload, text, target) //Don't port this to Initialize it's too critical
. = ..()
@@ -33,14 +33,6 @@ INITIALIZE_IMMEDIATE(/obj/effect/statclick) //it's new, but rebranded.
usr.client.debug_variables(target)
message_admins("Admin [key_name_admin(usr)] is debugging the [target] [class].")
/obj/effect/statclick/misc_subsystems/Click()
if(!usr.client.holder)
return
var/subsystem = input(usr, "Debug which subsystem?", "Debug nonprocessing subsystem") as null|anything in (Master.subsystems - Master.statworthy_subsystems)
if(!subsystem)
return
usr.client.debug_variables(subsystem)
message_admins("Admin [key_name_admin(usr)] is debugging the [subsystem] subsystem.")
// Debug verbs.
/client/proc/restart_controller(controller in list("Master", "Failsafe"))

View File

@@ -1,7 +1,7 @@
/**
* Failsafe
*
* Pretty much pokes the MC to make sure it's still alive.
* Failsafe
*
* Pretty much pokes the MC to make sure it's still alive.
**/
GLOBAL_REAL(Failsafe, /datum/controller/failsafe)

View File

@@ -1,10 +1,10 @@
/**
* StonedMC
*
* Designed to properly split up a given tick among subsystems
* Note: if you read parts of this code and think "why is it doing it that way"
* Odds are, there is a reason
*
/**
* StonedMC
*
* Designed to properly split up a given tick among subsystems
* Note: if you read parts of this code and think "why is it doing it that way"
* Odds are, there is a reason
*
**/
//This is the ABSOLUTE ONLY THING that should init globally like this
@@ -28,8 +28,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
// List of subsystems to process().
var/list/subsystems
/// List of subsystems to include in the MC stat panel.
var/list/statworthy_subsystems
// Vars for keeping track of tick drift.
var/init_timeofday
@@ -41,7 +39,7 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
///Only run ticker subsystems for the next n ticks.
var/skip_ticks = 0
var/make_runtime = 0
var/make_runtime = FALSE
var/initializations_finished_with_no_players_logged_in //I wonder what this could be?
@@ -67,9 +65,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
//used by CHECK_TICK as well so that the procs subsystems call can obey that SS's tick limits
var/static/current_ticklimit = TICK_LIMIT_RUNNING
/// Statclick for misc subsystems
var/obj/effect/statclick/misc_subsystems/misc_statclick
/datum/controller/master/New()
if(!config)
config = new
@@ -96,11 +91,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
_subsystems += new I
Master = src
// We want to see all subsystems during init.
statworthy_subsystems = subsystems.Copy()
misc_statclick = new(null, "Debug")
if(!GLOB)
new /datum/controller/global_vars
@@ -217,7 +207,7 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
// Sort subsystems by display setting for easy access.
sortTim(subsystems, /proc/cmp_subsystem_display)
// Set world options.
world.fps = CONFIG_GET(number/fps)
world.change_fps(CONFIG_GET(number/fps))
var/initialized_tod = REALTIMEOFDAY
if(tgs_prime)
@@ -271,14 +261,10 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
var/list/tickersubsystems = list()
var/list/runlevel_sorted_subsystems = list(list()) //ensure we always have at least one runlevel
var/timer = world.time
statworthy_subsystems = list()
for (var/thing in subsystems)
var/datum/controller/subsystem/SS = thing
if (SS.flags & SS_NO_FIRE)
if(SS.flags & SS_ALWAYS_SHOW_STAT)
statworthy_subsystems += SS
continue
statworthy_subsystems += SS
SS.queued_time = 0
SS.queue_next = null
SS.queue_prev = null

View File

@@ -23,7 +23,7 @@
var/priority = FIRE_PRIORITY_DEFAULT
/// [Subsystem Flags][SS_NO_INIT] to control binary behavior. Flags must be set at compile time or before preinit finishes to take full effect. (You can also restart the mc to force them to process again)
var/flags = 0
var/flags = NONE
/// This var is set to TRUE after the subsystem has been initialized.
var/initialized = FALSE
@@ -114,7 +114,7 @@
//previously, this would have been named 'process()' but that name is used everywhere for different things!
//fire() seems more suitable. This is the procedure that gets called every 'wait' deciseconds.
//Sleeping in here prevents future fires until returned.
/datum/controller/subsystem/proc/fire(resumed = 0)
/datum/controller/subsystem/proc/fire(resumed = FALSE)
flags |= SS_NO_FIRE
CRASH("Subsystem [src]([type]) does not fire() but did not set the SS_NO_FIRE flag. Please add the SS_NO_FIRE flag to any subsystem that doesn't fire so it doesn't get added to the processing list and waste cpu.")

View File

@@ -11,7 +11,7 @@ SUBSYSTEM_DEF(assets)
switch (CONFIG_GET(string/asset_transport))
if ("webroot")
newtransporttype = /datum/asset_transport/webroot
if (newtransporttype == transport.type)
return

View File

@@ -10,25 +10,28 @@ SUBSYSTEM_DEF(atoms)
var/old_initialized
var/list/late_loaders
var/list/late_loaders = list()
var/list/BadInitializeCalls = list()
initialized = INITIALIZATION_INSSATOMS
/datum/controller/subsystem/atoms/Initialize(timeofday)
GLOB.fire_overlay.appearance_flags = RESET_COLOR
setupGenetics()
setupGenetics() //to set the mutations' sequence
initialized = INITIALIZATION_INNEW_MAPLOAD
InitializeAtoms()
initialized = INITIALIZATION_INNEW_REGULAR
return ..()
/datum/controller/subsystem/atoms/proc/InitializeAtoms(list/atoms)
if(initialized == INITIALIZATION_INSSATOMS)
return
old_initialized = initialized
initialized = INITIALIZATION_INNEW_MAPLOAD
LAZYINITLIST(late_loaders)
var/count
var/list/mapload_arg = list(TRUE)
if(atoms)
@@ -49,7 +52,7 @@ SUBSYSTEM_DEF(atoms)
testing("Initialized [count] atoms")
pass(count)
initialized = INITIALIZATION_INNEW_REGULAR
initialized = old_initialized
if(late_loaders.len)
for(var/I in late_loaders)
@@ -58,6 +61,7 @@ SUBSYSTEM_DEF(atoms)
testing("Late initialized [late_loaders.len] atoms")
late_loaders.Cut()
/// Init this specific atom
/datum/controller/subsystem/atoms/proc/InitAtom(atom/A, list/arguments)
var/the_type = A.type
if(QDELING(A))
@@ -150,8 +154,3 @@ SUBSYSTEM_DEF(atoms)
var/initlog = InitLog()
if(initlog)
text2file(initlog, "[GLOB.log_directory]/initialize.log")
#undef BAD_INIT_QDEL_BEFORE
#undef BAD_INIT_DIDNT_INIT
#undef BAD_INIT_SLEPT
#undef BAD_INIT_NO_HINT

View File

@@ -14,12 +14,14 @@ SUBSYSTEM_DEF(blackbox)
"explosion" = 2,
"time_dilation_current" = 3,
"science_techweb_unlock" = 2,
"round_end_stats" = 2) //associative list of any feedback variables that have had their format changed since creation and their current version, remember to update this
"round_end_stats" = 2,
"testmerged_prs" = 2) //associative list of any feedback variables that have had their format changed since creation and their current version, remember to update this
/datum/controller/subsystem/blackbox/Initialize()
triggertime = world.time
record_feedback("amount", "random_seed", Master.random_seed)
record_feedback("amount", "dm_version", DM_VERSION)
record_feedback("amount", "dm_build", DM_BUILD)
record_feedback("amount", "byond_version", world.byond_version)
record_feedback("amount", "byond_build", world.byond_build)
. = ..()
@@ -39,10 +41,7 @@ SUBSYSTEM_DEF(blackbox)
if(!SSdbcore.Connect())
return
var/playercount = 0
for(var/mob/M in GLOB.player_list)
if(M.client)
playercount += 1
var/playercount = LAZYLEN(GLOB.player_list)
var/admincount = GLOB.admins.len
var/datum/DBQuery/query_record_playercount = SSdbcore.NewQuery("INSERT INTO [format_table_name("legacy_population")] (playercount, admincount, time, server_ip, server_port, round_id) VALUES ([playercount], [admincount], '[SQLtime()]', INET_ATON(IF('[world.internet_address]' LIKE '', '0', '[world.internet_address]')), '[world.port]', '[GLOB.round_id]')")
query_record_playercount.Execute()
@@ -88,18 +87,24 @@ SUBSYSTEM_DEF(blackbox)
if (!SSdbcore.Connect())
return
// var/list/special_columns = list(
// "datetime" = "NOW()"
// )
var/list/sqlrowlist = list()
for (var/datum/feedback_variable/FV in feedback)
var/sqlversion = 1
if(FV.key in versions)
sqlversion = versions[FV.key]
sqlrowlist += list(list("datetime" = "Now()", "round_id" = GLOB.round_id, "key_name" = "'[sanitizeSQL(FV.key)]'", "key_type" = "'[FV.key_type]'", "version" = "[sqlversion]", "json" = "'[sanitizeSQL(json_encode(FV.json))]'"))
sqlrowlist += list(list(
"datetime" = "Now()", //legacy
"round_id" = GLOB.round_id,
"key_name" = sanitizeSQL(FV.key),
"key_type" = FV.key_type,
"version" = versions[FV.key] || 1,
"json" = sanitizeSQL(json_encode(FV.json))
))
if (!length(sqlrowlist))
return
SSdbcore.MassInsert(format_table_name("feedback"), sqlrowlist, ignore_errors = TRUE, delayed = TRUE)
SSdbcore.MassInsert(format_table_name("feedback"), sqlrowlist, ignore_errors = TRUE, delayed = TRUE)//, special_columns = special_columns)
/datum/controller/subsystem/blackbox/proc/Seal()
if(sealed)
@@ -169,7 +174,7 @@ feedback data can be recorded in 5 formats:
"tally"
used to track the number of occurrences of multiple related values, i.e. how many times each type of gun is fired
further calls to the same key will:
add or subtract from the saved value of the data key if it already exists
add or subtract from the saved value of the data key if it already exists
append the key and its value if it doesn't exist
calls: SSblackbox.record_feedback("tally", "example", 1, "sample data")
SSblackbox.record_feedback("tally", "example", 4, "sample data")
@@ -181,7 +186,7 @@ feedback data can be recorded in 5 formats:
the final element in the data list is used as the tracking key, all prior elements are used for nesting
all data list elements must be strings
further calls to the same key will:
add or subtract from the saved value of the data key if it already exists in the same multi-dimensional position
add or subtract from the saved value of the data key if it already exists in the same multi-dimensional position
append the key and its value if it doesn't exist
calls: SSblackbox.record_feedback("nested tally", "example", 1, list("fruit", "orange", "apricot"))
SSblackbox.record_feedback("nested tally", "example", 2, list("fruit", "orange", "orange"))
@@ -270,6 +275,18 @@ Versioning
/datum/feedback_variable/New(new_key, new_key_type)
key = new_key
key_type = new_key_type
/*
/datum/controller/subsystem/blackbox/proc/LogAhelp(ticket, action, message, recipient, sender)
if(!SSdbcore.Connect())
return
var/datum/db_query/query_log_ahelp = SSdbcore.NewQuery({"
INSERT INTO [format_table_name("ticket")] (ticket, action, message, recipient, sender, server_ip, server_port, round_id, timestamp)
VALUES (:ticket, :action, :message, :recipient, :sender, INET_ATON(:server_ip), :server_port, :round_id, :time)
"}, list("ticket" = ticket, "action" = action, "message" = message, "recipient" = recipient, "sender" = sender, "server_ip" = world.internet_address || "0", "server_port" = world.port, "round_id" = GLOB.round_id, "time" = SQLtime()))
query_log_ahelp.Execute()
qdel(query_log_ahelp)
*/
/datum/controller/subsystem/blackbox/proc/ReportDeath(mob/living/L)
set waitfor = FALSE

View File

@@ -1,4 +1,4 @@
/**
/*!
* Copyright (c) 2020 Aleksej Komarov
* SPDX-License-Identifier: MIT
*/

View File

@@ -25,7 +25,7 @@ SUBSYSTEM_DEF(events)
return ..()
/datum/controller/subsystem/events/fire(resumed = 0)
/datum/controller/subsystem/events/fire(resumed = FALSE)
if(!resumed)
checkEvent() //only check these if we aren't resuming a paused fire
src.currentrun = running.Copy()
@@ -37,7 +37,7 @@ SUBSYSTEM_DEF(events)
var/datum/thing = currentrun[currentrun.len]
currentrun.len--
if(thing)
thing.process()
thing.process(wait * 0.1)
else
running.Remove(thing)
if (MC_TICK_CHECK)
@@ -91,13 +91,13 @@ SUBSYSTEM_DEF(events)
if(. == EVENT_CANT_RUN)//we couldn't run this event for some reason, set its max_occurrences to 0
E.max_occurrences = 0
else if(. == EVENT_READY)
E.random = TRUE
E.runEvent(TRUE)
E.runEvent(random = TRUE)
//allows a client to trigger an event
//aka Badmin Central
// > Not in modules/admin
// REEEEEEEEE
// Why the heck is this here! Took me so damn long to find!
/client/proc/forceEvent()
set name = "Trigger Event"
set category = "Admin.Events"

View File

@@ -1,3 +1,26 @@
/*!
## Debugging GC issues
In order to debug `qdel()` failures, there are several tools available.
To enable these tools, define `TESTING` in [_compile_options.dm](https://github.com/tgstation/-tg-station/blob/master/code/_compile_options.dm).
First is a verb called "Find References", which lists **every** reference to an object in the world. This allows you to track down any indirect or obfuscated references that you might have missed.
Complementing this is another verb, "qdel() then Find References".
This does exactly what you'd expect; it calls `qdel()` on the object and then it finds all references remaining.
This is great, because it means that `Destroy()` will have been called before it starts to find references,
so the only references you'll find will be the ones preventing the object from `qdel()`ing gracefully.
If you have a datum or something you are not destroying directly (say via the singulo),
the next tool is `QDEL_HINT_FINDREFERENCE`. You can return this in `Destroy()` (where you would normally `return ..()`),
to print a list of references once it enters the GC queue.
Finally, there is a verb, "Show qdel() Log", which shows the deletion log that the garbage subsystem keeps. This is helpful if you are chasing race conditions or need to review the order of deletions.
Note that for any of these tools to work, `TESTING` must be defined.
By using these methods of finding references, you can make your life far, far easier when dealing with `qdel()` failures.
*/
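As a sketch of the `QDEL_HINT_FINDREFERENCE` route described above (the type and var here are hypothetical, and the hint only does anything with the reference-tracking defines enabled):

/datum/my_leaky_thing/Destroy() // hypothetical type, for illustration only
	my_list = null // release references as usual
	..() // let parent types clean up, but override the hint they return
	return QDEL_HINT_FINDREFERENCE // print remaining references once the GC queues us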
SUBSYSTEM_DEF(garbage)
name = "Garbage"
priority = FIRE_PRIORITY_GARBAGE
@@ -6,7 +29,7 @@ SUBSYSTEM_DEF(garbage)
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
init_order = INIT_ORDER_GARBAGE
var/list/collection_timeout = list(15 SECONDS, 30 SECONDS) // deciseconds to wait before moving something up in the queue to the next level
var/list/collection_timeout = list(2 MINUTES, 10 SECONDS) // deciseconds to wait before moving something up in the queue to the next level
//Stat tracking
var/delslasttick = 0 // number of del()'s we've done this tick
@@ -24,10 +47,8 @@ SUBSYSTEM_DEF(garbage)
//Queue
var/list/queues
#ifdef LEGACY_REFERENCE_TRACKING
var/list/reference_find_on_fail = list()
var/list/reference_find_on_fail_types = list()
#endif
@@ -99,6 +120,9 @@ SUBSYSTEM_DEF(garbage)
state = SS_RUNNING
break
/datum/controller/subsystem/garbage/proc/HandleQueue(level = GC_QUEUE_CHECK)
if (level == GC_QUEUE_CHECK)
delslasttick = 0
@@ -135,7 +159,7 @@ SUBSYSTEM_DEF(garbage)
++totalgcs
pass_counts[level]++
#ifdef LEGACY_REFERENCE_TRACKING
reference_find_on_fail -= refID //It's deleted we don't care anymore.
reference_find_on_fail -= refID //It's deleted we don't care anymore.
#endif
if (MC_TICK_CHECK)
return
@@ -149,10 +173,10 @@ SUBSYSTEM_DEF(garbage)
D.find_references()
#elif defined(LEGACY_REFERENCE_TRACKING)
if(reference_find_on_fail[refID])
D.find_references()
D.find_references_legacy()
#ifdef GC_FAILURE_HARD_LOOKUP
else
D.find_references()
D.find_references_legacy()
#endif
reference_find_on_fail -= refID
#endif
@@ -195,11 +219,6 @@ SUBSYSTEM_DEF(garbage)
var/gctime = world.time
var/refid = "\ref[D]"
#ifdef LEGACY_REFERENCE_TRACKING
if(reference_find_on_fail_types[D.type])
reference_find_on_fail["\ref[D]"] = TRUE
#endif
D.gc_destroyed = gctime
var/list/queue = queues[level]
if (queue[refid])
@@ -207,21 +226,6 @@ SUBSYSTEM_DEF(garbage)
queue[refid] = gctime
#ifdef LEGACY_REFERENCE_TRACKING
/datum/controller/subsystem/garbage/proc/add_type_to_findref(type)
if(!ispath(type))
return "NOT A VAILD PATH"
reference_find_on_fail_types |= typecacheof(type)
/datum/controller/subsystem/garbage/proc/remove_type_from_findref(type)
if(!ispath(type))
return "NOT A VALID PATH"
reference_find_on_fail_types -= typesof(type)
/datum/controller/subsystem/garbage/proc/clear_findref_types()
reference_find_on_fail_types = list()
#endif
//this is mainly to separate things profile wise.
/datum/controller/subsystem/garbage/proc/HardDelete(datum/D)
var/time = world.timeofday
@@ -274,8 +278,10 @@ SUBSYSTEM_DEF(garbage)
/datum/qdel_item/New(mytype)
name = "[mytype]"
// Should be treated as a replacement for the 'del' keyword.
// Datums passed to this will be given a chance to clean up references to allow the GC to collect them.
/// Should be treated as a replacement for the 'del' keyword.
///
/// Datums passed to this will be given a chance to clean up references to allow the GC to collect them.
/proc/qdel(datum/D, force=FALSE, ...)
if(!istype(D))
del(D)
@@ -330,9 +336,10 @@ SUBSYSTEM_DEF(garbage)
#ifdef LEGACY_REFERENCE_TRACKING
if (QDEL_HINT_FINDREFERENCE) //qdel will, if LEGACY_REFERENCE_TRACKING is enabled, display all references to this object, then queue the object for deletion.
SSgarbage.Queue(D)
D.find_references_legacy()
if (QDEL_HINT_IFFAIL_FINDREFERENCE)
SSgarbage.Queue(D)
SSgarbage.reference_find_on_fail["\ref[D]"] = TRUE
SSgarbage.reference_find_on_fail[REF(D)] = TRUE
#endif
else
#ifdef TESTING
@@ -343,18 +350,3 @@ SUBSYSTEM_DEF(garbage)
SSgarbage.Queue(D)
else if(D.gc_destroyed == GC_CURRENTLY_BEING_QDELETED)
CRASH("[D.type] destroy proc was called multiple times, likely due to a qdel loop in the Destroy logic")
#ifdef TESTING
/proc/writeDatumCount()
var/list/datums = list()
for(var/datum/D in world)
datums[D.type] += 1
for(var/datum/D)
datums[D.type] += 1
datums = sortTim(datums, /proc/cmp_numeric_dsc, associative = TRUE)
if(fexists("data/DATUMCOUNT.txt"))
fdel("data/DATUMCOUNT.txt")
var/outfile = file("data/DATUMCOUNT.txt")
for(var/path in datums)
outfile << "[datums[path]]\t\t\t\t\t[path]"
#endif

View File

@@ -33,8 +33,9 @@ SUBSYSTEM_DEF(idlenpcpool)
while(currentrun.len)
var/mob/living/simple_animal/SA = currentrun[currentrun.len]
--currentrun.len
if (!SA)
if (QDELETED(SA))
GLOB.simple_animals[AI_IDLE] -= SA
log_world("Found a null in simple_animals list!")
continue
if(!SA.ckey)

View File

@@ -2,13 +2,13 @@ SUBSYSTEM_DEF(ipintel)
name = "XKeyScore"
init_order = INIT_ORDER_XKEYSCORE
flags = SS_NO_FIRE
var/enabled = 0 //disable at round start to avoid checking reconnects
var/enabled = FALSE //disable at round start to avoid checking reconnects
var/throttle = 0
var/errors = 0
var/list/cache = list()
/datum/controller/subsystem/ipintel/Initialize(timeofday, zlevel)
enabled = 1
enabled = TRUE
. = ..()

View File

@@ -1,14 +1,14 @@
GLOBAL_LIST_EMPTY(lighting_update_lights) // List of lighting sources queued for update.
GLOBAL_LIST_EMPTY(lighting_update_corners) // List of lighting corners queued for update.
GLOBAL_LIST_EMPTY(lighting_update_objects) // List of lighting objects queued for update.
SUBSYSTEM_DEF(lighting)
name = "Lighting"
wait = 2
init_order = INIT_ORDER_LIGHTING
flags = SS_TICKER
var/static/list/sources_queue = list() // List of lighting sources queued for update.
var/static/list/corners_queue = list() // List of lighting corners queued for update.
var/static/list/objects_queue = list() // List of lighting objects queued for update.
/datum/controller/subsystem/lighting/stat_entry(msg)
msg = "L:[length(GLOB.lighting_update_lights)]|C:[length(GLOB.lighting_update_corners)]|O:[length(GLOB.lighting_update_objects)]"
msg = "L:[length(sources_queue)]|C:[length(corners_queue)]|O:[length(objects_queue)]"
return ..()
@@ -31,9 +31,10 @@ SUBSYSTEM_DEF(lighting)
MC_SPLIT_TICK_INIT(3)
if(!init_tick_checks)
MC_SPLIT_TICK
var/list/queue = sources_queue
var/i = 0
for (i in 1 to GLOB.lighting_update_lights.len)
var/datum/light_source/L = GLOB.lighting_update_lights[i]
for (i in 1 to length(queue))
var/datum/light_source/L = queue[i]
L.update_corners()
@@ -44,14 +45,15 @@ SUBSYSTEM_DEF(lighting)
else if (MC_TICK_CHECK)
break
if (i)
GLOB.lighting_update_lights.Cut(1, i+1)
queue.Cut(1, i+1)
i = 0
if(!init_tick_checks)
MC_SPLIT_TICK
for (i in 1 to GLOB.lighting_update_corners.len)
var/datum/lighting_corner/C = GLOB.lighting_update_corners[i]
queue = corners_queue
for (i in 1 to length(queue))
var/datum/lighting_corner/C = queue[i]
C.update_objects()
C.needs_update = FALSE
@@ -60,15 +62,16 @@ SUBSYSTEM_DEF(lighting)
else if (MC_TICK_CHECK)
break
if (i)
GLOB.lighting_update_corners.Cut(1, i+1)
queue.Cut(1, i+1)
i = 0
if(!init_tick_checks)
MC_SPLIT_TICK
for (i in 1 to GLOB.lighting_update_objects.len)
var/atom/movable/lighting_object/O = GLOB.lighting_update_objects[i]
queue = objects_queue
for (i in 1 to length(queue))
var/atom/movable/lighting_object/O = queue[i]
if (QDELETED(O))
continue
@@ -80,7 +83,7 @@ SUBSYSTEM_DEF(lighting)
else if (MC_TICK_CHECK)
break
if (i)
GLOB.lighting_update_objects.Cut(1, i+1)
queue.Cut(1, i+1)
/datum/controller/subsystem/lighting/Recover()

View File

@@ -1,6 +1,4 @@
//Fires five times every second.
PROCESSING_SUBSYSTEM_DEF(fastprocess)
name = "Fast Processing"
wait = 2
wait = 0.2 SECONDS
stat_tag = "FP"

View File

@@ -1,7 +1,7 @@
PROCESSING_SUBSYSTEM_DEF(nanites)
name = "Nanites"
flags = SS_BACKGROUND|SS_POST_FIRE_TIMING|SS_NO_INIT
wait = 10
wait = 1 SECONDS
var/list/datum/nanite_cloud_backup/cloud_backups = list()
var/list/mob/living/nanite_monitored_mobs = list()

View File

@@ -2,4 +2,4 @@ PROCESSING_SUBSYSTEM_DEF(obj)
name = "Objects"
priority = FIRE_PRIORITY_OBJ
flags = SS_NO_INIT
wait = 20
wait = 2 SECONDS

View File

@@ -1,10 +1,10 @@
//Used to process objects. Fires once every second.
//Used to process objects.
SUBSYSTEM_DEF(processing)
name = "Processing"
priority = FIRE_PRIORITY_PROCESS
flags = SS_BACKGROUND|SS_POST_FIRE_TIMING|SS_NO_INIT
wait = 10
wait = 1 SECONDS
var/stat_tag = "P" //Used for logging
var/list/processing = list()
@@ -14,7 +14,7 @@ SUBSYSTEM_DEF(processing)
msg = "[stat_tag]:[length(processing)]"
return ..()
/datum/controller/subsystem/processing/fire(resumed = 0)
/datum/controller/subsystem/processing/fire(resumed = FALSE)
if (!resumed)
currentrun = processing.Copy()
//cache for sanic speed (lists are references anyways)
@@ -25,12 +25,26 @@ SUBSYSTEM_DEF(processing)
current_run.len--
if(QDELETED(thing))
processing -= thing
else if(thing.process(wait) == PROCESS_KILL)
else if(thing.process(wait * 0.1) == PROCESS_KILL)
// fully stop so that a future START_PROCESSING will work
STOP_PROCESSING(src, thing)
if (MC_TICK_CHECK)
return
/datum/proc/process()
set waitfor = 0
/**
* This proc is called on a datum on every "cycle" if it is being processed by a subsystem. The time between each cycle is determined by the subsystem's "wait" setting.
* You can start and stop processing a datum using the START_PROCESSING and STOP_PROCESSING defines.
*
* Since the wait setting of a subsystem can be changed at any time, it is important that any rate-of-change you implement in this proc is multiplied by the delta_time that is passed as a parameter.
* Additionally, any "prob" you use in this proc should instead use the DT_PROB define to make sure that the final probability per second stays the same even if the subsystem's wait is altered.
* Examples where this must be considered:
* - Implementing a cooldown timer, use `mytimer -= delta_time`, not `mytimer -= 1`. This way, `mytimer` will always have the unit of seconds
* - Damaging a mob, do `L.adjustFireLoss(20 * delta_time)`, not `L.adjustFireLoss(20)`. This way, the damage per second stays constant even if the wait of the subsystem is changed
* - Probability of something happening, do `if(DT_PROB(25, delta_time))`, not `if(prob(25))`. This way, if the subsystem wait is e.g. lowered, there won't be a higher chance of this event happening per second
*
* If you override this, do not call parent, as it will return PROCESS_KILL. This is done to prevent objects that don't override process() from staying in the processing list
*/
/datum/proc/process(delta_time)
set waitfor = FALSE
return PROCESS_KILL
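A minimal wait-independent override might look like this (the hot potato type and everything on it are hypothetical):

/obj/item/hot_potato/process(delta_time) // hypothetical type, for illustration only
	var/mob/living/holder = loc
	if(!istype(holder))
		return PROCESS_KILL // fully stop; a later START_PROCESSING can restart us
	holder.adjustFireLoss(5 * delta_time) // a constant 5 damage per second, whatever the wait is
	if(DT_PROB(10, delta_time)) // roughly a 10% chance per second, also wait-independent
		holder.visible_message("[src] sizzles!")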

View File

@@ -5,8 +5,8 @@ PROCESSING_SUBSYSTEM_DEF(quirks)
name = "Quirks"
init_order = INIT_ORDER_QUIRKS
flags = SS_BACKGROUND
wait = 10
runlevels = RUNLEVEL_GAME
wait = 1 SECONDS
var/list/quirks = list() //Assoc. list of all roundstart quirk datum types; "name" = /path/
var/list/quirk_names_by_path = list()

View File

@@ -6,15 +6,15 @@
#define BUCKET_LIMIT (world.time + TICKS2DS(min(BUCKET_LEN - (SSrunechat.practical_offset - DS2TICKS(world.time - SSrunechat.head_offset)) - 1, BUCKET_LEN - 1)))
/**
* # Runechat Subsystem
*
* Maintains a timer-like system to handle destruction of runechat messages. Much of this code is modeled
* after or adapted from the timer subsystem.
*
* Note that this has the same structure for storing and queueing messages as the timer subsystem does
* for handling timers: the bucket_list is a list of chatmessage datums, each of which are the head
* of a circularly linked list. Any given index in bucket_list could be null, representing an empty bucket.
*/
* # Runechat Subsystem
*
* Maintains a timer-like system to handle destruction of runechat messages. Much of this code is modeled
* after or adapted from the timer subsystem.
*
* Note that this has the same structure for storing and queueing messages as the timer subsystem does
* for handling timers: the bucket_list is a list of chatmessage datums, each of which are the head
* of a circularly linked list. Any given index in bucket_list could be null, representing an empty bucket.
*/
SUBSYSTEM_DEF(runechat)
name = "Runechat"
flags = SS_TICKER | SS_NO_INIT
@@ -131,14 +131,14 @@ SUBSYSTEM_DEF(runechat)
bucket_resolution = world.tick_lag
/**
* Enters the runechat subsystem with this chatmessage, inserting it into the end-of-life queue
*
* This will also account for a chatmessage already being registered, and in which case
* the position will be updated to remove it from the previous location if necessary
*
* Arguments:
* * new_sched_destruction Optional, when provided is used to update an existing message with the new specified time
*/
* Enters the runechat subsystem with this chatmessage, inserting it into the end-of-life queue
*
* This will also account for a chatmessage already being registered, and in which case
* the position will be updated to remove it from the previous location if necessary
*
* Arguments:
* * new_sched_destruction Optional, when provided is used to update an existing message with the new specified time
*/
/datum/chatmessage/proc/enter_subsystem(new_sched_destruction = 0)
// Get local references from subsystem as they are faster to access than the datum references
var/list/bucket_list = SSrunechat.bucket_list
@@ -169,7 +169,7 @@ SUBSYSTEM_DEF(runechat)
// Handle insertion into the secondary queue if the required time is outside our tracked amounts
if (scheduled_destruction >= BUCKET_LIMIT)
BINARY_INSERT(src, SSrunechat.second_queue, datum/chatmessage, src, scheduled_destruction, COMPARE_KEY)
BINARY_INSERT(src, SSrunechat.second_queue, /datum/chatmessage, src, scheduled_destruction, COMPARE_KEY)
return
// Get bucket position and a local reference to the datum var, it's faster to access this way
@@ -194,8 +194,8 @@ SUBSYSTEM_DEF(runechat)
/**
* Removes this chatmessage datum from the runechat subsystem
*/
* Removes this chatmessage datum from the runechat subsystem
*/
/datum/chatmessage/proc/leave_subsystem()
// Attempt to find the bucket that contains this chat message
var/bucket_pos = BUCKET_POS(scheduled_destruction)

View File

@@ -1,31 +1,51 @@
#define BUCKET_LEN (world.fps*1*60) //how many ticks should we keep in the bucket. (1 minutes worth)
/// Controls how many buckets should be kept, each representing a tick. (1 minute's worth)
#define BUCKET_LEN (world.fps*1*60)
/// Helper for getting the correct bucket for a given timer
#define BUCKET_POS(timer) (((round((timer.timeToRun - SStimer.head_offset) / world.tick_lag)+1) % BUCKET_LEN)||BUCKET_LEN)
/// Gets the maximum time at which timers will be invoked from buckets, used for deferring to secondary queue
#define TIMER_MAX (world.time + TICKS2DS(min(BUCKET_LEN-(SStimer.practical_offset-DS2TICKS(world.time - SStimer.head_offset))-1, BUCKET_LEN-1)))
#define TIMER_ID_MAX (2**24) //max float with integer precision
/// Max float with integer precision
#define TIMER_ID_MAX (2**24)
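To make the bucket math concrete, a worked example with assumed numbers:

// Illustrative only: with world.fps = 20, world.tick_lag = 0.5,
// BUCKET_LEN = 20 * 60 = 1200 buckets. For head_offset = 1000 and a timer
// with timeToRun = 1005.5: round((1005.5 - 1000) / 0.5) + 1 = 12, and
// 12 % 1200 = 12, so the timer lands in bucket 12. A timer scheduled beyond
// the span the buckets cover is diverted to second_queue via TIMER_MAX.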
/**
* # Timer Subsystem
*
* Handles creation, callbacks, and destruction of timed events.
*
* It is important to understand the buckets used in the timer subsystem are just a series of circular doubly-linked
* lists. The object at a given index in bucket_list is a /datum/timedevent, the head of a circular list, which has prev
* and next references for the respective elements in that bucket's circular list.
*/
SUBSYSTEM_DEF(timer)
name = "Timer"
wait = 1 //SS_TICKER subsystem, so wait is in ticks
wait = 1 // SS_TICKER subsystem, so wait is in ticks
init_order = INIT_ORDER_TIMER
priority = FIRE_PRIORITY_TIMER
flags = SS_TICKER|SS_NO_INIT
var/list/datum/timedevent/second_queue = list() //awe, yes, you've had first queue, but what about second queue?
/// Queue used for storing timers that do not fit into the current buckets
var/list/datum/timedevent/second_queue = list()
/// A hashlist dictionary used for storing unique timers
var/list/hashes = list()
var/head_offset = 0 //world.time of the first entry in the the bucket.
var/practical_offset = 1 //index of the first non-empty item in the bucket.
var/bucket_resolution = 0 //world.tick_lag the bucket was designed for
var/bucket_count = 0 //how many timers are in the buckets
var/list/bucket_list = list() //list of buckets, each bucket holds every timer that has to run that byond tick.
var/list/timer_id_dict = list() //list of all active timers assoicated to their timer id (for easy lookup)
var/list/clienttime_timers = list() //special snowflake timers that run on fancy pansy "client time"
/// world.time of the first entry in the bucket list, effectively the 'start time' of the current buckets
var/head_offset = 0
/// Index of the wrap-around pivot for buckets. Buckets before this index are later-running buckets wrapped around from the end of the bucket list.
var/practical_offset = 1
/// world.tick_lag the bucket was designed for
var/bucket_resolution = 0
/// How many timers are in the buckets
var/bucket_count = 0
/// List of buckets, each bucket holds every timer that has to run that byond tick
var/list/bucket_list = list()
/// List of all active timers associated to their timer ID (for easy lookup)
var/list/timer_id_dict = list()
/// Special timers that run in real-time, not BYOND time; these are more expensive to run and maintain
var/list/clienttime_timers = list()
/// Contains the last time that a timer's callback was invoked, or the last tick the SS fired if no timers are being processed
var/last_invoke_tick = 0
/// Contains the last time that a warning was issued for not invoking callbacks
var/static/last_invoke_warning = 0
/// Boolean controlling whether the timer SS will automatically reset buckets if it fails to invoke callbacks for an extended period of time
var/static/bucket_auto_reset = TRUE
/datum/controller/subsystem/timer/PreInit()
@@ -38,44 +58,53 @@ SUBSYSTEM_DEF(timer)
return ..()
/datum/controller/subsystem/timer/fire(resumed = FALSE)
// Store local references to datum vars as it is faster to access them
var/lit = last_invoke_tick
var/last_check = world.time - TICKS2DS(BUCKET_LEN*1.5)
var/list/bucket_list = src.bucket_list
var/last_check = world.time - TICKS2DS(BUCKET_LEN * 1.5)
// If there are no timers being tracked, then consider now to be the last invoked time
if(!bucket_count)
last_invoke_tick = world.time
// Check that we have invoked a callback in the last 1.5 minutes of BYOND time;
// if we have not, throw a warning and reset the buckets
if(lit && lit < last_check && head_offset < last_check && last_invoke_warning < last_check)
last_invoke_warning = world.time
var/msg = "No regular timers processed in the last [BUCKET_LEN*1.5] ticks[bucket_auto_reset ? ", resetting buckets" : ""]!"
var/msg = "No regular timers processed in the last [BUCKET_LEN * 1.5] ticks[bucket_auto_reset ? ", resetting buckets" : ""]!"
message_admins(msg)
WARNING(msg)
if(bucket_auto_reset)
bucket_resolution = 0
log_world("Timer bucket reset. world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
var/list/to_log = list("Timer bucket reset. world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
for (var/i in 1 to length(bucket_list))
var/datum/timedevent/bucket_head = bucket_list[i]
if (!bucket_head)
continue
log_world("Active timers at index [i]:")
to_log += "Active timers at index [i]:"
var/datum/timedevent/bucket_node = bucket_head
var/anti_loop_check = 1000
do
log_world(get_timer_debug_string(bucket_node))
to_log += get_timer_debug_string(bucket_node)
bucket_node = bucket_node.next
anti_loop_check--
while(bucket_node && bucket_node != bucket_head && anti_loop_check)
log_world("Active timers in the second_queue queue:")
to_log += "Active timers in the second_queue queue:"
for(var/I in second_queue)
log_world(get_timer_debug_string(I))
to_log += get_timer_debug_string(I)
var/next_clienttime_timer_index = 0
var/len = length(clienttime_timers)
// Dump all the logged data to the world log
log_world(to_log.Join("\n"))
for (next_clienttime_timer_index in 1 to len)
// Process client-time timers
var/static/next_clienttime_timer_index = 0
if (next_clienttime_timer_index)
clienttime_timers.Cut(1, next_clienttime_timer_index+1)
next_clienttime_timer_index = 0
for (next_clienttime_timer_index in 1 to length(clienttime_timers))
if (MC_TICK_CHECK)
next_clienttime_timer_index--
break
@@ -86,8 +115,8 @@ SUBSYSTEM_DEF(timer)
var/datum/callback/callBack = ctime_timer.callBack
if (!callBack)
clienttime_timers.Cut(next_clienttime_timer_index,next_clienttime_timer_index+1)
CRASH("Invalid timer: [get_timer_debug_string(ctime_timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset], REALTIMEOFDAY: [REALTIMEOFDAY]")
CRASH("Invalid timer: [get_timer_debug_string(ctime_timer)] world.time: [world.time], \
head_offset: [head_offset], practical_offset: [practical_offset], REALTIMEOFDAY: [REALTIMEOFDAY]")
ctime_timer.spent = REALTIMEOFDAY
callBack.InvokeAsync()
@@ -95,135 +124,93 @@ SUBSYSTEM_DEF(timer)
if(ctime_timer.flags & TIMER_LOOP)
ctime_timer.spent = 0
ctime_timer.timeToRun = REALTIMEOFDAY + ctime_timer.wait
BINARY_INSERT(ctime_timer, clienttime_timers, datum/timedevent, ctime_timer, timeToRun, COMPARE_KEY)
BINARY_INSERT(ctime_timer, clienttime_timers, /datum/timedevent, ctime_timer, timeToRun, COMPARE_KEY)
else
qdel(ctime_timer)
// Remove invoked client-time timers
if (next_clienttime_timer_index)
clienttime_timers.Cut(1, next_clienttime_timer_index+1)
next_clienttime_timer_index = 0
if (MC_TICK_CHECK)
return
var/static/list/spent = list()
var/static/datum/timedevent/timer
// Check for when we need to loop the buckets; this occurs when
// the head_offset is approaching BUCKET_LEN ticks in the past
if (practical_offset > BUCKET_LEN)
head_offset += TICKS2DS(BUCKET_LEN)
practical_offset = 1
resumed = FALSE
// Check for when we have to reset buckets, typically from auto-reset
if ((length(bucket_list) != BUCKET_LEN) || (world.tick_lag != bucket_resolution))
reset_buckets()
bucket_list = src.bucket_list
resumed = FALSE
if (!resumed)
timer = null
while (practical_offset <= BUCKET_LEN && head_offset + ((practical_offset-1)*world.tick_lag) <= world.time)
var/datum/timedevent/head = bucket_list[practical_offset]
if (!timer || !head || timer == head)
head = bucket_list[practical_offset]
timer = head
while (timer)
// Iterate through each bucket starting from the practical offset
while (practical_offset <= BUCKET_LEN && head_offset + ((practical_offset - 1) * world.tick_lag) <= world.time)
var/datum/timedevent/timer
while ((timer = bucket_list[practical_offset]))
var/datum/callback/callBack = timer.callBack
if (!callBack)
bucket_resolution = null //force bucket recreation
CRASH("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
bucket_resolution = null // force bucket recreation
CRASH("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], \
head_offset: [head_offset], practical_offset: [practical_offset]")
timer.bucketEject() //pop the timer off of the bucket list.
// Invoke callback if possible
if (!timer.spent)
spent += timer
timer.spent = world.time
callBack.InvokeAsync()
last_invoke_tick = world.time
if (MC_TICK_CHECK)
return
timer = timer.next
if (timer == head)
break
bucket_list[practical_offset++] = null
//we freed up a bucket, lets see if anything in second_queue needs to be shifted to that bucket.
var/i = 0
var/L = length(second_queue)
for (i in 1 to L)
timer = second_queue[i]
if (timer.timeToRun >= TIMER_MAX)
i--
break
if (timer.timeToRun < head_offset)
bucket_resolution = null //force bucket recreation
stack_trace("[i] Invalid timer state: Timer in long run queue with a time to run less then head_offset. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
if (timer.callBack && !timer.spent)
timer.callBack.InvokeAsync()
spent += timer
bucket_count++
else if(!QDELETED(timer))
qdel(timer)
continue
if (timer.timeToRun < head_offset + TICKS2DS(practical_offset-1))
bucket_resolution = null //force bucket recreation
stack_trace("[i] Invalid timer state: Timer in long run queue that would require a backtrack to transfer to short run queue. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
if (timer.callBack && !timer.spent)
timer.callBack.InvokeAsync()
spent += timer
bucket_count++
else if(!QDELETED(timer))
qdel(timer)
continue
bucket_count++
var/bucket_pos = max(1, BUCKET_POS(timer))
var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
if (!bucket_head)
bucket_list[bucket_pos] = timer
timer.next = null
timer.prev = null
continue
if (!bucket_head.prev)
bucket_head.prev = bucket_head
timer.next = bucket_head
timer.prev = bucket_head.prev
timer.next.prev = timer
timer.prev.next = timer
if (i)
second_queue.Cut(1, i+1)
timer = null
bucket_count -= length(spent)
for (var/i in spent)
var/datum/timedevent/qtimer = i
if(QDELETED(qtimer))
bucket_count++
continue
if(!(qtimer.flags & TIMER_LOOP))
qdel(qtimer)
else
bucket_count++
qtimer.spent = 0
qtimer.bucketEject()
if(qtimer.flags & TIMER_CLIENT_TIME)
qtimer.timeToRun = REALTIMEOFDAY + qtimer.wait
if (timer.flags & TIMER_LOOP) // Prepare looping timers to re-enter the queue
timer.spent = 0
timer.timeToRun = world.time + timer.wait
timer.bucketJoin()
else
qtimer.timeToRun = world.time + qtimer.wait
qtimer.bucketJoin()
qdel(timer)
spent.len = 0
if (MC_TICK_CHECK)
break
//formated this way to be runtime resistant
if (!bucket_list[practical_offset])
// Empty the bucket, check if anything in the secondary queue should be shifted to this bucket
bucket_list[practical_offset++] = null
var/i = 0
for (i in 1 to length(second_queue))
timer = second_queue[i]
if (timer.timeToRun >= TIMER_MAX)
i--
break
// Check for timers that are scheduled to run in the past
if (timer.timeToRun < head_offset)
bucket_resolution = null // force bucket recreation
stack_trace("[i] Invalid timer state: Timer in long run queue with a time to run less then head_offset. \
[get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
break
// Check for timers that are not capable of being scheduled to run without rebuilding buckets
if (timer.timeToRun < head_offset + TICKS2DS(practical_offset - 1))
bucket_resolution = null // force bucket recreation
stack_trace("[i] Invalid timer state: Timer in long run queue that would require a backtrack to transfer to \
short run queue. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
break
timer.bucketJoin()
if (i)
second_queue.Cut(1, i+1)
if (MC_TICK_CHECK)
break
/**
* Generates a string with details about the timed event for debugging purposes
*/
/datum/controller/subsystem/timer/proc/get_timer_debug_string(datum/timedevent/TE)
. = "Timer: [TE]"
. += "Prev: [TE.prev ? TE.prev : "NULL"], Next: [TE.next ? TE.next : "NULL"]"
@@ -234,12 +221,16 @@ SUBSYSTEM_DEF(timer)
if(!TE.callBack)
. += ", NO CALLBACK"
/**
* Destroys the existing buckets and creates new buckets from the existing timed events
*/
/datum/controller/subsystem/timer/proc/reset_buckets()
var/list/bucket_list = src.bucket_list
var/list/bucket_list = src.bucket_list // Store local reference to datum var, this is faster
var/list/alltimers = list()
//collect the timers currently in the bucket
// Get all timers currently in the buckets
for (var/bucket_head in bucket_list)
if (!bucket_head)
if (!bucket_head) // if bucket is empty for this tick
continue
var/datum/timedevent/bucket_node = bucket_head
do
@@ -247,25 +238,38 @@ SUBSYSTEM_DEF(timer)
bucket_node = bucket_node.next
while(bucket_node && bucket_node != bucket_head)
// Empty the list by zeroing and re-assigning the length
bucket_list.len = 0
bucket_list.len = BUCKET_LEN
// Reset values for the subsystem to their initial values
practical_offset = 1
bucket_count = 0
head_offset = world.time
bucket_resolution = world.tick_lag
// Add all timed events from the secondary queue as well
alltimers += second_queue
// If there are no timers being tracked by the subsystem,
// there is no need to do any further rebuilding
if (!length(alltimers))
return
// Sort all timers by time to run
sortTim(alltimers, .proc/cmp_timer)
// Get the earliest timer, and if the TTR is earlier than the current world.time,
// then set the head offset appropriately to be the earliest time tracked by the
// current set of buckets
var/datum/timedevent/head = alltimers[1]
if (head.timeToRun < head_offset)
head_offset = head.timeToRun
// Iterate through each timed event and insert it into an appropriate bucket,
// up until the point that we can no longer insert into buckets as the TTR
// is outside the range we are tracking, then insert the remainder into the
// secondary queue
var/new_bucket_count
var/i = 1
for (i in 1 to length(alltimers))
@@ -273,34 +277,38 @@ SUBSYSTEM_DEF(timer)
if (!timer)
continue
var/bucket_pos = BUCKET_POS(timer)
// Check that the TTR is within the range covered by buckets, when exceeded we've finished
if (timer.timeToRun >= TIMER_MAX)
i--
break
// Check that timer has a valid callback and hasn't been invoked
if (!timer.callBack || timer.spent)
WARNING("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
WARNING("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], \
head_offset: [head_offset], practical_offset: [practical_offset]")
if (timer.callBack)
qdel(timer)
continue
// Insert the timer into the bucket, and perform necessary circular doubly-linked list operations
new_bucket_count++
var/bucket_pos = BUCKET_POS(timer)
var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
if (!bucket_head)
bucket_list[bucket_pos] = timer
timer.next = null
timer.prev = null
continue
if (!bucket_head.prev)
bucket_head.prev = bucket_head
timer.next = bucket_head
timer.prev = bucket_head.prev
timer.next.prev = timer
timer.prev.next = timer
// Cut the timers that are tracked by the buckets from the secondary queue
if (i)
alltimers.Cut(1, i+1)
alltimers.Cut(1, i + 1)
second_queue = alltimers
bucket_count = new_bucket_count
@@ -311,45 +319,64 @@ SUBSYSTEM_DEF(timer)
timer_id_dict |= SStimer.timer_id_dict
bucket_list |= SStimer.bucket_list
/**
* # Timed Event
*
* This is the actual timer, it contains the callback and necessary data to maintain
* the timer.
*
* See the documentation for the timer subsystem for an explanation of the buckets referenced
* below in next and prev
*/
/datum/timedevent
/// ID used for timers when the TIMER_STOPPABLE flag is present
var/id
/// The callback to invoke after the timer completes
var/datum/callback/callBack
/// The time at which the callback should be invoked
var/timeToRun
/// The length of the timer
var/wait
/// Unique hash generated when TIMER_UNIQUE flag is present
var/hash
/// The source of the timedevent, whatever called addtimer
var/source
/// Flags associated with the timer, see _DEFINES/subsystems.dm
var/list/flags
var/spent = 0 //time we ran the timer.
var/name //for easy debugging.
//cicular doublely linked list
/// Time at which the timer was invoked or destroyed
var/spent = 0
/// An informative name generated for the timer as its representation in strings, useful for debugging
var/name
/// Next timed event in the bucket
var/datum/timedevent/next
/// Previous timed event in the bucket
var/datum/timedevent/prev
/datum/timedevent/New(datum/callback/callBack, wait, flags, hash)
/datum/timedevent/New(datum/callback/callBack, wait, flags, hash, source)
var/static/nextid = 1
id = TIMER_ID_NULL
src.callBack = callBack
src.wait = wait
src.flags = flags
src.hash = hash
src.source = source
if (flags & TIMER_CLIENT_TIME)
timeToRun = REALTIMEOFDAY + wait
else
timeToRun = world.time + wait
// Determine time at which the timer's callback should be invoked
timeToRun = (flags & TIMER_CLIENT_TIME ? REALTIMEOFDAY : world.time) + wait
// Include the timer in the hash table if the timer is unique
if (flags & TIMER_UNIQUE)
SStimer.hashes[hash] = src
// Generate ID for the timer if the timer is stoppable, include in the timer id dictionary
if (flags & TIMER_STOPPABLE)
id = num2text(nextid, 100)
if (nextid >= SHORT_REAL_LIMIT)
nextid += min(1, 2**round(nextid/SHORT_REAL_LIMIT))
nextid += min(1, 2 ** round(nextid / SHORT_REAL_LIMIT))
else
nextid++
SStimer.timer_id_dict[id] = src
name = "Timer: [id] (\ref[src]), TTR: [timeToRun], Flags: [jointext(bitfield2list(flags, list("TIMER_UNIQUE", "TIMER_OVERRIDE", "TIMER_CLIENT_TIME", "TIMER_STOPPABLE", "TIMER_NO_HASH_WAIT", "TIMER_LOOP")), ", ")], callBack: \ref[callBack], callBack.object: [callBack.object]\ref[callBack.object]([getcallingtype()]), callBack.delegate:[callBack.delegate]([callBack.arguments ? callBack.arguments.Join(", ") : ""])"
if ((timeToRun < world.time || timeToRun < SStimer.head_offset) && !(flags & TIMER_CLIENT_TIME))
CRASH("Invalid timer state: Timer created that would require a backtrack to run (addtimer would never let this happen): [SStimer.get_timer_debug_string(src)]")
@@ -390,23 +417,39 @@ SUBSYSTEM_DEF(timer)
prev = null
return QDEL_HINT_IWILLGC
/**
* Removes this timed event from any relevant buckets, or the secondary queue
*/
/datum/timedevent/proc/bucketEject()
// Attempt to find bucket that contains this timed event
var/bucketpos = BUCKET_POS(src)
// Store local references for the bucket list and secondary queue
// This is faster than referencing them from the datum itself
var/list/bucket_list = SStimer.bucket_list
var/list/second_queue = SStimer.second_queue
// Attempt to get the head of the bucket
var/datum/timedevent/buckethead
if(bucketpos > 0)
buckethead = bucket_list[bucketpos]
// Decrement the number of timers in buckets if the timed event is
// the head of the bucket, or has a TTR less than TIMER_MAX implying it fits
// into an existing bucket, or is otherwise not present in the secondary queue
if(buckethead == src)
bucket_list[bucketpos] = next
SStimer.bucket_count--
else if(timeToRun < TIMER_MAX || next || prev)
else if(timeToRun < TIMER_MAX)
SStimer.bucket_count--
else
var/l = length(second_queue)
second_queue -= src
if(l == length(second_queue))
SStimer.bucket_count--
// Remove the timed event from the bucket, ensuring to maintain
// the integrity of the bucket's list if relevant
if(prev != next)
prev.next = next
next.prev = prev
@@ -415,32 +458,47 @@ SUBSYSTEM_DEF(timer)
next?.prev = null
prev = next = null
/**
* Attempts to add this timed event to a bucket, will enter the secondary queue
* if there are no appropriate buckets at this time.
*
* Secondary queueing of timed events will occur when the timespan covered by the existing
* buckets is exceeded by the time at which this timed event is scheduled to be invoked.
* If the timed event is tracking client time, it will be added to a special bucket.
*/
/datum/timedevent/proc/bucketJoin()
var/list/L
// Generate debug-friendly name for timer
var/static/list/bitfield_flags = list("TIMER_UNIQUE", "TIMER_OVERRIDE", "TIMER_CLIENT_TIME", "TIMER_STOPPABLE", "TIMER_NO_HASH_WAIT", "TIMER_LOOP")
name = "Timer: [id] (\ref[src]), TTR: [timeToRun], wait:[wait] Flags: [jointext(bitfield2list(flags, bitfield_flags), ", ")], \
callBack: \ref[callBack], callBack.object: [callBack.object]\ref[callBack.object]([getcallingtype()]), \
callBack.delegate:[callBack.delegate]([callBack.arguments ? callBack.arguments.Join(", ") : ""]), source: [source]"
// Check if this timed event should be diverted to the client time bucket, or the secondary queue
var/list/L
if (flags & TIMER_CLIENT_TIME)
L = SStimer.clienttime_timers
else if (timeToRun >= TIMER_MAX)
L = SStimer.second_queue
if(L)
BINARY_INSERT(src, L, datum/timedevent, src, timeToRun, COMPARE_KEY)
BINARY_INSERT(src, L, /datum/timedevent, src, timeToRun, COMPARE_KEY)
return
//get the list of buckets
// Get a local reference to the bucket list, this is faster than referencing the datum
var/list/bucket_list = SStimer.bucket_list
//calculate our place in the bucket list
// Find the correct bucket for this timed event
var/bucket_pos = BUCKET_POS(src)
//get the bucket for our tick
var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
SStimer.bucket_count++
//empty bucket, we will just add ourselves
// If there is no timed event at this position, then the bucket is 'empty'
// and we can just set this event to that position
if (!bucket_head)
bucket_list[bucket_pos] = src
return
//other wise, lets do a simplified linked list add.
// Otherwise, we merely add this timed event into the bucket, which is a
// circular doubly-linked list
if (!bucket_head.prev)
bucket_head.prev = bucket_head
next = bucket_head
@@ -448,7 +506,9 @@ SUBSYSTEM_DEF(timer)
next.prev = src
prev.next = src
///Returns a string of the type of the callback for this timer
/**
* Returns a string of the type of the callback for this timer
*/
/datum/timedevent/proc/getcallingtype()
. = "ERROR"
if (callBack.object == GLOBAL_PROC)
@@ -457,14 +517,15 @@ SUBSYSTEM_DEF(timer)
. = "[callBack.object.type]"
/**
* Create a new timer and insert it in the queue
*
* Arguments:
* * callback the callback to call on timer finish
* * wait deciseconds to run the timer for
* * flags flags for this timer, see: code\__DEFINES\subsystems.dm
*/
/proc/addtimer(datum/callback/callback, wait = 0, flags = 0)
* Create a new timer and insert it in the queue.
* You should not call this directly, and should instead use the addtimer macro, which includes source information.
*
* Arguments:
* * callback the callback to call on timer finish
* * wait deciseconds to run the timer for
* * flags flags for this timer, see: code\__DEFINES\subsystems.dm
*/
/proc/_addtimer(datum/callback/callback, wait = 0, flags = 0, file, line)
if (!callback)
CRASH("addtimer called without a callback")
@@ -472,31 +533,30 @@ SUBSYSTEM_DEF(timer)
stack_trace("addtimer called with a negative wait. Converting to [world.tick_lag]")
if (callback.object != GLOBAL_PROC && QDELETED(callback.object) && !QDESTROYING(callback.object))
stack_trace("addtimer called with a callback assigned to a qdeleted object. In the future such timers will not be supported and may refuse to run or run with a 0 wait")
stack_trace("addtimer called with a callback assigned to a qdeleted object. In the future such timers will not \
be supported and may refuse to run or run with a 0 wait")
wait = max(CEILING(wait, world.tick_lag), world.tick_lag)
if(wait >= INFINITY)
CRASH("Attempted to create timer with INFINITY delay")
// Generate hash if relevant for timed events with the TIMER_UNIQUE flag
var/hash
if (flags & TIMER_UNIQUE)
var/list/hashlist
if(flags & TIMER_NO_HASH_WAIT)
hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, flags & TIMER_CLIENT_TIME)
else
hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, wait, flags & TIMER_CLIENT_TIME)
var/list/hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, flags & TIMER_CLIENT_TIME)
if(!(flags & TIMER_NO_HASH_WAIT))
hashlist += wait
hashlist += callback.arguments
hash = hashlist.Join("|||||||")
var/datum/timedevent/hash_timer = SStimer.hashes[hash]
if(hash_timer)
if (hash_timer.spent) //it's pending deletion, pretend it doesn't exist.
hash_timer.hash = null //but keep it from accidentally deleting us
if (hash_timer.spent) // it's pending deletion, pretend it doesn't exist.
hash_timer.hash = null // but keep it from accidentally deleting us
else
if (flags & TIMER_OVERRIDE)
hash_timer.hash = null //no need having it delete it's hash if we are going to replace it
hash_timer.hash = null // no need having it delete its hash if we are going to replace it
qdel(hash_timer)
else
if (hash_timer.flags & TIMER_STOPPABLE)
@@ -505,24 +565,23 @@ SUBSYSTEM_DEF(timer)
else if(flags & TIMER_OVERRIDE)
stack_trace("TIMER_OVERRIDE used without TIMER_UNIQUE")
var/datum/timedevent/timer = new(callback, wait, flags, hash)
var/datum/timedevent/timer = new(callback, wait, flags, hash, file && "[file]:[line]")
return timer.id
/**
* Delete a timer
*
* Arguments:
* * id a timerid or a /datum/timedevent
*/
* Delete a timer
*
* Arguments:
* * id a timerid or a /datum/timedevent
*/
/proc/deltimer(id)
if (!id)
return FALSE
if (id == TIMER_ID_NULL)
CRASH("Tried to delete a null timerid. Use TIMER_STOPPABLE flag")
if (!istext(id))
if (istype(id, /datum/timedevent))
qdel(id)
return TRUE
if (istype(id, /datum/timedevent))
qdel(id)
return TRUE
//id is string
var/datum/timedevent/timer = SStimer.timer_id_dict[id]
if (timer && !timer.spent)
@@ -531,25 +590,22 @@ SUBSYSTEM_DEF(timer)
return FALSE
/**
* Get the remaining deciseconds on a timer
*
* Arguments:
* * id a timerid or a /datum/timedevent
*/
* Get the remaining deciseconds on a timer
*
* Arguments:
* * id a timerid or a /datum/timedevent
*/
/proc/timeleft(id)
if (!id)
return null
if (id == TIMER_ID_NULL)
CRASH("Tried to get timeleft of a null timerid. Use TIMER_STOPPABLE flag")
if (!istext(id))
if (istype(id, /datum/timedevent))
var/datum/timedevent/timer = id
return timer.timeToRun - world.time
if (istype(id, /datum/timedevent))
var/datum/timedevent/timer = id
return timer.timeToRun - world.time
//id is string
var/datum/timedevent/timer = SStimer.timer_id_dict[id]
if (timer && !timer.spent)
return timer.timeToRun - world.time
return null
return (timer && !timer.spent) ? timer.timeToRun - world.time : null
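Continuing the hypothetical igniter sketch from above, cancelling a stoppable timer might look like:

/obj/machinery/igniter/proc/disarm(timer_id)
	if (isnull(timeleft(timer_id))) // already fired, or was never stoppable
		return FALSE
	return deltimer(timer_id) // TRUE if the timer was found and deleted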
#undef BUCKET_LEN
#undef BUCKET_POS