Mirror of https://github.com/PolarisSS13/Polaris.git
Commit message: JFC
@@ -155,7 +155,7 @@

/datum/controller/process/proc/setup()

-/datum/controller/process/proc/process()
+/datum/controller/process/process()
    started()
    doWork()
    finished()
@@ -70,7 +70,7 @@ var/global/datum/controller/processScheduler/processScheduler
    spawn(0)
        process()

-/datum/controller/processScheduler/proc/process()
+/datum/controller/processScheduler/process()
    while(isRunning)
        checkRunningProcesses()
        queueProcesses()
@@ -1,26 +0,0 @@ (old obj process controller, removed in full)
-/datum/controller/process/obj/setup()
-    name = "obj"
-    schedule_interval = 20 // every 2 seconds
-    start_delay = 8
-
-/datum/controller/process/obj/started()
-    ..()
-    if(!processing_objects)
-        processing_objects = list()
-
-/datum/controller/process/obj/doWork()
-    for(last_object in processing_objects)
-        var/datum/O = last_object
-        if(!QDELETED(O))
-            try
-                O:process()
-            catch(var/exception/e)
-                catchException(e, O)
-            SCHECK
-        else
-            catchBadType(O)
-            processing_objects -= O
-
-/datum/controller/process/obj/statProcess()
-    ..()
-    stat(null, "[processing_objects.len] objects")
@@ -66,7 +66,7 @@ var/datum/controller/supply/supply_controller = new()

// Supply shuttle ticker - handles supply point regeneration
// This is called by the process scheduler every thirty seconds
-/datum/controller/supply/proc/process()
+/datum/controller/supply/process()
    points += points_per_process

//To stop things being sent to CentCom which should not be sent to centcomm. Recursively checks for these types.
@@ -10,7 +10,7 @@ datum/controller/transfer_controller/New()
datum/controller/transfer_controller/Destroy()
    processing_objects -= src

-datum/controller/transfer_controller/proc/process()
+datum/controller/transfer_controller/process()
    currenttick = currenttick + 1
    if (round_duration_in_ticks >= timerbuffer - 1 MINUTE)
        vote.autotransfer()
@@ -26,7 +26,7 @@ var/global/datum/emergency_shuttle_controller/emergency_shuttle
    escape_pods = list()
    ..()

-/datum/emergency_shuttle_controller/proc/process()
+/datum/emergency_shuttle_controller/process()
    if (wait_for_launch)
        if (evac && auto_recall && world.time >= auto_recall_time)
            recall()
@@ -13,14 +13,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
//THIS IS THE INIT ORDER
//Master -> SSPreInit -> GLOB -> world -> config -> SSInit -> Failsafe
//GOT IT MEMORIZED?
-GLOBAL_VAR_INIT(MC_restart_clear, 0)
-GLOBAL_VAR_INIT(MC_restart_timeout, 0)
-GLOBAL_VAR_INIT(MC_restart_count, 0)
-
-//current tick limit, assigned by the queue controller before running a subsystem.
-//used by check_tick as well so that the procs subsystems call can obey that SS's tick limits
-GLOBAL_VAR_INIT(CURRENT_TICKLIMIT, TICK_LIMIT_RUNNING)
-

/datum/controller/master
    name = "Master"
@@ -62,6 +54,10 @@ GLOBAL_VAR_INIT(CURRENT_TICKLIMIT, TICK_LIMIT_RUNNING)
    var/static/restart_timeout = 0
    var/static/restart_count = 0

+    //current tick limit, assigned by the queue controller before running a subsystem.
+    //used by check_tick as well so that the procs subsystems call can obey that SS's tick limits
+    var/static/current_ticklimit
+
/datum/controller/master/New()
    // Highlander-style: there can only be one! Kill off the old and replace it with the new.
    var/list/_subsystems = list()
@@ -98,14 +94,14 @@ GLOBAL_VAR_INIT(CURRENT_TICKLIMIT, TICK_LIMIT_RUNNING)
// -1 if we encountered a runtime trying to recreate it
/proc/Recreate_MC()
    . = -1 //so if we runtime, things know we failed
-    if (world.time < GLOB.MC_restart_timeout)
+    if (world.time < Master.restart_timeout)
        return 0
-    if (world.time < GLOB.MC_restart_clear)
-        GLOB.MC_restart_count *= 0.5
+    if (world.time < Master.restart_clear)
+        Master.restart_count *= 0.5

-    var/delay = 50 * ++GLOB.MC_restart_count
-    GLOB.MC_restart_timeout = world.time + delay
-    GLOB.MC_restart_clear = world.time + (delay * 2)
+    var/delay = 50 * ++Master.restart_count
+    Master.restart_timeout = world.time + delay
+    Master.restart_clear = world.time + (delay * 2)
    Master.processing = FALSE //stop ticking this one
    try
        new/datum/controller/master()
@@ -176,13 +172,13 @@ GLOBAL_VAR_INIT(CURRENT_TICKLIMIT, TICK_LIMIT_RUNNING)

    var/start_timeofday = REALTIMEOFDAY
    // Initialize subsystems.
-    GLOB.CURRENT_TICKLIMIT = config.tick_limit_mc_init
+    current_ticklimit = config.tick_limit_mc_init
    for (var/datum/controller/subsystem/SS in subsystems)
        if (SS.flags & SS_NO_INIT)
            continue
        SS.Initialize(REALTIMEOFDAY)
        CHECK_TICK
-    GLOB.CURRENT_TICKLIMIT = TICK_LIMIT_RUNNING
+    current_ticklimit = TICK_LIMIT_RUNNING
    var/time = (REALTIMEOFDAY - start_timeofday) / 10

    var/msg = "Initializations complete within [time] second[time == 1 ? "" : "s"]!"
@@ -291,7 +287,7 @@ GLOBAL_VAR_INIT(CURRENT_TICKLIMIT, TICK_LIMIT_RUNNING)
        tickdrift = max(0, MC_AVERAGE_FAST(tickdrift, (((REALTIMEOFDAY - init_timeofday) - (world.time - init_time)) / world.tick_lag)))
        var/starting_tick_usage = TICK_USAGE
        if (processing <= 0)
-            GLOB.CURRENT_TICKLIMIT = TICK_LIMIT_RUNNING
+            current_ticklimit = TICK_LIMIT_RUNNING
            sleep(10)
            continue

@@ -300,7 +296,7 @@ GLOBAL_VAR_INIT(CURRENT_TICKLIMIT, TICK_LIMIT_RUNNING)
        // (because sleeps are processed in the order received, longer sleeps are more likely to run first)
        if (starting_tick_usage > TICK_LIMIT_MC) //if there isn't enough time to bother doing anything this tick, sleep a bit.
            sleep_delta *= 2
-            GLOB.CURRENT_TICKLIMIT = TICK_LIMIT_RUNNING * 0.5
+            current_ticklimit = TICK_LIMIT_RUNNING * 0.5
            sleep(world.tick_lag * (processing * sleep_delta))
            continue

@@ -346,7 +342,7 @@ GLOBAL_VAR_INIT(CURRENT_TICKLIMIT, TICK_LIMIT_RUNNING)
            if (!error_level)
                iteration++
            error_level++
-            GLOB.CURRENT_TICKLIMIT = TICK_LIMIT_RUNNING
+            current_ticklimit = TICK_LIMIT_RUNNING
            sleep(10)
            continue

@@ -358,7 +354,7 @@ GLOBAL_VAR_INIT(CURRENT_TICKLIMIT, TICK_LIMIT_RUNNING)
            if (!error_level)
                iteration++
            error_level++
-            GLOB.CURRENT_TICKLIMIT = TICK_LIMIT_RUNNING
+            current_ticklimit = TICK_LIMIT_RUNNING
            sleep(10)
            continue
        error_level--
@@ -369,9 +365,9 @@ GLOBAL_VAR_INIT(CURRENT_TICKLIMIT, TICK_LIMIT_RUNNING)
        iteration++
        last_run = world.time
        src.sleep_delta = MC_AVERAGE_FAST(src.sleep_delta, sleep_delta)
-        GLOB.CURRENT_TICKLIMIT = TICK_LIMIT_RUNNING
+        current_ticklimit = TICK_LIMIT_RUNNING
        if (processing * sleep_delta <= world.tick_lag)
-            GLOB.CURRENT_TICKLIMIT -= (TICK_LIMIT_RUNNING * 0.25) //reserve the tail 1/4 of the next tick for the mc if we plan on running next tick
+            current_ticklimit -= (TICK_LIMIT_RUNNING * 0.25) //reserve the tail 1/4 of the next tick for the mc if we plan on running next tick
        sleep(world.tick_lag * (processing * sleep_delta))

@@ -463,7 +459,7 @@ GLOBAL_VAR_INIT(CURRENT_TICKLIMIT, TICK_LIMIT_RUNNING)
            // Reduce tick allocation for subsystems that overran on their last tick.
            tick_precentage = max(tick_precentage*0.5, tick_precentage-queue_node.tick_overrun)

-            GLOB.CURRENT_TICKLIMIT = round(TICK_USAGE + tick_precentage)
+            current_ticklimit = round(TICK_USAGE + tick_precentage)

            if (!(queue_node_flags & SS_TICKER))
                ran_non_ticker = TRUE
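Note on the Recreate_MC() hunk above: the restart backoff now lives in static vars on /datum/controller/master rather than GLOB counters. A rough worked example of the delay arithmetic, with illustrative numbers that are not part of the commit (world.time is in deciseconds):

// Illustrative only, assuming restart_count starts at its declared value of 0:
//   first Recreate_MC() call: delay = 50 * ++restart_count = 50 deciseconds (5 s)
//                             restart_timeout = world.time + 50
//                             restart_clear   = world.time + 100
//   any call arriving while world.time < restart_timeout returns 0 (refused);
//   later successful calls scale the delay by whatever restart_count holds then.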
code/controllers/subsystems/processing/fastprocess.dm (new file, 6 lines)
@@ -0,0 +1,6 @@
//Fires five times every second.

PROCESSING_SUBSYSTEM_DEF(fastprocess)
    name = "Fast Processing"
    wait = 2
    stat_tag = "FP"
code/controllers/subsystems/processing/obj.dm (new file, 5 lines)
@@ -0,0 +1,5 @@
PROCESSING_SUBSYSTEM_DEF(obj)
    name = "Objects"
    priority = FIRE_PRIORITY_OBJ
    flags = SS_NO_INIT
    wait = 20
code/controllers/subsystems/processing/processing.dm (new file, 35 lines)
@@ -0,0 +1,35 @@
//Used to process objects. Fires once every second.

SUBSYSTEM_DEF(processing)
    name = "Processing"
    priority = FIRE_PRIORITY_PROCESS
    flags = SS_BACKGROUND|SS_POST_FIRE_TIMING|SS_NO_INIT
    wait = 10

    var/stat_tag = "P" //Used for logging
    var/list/processing = list()
    var/list/currentrun = list()

/datum/controller/subsystem/processing/stat_entry()
    ..("[stat_tag]:[processing.len]")

/datum/controller/subsystem/processing/fire(resumed = 0)
    if (!resumed)
        currentrun = processing.Copy()
    //cache for sanic speed (lists are references anyways)
    var/list/current_run = currentrun

    while(current_run.len)
        var/datum/thing = current_run[current_run.len]
        current_run.len--
        if(QDELETED(thing))
            processing -= thing
        else if(thing.process(wait) == PROCESS_KILL)
            // fully stop so that a future START_PROCESSING will work
            STOP_PROCESSING(src, thing)
        if (MC_TICK_CHECK)
            return

/datum/proc/process()
    set waitfor = 0
    return PROCESS_KILL
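Sketch of how a datum would hook into the new processing subsystems. Illustrative only: the START_PROCESSING/STOP_PROCESSING macros are referenced by this commit but not defined in this diff, SSobj is assumed to be the global created by PROCESSING_SUBSYSTEM_DEF(obj) as in upstream /tg/ code, and /obj/machinery/widget is a made-up type.

/obj/machinery/widget
    var/charge = 0

/obj/machinery/widget/New()
    ..()
    START_PROCESSING(SSobj, src) // SSobj fires every 20 deciseconds (2 seconds)

/obj/machinery/widget/process()
    charge++
    if(charge >= 100)
        return PROCESS_KILL // fire() will STOP_PROCESSING this datum for us

/obj/machinery/widget/Destroy()
    STOP_PROCESSING(SSobj, src)
    return ..()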
code/controllers/subsystems/time_track.dm (new file, 37 lines)
@@ -0,0 +1,37 @@
SUBSYSTEM_DEF(time_track)
    name = "Time Tracking"
    wait = 600
    flags = SS_NO_INIT|SS_NO_TICK_CHECK
    runlevels = RUNLEVEL_LOBBY | RUNLEVELS_DEFAULT

    var/time_dilation_current = 0

    var/time_dilation_avg_fast = 0
    var/time_dilation_avg = 0
    var/time_dilation_avg_slow = 0

    var/first_run = TRUE

    var/last_tick_realtime = 0
    var/last_tick_byond_time = 0
    var/last_tick_tickcount = 0

/datum/controller/subsystem/time_track/fire()

    var/current_realtime = REALTIMEOFDAY
    var/current_byondtime = world.time
    var/current_tickcount = world.time/world.tick_lag

    if (!first_run)
        var/tick_drift = max(0, (((current_realtime - last_tick_realtime) - (current_byondtime - last_tick_byond_time)) / world.tick_lag))

        time_dilation_current = tick_drift / (current_tickcount - last_tick_tickcount) * 100

        time_dilation_avg_fast = MC_AVERAGE_FAST(time_dilation_avg_fast, time_dilation_current)
        time_dilation_avg = MC_AVERAGE(time_dilation_avg, time_dilation_avg_fast)
        time_dilation_avg_slow = MC_AVERAGE_SLOW(time_dilation_avg_slow, time_dilation_avg)
    else
        first_run = FALSE
    last_tick_realtime = current_realtime
    last_tick_byond_time = current_byondtime
    last_tick_tickcount = current_tickcount
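A quick worked example of the dilation math above, with made-up numbers that are not part of the commit:

// Suppose world.tick_lag = 0.5 and, between two fires (wait = 600 deciseconds),
// REALTIMEOFDAY advanced 660 deciseconds while world.time advanced 600:
//   tick_drift            = (660 - 600) / 0.5 = 120 ticks lost
//   ticks elapsed         = 600 / 0.5         = 1200 ticks
//   time_dilation_current = 120 / 1200 * 100  = 10% slowdown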
code/controllers/subsystems/timer.dm (new file, 522 lines)
@@ -0,0 +1,522 @@
#define BUCKET_LEN (world.fps*1*60) //how many ticks should we keep in the bucket. (1 minutes worth)
#define BUCKET_POS(timer) ((round((timer.timeToRun - SStimer.head_offset) / world.tick_lag) % BUCKET_LEN)||BUCKET_LEN)
#define TIMER_MAX (world.time + TICKS2DS(min(BUCKET_LEN-(SStimer.practical_offset-DS2TICKS(world.time - SStimer.head_offset))-1, BUCKET_LEN-1)))
#define TIMER_ID_MAX (2**24) //max float with integer precision

SUBSYSTEM_DEF(timer)
    name = "Timer"
    wait = 1 //SS_TICKER subsystem, so wait is in ticks
    init_order = INIT_ORDER_TIMER

    flags = SS_TICKER|SS_NO_INIT

    var/list/datum/timedevent/second_queue = list() //awe, yes, you've had first queue, but what about second queue?
    var/list/hashes = list()

    var/head_offset = 0 //world.time of the first entry in the the bucket.
    var/practical_offset = 1 //index of the first non-empty item in the bucket.
    var/bucket_resolution = 0 //world.tick_lag the bucket was designed for
    var/bucket_count = 0 //how many timers are in the buckets

    var/list/bucket_list = list() //list of buckets, each bucket holds every timer that has to run that byond tick.

    var/list/timer_id_dict = list() //list of all active timers assoicated to their timer id (for easy lookup)

    var/list/clienttime_timers = list() //special snowflake timers that run on fancy pansy "client time"

    var/last_invoke_tick = 0
    var/static/last_invoke_warning = 0
    var/static/bucket_auto_reset = TRUE

/datum/controller/subsystem/timer/PreInit()
    bucket_list.len = BUCKET_LEN
    head_offset = world.time
    bucket_resolution = world.tick_lag

/datum/controller/subsystem/timer/stat_entry(msg)
    ..("B:[bucket_count] P:[length(second_queue)] H:[length(hashes)] C:[length(clienttime_timers)] S:[length(timer_id_dict)]")

/datum/controller/subsystem/timer/fire(resumed = FALSE)
    var/lit = last_invoke_tick
    var/last_check = world.time - TICKS2DS(BUCKET_LEN*1.5)
    var/list/bucket_list = src.bucket_list

    if(!bucket_count)
        last_invoke_tick = world.time

    if(lit && lit < last_check && head_offset < last_check && last_invoke_warning < last_check)
        last_invoke_warning = world.time
        var/msg = "No regular timers processed in the last [BUCKET_LEN*1.5] ticks[bucket_auto_reset ? ", resetting buckets" : ""]!"
        message_admins(msg)
        WARNING(msg)
        if(bucket_auto_reset)
            bucket_resolution = 0

        log_world("Timer bucket reset. world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
        for (var/i in 1 to length(bucket_list))
            var/datum/timedevent/bucket_head = bucket_list[i]
            if (!bucket_head)
                continue

            log_world("Active timers at index [i]:")

            var/datum/timedevent/bucket_node = bucket_head
            var/anti_loop_check = 1000
            do
                log_world(get_timer_debug_string(bucket_node))
                bucket_node = bucket_node.next
                anti_loop_check--
            while(bucket_node && bucket_node != bucket_head && anti_loop_check)
        log_world("Active timers in the second_queue queue:")
        for(var/I in second_queue)
            log_world(get_timer_debug_string(I))

    var/cut_start_index = 1
    var/next_clienttime_timer_index = 0
    var/len = length(clienttime_timers)

    for (next_clienttime_timer_index in 1 to len)
        if (MC_TICK_CHECK)
            next_clienttime_timer_index--
            break
        var/datum/timedevent/ctime_timer = clienttime_timers[next_clienttime_timer_index]
        if (ctime_timer.timeToRun > REALTIMEOFDAY)
            next_clienttime_timer_index--
            break

        var/datum/callback/callBack = ctime_timer.callBack
        if (!callBack)
            clienttime_timers.Cut(next_clienttime_timer_index,next_clienttime_timer_index+1)
            CRASH("Invalid timer: [get_timer_debug_string(ctime_timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset], REALTIMEOFDAY: [REALTIMEOFDAY]")

        ctime_timer.spent = REALTIMEOFDAY
        callBack.InvokeAsync()

        if(ctime_timer.flags & TIMER_LOOP)
            ctime_timer.spent = 0
            clienttime_timers.Insert(ctime_timer, 1)
            cut_start_index++
        else
            qdel(ctime_timer)


    if (next_clienttime_timer_index)
        clienttime_timers.Cut(cut_start_index,next_clienttime_timer_index+1)

    if (MC_TICK_CHECK)
        return

    var/static/list/spent = list()
    var/static/datum/timedevent/timer
    if (practical_offset > BUCKET_LEN)
        head_offset += TICKS2DS(BUCKET_LEN)
        practical_offset = 1
        resumed = FALSE

    if ((length(bucket_list) != BUCKET_LEN) || (world.tick_lag != bucket_resolution))
        reset_buckets()
        bucket_list = src.bucket_list
        resumed = FALSE


    if (!resumed)
        timer = null

    while (practical_offset <= BUCKET_LEN && head_offset + (practical_offset*world.tick_lag) <= world.time)
        var/datum/timedevent/head = bucket_list[practical_offset]
        if (!timer || !head || timer == head)
            head = bucket_list[practical_offset]
            timer = head
        while (timer)
            var/datum/callback/callBack = timer.callBack
            if (!callBack)
                bucket_resolution = null //force bucket recreation
                CRASH("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")

            if (!timer.spent)
                spent += timer
                timer.spent = world.time
                callBack.InvokeAsync()
                last_invoke_tick = world.time

            if (MC_TICK_CHECK)
                return

            timer = timer.next
            if (timer == head)
                break


        bucket_list[practical_offset++] = null

        //we freed up a bucket, lets see if anything in second_queue needs to be shifted to that bucket.
        var/i = 0
        var/L = length(second_queue)
        for (i in 1 to L)
            timer = second_queue[i]
            if (timer.timeToRun >= TIMER_MAX)
                i--
                break

            if (timer.timeToRun < head_offset)
                bucket_resolution = null //force bucket recreation
                CRASH("[i] Invalid timer state: Timer in long run queue with a time to run less then head_offset. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")

                if (timer.callBack && !timer.spent)
                    timer.callBack.InvokeAsync()
                    spent += timer
                    bucket_count++
                else if(!QDELETED(timer))
                    qdel(timer)
                continue

            if (timer.timeToRun < head_offset + TICKS2DS(practical_offset))
                bucket_resolution = null //force bucket recreation
                CRASH("[i] Invalid timer state: Timer in long run queue that would require a backtrack to transfer to short run queue. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
                if (timer.callBack && !timer.spent)
                    timer.callBack.InvokeAsync()
                    spent += timer
                    bucket_count++
                else if(!QDELETED(timer))
                    qdel(timer)
                continue

            bucket_count++
            var/bucket_pos = max(1, BUCKET_POS(timer))

            var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
            if (!bucket_head)
                bucket_list[bucket_pos] = timer
                timer.next = null
                timer.prev = null
                continue

            if (!bucket_head.prev)
                bucket_head.prev = bucket_head
            timer.next = bucket_head
            timer.prev = bucket_head.prev
            timer.next.prev = timer
            timer.prev.next = timer
        if (i)
            second_queue.Cut(1, i+1)

        timer = null

    bucket_count -= length(spent)

    for (var/i in spent)
        var/datum/timedevent/qtimer = i
        if(QDELETED(qtimer))
            bucket_count++
            continue
        if(!(qtimer.flags & TIMER_LOOP))
            qdel(qtimer)
        else
            bucket_count++
            qtimer.spent = 0
            qtimer.bucketEject()
            if(qtimer.flags & TIMER_CLIENT_TIME)
                qtimer.timeToRun = REALTIMEOFDAY + qtimer.wait
            else
                qtimer.timeToRun = world.time + qtimer.wait
            qtimer.bucketJoin()

    spent.len = 0

//formated this way to be runtime resistant
/datum/controller/subsystem/timer/proc/get_timer_debug_string(datum/timedevent/TE)
    . = "Timer: [TE]"
    . += "Prev: [TE.prev ? TE.prev : "NULL"], Next: [TE.next ? TE.next : "NULL"]"
    if(TE.spent)
        . += ", SPENT([TE.spent])"
    if(QDELETED(TE))
        . += ", QDELETED"
    if(!TE.callBack)
        . += ", NO CALLBACK"

/datum/controller/subsystem/timer/proc/reset_buckets()
    var/list/bucket_list = src.bucket_list
    var/list/alltimers = list()
    //collect the timers currently in the bucket
    for (var/bucket_head in bucket_list)
        if (!bucket_head)
            continue
        var/datum/timedevent/bucket_node = bucket_head
        do
            alltimers += bucket_node
            bucket_node = bucket_node.next
        while(bucket_node && bucket_node != bucket_head)

    bucket_list.len = 0
    bucket_list.len = BUCKET_LEN

    practical_offset = 1
    bucket_count = 0
    head_offset = world.time
    bucket_resolution = world.tick_lag

    alltimers += second_queue
    if (!length(alltimers))
        return

    sortTim(alltimers, .proc/cmp_timer)

    var/datum/timedevent/head = alltimers[1]

    if (head.timeToRun < head_offset)
        head_offset = head.timeToRun

    var/new_bucket_count
    var/i = 1
    for (i in 1 to length(alltimers))
        var/datum/timedevent/timer = alltimers[1]
        if (!timer)
            continue

        var/bucket_pos = BUCKET_POS(timer)
        if (timer.timeToRun >= TIMER_MAX)
            i--
            break


        if (!timer.callBack || timer.spent)
            WARNING("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
            if (timer.callBack)
                qdel(timer)
            continue

        new_bucket_count++
        var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
        if (!bucket_head)
            bucket_list[bucket_pos] = timer
            timer.next = null
            timer.prev = null
            continue

        if (!bucket_head.prev)
            bucket_head.prev = bucket_head
        timer.next = bucket_head
        timer.prev = bucket_head.prev
        timer.next.prev = timer
        timer.prev.next = timer
    if (i)
        alltimers.Cut(1, i+1)
    second_queue = alltimers
    bucket_count = new_bucket_count


/datum/controller/subsystem/timer/Recover()
    second_queue |= SStimer.second_queue
    hashes |= SStimer.hashes
    timer_id_dict |= SStimer.timer_id_dict
    bucket_list |= SStimer.bucket_list

/datum/timedevent
    var/id
    var/datum/callback/callBack
    var/timeToRun
    var/wait
    var/hash
    var/list/flags
    var/spent = 0 //time we ran the timer.
    var/name //for easy debugging.
    //cicular doublely linked list
    var/datum/timedevent/next
    var/datum/timedevent/prev

/datum/timedevent/New(datum/callback/callBack, wait, flags, hash)
    var/static/nextid = 1
    id = TIMER_ID_NULL
    src.callBack = callBack
    src.wait = wait
    src.flags = flags
    src.hash = hash

    if (flags & TIMER_CLIENT_TIME)
        timeToRun = REALTIMEOFDAY + wait
    else
        timeToRun = world.time + wait

    if (flags & TIMER_UNIQUE)
        SStimer.hashes[hash] = src

    if (flags & TIMER_STOPPABLE)
        id = num2text(nextid, 100)
        if (nextid >= SHORT_REAL_LIMIT)
            nextid += min(1, 2**round(nextid/SHORT_REAL_LIMIT))
        else
            nextid++
        SStimer.timer_id_dict[id] = src

    name = "Timer: [id] (\ref[src]), TTR: [timeToRun], Flags: [jointext(bitfield2list(flags, list("TIMER_UNIQUE", "TIMER_OVERRIDE", "TIMER_CLIENT_TIME", "TIMER_STOPPABLE", "TIMER_NO_HASH_WAIT", "TIMER_LOOP")), ", ")], callBack: \ref[callBack], callBack.object: [callBack.object]\ref[callBack.object]([getcallingtype()]), callBack.delegate:[callBack.delegate]([callBack.arguments ? callBack.arguments.Join(", ") : ""])"

    if ((timeToRun < world.time || timeToRun < SStimer.head_offset) && !(flags & TIMER_CLIENT_TIME))
        CRASH("Invalid timer state: Timer created that would require a backtrack to run (addtimer would never let this happen): [SStimer.get_timer_debug_string(src)]")

    if (callBack.object != GLOBAL_PROC && !QDESTROYING(callBack.object))
        LAZYADD(callBack.object.active_timers, src)

    bucketJoin()

/datum/timedevent/Destroy()
    ..()
    if (flags & TIMER_UNIQUE && hash)
        SStimer.hashes -= hash

    if (callBack && callBack.object && callBack.object != GLOBAL_PROC && callBack.object.active_timers)
        callBack.object.active_timers -= src
        UNSETEMPTY(callBack.object.active_timers)

    callBack = null

    if (flags & TIMER_STOPPABLE)
        SStimer.timer_id_dict -= id

    if (flags & TIMER_CLIENT_TIME)
        if (!spent)
            spent = world.time
            SStimer.clienttime_timers -= src
        return QDEL_HINT_IWILLGC

    if (!spent)
        spent = world.time
        bucketEject()
    else
        if (prev && prev.next == src)
            prev.next = next
        if (next && next.prev == src)
            next.prev = prev
    next = null
    prev = null
    return QDEL_HINT_IWILLGC

/datum/timedevent/proc/bucketEject()
    var/bucketpos = BUCKET_POS(src)
    var/list/bucket_list = SStimer.bucket_list
    var/list/second_queue = SStimer.second_queue
    var/datum/timedevent/buckethead
    if(bucketpos > 0)
        buckethead = bucket_list[bucketpos]
    if(buckethead == src)
        bucket_list[bucketpos] = next
        SStimer.bucket_count--
    else if(timeToRun < TIMER_MAX || next || prev)
        SStimer.bucket_count--
    else
        var/l = length(second_queue)
        second_queue -= src
        if(l == length(second_queue))
            SStimer.bucket_count--
    if(prev != next)
        prev.next = next
        next.prev = prev
    else
        prev?.next = null
        next?.prev = null
    prev = next = null

/datum/timedevent/proc/bucketJoin()
    var/list/L

    if (flags & TIMER_CLIENT_TIME)
        L = SStimer.clienttime_timers
    else if (timeToRun >= TIMER_MAX)
        L = SStimer.second_queue

    if(L)
        BINARY_INSERT(src, L, datum/timedevent, timeToRun)
        return

    //get the list of buckets
    var/list/bucket_list = SStimer.bucket_list

    //calculate our place in the bucket list
    var/bucket_pos = BUCKET_POS(src)

    //get the bucket for our tick
    var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
    SStimer.bucket_count++
    //empty bucket, we will just add ourselves
    if (!bucket_head)
        bucket_list[bucket_pos] = src
        return
    //other wise, lets do a simplified linked list add.
    if (!bucket_head.prev)
        bucket_head.prev = bucket_head
    next = bucket_head
    prev = bucket_head.prev
    next.prev = src
    prev.next = src

/datum/timedevent/proc/getcallingtype()
    . = "ERROR"
    if (callBack.object == GLOBAL_PROC)
        . = "GLOBAL_PROC"
    else
        . = "[callBack.object.type]"

/proc/addtimer(datum/callback/callback, wait = 0, flags = 0)
    if (!callback)
        CRASH("addtimer called without a callback")

    if (wait < 0)
        crash_with("addtimer called with a negative wait. Converting to [world.tick_lag]")

    if (callback.object != GLOBAL_PROC && QDELETED(callback.object) && !QDESTROYING(callback.object))
        crash_with("addtimer called with a callback assigned to a qdeleted object. In the future such timers will not be supported and may refuse to run or run with a 0 wait")

    wait = max(CEILING(wait, world.tick_lag), world.tick_lag)

    if(wait >= INFINITY)
        CRASH("Attempted to create timer with INFINITY delay")

    var/hash

    if (flags & TIMER_UNIQUE)
        var/list/hashlist
        if(flags & TIMER_NO_HASH_WAIT)
            hashlist = list(callback.object, "(\ref[callback.object])", callback.delegate, flags & TIMER_CLIENT_TIME)
        else
            hashlist = list(callback.object, "(\ref[callback.object])", callback.delegate, wait, flags & TIMER_CLIENT_TIME)
        hashlist += callback.arguments
        hash = hashlist.Join("|||||||")

        var/datum/timedevent/hash_timer = SStimer.hashes[hash]
        if(hash_timer)
            if (hash_timer.spent) //it's pending deletion, pretend it doesn't exist.
                hash_timer.hash = null //but keep it from accidentally deleting us
            else
                if (flags & TIMER_OVERRIDE)
                    hash_timer.hash = null //no need having it delete it's hash if we are going to replace it
                    qdel(hash_timer)
                else
                    if (hash_timer.flags & TIMER_STOPPABLE)
                        . = hash_timer.id
                    return
    else if(flags & TIMER_OVERRIDE)
        crash_with("TIMER_OVERRIDE used without TIMER_UNIQUE")

    var/datum/timedevent/timer = new(callback, wait, flags, hash)
    return timer.id

/proc/deltimer(id)
    if (!id)
        return FALSE
    if (id == TIMER_ID_NULL)
        CRASH("Tried to delete a null timerid. Use TIMER_STOPPABLE flag")
    if (!istext(id))
        if (istype(id, /datum/timedevent))
            qdel(id)
            return TRUE
    //id is string
    var/datum/timedevent/timer = SStimer.timer_id_dict[id]
    if (timer && !timer.spent)
        qdel(timer)
        return TRUE
    return FALSE


#undef BUCKET_LEN
#undef BUCKET_POS
#undef TIMER_MAX
#undef TIMER_ID_MAX
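Sketch of how the new timer API would be called. Illustrative only: the CALLBACK() helper and the ignite proc and should_cancel var are assumed here and are not part of this diff; the TIMER_UNIQUE/TIMER_STOPPABLE flags and deltimer() are.

// one-shot timer: invoke ignite() on src in 50 deciseconds (5 seconds)
addtimer(CALLBACK(src, .proc/ignite), 50)

// unique, stoppable timer; keep the returned id so it can be cancelled later
var/timer_id = addtimer(CALLBACK(src, .proc/ignite), 50, TIMER_UNIQUE | TIMER_STOPPABLE)
if(should_cancel)
    deltimer(timer_id)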
@@ -32,7 +32,7 @@ var/global/list/round_voters = list() // Keeps track of the individuals voting for a given round type
    // Tell qdel() to Del() this object.
    return QDEL_HINT_HARDDEL_NOW

-/datum/controller/vote/proc/process() //called by master_controller
+/datum/controller/vote/process() //called by master_controller
    if(mode)
        // No more change mode votes after the game has started.
        if(mode == VOTE_GAMEMODE && ticker.current_state >= GAME_STATE_SETTING_UP)