Mirror of https://github.com/CHOMPStation2/CHOMPStation2.git (synced 2025-12-10 18:22:39 +00:00)

Commit: Scheduler now uses btime, and other improvements

code/__defines/btime.dm (new file, 18 lines)
@@ -0,0 +1,18 @@
+// Comment this out if the external btime library is unavailable
+#define PRECISE_TIMER_AVAILABLE
+
+#ifdef PRECISE_TIMER_AVAILABLE
+var/global/__btime__libName = "btime.[world.system_type==MS_WINDOWS?"dll":"so"]"
+#define TimeOfHour (__extern__timeofhour)
+#define __extern__timeofhour text2num(call(__btime__libName, "gettime")())
+/hook/startup/proc/checkbtime()
+	try
+		// This will always return 1 unless the btime library cannot be accessed
+		if(TimeOfHour || 1) return 1
+	catch(var/exception/e)
+		log_to_dd("PRECISE_TIMER_AVAILABLE is defined in btime.dm, but calling the btime library failed: [e]")
+		log_to_dd("This is a fatal error. The world will now shut down.")
+		del(world)
+#else
+#define TimeOfHour (world.timeofday % 36000)
+#endif
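For context on how the new define is meant to be consumed: TimeOfHour wraps every hour (0–35999 deciseconds) rather than every 24 hours like world.timeofday, so any caller that measures a duration with it has to correct for rollover. A minimal sketch of that pattern (the proc name deltaTimeOfHour is hypothetical; the scheduler code further down inlines the same arithmetic):

/proc/deltaTimeOfHour(var/start_time)
	// Deciseconds elapsed since start_time, assuming at most one hourly rollover in between.
	var/now = TimeOfHour
	if (now < start_time)
		start_time -= 36000	// TimeOfHour wrapped past the top of the hour
	return now - start_time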
@@ -11,7 +11,10 @@
 #define PROCESS_DEFAULT_HANG_ALERT_TIME 600 // 60 seconds
 #define PROCESS_DEFAULT_HANG_RESTART_TIME 900 // 90 seconds
 #define PROCESS_DEFAULT_SCHEDULE_INTERVAL 50 // 50 ticks
-#define PROCESS_DEFAULT_SLEEP_INTERVAL 2 // 2 ticks
+#define PROCESS_DEFAULT_SLEEP_INTERVAL 8 // 8 ticks
 #define PROCESS_DEFAULT_CPU_THRESHOLD 90 // 90%
 
-//#define UPDATE_QUEUE_DEBUG
+// SCHECK macros
+// This references src directly to work around a weird bug with try/catch
+#define SCHECK_EVERY(this_many_calls) if(++src.calls_since_last_scheck >= this_many_calls) sleepCheck()
+#define SCHECK SCHECK_EVERY(50)
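To show where the new macros sit in practice, here is a minimal sketch of a process calling SCHECK from its work loop. The /datum/controller/process/example type and its work list are hypothetical; only SCHECK, SCHECK_EVERY and sleepCheck() come from this commit:

/datum/controller/process/example
	var/tmp/list/workitems = list()

/datum/controller/process/example/doWork()
	for(var/datum/D in workitems)
		// ... do one unit of work on D here ...
		// SCHECK only falls through to sleepCheck() on every 50th call, so the
		// (possibly library-backed) TimeOfHour lookup is not paid on every object.
		SCHECK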
@@ -74,7 +74,6 @@ var/global/list/GlobalPool = list()
 
 	D.Destroy()
 	D.ResetVars()
-	D.disposed = 1 //Set to stop processing while pooled
 
 /proc/IsPooled(var/datum/D)
 	if(isnull(GlobalPool[D.type]))
@@ -86,7 +85,6 @@ var/global/list/GlobalPool = list()
 		New(arglist(args))
 	else
 		New(args)
-	disposed = null
 
 /atom/movable/Prepare(args)
 	var/list/args_list = args
@@ -602,13 +602,13 @@ proc/dd_sortedTextList(list/incoming)
 /datum/alarm/dd_SortValue()
 	return "[sanitize_old(last_name)]"
 
-/proc/subtypes(prototype)
+/proc/subtypesof(prototype)
 	return (typesof(prototype) - prototype)
 
 //creates every subtype of prototype (excluding prototype) and adds it to list L.
 //if no list/L is provided, one is created.
 /proc/init_subtypes(prototype, list/L)
 	if(!istype(L)) L = list()
-	for(var/path in subtypes(prototype))
+	for(var/path in subtypesof(prototype))
 		L += new path()
 	return L
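The rename matters because the scheduler's setup loop further down now calls subtypesof() instead of open-coding typesof(prototype) - prototype. A minimal usage sketch of init_subtypes(), with a /datum/fruit hierarchy that exists only for illustration:

/datum/fruit
/datum/fruit/apple
/datum/fruit/banana

/proc/list_fruit()
	// Instantiates apple and banana but skips the abstract /datum/fruit base type.
	var/list/fruit = init_subtypes(/datum/fruit)
	return fruit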
@@ -25,7 +25,6 @@
 	if (config.log_admin)
 		diary << "\[[time_stamp()]]ADMIN: [text][log_end]"
 
-
 /proc/log_debug(text)
 	if (config.log_debug)
 		diary << "\[[time_stamp()]]DEBUG: [text][log_end]"
@@ -34,7 +33,6 @@
 		if(C.is_preference_enabled(/datum/client_preference/debug/show_debug_logs))
 			C << "DEBUG: [text]"
 
-
 /proc/log_game(text)
 	if (config.log_game)
 		diary << "\[[time_stamp()]]GAME: [text][log_end]"
@@ -79,6 +77,11 @@
 	if (config.log_pda)
 		diary << "\[[time_stamp()]]PDA: [text][log_end]"
 
+/proc/log_to_dd(text)
+	world.log << text //this comes before the config check because it can't possibly runtime
+	if(config.log_world_output)
+		diary << "\[[time_stamp()]]DD_OUTPUT: [text][log_end]"
+
 /proc/log_misc(text)
 	diary << "\[[time_stamp()]]MISC: [text][log_end]"
 
@@ -1,32 +0,0 @@
-// DM Environment file for ProcessScheduler.dme.
-// All manual changes should be made outside the BEGIN_ and END_ blocks.
-// New source code should be placed in .dm files: choose File/New --> Code File.
-
-// BEGIN_INTERNALS
-// END_INTERNALS
-
-// BEGIN_FILE_DIR
-#define FILE_DIR .
-// END_FILE_DIR
-
-// BEGIN_PREFERENCES
-// END_PREFERENCES
-
-// BEGIN_INCLUDE
-#include "core\_define.dm"
-#include "core\_stubs.dm"
-#include "core\process.dm"
-#include "core\processScheduler.dm"
-#include "core\updateQueue.dm"
-#include "core\updateQueueWorker.dm"
-#include "test\processSchedulerView.dm"
-#include "test\testDyingUpdateQueueProcess.dm"
-#include "test\testHarness.dm"
-#include "test\testHungProcess.dm"
-#include "test\testNiceProcess.dm"
-#include "test\testSlowProcess.dm"
-#include "test\testUpdateQueue.dm"
-#include "test\testUpdateQueueProcess.dm"
-#include "test\testZombieProcess.dm"
-// END_INCLUDE
-
@@ -4,15 +4,7 @@
|
|||||||
* This file contains constructs that the process scheduler expects to exist
|
* This file contains constructs that the process scheduler expects to exist
|
||||||
* in a standard ss13 fork.
|
* in a standard ss13 fork.
|
||||||
*/
|
*/
|
||||||
/*
|
|
||||||
/**
|
|
||||||
* message_admins
|
|
||||||
*
|
|
||||||
* sends a message to admins
|
|
||||||
*/
|
|
||||||
/proc/message_admins(msg)
|
|
||||||
world << msg
|
|
||||||
*/
|
|
||||||
/**
|
/**
|
||||||
* logTheThing
|
* logTheThing
|
||||||
*
|
*
|
||||||
@@ -25,14 +17,3 @@
|
|||||||
world << "Diary: \[[diaryType]:[type]] [text]"
|
world << "Diary: \[[diaryType]:[type]] [text]"
|
||||||
else
|
else
|
||||||
world << "Log: \[[type]] [text]"
|
world << "Log: \[[type]] [text]"
|
||||||
|
|
||||||
/**
|
|
||||||
* var/disposed
|
|
||||||
*
|
|
||||||
* In goonstation, disposed is set to 1 after an object enters the delete queue
|
|
||||||
* or the object is placed in an object pool (effectively out-of-play so to speak)
|
|
||||||
*/
|
|
||||||
/datum/var/disposed
|
|
||||||
// Garbage collection (controller).
|
|
||||||
/datum/var/gcDestroyed
|
|
||||||
/datum/var/timeDestroyed
|
|
||||||
@@ -48,7 +48,7 @@
|
|||||||
// This controls how often the process will yield (call sleep(0)) while it is running.
|
// This controls how often the process will yield (call sleep(0)) while it is running.
|
||||||
// Every concurrent process should sleep periodically while running in order to allow other
|
// Every concurrent process should sleep periodically while running in order to allow other
|
||||||
// processes to execute concurrently.
|
// processes to execute concurrently.
|
||||||
var/tmp/sleep_interval = PROCESS_DEFAULT_SLEEP_INTERVAL
|
var/tmp/sleep_interval
|
||||||
|
|
||||||
// hang_warning_time - this is the time (in 1/10 seconds) after which the server will begin to show "maybe hung" in the context window
|
// hang_warning_time - this is the time (in 1/10 seconds) after which the server will begin to show "maybe hung" in the context window
|
||||||
var/tmp/hang_warning_time = PROCESS_DEFAULT_HANG_WARNING_TIME
|
var/tmp/hang_warning_time = PROCESS_DEFAULT_HANG_WARNING_TIME
|
||||||
@@ -59,20 +59,20 @@
|
|||||||
// hang_restart_time - After this much time(in 1/10 seconds), the server will automatically kill and restart the process.
|
// hang_restart_time - After this much time(in 1/10 seconds), the server will automatically kill and restart the process.
|
||||||
var/tmp/hang_restart_time = PROCESS_DEFAULT_HANG_RESTART_TIME
|
var/tmp/hang_restart_time = PROCESS_DEFAULT_HANG_RESTART_TIME
|
||||||
|
|
||||||
// cpu_threshold - if world.cpu >= cpu_threshold, scheck() will call sleep(1) to defer further work until the next tick. This keeps a process from driving a tick into overtime (causing perceptible lag)
|
|
||||||
var/tmp/cpu_threshold = PROCESS_DEFAULT_CPU_THRESHOLD
|
|
||||||
|
|
||||||
// How many times in the current run has the process deferred work till the next tick?
|
// How many times in the current run has the process deferred work till the next tick?
|
||||||
var/tmp/cpu_defer_count = 0
|
var/tmp/cpu_defer_count = 0
|
||||||
|
|
||||||
|
// How many SCHECKs have been skipped (to limit btime calls)
|
||||||
|
var/tmp/calls_since_last_scheck = 0
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* recordkeeping vars
|
* recordkeeping vars
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Records the time (server ticks) at which the process last finished sleeping
|
// Records the time (1/10s timeofday) at which the process last finished sleeping
|
||||||
var/tmp/last_slept = 0
|
var/tmp/last_slept = 0
|
||||||
|
|
||||||
// Records the time (s-ticks) at which the process last began running
|
// Records the time (1/10s timeofday) at which the process last began running
|
||||||
var/tmp/run_start = 0
|
var/tmp/run_start = 0
|
||||||
|
|
||||||
// Records the number of times this process has been killed and restarted
|
// Records the number of times this process has been killed and restarted
|
||||||
@@ -85,26 +85,33 @@
|
|||||||
|
|
||||||
var/tmp/last_object
|
var/tmp/last_object
|
||||||
|
|
||||||
datum/controller/process/New(var/datum/controller/processScheduler/scheduler)
|
// Counts the number of times each exception has occurred; an exception is ignored after 10 hits and its count resets after ten minutes
|
||||||
|
var/tmp/list/exceptions = list()
|
||||||
|
|
||||||
|
// Number of deciseconds to delay before starting the process
|
||||||
|
var/start_delay = 0
|
||||||
|
|
||||||
|
/datum/controller/process/New(var/datum/controller/processScheduler/scheduler)
|
||||||
..()
|
..()
|
||||||
main = scheduler
|
main = scheduler
|
||||||
previousStatus = "idle"
|
previousStatus = "idle"
|
||||||
idle()
|
idle()
|
||||||
name = "process"
|
name = "process"
|
||||||
schedule_interval = 50
|
schedule_interval = 50
|
||||||
sleep_interval = 2
|
sleep_interval = world.tick_lag / PROCESS_DEFAULT_SLEEP_INTERVAL
|
||||||
last_slept = 0
|
last_slept = 0
|
||||||
run_start = 0
|
run_start = 0
|
||||||
ticks = 0
|
ticks = 0
|
||||||
last_task = 0
|
last_task = 0
|
||||||
last_object = null
|
last_object = null
|
||||||
|
|
||||||
datum/controller/process/proc/started()
|
/datum/controller/process/proc/started()
|
||||||
|
var/timeofhour = TimeOfHour
|
||||||
// Initialize last_slept so we can know when to sleep
|
// Initialize last_slept so we can know when to sleep
|
||||||
last_slept = world.timeofday
|
last_slept = timeofhour
|
||||||
|
|
||||||
// Initialize run_start so we can detect hung processes.
|
// Initialize run_start so we can detect hung processes.
|
||||||
run_start = world.timeofday
|
run_start = timeofhour
|
||||||
|
|
||||||
// Initialize defer count
|
// Initialize defer count
|
||||||
cpu_defer_count = 0
|
cpu_defer_count = 0
|
||||||
@@ -114,65 +121,65 @@ datum/controller/process/proc/started()
|
|||||||
|
|
||||||
onStart()
|
onStart()
|
||||||
|
|
||||||
datum/controller/process/proc/finished()
|
/datum/controller/process/proc/finished()
|
||||||
ticks++
|
ticks++
|
||||||
idle()
|
idle()
|
||||||
main.processFinished(src)
|
main.processFinished(src)
|
||||||
|
|
||||||
onFinish()
|
onFinish()
|
||||||
|
|
||||||
datum/controller/process/proc/doWork()
|
/datum/controller/process/proc/doWork()
|
||||||
|
|
||||||
datum/controller/process/proc/setup()
|
/datum/controller/process/proc/setup()
|
||||||
|
|
||||||
datum/controller/process/proc/process()
|
/datum/controller/process/proc/process()
|
||||||
started()
|
started()
|
||||||
doWork()
|
doWork()
|
||||||
finished()
|
finished()
|
||||||
|
|
||||||
datum/controller/process/proc/running()
|
/datum/controller/process/proc/running()
|
||||||
idle = 0
|
idle = 0
|
||||||
queued = 0
|
queued = 0
|
||||||
running = 1
|
running = 1
|
||||||
hung = 0
|
hung = 0
|
||||||
setStatus(PROCESS_STATUS_RUNNING)
|
setStatus(PROCESS_STATUS_RUNNING)
|
||||||
|
|
||||||
datum/controller/process/proc/idle()
|
/datum/controller/process/proc/idle()
|
||||||
queued = 0
|
queued = 0
|
||||||
running = 0
|
running = 0
|
||||||
idle = 1
|
idle = 1
|
||||||
hung = 0
|
hung = 0
|
||||||
setStatus(PROCESS_STATUS_IDLE)
|
setStatus(PROCESS_STATUS_IDLE)
|
||||||
|
|
||||||
datum/controller/process/proc/queued()
|
/datum/controller/process/proc/queued()
|
||||||
idle = 0
|
idle = 0
|
||||||
running = 0
|
running = 0
|
||||||
queued = 1
|
queued = 1
|
||||||
hung = 0
|
hung = 0
|
||||||
setStatus(PROCESS_STATUS_QUEUED)
|
setStatus(PROCESS_STATUS_QUEUED)
|
||||||
|
|
||||||
datum/controller/process/proc/hung()
|
/datum/controller/process/proc/hung()
|
||||||
hung = 1
|
hung = 1
|
||||||
setStatus(PROCESS_STATUS_HUNG)
|
setStatus(PROCESS_STATUS_HUNG)
|
||||||
|
|
||||||
datum/controller/process/proc/handleHung()
|
/datum/controller/process/proc/handleHung()
|
||||||
|
var/timeofhour = TimeOfHour
|
||||||
var/datum/lastObj = last_object
|
var/datum/lastObj = last_object
|
||||||
var/lastObjType = "null"
|
var/lastObjType = "null"
|
||||||
if(istype(lastObj))
|
if(istype(lastObj))
|
||||||
lastObjType = lastObj.type
|
lastObjType = lastObj.type
|
||||||
|
|
||||||
// If world.timeofday has rolled over, then we need to adjust.
|
// If timeofhour has rolled over, then we need to adjust.
|
||||||
if (world.timeofday < run_start)
|
if (timeofhour < run_start)
|
||||||
run_start -= 864000
|
run_start -= 36000
|
||||||
|
var/msg = "[name] process hung at tick #[ticks]. Process was unresponsive for [(timeofhour - run_start) / 10] seconds and was restarted. Last task: [last_task]. Last Object Type: [lastObjType]"
|
||||||
var/msg = "[name] process hung at tick #[ticks]. Process was unresponsive for [(world.timeofday - run_start) / 10] seconds and was restarted. Last task: [last_task]. Last Object Type: [lastObjType]"
|
|
||||||
logTheThing("debug", null, null, msg)
|
logTheThing("debug", null, null, msg)
|
||||||
logTheThing("diary", null, null, msg, "debug")
|
logTheThing("diary", null, null, msg, "debug")
|
||||||
message_admins(msg)
|
message_admins(msg)
|
||||||
|
|
||||||
main.restartProcess(src.name)
|
main.restartProcess(src.name)
|
||||||
|
|
||||||
datum/controller/process/proc/kill()
|
/datum/controller/process/proc/kill()
|
||||||
if (!killed)
|
if (!killed)
|
||||||
var/msg = "[name] process was killed at tick #[ticks]."
|
var/msg = "[name] process was killed at tick #[ticks]."
|
||||||
logTheThing("debug", null, null, msg)
|
logTheThing("debug", null, null, msg)
|
||||||
@@ -182,59 +189,68 @@ datum/controller/process/proc/kill()
|
|||||||
// Allow inheritors to clean up if needed
|
// Allow inheritors to clean up if needed
|
||||||
onKill()
|
onKill()
|
||||||
|
|
||||||
killed = TRUE
|
// This should del
|
||||||
|
del(src)
|
||||||
|
|
||||||
del(src) // This should del
|
// Do not call this directly - use SCHECK or SCHECK_EVERY
|
||||||
|
/datum/controller/process/proc/sleepCheck(var/tickId = 0)
|
||||||
datum/controller/process/proc/scheck(var/tickId = 0)
|
calls_since_last_scheck = 0
|
||||||
if (killed)
|
if (killed)
|
||||||
// The kill proc is the only place where killed is set.
|
// The kill proc is the only place where killed is set.
|
||||||
// The kill proc should have deleted this datum, and all sleeping procs that are
|
// The kill proc should have deleted this datum, and all sleeping procs that are
|
||||||
// owned by it.
|
// owned by it.
|
||||||
CRASH("A killed process is still running somehow...")
|
CRASH("A killed process is still running somehow...")
|
||||||
|
if (hung)
|
||||||
|
// This will only really help if the doWork proc ends up in an infinite loop.
|
||||||
|
handleHung()
|
||||||
|
CRASH("Process [name] hung and was restarted.")
|
||||||
|
|
||||||
// For each tick the process defers, it increments the cpu_defer_count so we don't
|
if (main.getCurrentTickElapsedTime() > main.timeAllowance)
|
||||||
// defer indefinitely
|
sleep(world.tick_lag)
|
||||||
if (world.cpu >= cpu_threshold + cpu_defer_count * 10)
|
|
||||||
sleep(1)
|
|
||||||
cpu_defer_count++
|
cpu_defer_count++
|
||||||
last_slept = world.timeofday
|
last_slept = TimeOfHour
|
||||||
else
|
else
|
||||||
// If world.timeofday has rolled over, then we need to adjust.
|
var/timeofhour = TimeOfHour
|
||||||
if (world.timeofday < last_slept)
|
// If timeofhour has rolled over, then we need to adjust.
|
||||||
last_slept -= 864000
|
if (timeofhour < last_slept)
|
||||||
|
last_slept -= 36000
|
||||||
|
|
||||||
if (world.timeofday > last_slept + sleep_interval)
|
if (timeofhour > last_slept + sleep_interval)
|
||||||
// If we haven't slept in sleep_interval ticks, sleep to allow other work to proceed.
|
// If we haven't slept in sleep_interval deciseconds, sleep to allow other work to proceed.
|
||||||
sleep(0)
|
sleep(0)
|
||||||
last_slept = world.timeofday
|
last_slept = TimeOfHour
|
||||||
|
|
||||||
datum/controller/process/proc/update()
|
/datum/controller/process/proc/update()
|
||||||
// Clear delta
|
// Clear delta
|
||||||
if(previousStatus != status)
|
if(previousStatus != status)
|
||||||
setStatus(status)
|
setStatus(status)
|
||||||
|
|
||||||
var/elapsedTime = getElapsedTime()
|
var/elapsedTime = getElapsedTime()
|
||||||
|
|
||||||
if (elapsedTime > hang_restart_time)
|
if (hung)
|
||||||
|
handleHung()
|
||||||
|
return
|
||||||
|
else if (elapsedTime > hang_restart_time)
|
||||||
hung()
|
hung()
|
||||||
else if (elapsedTime > hang_alert_time)
|
else if (elapsedTime > hang_alert_time)
|
||||||
setStatus(PROCESS_STATUS_PROBABLY_HUNG)
|
setStatus(PROCESS_STATUS_PROBABLY_HUNG)
|
||||||
else if (elapsedTime > hang_warning_time)
|
else if (elapsedTime > hang_warning_time)
|
||||||
setStatus(PROCESS_STATUS_MAYBE_HUNG)
|
setStatus(PROCESS_STATUS_MAYBE_HUNG)
|
||||||
|
|
||||||
datum/controller/process/proc/getElapsedTime()
|
|
||||||
if (world.timeofday < run_start)
|
|
||||||
return world.timeofday - (run_start - 864000)
|
|
||||||
return world.timeofday - run_start
|
|
||||||
|
|
||||||
datum/controller/process/proc/tickDetail()
|
/datum/controller/process/proc/getElapsedTime()
|
||||||
|
var/timeofhour = TimeOfHour
|
||||||
|
if (timeofhour < run_start)
|
||||||
|
return timeofhour - (run_start - 36000)
|
||||||
|
return timeofhour - run_start
|
||||||
|
|
||||||
|
/datum/controller/process/proc/tickDetail()
|
||||||
return
|
return
|
||||||
|
|
||||||
datum/controller/process/proc/getContext()
|
/datum/controller/process/proc/getContext()
|
||||||
return "<tr><td>[name]</td><td>[main.averageRunTime(src)]</td><td>[main.last_run_time[src]]</td><td>[main.highest_run_time[src]]</td><td>[ticks]</td></tr>\n"
|
return "<tr><td>[name]</td><td>[main.averageRunTime(src)]</td><td>[main.last_run_time[src]]</td><td>[main.highest_run_time[src]]</td><td>[ticks]</td></tr>\n"
|
||||||
|
|
||||||
datum/controller/process/proc/getContextData()
|
/datum/controller/process/proc/getContextData()
|
||||||
return list(
|
return list(
|
||||||
"name" = name,
|
"name" = name,
|
||||||
"averageRunTime" = main.averageRunTime(src),
|
"averageRunTime" = main.averageRunTime(src),
|
||||||
@@ -246,10 +262,10 @@ datum/controller/process/proc/getContextData()
|
|||||||
"disabled" = disabled
|
"disabled" = disabled
|
||||||
)
|
)
|
||||||
|
|
||||||
datum/controller/process/proc/getStatus()
|
/datum/controller/process/proc/getStatus()
|
||||||
return status
|
return status
|
||||||
|
|
||||||
datum/controller/process/proc/getStatusText(var/s = 0)
|
/datum/controller/process/proc/getStatusText(var/s = 0)
|
||||||
if(!s)
|
if(!s)
|
||||||
s = status
|
s = status
|
||||||
switch(s)
|
switch(s)
|
||||||
@@ -268,21 +284,21 @@ datum/controller/process/proc/getStatusText(var/s = 0)
|
|||||||
else
|
else
|
||||||
return "UNKNOWN"
|
return "UNKNOWN"
|
||||||
|
|
||||||
datum/controller/process/proc/getPreviousStatus()
|
/datum/controller/process/proc/getPreviousStatus()
|
||||||
return previousStatus
|
return previousStatus
|
||||||
|
|
||||||
datum/controller/process/proc/getPreviousStatusText()
|
/datum/controller/process/proc/getPreviousStatusText()
|
||||||
return getStatusText(previousStatus)
|
return getStatusText(previousStatus)
|
||||||
|
|
||||||
datum/controller/process/proc/setStatus(var/newStatus)
|
/datum/controller/process/proc/setStatus(var/newStatus)
|
||||||
previousStatus = status
|
previousStatus = status
|
||||||
status = newStatus
|
status = newStatus
|
||||||
|
|
||||||
datum/controller/process/proc/setLastTask(var/task, var/object)
|
/datum/controller/process/proc/setLastTask(var/task, var/object)
|
||||||
last_task = task
|
last_task = task
|
||||||
last_object = object
|
last_object = object
|
||||||
|
|
||||||
datum/controller/process/proc/_copyStateFrom(var/datum/controller/process/target)
|
/datum/controller/process/proc/_copyStateFrom(var/datum/controller/process/target)
|
||||||
main = target.main
|
main = target.main
|
||||||
name = target.name
|
name = target.name
|
||||||
schedule_interval = target.schedule_interval
|
schedule_interval = target.schedule_interval
|
||||||
@@ -295,28 +311,62 @@ datum/controller/process/proc/_copyStateFrom(var/datum/controller/process/target
|
|||||||
last_object = target.last_object
|
last_object = target.last_object
|
||||||
copyStateFrom(target)
|
copyStateFrom(target)
|
||||||
|
|
||||||
datum/controller/process/proc/copyStateFrom(var/datum/controller/process/target)
|
/datum/controller/process/proc/copyStateFrom(var/datum/controller/process/target)
|
||||||
|
|
||||||
datum/controller/process/proc/onKill()
|
/datum/controller/process/proc/onKill()
|
||||||
|
|
||||||
datum/controller/process/proc/onStart()
|
/datum/controller/process/proc/onStart()
|
||||||
|
|
||||||
datum/controller/process/proc/onFinish()
|
/datum/controller/process/proc/onFinish()
|
||||||
|
|
||||||
datum/controller/process/proc/disable()
|
/datum/controller/process/proc/disable()
|
||||||
disabled = 1
|
disabled = 1
|
||||||
|
|
||||||
datum/controller/process/proc/enable()
|
/datum/controller/process/proc/enable()
|
||||||
disabled = 0
|
disabled = 0
|
||||||
|
|
||||||
|
/datum/controller/process/proc/getAverageRunTime()
|
||||||
|
return main.averageRunTime(src)
|
||||||
/datum/controller/process/proc/getLastRunTime()
|
/datum/controller/process/proc/getLastRunTime()
|
||||||
return main.getProcessLastRunTime(src)
|
return main.getProcessLastRunTime(src)
|
||||||
|
|
||||||
|
/datum/controller/process/proc/getHighestRunTime()
|
||||||
|
return main.getProcessHighestRunTime(src)
|
||||||
|
|
||||||
/datum/controller/process/proc/getTicks()
|
/datum/controller/process/proc/getTicks()
|
||||||
return ticks
|
return ticks
|
||||||
|
|
||||||
/datum/controller/process/proc/getStatName()
|
/datum/controller/process/proc/statProcess()
|
||||||
return name
|
var/averageRunTime = round(getAverageRunTime(), 0.1)/10
|
||||||
|
var/lastRunTime = round(getLastRunTime(), 0.1)/10
|
||||||
|
var/highestRunTime = round(getHighestRunTime(), 0.1)/10
|
||||||
|
stat("[name]", "T#[getTicks()] | AR [averageRunTime] | LR [lastRunTime] | HR [highestRunTime] | D [cpu_defer_count]")
|
||||||
|
|
||||||
/datum/controller/process/proc/getTickTime()
|
/datum/controller/process/proc/catchException(var/exception/e, var/thrower)
|
||||||
return "#[getTicks()]\t- [getLastRunTime()]"
|
var/etext = "[e]"
|
||||||
|
var/eid = "[e]" // Exception ID, for tracking repeated exceptions
|
||||||
|
var/ptext = "" // "processing..." text, for what was being processed (if known)
|
||||||
|
if(istype(e))
|
||||||
|
etext += " in [e.file], line [e.line]"
|
||||||
|
eid = "[e.file]:[e.line]"
|
||||||
|
if(eid in exceptions)
|
||||||
|
if(exceptions[eid]++ >= 10)
|
||||||
|
return
|
||||||
|
else
|
||||||
|
exceptions[eid] = 1
|
||||||
|
if(istype(thrower, /datum))
|
||||||
|
var/datum/D = thrower
|
||||||
|
ptext = " processing [D.type]"
|
||||||
|
if(istype(thrower, /atom))
|
||||||
|
var/atom/A = thrower
|
||||||
|
ptext += " ([A]) ([A.x],[A.y],[A.z])"
|
||||||
|
log_to_dd("\[[time_stamp()]\] Process [name] caught exception[ptext]: [etext]")
|
||||||
|
if(exceptions[eid] >= 10)
|
||||||
|
log_to_dd("This exception will now be ignored for ten minutes.")
|
||||||
|
spawn(6000)
|
||||||
|
exceptions[eid] = 0
|
||||||
|
|
||||||
|
/datum/controller/process/proc/catchBadType(var/datum/caught)
|
||||||
|
if(isnull(caught) || !istype(caught) || !isnull(caught.gcDestroyed))
|
||||||
|
return // Only bother with types we can identify and that don't belong
|
||||||
|
catchException("Type [caught.type] does not belong in process' queue")
|
||||||
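The new catchException()/catchBadType() helpers are defined on /datum/controller/process for subclasses to call from their own work loops; a hypothetical call pattern is sketched below. The example process type, its queue list and /datum/example_worker are all illustrative, not part of this commit:

/datum/example_worker/proc/work()
	return

/datum/controller/process/example
	var/tmp/list/queue = list()

/datum/controller/process/example/doWork()
	for(var/datum/D in queue)
		if(!istype(D, /datum/example_worker))
			catchBadType(D)	// logs the stray entry and lets the loop continue
			continue
		var/datum/example_worker/W = D
		try
			W.work()
		catch(var/exception/e)
			catchException(e, W)	// logged with file:line; repeats are muted after 10 hits for ten minutes
		SCHECK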
@@ -17,7 +17,10 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
// Process name -> process object map
|
// Process name -> process object map
|
||||||
var/tmp/datum/controller/process/list/nameToProcessMap = new
|
var/tmp/datum/controller/process/list/nameToProcessMap = new
|
||||||
|
|
||||||
// Process last start times
|
// Process last queued times (world time)
|
||||||
|
var/tmp/datum/controller/process/list/last_queued = new
|
||||||
|
|
||||||
|
// Process last start times (real time)
|
||||||
var/tmp/datum/controller/process/list/last_start = new
|
var/tmp/datum/controller/process/list/last_start = new
|
||||||
|
|
||||||
// Process last run durations
|
// Process last run durations
|
||||||
@@ -29,8 +32,8 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
// Process highest run time
|
// Process highest run time
|
||||||
var/tmp/datum/controller/process/list/highest_run_time = new
|
var/tmp/datum/controller/process/list/highest_run_time = new
|
||||||
|
|
||||||
// Sleep 1 tick -- This may be too aggressive.
|
// How long to sleep between runs (set to tick_lag in New)
|
||||||
var/tmp/scheduler_sleep_interval = 1
|
var/tmp/scheduler_sleep_interval
|
||||||
|
|
||||||
// Controls whether the scheduler is running or not
|
// Controls whether the scheduler is running or not
|
||||||
var/tmp/isRunning = 0
|
var/tmp/isRunning = 0
|
||||||
@@ -38,6 +41,25 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
// Setup for these processes will be deferred until all the other processes are set up.
|
// Setup for these processes will be deferred until all the other processes are set up.
|
||||||
var/tmp/list/deferredSetupList = new
|
var/tmp/list/deferredSetupList = new
|
||||||
|
|
||||||
|
var/tmp/currentTick = 0
|
||||||
|
|
||||||
|
var/tmp/currentTickStart = 0
|
||||||
|
|
||||||
|
var/tmp/timeAllowance = 0
|
||||||
|
|
||||||
|
var/tmp/cpuAverage = 0
|
||||||
|
|
||||||
|
var/tmp/timeAllowanceMax = 0
|
||||||
|
|
||||||
|
/datum/controller/processScheduler/New()
|
||||||
|
..()
|
||||||
|
// When the process scheduler is first new'd, tick_lag may be wrong, so these
|
||||||
|
// get re-initialized when the process scheduler is started.
|
||||||
|
// (These are kept here for any processes that decide to process before round start)
|
||||||
|
scheduler_sleep_interval = world.tick_lag
|
||||||
|
timeAllowance = world.tick_lag * 0.5
|
||||||
|
timeAllowanceMax = world.tick_lag
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* deferSetupFor
|
* deferSetupFor
|
||||||
* @param path processPath
|
* @param path processPath
|
||||||
@@ -57,7 +79,7 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
|
|
||||||
var/process
|
var/process
|
||||||
// Add all the processes we can find, except for the ticker
|
// Add all the processes we can find, except for the ticker
|
||||||
for (process in typesof(/datum/controller/process) - /datum/controller/process)
|
for (process in subtypesof(/datum/controller/process))
|
||||||
if (!(process in deferredSetupList))
|
if (!(process in deferredSetupList))
|
||||||
addProcess(new process(src))
|
addProcess(new process(src))
|
||||||
|
|
||||||
@@ -66,11 +88,22 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
|
|
||||||
/datum/controller/processScheduler/proc/start()
|
/datum/controller/processScheduler/proc/start()
|
||||||
isRunning = 1
|
isRunning = 1
|
||||||
|
// tick_lag will have been set by now, so re-initialize these
|
||||||
|
scheduler_sleep_interval = world.tick_lag
|
||||||
|
timeAllowance = world.tick_lag * 0.5
|
||||||
|
timeAllowanceMax = world.tick_lag
|
||||||
|
updateStartDelays()
|
||||||
spawn(0)
|
spawn(0)
|
||||||
process()
|
process()
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/process()
|
/datum/controller/processScheduler/proc/process()
|
||||||
|
updateCurrentTickData()
|
||||||
|
|
||||||
|
for(var/i=world.tick_lag,i<world.tick_lag*50,i+=world.tick_lag)
|
||||||
|
spawn(i) updateCurrentTickData()
|
||||||
while(isRunning)
|
while(isRunning)
|
||||||
|
// Hopefully spawning this for 50 ticks in the future will make it the first thing in the queue.
|
||||||
|
spawn(world.tick_lag*50) updateCurrentTickData()
|
||||||
checkRunningProcesses()
|
checkRunningProcesses()
|
||||||
queueProcesses()
|
queueProcesses()
|
||||||
runQueuedProcesses()
|
runQueuedProcesses()
|
||||||
@@ -92,15 +125,11 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
// Check status changes
|
// Check status changes
|
||||||
if(status != previousStatus)
|
if(status != previousStatus)
|
||||||
//Status changed.
|
//Status changed.
|
||||||
|
|
||||||
switch(status)
|
switch(status)
|
||||||
if(PROCESS_STATUS_MAYBE_HUNG)
|
|
||||||
message_admins("Process '[p.name]' is [p.getStatusText(status)].")
|
|
||||||
if(PROCESS_STATUS_PROBABLY_HUNG)
|
if(PROCESS_STATUS_PROBABLY_HUNG)
|
||||||
message_admins("Process '[p.name]' is [p.getStatusText(status)].")
|
message_admins("Process '[p.name]' may be hung.")
|
||||||
if(PROCESS_STATUS_HUNG)
|
if(PROCESS_STATUS_HUNG)
|
||||||
message_admins("Process '[p.name]' is [p.getStatusText(status)].")
|
message_admins("Process '[p.name]' is hung and will be restarted.")
|
||||||
p.handleHung()
|
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/queueProcesses()
|
/datum/controller/processScheduler/proc/queueProcesses()
|
||||||
for(var/datum/controller/process/p in processes)
|
for(var/datum/controller/process/p in processes)
|
||||||
@@ -108,12 +137,8 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
if (p.disabled || p.running || p.queued || !p.idle)
|
if (p.disabled || p.running || p.queued || !p.idle)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
// If world.timeofday has rolled over, then we need to adjust.
|
|
||||||
if (world.timeofday < last_start[p])
|
|
||||||
last_start[p] -= 864000
|
|
||||||
|
|
||||||
// If the process should be running by now, go ahead and queue it
|
// If the process should be running by now, go ahead and queue it
|
||||||
if (world.timeofday > last_start[p] + p.schedule_interval)
|
if (world.time >= last_queued[p] + p.schedule_interval)
|
||||||
setQueuedProcessState(p)
|
setQueuedProcessState(p)
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/runQueuedProcesses()
|
/datum/controller/processScheduler/proc/runQueuedProcesses()
|
||||||
@@ -176,6 +201,10 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
|
|
||||||
nameToProcessMap[newProcess.name] = newProcess
|
nameToProcessMap[newProcess.name] = newProcess
|
||||||
|
|
||||||
|
/datum/controller/processScheduler/proc/updateStartDelays()
|
||||||
|
for(var/datum/controller/process/p in processes)
|
||||||
|
if(p.start_delay)
|
||||||
|
last_queued[p] = world.time - p.start_delay
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/runProcess(var/datum/controller/process/process)
|
/datum/controller/processScheduler/proc/runProcess(var/datum/controller/process/process)
|
||||||
spawn(0)
|
spawn(0)
|
||||||
@@ -197,8 +226,6 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
if (!(process in idle))
|
if (!(process in idle))
|
||||||
idle += process
|
idle += process
|
||||||
|
|
||||||
process.idle()
|
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/setQueuedProcessState(var/datum/controller/process/process)
|
/datum/controller/processScheduler/proc/setQueuedProcessState(var/datum/controller/process/process)
|
||||||
if (process in running)
|
if (process in running)
|
||||||
running -= process
|
running -= process
|
||||||
@@ -218,21 +245,22 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
if (!(process in running))
|
if (!(process in running))
|
||||||
running += process
|
running += process
|
||||||
|
|
||||||
process.running()
|
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/recordStart(var/datum/controller/process/process, var/time = null)
|
/datum/controller/processScheduler/proc/recordStart(var/datum/controller/process/process, var/time = null)
|
||||||
if (isnull(time))
|
if (isnull(time))
|
||||||
time = world.timeofday
|
time = TimeOfHour
|
||||||
|
last_queued[process] = world.time
|
||||||
|
last_start[process] = time
|
||||||
|
else
|
||||||
|
last_queued[process] = (time == 0 ? 0 : world.time)
|
||||||
last_start[process] = time
|
last_start[process] = time
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/recordEnd(var/datum/controller/process/process, var/time = null)
|
/datum/controller/processScheduler/proc/recordEnd(var/datum/controller/process/process, var/time = null)
|
||||||
if (isnull(time))
|
if (isnull(time))
|
||||||
time = world.timeofday
|
time = TimeOfHour
|
||||||
|
|
||||||
// If world.timeofday has rolled over, then we need to adjust.
|
// If world.timeofday has rolled over, then we need to adjust.
|
||||||
if (time < last_start[process])
|
if (time < last_start[process])
|
||||||
last_start[process] -= 864000
|
last_start[process] -= 36000
|
||||||
|
|
||||||
var/lastRunTime = time - last_start[process]
|
var/lastRunTime = time - last_start[process]
|
||||||
|
|
||||||
@@ -273,6 +301,12 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
return t / c
|
return t / c
|
||||||
return c
|
return c
|
||||||
|
|
||||||
|
/datum/controller/processScheduler/proc/getProcessLastRunTime(var/datum/controller/process/process)
|
||||||
|
return last_run_time[process]
|
||||||
|
|
||||||
|
/datum/controller/processScheduler/proc/getProcessHighestRunTime(var/datum/controller/process/process)
|
||||||
|
return highest_run_time[process]
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/getStatusData()
|
/datum/controller/processScheduler/proc/getStatusData()
|
||||||
var/list/data = new
|
var/list/data = new
|
||||||
|
|
||||||
@@ -310,11 +344,39 @@ var/global/datum/controller/processScheduler/processScheduler
|
|||||||
var/datum/controller/process/process = nameToProcessMap[processName]
|
var/datum/controller/process/process = nameToProcessMap[processName]
|
||||||
process.disable()
|
process.disable()
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/getProcess(var/name)
|
/datum/controller/processScheduler/proc/getCurrentTickElapsedTime()
|
||||||
return nameToProcessMap[name]
|
if (world.time > currentTick)
|
||||||
|
updateCurrentTickData()
|
||||||
|
return 0
|
||||||
|
else
|
||||||
|
return TimeOfHour - currentTickStart
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/getProcessLastRunTime(var/datum/controller/process/process)
|
/datum/controller/processScheduler/proc/updateCurrentTickData()
|
||||||
return last_run_time[process]
|
if (world.time > currentTick)
|
||||||
|
// New tick!
|
||||||
|
currentTick = world.time
|
||||||
|
currentTickStart = TimeOfHour
|
||||||
|
updateTimeAllowance()
|
||||||
|
cpuAverage = (world.cpu + cpuAverage + cpuAverage) / 3
|
||||||
|
|
||||||
/datum/controller/processScheduler/proc/getIsRunning()
|
/datum/controller/processScheduler/proc/updateTimeAllowance()
|
||||||
return isRunning
|
// Time allowance goes down linearly with world.cpu.
|
||||||
|
var/tmp/error = cpuAverage - 100
|
||||||
|
var/tmp/timeAllowanceDelta = sign(error) * -0.5 * world.tick_lag * max(0, 0.001 * abs(error))
|
||||||
|
|
||||||
|
//timeAllowance = world.tick_lag * min(1, 0.5 * ((200/max(1,cpuAverage)) - 1))
|
||||||
|
timeAllowance = min(timeAllowanceMax, max(0, timeAllowance + timeAllowanceDelta))
|
||||||
|
|
||||||
|
/datum/controller/processScheduler/proc/sign(var/x)
|
||||||
|
if (x == 0)
|
||||||
|
return 1
|
||||||
|
return x / abs(x)
|
||||||
|
|
||||||
|
/datum/controller/processScheduler/proc/statProcesses()
|
||||||
|
if(!isRunning)
|
||||||
|
stat("Processes", "Scheduler not running")
|
||||||
|
return
|
||||||
|
stat("Processes", "[processes.len] (R [running.len] / Q [queued.len] / I [idle.len])")
|
||||||
|
stat(null, "[round(cpuAverage, 0.1)] CPU, [round(timeAllowance, 0.1)/10] TA")
|
||||||
|
for(var/datum/controller/process/p in processes)
|
||||||
|
p.statProcess()
|
||||||
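A rough worked example of the new updateTimeAllowance() feedback loop, assuming world.tick_lag = 0.5 (the numbers below are illustrative, not from the commit):

	cpuAverage = 150  ->  error = 50   ->  timeAllowanceDelta = 1 * -0.5 * 0.5 * 0.05 = -0.0125
	cpuAverage = 50   ->  error = -50  ->  timeAllowanceDelta = -1 * -0.5 * 0.5 * 0.05 = +0.0125

So while the averaged CPU sits above 100, the per-tick time allowance shrinks by 0.0125 deciseconds per tick (floored at 0) and sleepCheck() starts deferring work to the next tick sooner; once CPU drops back below 100, the allowance creeps back up toward timeAllowanceMax, which is one full tick.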
@@ -1,127 +0,0 @@
|
|||||||
/**
|
|
||||||
* updateQueue.dm
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifdef UPDATE_QUEUE_DEBUG
|
|
||||||
#define uq_dbg(text) world << text
|
|
||||||
#else
|
|
||||||
#define uq_dbg(text)
|
|
||||||
#endif
|
|
||||||
/datum/updateQueue
|
|
||||||
var/tmp/list/objects
|
|
||||||
var/tmp/previousStart
|
|
||||||
var/tmp/procName
|
|
||||||
var/tmp/list/arguments
|
|
||||||
var/tmp/datum/updateQueueWorker/currentWorker
|
|
||||||
var/tmp/workerTimeout
|
|
||||||
var/tmp/adjustedWorkerTimeout
|
|
||||||
var/tmp/currentKillCount
|
|
||||||
var/tmp/totalKillCount
|
|
||||||
|
|
||||||
/datum/updateQueue/New(list/objects = list(), procName = "update", list/arguments = list(), workerTimeout = 2, inplace = 0)
|
|
||||||
..()
|
|
||||||
|
|
||||||
uq_dbg("Update queue created.")
|
|
||||||
|
|
||||||
// Init proc allows for recycling the worker.
|
|
||||||
init(objects = objects, procName = procName, arguments = arguments, workerTimeout = workerTimeout, inplace = inplace)
|
|
||||||
|
|
||||||
/**
|
|
||||||
* init
|
|
||||||
* @param list objects objects to update
|
|
||||||
* @param text procName the proc to call on each item in the object list
|
|
||||||
* @param list arguments optional arguments to pass to the update proc
|
|
||||||
* @param number workerTimeout number of ticks to wait for an update to
|
|
||||||
finish before forking a new update worker
|
|
||||||
* @param bool inplace whether the updateQueue should make a copy of objects.
|
|
||||||
the internal list will be modified, so it is usually
|
|
||||||
a good idea to leave this alone. Default behavior is to
|
|
||||||
copy.
|
|
||||||
*/
|
|
||||||
/datum/updateQueue/proc/init(list/objects = list(), procName = "update", list/arguments = list(), workerTimeout = 2, inplace = 0)
|
|
||||||
uq_dbg("Update queue initialization started.")
|
|
||||||
|
|
||||||
if (!inplace)
|
|
||||||
// Make an internal copy of the list so we're not modifying the original.
|
|
||||||
initList(objects)
|
|
||||||
else
|
|
||||||
src.objects = objects
|
|
||||||
|
|
||||||
// Init vars
|
|
||||||
src.procName = procName
|
|
||||||
src.arguments = arguments
|
|
||||||
src.workerTimeout = workerTimeout
|
|
||||||
|
|
||||||
adjustedWorkerTimeout = workerTimeout
|
|
||||||
currentKillCount = 0
|
|
||||||
totalKillCount = 0
|
|
||||||
|
|
||||||
uq_dbg("Update queue initialization finished. procName = '[procName]'")
|
|
||||||
|
|
||||||
/datum/updateQueue/proc/initList(list/toCopy)
|
|
||||||
/**
|
|
||||||
* We will copy the list in reverse order, as our doWork proc
|
|
||||||
* will access them by popping an element off the end of the list.
|
|
||||||
* This ends up being quite a lot faster than taking elements off
|
|
||||||
* the head of the list.
|
|
||||||
*/
|
|
||||||
objects = new
|
|
||||||
|
|
||||||
uq_dbg("Copying [toCopy.len] items for processing.")
|
|
||||||
|
|
||||||
for(var/i=toCopy.len,i>0,)
|
|
||||||
objects.len++
|
|
||||||
objects[objects.len] = toCopy[i--]
|
|
||||||
|
|
||||||
/datum/updateQueue/proc/Run()
|
|
||||||
uq_dbg("Starting run...")
|
|
||||||
|
|
||||||
startWorker()
|
|
||||||
while (istype(currentWorker) && !currentWorker.finished)
|
|
||||||
sleep(2)
|
|
||||||
checkWorker()
|
|
||||||
|
|
||||||
uq_dbg("UpdateQueue completed run.")
|
|
||||||
|
|
||||||
/datum/updateQueue/proc/checkWorker()
|
|
||||||
if(istype(currentWorker))
|
|
||||||
// If world.timeofday has rolled over, then we need to adjust.
|
|
||||||
if(world.timeofday < currentWorker.lastStart)
|
|
||||||
currentWorker.lastStart -= 864000
|
|
||||||
|
|
||||||
if(world.timeofday - currentWorker.lastStart > adjustedWorkerTimeout)
|
|
||||||
// This worker is a bit slow, let's spawn a new one and kill the old one.
|
|
||||||
uq_dbg("Current worker is lagging... starting a new one.")
|
|
||||||
killWorker()
|
|
||||||
startWorker()
|
|
||||||
else // No worker!
|
|
||||||
uq_dbg("update queue ended up without a worker... starting a new one...")
|
|
||||||
startWorker()
|
|
||||||
|
|
||||||
/datum/updateQueue/proc/startWorker()
|
|
||||||
// only run the worker if we have objects to work on
|
|
||||||
if(objects.len)
|
|
||||||
uq_dbg("Starting worker process.")
|
|
||||||
|
|
||||||
// No need to create a fresh worker if we already have one...
|
|
||||||
if (istype(currentWorker))
|
|
||||||
currentWorker.init(objects, procName, arguments)
|
|
||||||
else
|
|
||||||
currentWorker = new(objects, procName, arguments)
|
|
||||||
currentWorker.start()
|
|
||||||
else
|
|
||||||
uq_dbg("Queue is empty. No worker was started.")
|
|
||||||
currentWorker = null
|
|
||||||
|
|
||||||
/datum/updateQueue/proc/killWorker()
|
|
||||||
// Kill the worker
|
|
||||||
currentWorker.kill()
|
|
||||||
currentWorker = null
|
|
||||||
// After we kill a worker, yield so that if the worker's been tying up the cpu, other stuff can immediately resume
|
|
||||||
sleep(-1)
|
|
||||||
currentKillCount++
|
|
||||||
totalKillCount++
|
|
||||||
if (currentKillCount >= 3)
|
|
||||||
uq_dbg("[currentKillCount] workers have been killed with a timeout of [adjustedWorkerTimeout]. Increasing worker timeout to compensate.")
|
|
||||||
adjustedWorkerTimeout++
|
|
||||||
currentKillCount = 0
|
|
||||||
@@ -1,83 +0,0 @@
|
|||||||
datum/updateQueueWorker
|
|
||||||
var/tmp/list/objects
|
|
||||||
var/tmp/killed
|
|
||||||
var/tmp/finished
|
|
||||||
var/tmp/procName
|
|
||||||
var/tmp/list/arguments
|
|
||||||
var/tmp/lastStart
|
|
||||||
var/tmp/cpuThreshold
|
|
||||||
|
|
||||||
datum/updateQueueWorker/New(var/list/objects, var/procName, var/list/arguments, var/cpuThreshold = 90)
|
|
||||||
..()
|
|
||||||
uq_dbg("updateQueueWorker created.")
|
|
||||||
|
|
||||||
init(objects, procName, arguments, cpuThreshold)
|
|
||||||
|
|
||||||
datum/updateQueueWorker/proc/init(var/list/objects, var/procName, var/list/arguments, var/cpuThreshold = 90)
|
|
||||||
src.objects = objects
|
|
||||||
src.procName = procName
|
|
||||||
src.arguments = arguments
|
|
||||||
src.cpuThreshold = cpuThreshold
|
|
||||||
|
|
||||||
killed = 0
|
|
||||||
finished = 0
|
|
||||||
|
|
||||||
datum/updateQueueWorker/proc/doWork()
|
|
||||||
// If there's nothing left to execute or we were killed, mark finished and return.
|
|
||||||
if (!objects || !objects.len) return finished()
|
|
||||||
|
|
||||||
lastStart = world.timeofday // Absolute number of ticks since the world started up
|
|
||||||
|
|
||||||
var/datum/object = objects[objects.len] // Pull out the object
|
|
||||||
objects.len-- // Remove the object from the list
|
|
||||||
|
|
||||||
if (istype(object) && !isturf(object) && !object.disposed && isnull(object.gcDestroyed)) // We only work with real objects
|
|
||||||
call(object, procName)(arglist(arguments))
|
|
||||||
|
|
||||||
// If there's nothing left to execute
|
|
||||||
// or we were killed while running the above code, mark finished and return.
|
|
||||||
if (!objects || !objects.len) return finished()
|
|
||||||
|
|
||||||
if (world.cpu > cpuThreshold)
|
|
||||||
// We don't want to force a tick into overtime!
|
|
||||||
// If the tick is about to go overtime, spawn the next update to go
|
|
||||||
// in the next tick.
|
|
||||||
uq_dbg("tick went into overtime with world.cpu = [world.cpu], deferred next update to next tick [1+(world.time / world.tick_lag)]")
|
|
||||||
|
|
||||||
spawn(1)
|
|
||||||
doWork()
|
|
||||||
else
|
|
||||||
spawn(0) // Execute anonymous function immediately as if we were in a while loop...
|
|
||||||
doWork()
|
|
||||||
|
|
||||||
datum/updateQueueWorker/proc/finished()
|
|
||||||
uq_dbg("updateQueueWorker finished.")
|
|
||||||
/**
|
|
||||||
* If the worker was killed while it was working on something, it
|
|
||||||
* should delete itself when it finally finishes working on it.
|
|
||||||
* Meanwhile, the updateQueue will have proceeded on with the rest of
|
|
||||||
* the queue. This will also terminate the spawned function that was
|
|
||||||
* created in the kill() proc.
|
|
||||||
*/
|
|
||||||
if(killed)
|
|
||||||
del(src)
|
|
||||||
|
|
||||||
finished = 1
|
|
||||||
|
|
||||||
datum/updateQueueWorker/proc/kill()
|
|
||||||
uq_dbg("updateQueueWorker killed.")
|
|
||||||
killed = 1
|
|
||||||
objects = null
|
|
||||||
|
|
||||||
/**
|
|
||||||
* If the worker is not done in 30 seconds after it's killed,
|
|
||||||
* we'll forcibly delete it, causing the anonymous function it was
|
|
||||||
* running to be terminated. Hasta la vista, baby.
|
|
||||||
*/
|
|
||||||
spawn(300)
|
|
||||||
del(src)
|
|
||||||
|
|
||||||
datum/updateQueueWorker/proc/start()
|
|
||||||
uq_dbg("updateQueueWorker started.")
|
|
||||||
spawn(0)
|
|
||||||
doWork()
|
|
||||||
@@ -1,94 +0,0 @@
|
|||||||
/datum/processSchedulerView
|
|
||||||
|
|
||||||
/datum/processSchedulerView/Topic(href, href_list)
|
|
||||||
if (!href_list["action"])
|
|
||||||
return
|
|
||||||
|
|
||||||
switch (href_list["action"])
|
|
||||||
if ("kill")
|
|
||||||
var/toKill = href_list["name"]
|
|
||||||
processScheduler.killProcess(toKill)
|
|
||||||
refreshProcessTable()
|
|
||||||
if ("enable")
|
|
||||||
var/toEnable = href_list["name"]
|
|
||||||
processScheduler.enableProcess(toEnable)
|
|
||||||
refreshProcessTable()
|
|
||||||
if ("disable")
|
|
||||||
var/toDisable = href_list["name"]
|
|
||||||
processScheduler.disableProcess(toDisable)
|
|
||||||
refreshProcessTable()
|
|
||||||
if ("refresh")
|
|
||||||
refreshProcessTable()
|
|
||||||
|
|
||||||
/datum/processSchedulerView/proc/refreshProcessTable()
|
|
||||||
windowCall("handleRefresh", getProcessTable())
|
|
||||||
|
|
||||||
/datum/processSchedulerView/proc/windowCall(var/function, var/data = null)
|
|
||||||
usr << output(data, "processSchedulerContext.browser:[function]")
|
|
||||||
|
|
||||||
/datum/processSchedulerView/proc/getProcessTable()
|
|
||||||
var/text = "<table class=\"table table-striped\"><thead><tr><td>Name</td><td>Avg(s)</td><td>Last(s)</td><td>Highest(s)</td><td>Tickcount</td><td>Tickrate</td><td>State</td><td>Action</td></tr></thead><tbody>"
|
|
||||||
// and the context of each
|
|
||||||
for (var/list/data in processScheduler.getStatusData())
|
|
||||||
text += "<tr>"
|
|
||||||
text += "<td>[data["name"]]</td>"
|
|
||||||
text += "<td>[num2text(data["averageRunTime"]/10,3)]</td>"
|
|
||||||
text += "<td>[num2text(data["lastRunTime"]/10,3)]</td>"
|
|
||||||
text += "<td>[num2text(data["highestRunTime"]/10,3)]</td>"
|
|
||||||
text += "<td>[num2text(data["ticks"],4)]</td>"
|
|
||||||
text += "<td>[data["schedule"]]</td>"
|
|
||||||
text += "<td>[data["status"]]</td>"
|
|
||||||
text += "<td><button class=\"btn kill-btn\" data-process-name=\"[data["name"]]\" id=\"kill-[data["name"]]\">Kill</button>"
|
|
||||||
if (data["disabled"])
|
|
||||||
text += "<button class=\"btn enable-btn\" data-process-name=\"[data["name"]]\" id=\"enable-[data["name"]]\">Enable</button>"
|
|
||||||
else
|
|
||||||
text += "<button class=\"btn disable-btn\" data-process-name=\"[data["name"]]\" id=\"disable-[data["name"]]\">Disable</button>"
|
|
||||||
text += "</td>"
|
|
||||||
text += "</tr>"
|
|
||||||
|
|
||||||
text += "</tbody></table>"
|
|
||||||
return text
|
|
||||||
|
|
||||||
/**
|
|
||||||
* getContext
|
|
||||||
* Outputs an interface showing stats for all processes.
|
|
||||||
*/
|
|
||||||
/datum/processSchedulerView/proc/getContext()
|
|
||||||
bootstrap_browse()
|
|
||||||
usr << browse('processScheduler.js', "file=processScheduler.js;display=0")
|
|
||||||
|
|
||||||
var/text = {"<html><head>
|
|
||||||
<title>Process Scheduler Detail</title>
|
|
||||||
<script type="text/javascript">var ref = '\ref[src]';</script>
|
|
||||||
[bootstrap_includes()]
|
|
||||||
<script type="text/javascript" src="processScheduler.js"></script>
|
|
||||||
</head>
|
|
||||||
<body>
|
|
||||||
<h2>Process Scheduler</h2>
|
|
||||||
<div class="btn-group">
|
|
||||||
<button id="btn-refresh" class="btn">Refresh</button>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<h3>The process scheduler controls [processScheduler.getProcessCount()] loops.<h3>"}
|
|
||||||
|
|
||||||
text += "<div id=\"processTable\">"
|
|
||||||
text += getProcessTable()
|
|
||||||
text += "</div></body></html>"
|
|
||||||
|
|
||||||
usr << browse(text, "window=processSchedulerContext;size=800x600")
|
|
||||||
|
|
||||||
/datum/processSchedulerView/proc/bootstrap_browse()
|
|
||||||
usr << browse('bower_components/jquery/dist/jquery.min.js', "file=jquery.min.js;display=0")
|
|
||||||
usr << browse('bower_components/bootstrap2.3.2/bootstrap/js/bootstrap.min.js', "file=bootstrap.min.js;display=0")
|
|
||||||
usr << browse('bower_components/bootstrap2.3.2/bootstrap/css/bootstrap.min.css', "file=bootstrap.min.css;display=0")
|
|
||||||
usr << browse('bower_components/bootstrap2.3.2/bootstrap/img/glyphicons-halflings-white.png', "file=glyphicons-halflings-white.png;display=0")
|
|
||||||
usr << browse('bower_components/bootstrap2.3.2/bootstrap/img/glyphicons-halflings.png', "file=glyphicons-halflings.png;display=0")
|
|
||||||
usr << browse('bower_components/json2/json2.js', "file=json2.js;display=0")
|
|
||||||
|
|
||||||
/datum/processSchedulerView/proc/bootstrap_includes()
|
|
||||||
return {"
|
|
||||||
<link rel="stylesheet" href="bootstrap.min.css" />
|
|
||||||
<script type="text/javascript" src="json2.js"></script>
|
|
||||||
<script type="text/javascript" src="jquery.min.js"></script>
|
|
||||||
<script type="text/javascript" src="bootstrap.js"></script>
|
|
||||||
"}
|
|
||||||
@@ -1,27 +0,0 @@
|
|||||||
/**
|
|
||||||
* testDyingUpdateQueueProcess
|
|
||||||
* This process is an example of a process using an updateQueue.
|
|
||||||
* The datums updated by this process behave badly and block the update loop
|
|
||||||
* by sleeping. If you #define UPDATE_QUEUE_DEBUG, you will see the updateQueue
|
|
||||||
* killing off its worker processes and spawning new ones to work around slow
|
|
||||||
* updates. This means that if you have a code path that sleeps for a long time
|
|
||||||
* in mob.Life once in a blue moon, the mob update loop will not hang.
|
|
||||||
*/
|
|
||||||
/datum/slowTestDatum/proc/wackyUpdateProcessName()
|
|
||||||
sleep(rand(0,20)) // Randomly REALLY slow :|
|
|
||||||
|
|
||||||
/datum/controller/process/testDyingUpdateQueueProcess
|
|
||||||
var/tmp/datum/updateQueue/updateQueueInstance
|
|
||||||
var/tmp/list/testDatums = list()
|
|
||||||
|
|
||||||
/datum/controller/process/testDyingUpdateQueueProcess/setup()
|
|
||||||
name = "Dying UpdateQueue Process"
|
|
||||||
schedule_interval = 30 // every 3 seconds
|
|
||||||
updateQueueInstance = new
|
|
||||||
for(var/i = 1, i < 30, i++)
|
|
||||||
testDatums.Add(new /datum/slowTestDatum)
|
|
||||||
|
|
||||||
/datum/controller/process/testDyingUpdateQueueProcess/doWork()
|
|
||||||
updateQueueInstance.init(testDatums, "wackyUpdateProcessName")
|
|
||||||
updateQueueInstance.Run()
|
|
||||||
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
/*
|
|
||||||
These are simple defaults for your project.
|
|
||||||
*/
|
|
||||||
#define DEBUG
|
|
||||||
|
|
||||||
var/global/datum/processSchedulerView/processSchedulerView
|
|
||||||
|
|
||||||
world
|
|
||||||
loop_checks = 0
|
|
||||||
New()
|
|
||||||
..()
|
|
||||||
processScheduler = new
|
|
||||||
processSchedulerView = new
|
|
||||||
|
|
||||||
mob
|
|
||||||
step_size = 8
|
|
||||||
|
|
||||||
New()
|
|
||||||
..()
|
|
||||||
|
|
||||||
|
|
||||||
verb
|
|
||||||
startProcessScheduler()
|
|
||||||
set name = "Start Process Scheduler"
|
|
||||||
processScheduler.setup()
|
|
||||||
processScheduler.start()
|
|
||||||
|
|
||||||
getProcessSchedulerContext()
|
|
||||||
set name = "Get Process Scheduler Status Panel"
|
|
||||||
processSchedulerView.getContext()
|
|
||||||
|
|
||||||
runUpdateQueueTests()
|
|
||||||
set name = "Run Update Queue Testsuite"
|
|
||||||
var/datum/updateQueueTests/t = new
|
|
||||||
t.runTests()
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
/**
|
|
||||||
* testHungProcess
|
|
||||||
* This process is an example of a simple update loop process that hangs.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/datum/controller/process/testHungProcess/setup()
|
|
||||||
name = "Hung Process"
|
|
||||||
schedule_interval = 30 // every 3 seconds
|
|
||||||
|
|
||||||
/datum/controller/process/testHungProcess/doWork()
|
|
||||||
sleep(1000) // FUCK
|
|
||||||
// scheck is also responsible for handling hung processes. If a process
|
|
||||||
// hangs, and later resumes, but has already been killed by the scheduler,
|
|
||||||
// scheck will force the process to bail out.
|
|
||||||
scheck()
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
/**
|
|
||||||
* testNiceProcess
|
|
||||||
* This process is an example of a simple update loop process that is
|
|
||||||
* relatively fast.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/datum/controller/process/testNiceProcess/setup()
|
|
||||||
name = "Nice Process"
|
|
||||||
schedule_interval = 10 // every second
|
|
||||||
|
|
||||||
/datum/controller/process/testNiceProcess/doWork()
|
|
||||||
sleep(rand(1,5)) // Just to pretend we're doing something
|
|
||||||
|
|
||||||
@@ -1,28 +0,0 @@
|
|||||||
/**
|
|
||||||
* testSlowProcess
|
|
||||||
* This process is an example of a simple update loop process that is slow.
|
|
||||||
* The update loop here sleeps inside to provide an example, but if you had
|
|
||||||
* a computationally intensive loop process that is simply slow, you can use
|
|
||||||
* scheck() inside the loop to force it to yield periodically according to
|
|
||||||
* the sleep_interval var. By default, scheck will cause a loop to sleep every
|
|
||||||
* 2 ticks.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/datum/controller/process/testSlowProcess/setup()
|
|
||||||
name = "Slow Process"
|
|
||||||
schedule_interval = 30 // every 3 seconds
|
|
||||||
|
|
||||||
/datum/controller/process/testSlowProcess/doWork()
|
|
||||||
// set background = 1 will cause loop constructs to sleep periodically,
|
|
||||||
// whenever the BYOND scheduler deems it productive to do so.
|
|
||||||
// This behavior is not always sufficient, nor is it always consistent.
|
|
||||||
// Rather than leaving it up to the BYOND scheduler, we can control it
|
|
||||||
// ourselves and leave nothing to the black box.
|
|
||||||
set background = 1
|
|
||||||
|
|
||||||
for(var/i=1,i<30,i++)
|
|
||||||
// Just to pretend we're doing something here
|
|
||||||
sleep(rand(3, 5))
|
|
||||||
|
|
||||||
// Forces this loop to yield(sleep) periodically.
|
|
||||||
scheck()
|
|
||||||
@@ -1,209 +0,0 @@
var/global/list/updateQueueTestCount = list()

/datum/updateQueueTests
    var/start
    proc
        runTests()
            world << "<b>Running 9 tests...</b>"
            testUpdateQueuePerformance()
            sleep(1)
            testInplace()
            sleep(1)
            testInplaceUpdateQueuePerformance()
            sleep(1)
            testUpdateQueueReinit()
            sleep(1)
            testCrashingQueue()
            sleep(1)
            testEmptyQueue()
            sleep(1)
            testManySlowItemsInQueue()
            sleep(1)
            testVariableWorkerTimeout()
            sleep(1)
            testReallySlowItemInQueue()
            sleep(1)
            world << "<b>Finished!</b>"

        beginTiming()
            start = world.time

        endTiming(text)
            var/time = (world.time - start) / world.tick_lag
            world << {"<b><font color="blue">Performance - [text] - <font color="green">[time]</font> ticks</font></b>"}

        getCount()
            return updateQueueTestCount[updateQueueTestCount.len]

        incrementTestCount()
            updateQueueTestCount.len++
            updateQueueTestCount[updateQueueTestCount.len] = 0

        assertCountEquals(count, text)
            assertThat(getCount() == count, text)

        assertCountLessThan(count, text)
            assertThat(getCount() < count, text)

        assertCountGreaterThan(count, text)
            assertThat(getCount() > count, text)

        assertThat(condition, text)
            if (condition)
                world << {"<font color="green"><b>PASS</b></font>: [text]"}
            else
                world << {"<b><font color="red">FAIL</font>: [text]</b>"}

        testUpdateQueuePerformance()
            incrementTestCount()
            var/list/objs = new
            for(var/i=1,i<=100000,i++)
                objs.Add(new /datum/uqTestDatum/fast(updateQueueTestCount.len))

            var/datum/updateQueue/uq = new(objs)

            beginTiming()
            uq.Run()
            endTiming("updating 100000 simple objects")

            assertCountEquals(100000, "test that update queue updates all objects expected")
            del(objs)
            del(uq)

        testUpdateQueueReinit()
            incrementTestCount()
            var/list/objs = new
            for(var/i=1,i<=100,i++)
                objs.Add(new /datum/uqTestDatum/fast(updateQueueTestCount.len))

            var/datum/updateQueue/uq = new(objs)
            uq.Run()
            objs = new

            for(var/i=1,i<=100,i++)
                objs.Add(new /datum/uqTestDatum/fast(updateQueueTestCount.len))
            uq.init(objs)
            uq.Run()
            assertCountEquals(200, "test that update queue reinitializes properly and updates all objects as expected.")
            del(objs)
            del(uq)

        testInplace()
            incrementTestCount()
            var/list/objs = new
            for(var/i=1,i<=100,i++)
                objs.Add(new /datum/uqTestDatum/fast(updateQueueTestCount.len))
            var/datum/updateQueue/uq = new(objects = objs, inplace = 1)
            uq.Run()
            assertThat(objs.len == 0, "test that update queue inplace option really works inplace")
            assertCountEquals(100, "test that inplace update queue updates the right number of objects")
            del(objs)
            del(uq)

        testInplaceUpdateQueuePerformance()
            incrementTestCount()
            var/list/objs = new
            for(var/i=1,i<=100000,i++)
                objs.Add(new /datum/uqTestDatum/fast(updateQueueTestCount.len))

            var/datum/updateQueue/uq = new(objs)

            beginTiming()
            uq.Run()
            endTiming("updating 100000 simple objects in place")
            del(objs)
            del(uq)

        testCrashingQueue()
            incrementTestCount()
            var/list/objs = new
            for(var/i=1,i<=10,i++)
                objs.Add(new /datum/uqTestDatum/fast(updateQueueTestCount.len))
            objs.Add(new /datum/uqTestDatum/crasher(updateQueueTestCount.len))
            for(var/i=1,i<=10,i++)
                objs.Add(new /datum/uqTestDatum/fast(updateQueueTestCount.len))

            var/datum/updateQueue/uq = new(objs)
            uq.Run()
            assertCountEquals(20, "test that update queue handles crashed update procs OK")
            del(objs)
            del(uq)

        testEmptyQueue()
            incrementTestCount()
            var/list/objs = new
            var/datum/updateQueue/uq = new(objs)
            uq.Run()
            assertCountEquals(0, "test that update queue doesn't barf on empty lists")
            del(objs)
            del(uq)

        testManySlowItemsInQueue()
            incrementTestCount()
            var/list/objs = new
            for(var/i=1,i<=30,i++)
                objs.Add(new /datum/uqTestDatum/slow(updateQueueTestCount.len))
            var/datum/updateQueue/uq = new(objs)
            uq.Run()
            assertCountEquals(30, "test that update queue slows down execution if too many objects are slow to update")
            del(objs)
            del(uq)

        testVariableWorkerTimeout()
            incrementTestCount()
            var/list/objs = new
            for(var/i=1,i<=20,i++)
                objs.Add(new /datum/uqTestDatum/slow(updateQueueTestCount.len))
            var/datum/updateQueue/uq = new(objs, workerTimeout=6)
            uq.Run()
            assertCountEquals(20, "test that variable worker timeout works properly")
            del(objs)
            del(uq)

        testReallySlowItemInQueue()
            incrementTestCount()
            var/list/objs = new
            for(var/i=1,i<=10,i++)
                objs.Add(new /datum/uqTestDatum/fast(updateQueueTestCount.len))
            objs.Add(new /datum/uqTestDatum/reallySlow(updateQueueTestCount.len))
            for(var/i=1,i<=10,i++)
                objs.Add(new /datum/uqTestDatum/fast(updateQueueTestCount.len))
            var/datum/updateQueue/uq = new(objs)
            uq.Run()
            assertCountEquals(20, "test that update queue skips objects that are too slow to update")
            del(objs)
            del(uq)


datum/uqTestDatum
    var/testNum
    New(testNum)
        ..()
        src.testNum = testNum
    proc/update()
        updateQueueTestCount[testNum]++
    proc/lag(cycles)
        set background = 1
        for(var/i=0,i<cycles,)
            i++
datum/uqTestDatum/fast

datum/uqTestDatum/slow
    update()
        set background = 1
        var/start = world.timeofday
        while(world.timeofday - start < 5) // lag 4 deciseconds
            ..()

datum/uqTestDatum/reallySlow
    update()
        set background = 1
        var/start = world.timeofday
        while(world.timeofday - start < 300) // lag 30 seconds
            ..()

datum/uqTestDatum/crasher
    update()
        CRASH("I crashed! (I am supposed to crash XD)")
        ..() // This should do nothing lol
@@ -1,24 +0,0 @@
/**
 * testUpdateQueueProcess
 * This process is an example of a process using an updateQueue.
 * The datums updated by this process behave nicely and do not block.
 */

/datum/fastTestDatum/proc/wackyUpdateProcessName()
    sleep(prob(10)) // Pretty quick, usually instant

/datum/controller/process/testUpdateQueueProcess
    var/tmp/datum/updateQueue/updateQueueInstance
    var/tmp/list/testDatums = list()

/datum/controller/process/testUpdateQueueProcess/setup()
    name = "UpdateQueue Process"
    schedule_interval = 20 // every 2 seconds
    updateQueueInstance = new
    for(var/i = 1, i < 30, i++)
        testDatums.Add(new /datum/fastTestDatum)

/datum/controller/process/testUpdateQueueProcess/doWork()
    updateQueueInstance.init(testDatums, "wackyUpdateProcessName")
    updateQueueInstance.Run()
@@ -1,13 +0,0 @@
/**
 * testBadZombieProcess
 * This process is an example of a simple update loop process that hangs.
 */

/datum/controller/process/testZombieProcess/setup()
    name = "Zombie Process"
    schedule_interval = 30 // every 3 seconds

/datum/controller/process/testZombieProcess/doWork()
    for (var/i = 0, i < 1000, i++)
        sleep(1)
        scheck()
@@ -1,6 +1,7 @@
/datum/controller/process/air/setup()
    name = "air"
    schedule_interval = 20 // every 2 seconds
    start_delay = 4

    if(!air_master)
        air_master = new
@@ -1,10 +1,41 @@
// We manually initialize the alarm handlers instead of looping over all existing types
// to make it possible to write: camera.triggerAlarm() rather than alarm_manager.managers[datum/alarm_handler/camera].triggerAlarm() or a variant thereof.
/var/global/datum/alarm_handler/atmosphere/atmosphere_alarm = new()
/var/global/datum/alarm_handler/camera/camera_alarm = new()
/var/global/datum/alarm_handler/fire/fire_alarm = new()
/var/global/datum/alarm_handler/motion/motion_alarm = new()
/var/global/datum/alarm_handler/power/power_alarm = new()

// Alarm Manager, the manager for alarms.
var/datum/controller/process/alarm/alarm_manager

/datum/controller/process/alarm
    var/list/datum/alarm/all_handlers

/datum/controller/process/alarm/setup()
    name = "alarm"
    schedule_interval = 20 // every 2 seconds
    all_handlers = list(atmosphere_alarm, camera_alarm, fire_alarm, motion_alarm, power_alarm)
    alarm_manager = src

/datum/controller/process/alarm/doWork()
    alarm_manager.fire()
    for(var/datum/alarm_handler/AH in all_handlers)
        AH.process()
        SCHECK

/datum/controller/process/alarm/getStatName()
/datum/controller/process/alarm/proc/active_alarms()
    var/list/alarms = alarm_manager.active_alarms()
    var/list/all_alarms = new
    return ..()+"([alarms.len])"
    for(var/datum/alarm_handler/AH in all_handlers)
        var/list/alarms = AH.alarms
        all_alarms += alarms

    return all_alarms

/datum/controller/process/alarm/proc/number_of_active_alarms()
    var/list/alarms = active_alarms()
    return alarms.len

/datum/controller/process/alarm/statProcess()
    ..()
    stat(null, "[number_of_active_alarms()] alarm\s")
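The comment at the top of this file explains why the handlers are globals: call sites can reach them directly instead of indexing into a manager. The triggerAlarm()/clearAlarm() names below are an assumption for illustration; this diff does not show the /datum/alarm_handler API itself.

// Illustrative only: raising and later clearing an alarm straight through a
// global handler, instead of going through an alarm_manager lookup table.
/obj/machinery/camera/proc/example_cut_feed()  // hypothetical proc
    camera_alarm.triggerAlarm(loc, src)        // assumed signature
    spawn(300)
        camera_alarm.clearAlarm(loc, src)      // assumed signature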
@@ -1,7 +1,6 @@
var/datum/controller/process/chemistry/chemistryProcess

/datum/controller/process/chemistry
    var/tmp/datum/updateQueue/updateQueueInstance
    var/list/active_holders
    var/list/chemical_reactions
    var/list/chemical_reagents
@@ -9,20 +8,20 @@ var/datum/controller/process/chemistry/chemistryProcess
/datum/controller/process/chemistry/setup()
    name = "chemistry"
    schedule_interval = 20 // every 2 seconds
    updateQueueInstance = new
    chemistryProcess = src
    active_holders = list()
    chemical_reactions = chemical_reactions_list
    chemical_reagents = chemical_reagents_list

/datum/controller/process/chemistry/getStatName()
/datum/controller/process/chemistry/statProcess()
    return ..()+"([active_holders.len])"
    ..()
    stat(null, "[active_holders.len] reagent holder\s")

/datum/controller/process/chemistry/doWork()
    for(var/datum/reagents/holder in active_holders)
        if(!holder.process_reactions())
            active_holders -= holder
        scheck()
        SCHECK

/datum/controller/process/chemistry/proc/mark_for_update(var/datum/reagents/holder)
    if(holder in active_holders)
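Holders enter the doWork() loop above through mark_for_update(); a representative call site would look something like the sketch below. The hook name is hypothetical, only chemistryProcess and mark_for_update() come from this diff.

// Illustrative only: waking the chemistry process for a holder that just
// gained reagents, so the next doWork() pass calls process_reactions() on it.
/datum/reagents/proc/example_on_add()  // hypothetical hook
    if(chemistryProcess)
        chemistryProcess.mark_for_update(src)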
@@ -1,14 +0,0 @@
/datum/controller/process/disease
    var/tmp/datum/updateQueue/updateQueueInstance

/datum/controller/process/disease/setup()
    name = "disease"
    schedule_interval = 20 // every 2 seconds
    updateQueueInstance = new

/datum/controller/process/disease/doWork()
    updateQueueInstance.init(active_diseases, "process")
    updateQueueInstance.Run()

/datum/controller/process/disease/getStatName()
    return ..()+"([active_diseases.len])"
@@ -1,13 +1,18 @@
// The time a datum was destroyed by the GC, or null if it hasn't been
/datum/var/gcDestroyed

#define GC_COLLECTIONS_PER_RUN 150
#define GC_COLLECTION_TIMEOUT (30 SECONDS)
#define GC_FORCE_DEL_PER_RUN 30

var/datum/controller/process/garbage_collector/garbage_collector
var/list/delayed_garbage = list()

/datum/controller/process/garbage_collector
    var/garbage_collect = 1 // Whether or not to actually do work
    var/collection_timeout = 300 //deciseconds to wait to let running procs finish before we just say fuck it and force del() the object
    var/total_dels = 0 // number of total del()'s
    var/max_checks_multiplier = 5 //multiplier (per-decisecond) for calculating max number of tests per tick. These tests check if our GC'd objects are actually GC'd
    var/tick_dels = 0 // number of del()'s we've done this tick
    var/max_forcedel_multiplier = 1 //multiplier (per-decisecond) for calculating max number of force del() calls per tick.
    var/soft_dels = 0

    var/dels = 0 // number of del()'s we've done this tick
    var/hard_dels = 0 // number of hard dels in total
    var/list/destroyed = list() // list of refID's of things that should be garbage collected
                                // refID's are associated with the time at which they time out and need to be manually del()
@@ -18,7 +23,8 @@ var/list/delayed_garbage = list()

/datum/controller/process/garbage_collector/setup()
    name = "garbage"
    schedule_interval = 2 SECONDS
    schedule_interval = 10 SECONDS
    start_delay = 3

    if(!garbage_collector)
        garbage_collector = src
@@ -36,10 +42,10 @@ world/loop_checks = 0
    if(!garbage_collect)
        return

    dels = 0
    tick_dels = 0
    var/time_to_kill = world.time - collection_timeout // Anything qdel() but not GC'd BEFORE this time needs to be manually del()
    var/time_to_kill = world.time - GC_COLLECTION_TIMEOUT
    var/checkRemain = max_checks_multiplier * schedule_interval
    var/checkRemain = GC_COLLECTIONS_PER_RUN
    var/maxDels = max_forcedel_multiplier * schedule_interval
    var/remaining_force_dels = GC_FORCE_DEL_PER_RUN

    #ifdef GC_FINDREF
    var/list/searching = list()
@@ -58,7 +64,7 @@ world/loop_checks = 0
        if(A.loc != null)
            testing("GC: [A] | [A.type] is located in [A.loc] instead of null")
        if(A.contents.len)
            testing("GC: [A] | [A.type] has contents: [jointext(A.contents)]")
            testing("GC: [A] | [A.type] has contents: [list2text(A.contents)]")
    if(searching.len)
        for(var/atom/D in world)
            LookForRefs(D, searching)
@@ -67,7 +73,7 @@ world/loop_checks = 0
    #endif

    while(destroyed.len && --checkRemain >= 0)
        if(dels >= maxDels)
        if(remaining_force_dels <= 0)
            #ifdef GC_DEBUG
            testing("GC: Reached max force dels per tick [dels] vs [maxDels]")
            #endif
@@ -88,13 +94,22 @@ world/loop_checks = 0
                testing("GC: -- \ref[A] | [A.type] was unable to be GC'd and was deleted --")
                logging["[A.type]"]++
                del(A)
                ++dels
                ++hard_dels
                hard_dels++
                #ifdef GC_DEBUG
                remaining_force_dels--
            else
                #ifdef GC_DEBUG
                testing("GC: [refID] properly GC'd at [world.time] with timeout [GCd_at_time]")
                #endif
                soft_dels++
            tick_dels++
            total_dels++
        destroyed.Cut(1, 2)
        SCHECK

#undef GC_FORCE_DEL_PER_TICK
#undef GC_COLLECTION_TIMEOUT
#undef GC_COLLECTIONS_PER_TICK

#ifdef GC_FINDREF
/datum/controller/process/garbage_collector/proc/LookForRefs(var/datum/D, var/list/targ)
@@ -132,8 +147,11 @@ world/loop_checks = 0
    destroyed -= "\ref[A]" // Removing any previous references that were GC'd so that the current object will be at the end of the list.
    destroyed["\ref[A]"] = world.time

/datum/controller/process/garbage_collector/getStatName()
/datum/controller/process/garbage_collector/statProcess()
    return ..()+"([garbage_collector.destroyed.len]/[garbage_collector.dels]/[garbage_collector.hard_dels])"
    ..()
    stat(null, "[garbage_collect ? "On" : "Off"], [destroyed.len] queued")
    stat(null, "Dels: [total_dels], [soft_dels] soft, [hard_dels] hard, [tick_dels] last run")


// Tests if an atom has been deleted.
/proc/deleted(atom/A)
@@ -149,7 +167,7 @@ world/loop_checks = 0
    crash_with("qdel() passed object of type [A.type]. qdel() can only handle /datum types.")
    del(A)
    if(garbage_collector)
        garbage_collector.dels++
        garbage_collector.total_dels++
        garbage_collector.hard_dels++
    else if(isnull(A.gcDestroyed))
        // Let our friend know they're about to get collected
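The qdel() path above is the intended entry point for callers; a sketch of the expected usage follows, with a hypothetical type standing in for real call sites.

// Illustrative only: handing a datum to the collector instead of hard-deleting it.
/obj/item/example_widget/proc/shut_down()  // hypothetical type and proc
    // qdel() stamps gcDestroyed and queues "\ref[src]" in garbage_collector.destroyed;
    // anything still referenced after GC_COLLECTION_TIMEOUT is force-del()'d,
    // subject to the GC_FORCE_DEL_PER_RUN cap per run.
    qdel(src)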
@@ -10,4 +10,4 @@
        log_access("AFK: [key_name(C)]")
        C << "<SPAN CLASS='warning'>You have been inactive for more than [config.kick_inactive] minute\s and have been disconnected.</SPAN>"
        del(C) // Don't qdel, cannot override finalize_qdel behaviour for clients.
    scheck()
    SCHECK
@@ -3,6 +3,7 @@
/datum/controller/process/machinery/setup()
    name = "machinery"
    schedule_interval = 20 // every 2 seconds
    start_delay = 12

/datum/controller/process/machinery/doWork()
    internal_sort()
@@ -19,10 +20,6 @@
/datum/controller/process/machinery/proc/internal_process_machinery()
    for(var/obj/machinery/M in machines)
        if(M && !M.gcDestroyed)
            #ifdef PROFILE_MACHINES
            var/time_start = world.timeofday
            #endif

            if(M.process() == PROCESS_KILL)
                //M.inMachineList = 0 We don't use this debugging function
                machines.Remove(M)
@@ -31,22 +28,13 @@
            if(M && M.use_power)
                M.auto_use_power()

            #ifdef PROFILE_MACHINES
            SCHECK
            var/time_end = world.timeofday

            if(!(M.type in machine_profiling))
                machine_profiling[M.type] = 0

            machine_profiling[M.type] += (time_end - time_start)
            #endif

            scheck()

/datum/controller/process/machinery/proc/internal_process_power()
    for(var/datum/powernet/powerNetwork in powernets)
        if(istype(powerNetwork) && !powerNetwork.disposed)
        if(istype(powerNetwork) && isnull(powerNetwork.gcDestroyed))
            powerNetwork.reset()
            scheck()
            SCHECK
            continue

        powernets.Remove(powerNetwork)
@@ -56,16 +44,20 @@
    for(var/obj/item/I in processing_power_items)
        if(!I.pwr_drain()) // 0 = Process Kill, remove from processing list.
            processing_power_items.Remove(I)
        scheck()
        SCHECK

/datum/controller/process/machinery/proc/internal_process_pipenets()
    for(var/datum/pipe_network/pipeNetwork in pipe_networks)
        if(istype(pipeNetwork) && !pipeNetwork.disposed)
        if(istype(pipeNetwork) && isnull(pipeNetwork.gcDestroyed))
            pipeNetwork.process()
            scheck()
            SCHECK
            continue

        pipe_networks.Remove(pipeNetwork)

/datum/controller/process/machinery/getStatName()
/datum/controller/process/machinery/statProcess()
    return ..()+"(MCH:[machines.len] PWR:[powernets.len] PIP:[pipe_networks.len])"
    ..()
    stat(null, "[machines.len] machines")
    stat(null, "[powernets.len] powernets")
    stat(null, "[pipe_networks.len] pipenets")
    stat(null, "[processing_power_items.len] power item\s")
@@ -4,20 +4,26 @@
/datum/controller/process/mob/setup()
    name = "mob"
    schedule_interval = 20 // every 2 seconds
    updateQueueInstance = new
    start_delay = 16

/datum/controller/process/mob/started()
    ..()
    if(!updateQueueInstance)
    if(!mob_list)
        mob_list = list()
    else if(mob_list.len)
        updateQueueInstance = new

/datum/controller/process/mob/doWork()
    if(updateQueueInstance)
    for(last_object in mob_list)
        updateQueueInstance.init(mob_list, "Life")
        var/mob/M = last_object
        updateQueueInstance.Run()
        if(isnull(M.gcDestroyed))
            try
                M.Life()
            catch(var/exception/e)
                catchException(e, M)
            SCHECK
        else
            catchBadType(M)
            mob_list -= M

/datum/controller/process/mob/getStatName()
/datum/controller/process/mob/statProcess()
    return ..()+"([mob_list.len])"
    ..()
    stat(null, "[mob_list.len] mobs")
@@ -1,14 +1,19 @@
/datum/controller/process/nanoui
    var/tmp/datum/updateQueue/updateQueueInstance

/datum/controller/process/nanoui/setup()
    name = "nanoui"
    schedule_interval = 10 // every 1 second
    schedule_interval = 20 // every 2 seconds
    updateQueueInstance = new

/datum/controller/process/nanoui/statProcess()
    ..()
    stat(null, "[nanomanager.processing_uis.len] UIs")

/datum/controller/process/nanoui/doWork()
    updateQueueInstance.init(nanomanager.processing_uis, "process")
    for(last_object in nanomanager.processing_uis)
        updateQueueInstance.Run()
        var/datum/nanoui/NUI = last_object
        if(istype(NUI) && isnull(NUI.gcDestroyed))
/datum/controller/process/nanoui/getStatName()
            try
    return ..()+"([nanomanager.processing_uis.len])"
                NUI.process()
            catch(var/exception/e)
                catchException(e, NUI)
        else
            catchBadType(NUI)
            nanomanager.processing_uis -= NUI
@@ -1,24 +1,26 @@
var/global/list/object_profiling = list()
/datum/controller/process/obj
    var/tmp/datum/updateQueue/updateQueueInstance

/datum/controller/process/obj/setup()
    name = "obj"
    schedule_interval = 20 // every 2 seconds
    updateQueueInstance = new
    start_delay = 8

/datum/controller/process/obj/started()
    ..()
    if(!updateQueueInstance)
    if(!processing_objects)
        processing_objects = list()
    else if(processing_objects.len)
        updateQueueInstance = new

/datum/controller/process/obj/doWork()
    if(updateQueueInstance)
    for(last_object in processing_objects)
        updateQueueInstance.init(processing_objects, "process")
        var/datum/O = last_object
        updateQueueInstance.Run()
        if(isnull(O.gcDestroyed))
            try
                O:process()
            catch(var/exception/e)
                catchException(e, O)
            SCHECK
        else
            catchBadType(O)
            processing_objects -= O

/datum/controller/process/obj/getStatName()
/datum/controller/process/obj/statProcess()
    return ..()+"([processing_objects.len])"
    ..()
    stat(null, "[processing_objects.len] objects")
@@ -8,7 +8,8 @@ var/global/list/turf/processing_turfs = list()
    for(var/turf/T in processing_turfs)
        if(T.process() == PROCESS_KILL)
            processing_turfs.Remove(T)
        scheck()
        SCHECK

/datum/controller/process/turf/getStatName()
/datum/controller/process/turf/statProcess()
    return ..()+"([processing_turfs.len])"
    ..()
    stat(null, "[processing_turfs.len] turf\s")
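Both the machinery and turf loops rely on the PROCESS_KILL return convention shown above; a processed object drops itself from its list by returning that value. Sketch below uses a hypothetical subtype and state var.

// Illustrative only: an object asks to be removed by returning PROCESS_KILL.
/obj/machinery/example_heater/process()  // hypothetical machine
    if(!operating)                       // assumed state var
        return PROCESS_KILL              // internal_process_machinery() will Remove() it
    return 1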
@@ -21,6 +21,7 @@ var/list/gamemode_cache = list()
    var/log_pda = 0 // log pda messages
    var/log_hrefs = 0 // logs all links clicked in-game. Could be used for debugging and tracking down exploits
    var/log_runtime = 0 // logs world.log to a file
    var/log_world_output = 0 // log world.log << messages
    var/sql_enabled = 0 // for sql switching
    var/allow_admin_ooccolor = 0 // Allows admins with relevant permissions to have their own ooc colour
    var/allow_vote_restart = 0 // allow votes to restart
@@ -327,6 +328,9 @@ var/list/gamemode_cache = list()
                if ("log_pda")
                    config.log_pda = 1

                if ("log_world_output")
                    config.log_world_output = 1

                if ("log_hrefs")
                    config.log_hrefs = 1
@@ -1,30 +0,0 @@
// We manually initialize the alarm handlers instead of looping over all existing types
// to make it possible to write: camera.triggerAlarm() rather than alarm_manager.managers[datum/alarm_handler/camera].triggerAlarm() or a variant thereof.
/var/global/datum/alarm_handler/atmosphere/atmosphere_alarm = new()
/var/global/datum/alarm_handler/camera/camera_alarm = new()
/var/global/datum/alarm_handler/fire/fire_alarm = new()
/var/global/datum/alarm_handler/motion/motion_alarm = new()
/var/global/datum/alarm_handler/power/power_alarm = new()

/datum/subsystem/alarm
    name = "Alarm"
    var/list/datum/alarm/all_handlers

/datum/subsystem/alarm/New()
    all_handlers = list(atmosphere_alarm, camera_alarm, fire_alarm, motion_alarm, power_alarm)

/datum/subsystem/alarm/fire()
    for(var/datum/alarm_handler/AH in all_handlers)
        AH.process()

/datum/subsystem/alarm/proc/active_alarms()
    var/list/all_alarms = new
    for(var/datum/alarm_handler/AH in all_handlers)
        var/list/alarms = AH.alarms
        all_alarms += alarms

    return all_alarms

/datum/subsystem/alarm/proc/number_of_active_alarms()
    var/list/alarms = active_alarms()
    return alarms.len
@@ -26,7 +26,7 @@ var/global/list/random_junk
    if(prob(25))
        return /obj/effect/decal/cleanable/generic
    if(!random_junk)
        random_junk = subtypes(/obj/item/trash)
        random_junk = subtypesof(/obj/item/trash)
        random_junk += typesof(/obj/item/weapon/cigbutt)
        random_junk += /obj/effect/decal/cleanable/spiderling_remains
        random_junk += /obj/effect/decal/remains/mouse
@@ -118,7 +118,6 @@ var/join_motd = null

var/datum/nanomanager/nanomanager = new() // NanoManager, the manager for Nano UIs.
var/datum/event_manager/event_manager = new() // Event Manager, the manager for events.
var/datum/subsystem/alarm/alarm_manager = new() // Alarm Manager, the manager for alarms.

var/list/awaydestinations = list() // Away missions. A list of landmarks that the warpgate can take you to.
@@ -5,7 +5,7 @@ var/list/_client_preferences_by_type
/proc/get_client_preferences()
    if(!_client_preferences)
        _client_preferences = list()
        for(var/ct in subtypes(/datum/client_preference))
        for(var/ct in subtypesof(/datum/client_preference))
            var/datum/client_preference/client_type = ct
            if(initial(client_type.description))
                _client_preferences += new client_type()
@@ -22,7 +22,7 @@
        L.force_update = 0
        L.needs_update = 0

        scheck()
        SCHECK

    var/list/lighting_update_overlays_old = lighting_update_overlays //Same as above.
    lighting_update_overlays = null //Same as above
@@ -32,4 +32,4 @@
        O.update_overlay()
        O.needs_update = 0

        scheck()
        SCHECK
@@ -56,8 +56,8 @@
/mob/living/carbon/human/Stat()
    ..()
    if(statpanel("Status"))
        stat(null, "Intent: [a_intent]")
        stat("Intent:", "[a_intent]")
        stat(null, "Move Mode: [m_intent]")
        stat("Move Mode:", "[m_intent]")
        if(emergency_shuttle)
            var/eta_status = emergency_shuttle.get_status_panel_eta()
            if(eta_status)
@@ -661,15 +661,13 @@

    if(client.holder)
        if(statpanel("Status"))
            stat("Location:","([x], [y], [z])")
            stat("Location:", "([x], [y], [z]) [loc]")
        if(statpanel("Processes"))
            stat("CPU:","[world.cpu]")
            stat("Instances:","[world.contents.len]")
            if(processScheduler && processScheduler.getIsRunning())
            for(var/datum/controller/process/P in processScheduler.processes)
            if(statpanel("Processes"))
                stat(P.getStatName(), P.getTickTime())
                if(processScheduler)
                else
                    processScheduler.statProcesses()
                    stat("processScheduler is not running.")

    if(listed_turf && client)
        if(!TurfAdjacent(listed_turf))
@@ -107,7 +107,7 @@ var/list/lunchables_ethanol_reagents_ = list(/datum/reagent/ethanol/acid_spit,

/proc/init_lunchable_reagent_list(var/list/banned_reagents, var/reagent_types)
    . = list()
    for(var/reagent_type in subtypes(reagent_types))
    for(var/reagent_type in subtypesof(reagent_types))
        if(reagent_type in banned_reagents)
            continue
        var/datum/reagent/reagent = reagent_type
@@ -124,7 +124,7 @@ research holder datum.

// A simple helper proc to find the name of a tech with a given ID.
/proc/CallTechName(var/ID)
    for(var/T in subtypes(/datum/tech))
    for(var/T in subtypesof(/datum/tech))
        var/datum/tech/check_tech = T
        if(initial(check_tech.id) == ID)
            return initial(check_tech.name)
@@ -58,6 +58,9 @@ LOG_ATTACK
## log pda messages
LOG_PDA

## log world.log messages
# LOG_WORLD_OUTPUT

## log all Topic() calls (for use by coders in tracking down Topic issues)
# LOG_HREFS
@@ -21,6 +21,7 @@
#include "code\__defines\admin.dm"
#include "code\__defines\appearance.dm"
#include "code\__defines\atmos.dm"
#include "code\__defines\btime.dm"
#include "code\__defines\chemistry.dm"
#include "code\__defines\damage_organs.dm"
#include "code\__defines\dna.dm"
@@ -31,6 +32,7 @@
#include "code\__defines\math_physics.dm"
#include "code\__defines\misc.dm"
#include "code\__defines\mobs.dm"
#include "code\__defines\process_scheduler.dm"
#include "code\__defines\research.dm"
#include "code\__defines\species_languages.dm"
#include "code\__defines\targeting.dm"
@@ -133,7 +135,6 @@
#include "code\controllers\Processes\air.dm"
#include "code\controllers\Processes\alarm.dm"
#include "code\controllers\Processes\chemistry.dm"
#include "code\controllers\Processes\disease.dm"
#include "code\controllers\Processes\emergencyShuttle.dm"
#include "code\controllers\Processes\event.dm"
#include "code\controllers\Processes\garbage.dm"
@@ -148,13 +149,9 @@
#include "code\controllers\Processes\ticker.dm"
#include "code\controllers\Processes\turf.dm"
#include "code\controllers\Processes\vote.dm"
#include "code\controllers\ProcessScheduler\core\_define.dm"
#include "code\controllers\ProcessScheduler\core\_stubs.dm"
#include "code\controllers\ProcessScheduler\core\process.dm"
#include "code\controllers\ProcessScheduler\core\processScheduler.dm"
#include "code\controllers\ProcessScheduler\core\updateQueue.dm"
#include "code\controllers\ProcessScheduler\core\updateQueueWorker.dm"
#include "code\controllers\subsystem\alarms.dm"
#include "code\datums\ai_law_sets.dm"
#include "code\datums\ai_laws.dm"
#include "code\datums\browser.dm"