Mirror of https://github.com/SPLURT-Station/S.P.L.U.R.T-Station-13.git, synced 2025-12-10 09:54:52 +00:00

Commit: subsystem upgrades
.vscode/extensions.json (vendored): 11
@@ -1,9 +1,10 @@
|
|||||||
{
|
{
|
||||||
"recommendations": [
|
"recommendations": [
|
||||||
"gbasood.byond-dm-language-support",
|
"gbasood.byond-dm-language-support",
|
||||||
"platymuus.dm-langclient",
|
"platymuus.dm-langclient",
|
||||||
"EditorConfig.EditorConfig",
|
"EditorConfig.EditorConfig",
|
||||||
"arcanis.vscode-zipfs",
|
"arcanis.vscode-zipfs",
|
||||||
"dbaeumer.vscode-eslint"
|
"dbaeumer.vscode-eslint",
|
||||||
]
|
"kevinkyang.auto-comment-blocks"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
#define EXTOOLS (world.system_type == MS_WINDOWS ? "byond-extools.dll" : "libbyond-extools.so")
|
|
||||||
@@ -284,11 +284,12 @@ GLOBAL_LIST_INIT(atmos_adjacent_savings, list(0,0))
|
|||||||
GLOBAL_VAR(atmos_extools_initialized) // this must be an uninitialized (null) one or init_monstermos will be called twice because reasons
|
GLOBAL_VAR(atmos_extools_initialized) // this must be an uninitialized (null) one or init_monstermos will be called twice because reasons
|
||||||
#define ATMOS_EXTOOLS_CHECK if(!GLOB.atmos_extools_initialized){\
|
#define ATMOS_EXTOOLS_CHECK if(!GLOB.atmos_extools_initialized){\
|
||||||
GLOB.atmos_extools_initialized=TRUE;\
|
GLOB.atmos_extools_initialized=TRUE;\
|
||||||
if(fexists(EXTOOLS)){\
|
var/extools = world.GetConfig("env", "EXTOOLS_DLL") || (world.system_type == MS_WINDOWS ? "./byond-extools.dll" : "./libbyond-extools.so");\
|
||||||
var/result = call(EXTOOLS,"init_monstermos")();\
|
if(fexists(extools)){\
|
||||||
|
var/result = call(extools,"init_monstermos")();\
|
||||||
if(result != "ok") {CRASH(result);}\
|
if(result != "ok") {CRASH(result);}\
|
||||||
} else {\
|
} else {\
|
||||||
CRASH("[EXTOOLS] does not exist!");\
|
CRASH("[extools] does not exist!");\
|
||||||
}\
|
}\
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -29,5 +29,6 @@
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
/world/proc/enable_debugger()
|
/world/proc/enable_debugger()
|
||||||
if (fexists(EXTOOLS))
|
var/dll = world.GetConfig("env", "EXTOOLS_DLL")
|
||||||
call(EXTOOLS, "debug_initialize")()
|
if (dll)
|
||||||
|
call(dll, "debug_initialize")()
|
||||||
|
|||||||
@@ -1,40 +1,90 @@
|
|||||||
//Update this whenever the db schema changes
|
//! Defines for subsystems and overlays
|
||||||
//make sure you add an update to the schema_version stable in the db changelog
|
//!
|
||||||
|
//! Lots of important stuff in here, make sure you have your brain switched on
|
||||||
|
//! when editing this file
|
||||||
|
|
||||||
|
//! ## DB defines
|
||||||
|
/**
|
||||||
|
* DB major schema version
|
||||||
|
*
|
||||||
|
* Update this whenever the db schema changes
|
||||||
|
*
|
||||||
|
* make sure you add an update to the schema_version stable in the db changelog
|
||||||
|
*/
|
||||||
#define DB_MAJOR_VERSION 4
|
#define DB_MAJOR_VERSION 4
|
||||||
|
|
||||||
|
/**
|
||||||
|
* DB minor schema version
|
||||||
|
*
|
||||||
|
* Update this whenever the db schema changes
|
||||||
|
*
|
||||||
|
* make sure you add an update to the schema_version stable in the db changelog
|
||||||
|
*/
|
||||||
#define DB_MINOR_VERSION 7
|
#define DB_MINOR_VERSION 7
|
||||||
|
|
||||||
//Timing subsystem
|
//! ## Timing subsystem
|
||||||
//Don't run if there is an identical unique timer active
|
/**
|
||||||
//if the arguments to addtimer are the same as an existing timer, it doesn't create a new timer, and returns the id of the existing timer
|
* Don't run if there is an identical unique timer active
|
||||||
|
*
|
||||||
|
* if the arguments to addtimer are the same as an existing timer, it doesn't create a new timer,
|
||||||
|
* and returns the id of the existing timer
|
||||||
|
*/
|
||||||
#define TIMER_UNIQUE (1<<0)
|
#define TIMER_UNIQUE (1<<0)
|
||||||
//For unique timers: Replace the old timer rather then not start this one
|
|
||||||
|
///For unique timers: Replace the old timer rather than not start this one
|
||||||
#define TIMER_OVERRIDE (1<<1)
|
#define TIMER_OVERRIDE (1<<1)
|
||||||
//Timing should be based on how timing progresses on clients, not the sever.
|
|
||||||
// tracking this is more expensive,
|
/**
|
||||||
// should only be used in conjuction with things that have to progress client side, such as animate() or sound()
|
* Timing should be based on how timing progresses on clients, not the server.
|
||||||
|
*
|
||||||
|
* Tracking this is more expensive,
|
||||||
|
* should only be used in conjunction with things that have to progress client side, such as
|
||||||
|
* animate() or sound()
|
||||||
|
*/
|
||||||
#define TIMER_CLIENT_TIME (1<<2)
|
#define TIMER_CLIENT_TIME (1<<2)
|
||||||
//Timer can be stopped using deltimer()
|
|
||||||
|
///Timer can be stopped using deltimer()
|
||||||
#define TIMER_STOPPABLE (1<<3)
|
#define TIMER_STOPPABLE (1<<3)
|
||||||
//To be used with TIMER_UNIQUE
|
|
||||||
//prevents distinguishing identical timers with the wait variable
|
///prevents distinguishing identical timers with the wait variable
|
||||||
|
///
|
||||||
|
///To be used with TIMER_UNIQUE
|
||||||
#define TIMER_NO_HASH_WAIT (1<<4)
|
#define TIMER_NO_HASH_WAIT (1<<4)
|
||||||
//Loops the timer repeatedly until qdeleted
|
|
||||||
//In most cases you want a subsystem instead
|
///Loops the timer repeatedly until qdeleted
|
||||||
|
///
|
||||||
|
///In most cases you want a subsystem instead, so don't use this unless you have a good reason
|
||||||
#define TIMER_LOOP (1<<5)
|
#define TIMER_LOOP (1<<5)
|
||||||
|
|
||||||
#define TIMER_NO_INVOKE_WARNING 600 //number of byond ticks that are allowed to pass before the timer subsystem thinks it hung on something
|
///Empty ID define
|
||||||
|
|
||||||
#define TIMER_ID_NULL -1
|
#define TIMER_ID_NULL -1
|
||||||
|
|
||||||
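For context, a minimal usage sketch of how the timer flags above are combined when scheduling a timer (assumed example; the detonate proc and the 10-second wait are hypothetical, not part of this commit):

	// Schedule (or refresh) a single pending detonation; TIMER_STOPPABLE lets deltimer() cancel it later.
	addtimer(CALLBACK(src, .proc/detonate), 10 SECONDS, TIMER_UNIQUE | TIMER_OVERRIDE | TIMER_STOPPABLE)
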
#define INITIALIZATION_INSSATOMS 0 //New should not call Initialize
|
//! ## Initialization subsystem
|
||||||
#define INITIALIZATION_INNEW_MAPLOAD 2 //New should call Initialize(TRUE)
|
|
||||||
#define INITIALIZATION_INNEW_REGULAR 1 //New should call Initialize(FALSE)
|
|
||||||
|
|
||||||
#define INITIALIZE_HINT_NORMAL 0 //Nothing happens
|
///New should not call Initialize
|
||||||
#define INITIALIZE_HINT_LATELOAD 1 //Call LateInitialize
|
#define INITIALIZATION_INSSATOMS 0
|
||||||
#define INITIALIZE_HINT_QDEL 2 //Call qdel on the atom
|
///New should call Initialize(TRUE)
|
||||||
|
#define INITIALIZATION_INNEW_MAPLOAD 2
|
||||||
|
///New should call Initialize(FALSE)
|
||||||
|
#define INITIALIZATION_INNEW_REGULAR 1
|
||||||
|
|
||||||
//type and all subtypes should always call Initialize in New()
|
//! ### Initialization hints
|
||||||
|
|
||||||
|
///Nothing happens
|
||||||
|
#define INITIALIZE_HINT_NORMAL 0
|
||||||
|
/**
|
||||||
|
* call LateInitialize at the end of all atom Initialization
|
||||||
|
*
|
||||||
|
* The item will be added to the late_loaders list, this is iterated over after
|
||||||
|
* initialization of subsystems is complete and calls LateInitialize on the atom
|
||||||
|
* see [this file for the LateInitialize proc](atom.html#proc/LateInitialize)
|
||||||
|
*/
|
||||||
|
#define INITIALIZE_HINT_LATELOAD 1
|
||||||
|
|
||||||
|
///Call qdel on the atom after initialization
|
||||||
|
#define INITIALIZE_HINT_QDEL 2
|
||||||
|
|
||||||
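A brief sketch of how the lateload hint described above is normally used (assumed pattern; /obj/machinery/example and link_to_network() are hypothetical): Initialize() returns INITIALIZE_HINT_LATELOAD and the cross-atom setup moves to LateInitialize(), which runs once every atom has finished initializing.

/obj/machinery/example/Initialize(mapload)
	. = ..()
	return INITIALIZE_HINT_LATELOAD

/obj/machinery/example/LateInitialize()
	// All other atoms have completed Initialize() by this point, so lookups are safe.
	link_to_network() // hypothetical helper
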
|
///type and all subtypes should always immediately call Initialize in New()
|
||||||
#define INITIALIZE_IMMEDIATE(X) ##X/New(loc, ...){\
|
#define INITIALIZE_IMMEDIATE(X) ##X/New(loc, ...){\
|
||||||
..();\
|
..();\
|
||||||
if(!(flags_1 & INITIALIZED_1)) {\
|
if(!(flags_1 & INITIALIZED_1)) {\
|
||||||
@@ -47,35 +97,40 @@
|
|||||||
// Subsystems shutdown in the reverse of the order they initialize in
|
// Subsystems shutdown in the reverse of the order they initialize in
|
||||||
// The numbers just define the ordering, they are meaningless otherwise.
|
// The numbers just define the ordering, they are meaningless otherwise.
|
||||||
|
|
||||||
#define INIT_ORDER_PROFILER 100
|
#define INIT_ORDER_PROFILER 102
|
||||||
#define INIT_ORDER_FAIL2TOPIC 99
|
#define INIT_ORDER_FAIL2TOPIC 101
|
||||||
#define INIT_ORDER_TITLE 98
|
#define INIT_ORDER_TITLE 100
|
||||||
#define INIT_ORDER_GARBAGE 95
|
#define INIT_ORDER_GARBAGE 99
|
||||||
#define INIT_ORDER_DBCORE 94
|
#define INIT_ORDER_DBCORE 95
|
||||||
#define INIT_ORDER_STATPANELS 93
|
#define INIT_ORDER_BLACKBOX 94
|
||||||
#define INIT_ORDER_BLACKBOX 92
|
#define INIT_ORDER_SERVER_MAINT 93
|
||||||
#define INIT_ORDER_SERVER_MAINT 91
|
#define INIT_ORDER_INPUT 85
|
||||||
#define INIT_ORDER_INPUT 90
|
#define INIT_ORDER_SOUNDS 83
|
||||||
#define INIT_ORDER_SOUNDS 85
|
#define INIT_ORDER_INSTRUMENTS 82
|
||||||
#define INIT_ORDER_VIS 80
|
#define INIT_ORDER_VIS 80
|
||||||
|
// #define INIT_ORDER_ACHIEVEMENTS 77
|
||||||
#define INIT_ORDER_RESEARCH 75
|
#define INIT_ORDER_RESEARCH 75
|
||||||
#define INIT_ORDER_EVENTS 70
|
#define INIT_ORDER_EVENTS 70
|
||||||
#define INIT_ORDER_JOBS 65
|
#define INIT_ORDER_JOBS 65
|
||||||
#define INIT_ORDER_QUIRKS 60
|
#define INIT_ORDER_QUIRKS 60
|
||||||
#define INIT_ORDER_TICKER 55
|
#define INIT_ORDER_TICKER 55
|
||||||
#define INIT_ORDER_INSTRUMENTS 53
|
// #define INIT_ORDER_TCG 55
|
||||||
#define INIT_ORDER_MAPPING 50
|
#define INIT_ORDER_MAPPING 50
|
||||||
#define INIT_ORDER_ECONOMY 45
|
// #define INIT_ORDER_TIMETRACK 47
|
||||||
#define INIT_ORDER_NETWORKS 40
|
#define INIT_ORDER_NETWORKS 45
|
||||||
|
#define INIT_ORDER_ECONOMY 40
|
||||||
#define INIT_ORDER_HOLODECK 35
|
#define INIT_ORDER_HOLODECK 35
|
||||||
|
// #define INIT_ORDER_OUTPUTS 35
|
||||||
#define INIT_ORDER_ATOMS 30
|
#define INIT_ORDER_ATOMS 30
|
||||||
#define INIT_ORDER_LANGUAGE 25
|
#define INIT_ORDER_LANGUAGE 25
|
||||||
#define INIT_ORDER_MACHINES 20
|
#define INIT_ORDER_MACHINES 20
|
||||||
#define INIT_ORDER_CIRCUIT 15
|
#define INIT_ORDER_CIRCUIT 15
|
||||||
|
// #define INIT_ORDER_SKILLS 15
|
||||||
#define INIT_ORDER_TIMER 1
|
#define INIT_ORDER_TIMER 1
|
||||||
#define INIT_ORDER_DEFAULT 0
|
#define INIT_ORDER_DEFAULT 0
|
||||||
#define INIT_ORDER_AIR -1
|
#define INIT_ORDER_AIR -1
|
||||||
#define INIT_ORDER_AIR_TURFS -2
|
#define INIT_ORDER_AIR_TURFS -2
|
||||||
|
#define INIT_ORDER_PERSISTENCE -2 //before assets because some assets take data from SSPersistence
|
||||||
#define INIT_ORDER_MINIMAP -3
|
#define INIT_ORDER_MINIMAP -3
|
||||||
#define INIT_ORDER_ASSETS -4
|
#define INIT_ORDER_ASSETS -4
|
||||||
#define INIT_ORDER_ICON_SMOOTHING -5
|
#define INIT_ORDER_ICON_SMOOTHING -5
|
||||||
@@ -86,7 +141,9 @@
|
|||||||
#define INIT_ORDER_SHUTTLE -21
|
#define INIT_ORDER_SHUTTLE -21
|
||||||
#define INIT_ORDER_MINOR_MAPPING -40
|
#define INIT_ORDER_MINOR_MAPPING -40
|
||||||
#define INIT_ORDER_PATH -50
|
#define INIT_ORDER_PATH -50
|
||||||
#define INIT_ORDER_PERSISTENCE -95
|
// #define INIT_ORDER_DISCORD -60
|
||||||
|
// #define INIT_ORDER_EXPLOSIONS -69
|
||||||
|
#define INIT_ORDER_STATPANELS -98
|
||||||
#define INIT_ORDER_DEMO -99 // To avoid a bunch of changes related to initialization being written, do this last
|
#define INIT_ORDER_DEMO -99 // To avoid a bunch of changes related to initialization being written, do this last
|
||||||
#define INIT_ORDER_CHAT -100 //Should be last to ensure chat remains smooth during init.
|
#define INIT_ORDER_CHAT -100 //Should be last to ensure chat remains smooth during init.
|
||||||
|
|
||||||
@@ -102,6 +159,7 @@
|
|||||||
#define FIRE_PRIORITY_GARBAGE 15
|
#define FIRE_PRIORITY_GARBAGE 15
|
||||||
#define FIRE_PRIORITY_WET_FLOORS 20
|
#define FIRE_PRIORITY_WET_FLOORS 20
|
||||||
#define FIRE_PRIORITY_AIR 20
|
#define FIRE_PRIORITY_AIR 20
|
||||||
|
#define FIRE_PRIORITY_NPC 20
|
||||||
#define FIRE_PRIORITY_PROCESS 25
|
#define FIRE_PRIORITY_PROCESS 25
|
||||||
#define FIRE_PRIORITY_THROWING 25
|
#define FIRE_PRIORITY_THROWING 25
|
||||||
#define FIRE_PRIORITY_SPACEDRIFT 30
|
#define FIRE_PRIORITY_SPACEDRIFT 30
|
||||||
@@ -116,7 +174,7 @@
|
|||||||
#define FIRE_PRIORITY_AIR_TURFS 40
|
#define FIRE_PRIORITY_AIR_TURFS 40
|
||||||
#define FIRE_PRIORITY_DEFAULT 50
|
#define FIRE_PRIORITY_DEFAULT 50
|
||||||
#define FIRE_PRIORITY_PARALLAX 65
|
#define FIRE_PRIORITY_PARALLAX 65
|
||||||
#define FIRE_PRIORITY_NPC 80
|
#define FIRE_PRIORITY_INSTRUMENTS 80
|
||||||
#define FIRE_PRIORITY_MOBS 100
|
#define FIRE_PRIORITY_MOBS 100
|
||||||
#define FIRE_PRIORITY_TGUI 110
|
#define FIRE_PRIORITY_TGUI 110
|
||||||
#define FIRE_PRIORITY_PROJECTILES 200
|
#define FIRE_PRIORITY_PROJECTILES 200
|
||||||
@@ -126,6 +184,8 @@
|
|||||||
#define FIRE_PRIORITY_CHAT 400
|
#define FIRE_PRIORITY_CHAT 400
|
||||||
#define FIRE_PRIORITY_RUNECHAT 410
|
#define FIRE_PRIORITY_RUNECHAT 410
|
||||||
#define FIRE_PRIORITY_OVERLAYS 500
|
#define FIRE_PRIORITY_OVERLAYS 500
|
||||||
|
// #define FIRE_PRIORITY_EXPLOSIONS 666
|
||||||
|
#define FIRE_PRIORITY_TIMER 700
|
||||||
#define FIRE_PRIORITY_INPUT 1000 // This must always be the highest priority. Player input must never be lost.
|
#define FIRE_PRIORITY_INPUT 1000 // This must always be the highest priority. Player input must never be lost.
|
||||||
|
|
||||||
// SS runlevels
|
// SS runlevels
|
||||||
@@ -138,6 +198,37 @@
|
|||||||
|
|
||||||
#define RUNLEVELS_DEFAULT (RUNLEVEL_SETUP | RUNLEVEL_GAME | RUNLEVEL_POSTGAME)
|
#define RUNLEVELS_DEFAULT (RUNLEVEL_SETUP | RUNLEVEL_GAME | RUNLEVEL_POSTGAME)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
//! ## Overlays subsystem
|
||||||
|
|
||||||
|
///Compile all the overlays for an atom from the cache lists
|
||||||
|
// |= on overlays is not actually guaranteed to not add same appearances but we're optimistically using it anyway.
|
||||||
|
#define COMPILE_OVERLAYS(A)\
|
||||||
|
if (TRUE) {\
|
||||||
|
var/list/ad = A.add_overlays;\
|
||||||
|
var/list/rm = A.remove_overlays;\
|
||||||
|
if(LAZYLEN(rm)){\
|
||||||
|
A.overlays -= rm;\
|
||||||
|
rm.Cut();\
|
||||||
|
}\
|
||||||
|
if(LAZYLEN(ad)){\
|
||||||
|
A.overlays |= ad;\
|
||||||
|
ad.Cut();\
|
||||||
|
}\
|
||||||
|
A.flags_1 &= ~OVERLAY_QUEUED_1;\
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a new timer and add it to the queue.
|
||||||
|
* Arguments:
|
||||||
|
* * callback the callback to call on timer finish
|
||||||
|
* * wait deciseconds to run the timer for
|
||||||
|
* * flags flags for this timer, see: code\__DEFINES\subsystems.dm
|
||||||
|
*/
|
||||||
|
#define addtimer(args...) _addtimer(args, file = __FILE__, line = __LINE__)
|
||||||
|
|
||||||
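A short usage sketch for the addtimer wrapper documented above (assumed; fade_out and the surrounding logic are hypothetical): with TIMER_STOPPABLE the call returns an id that deltimer() can cancel before the callback fires.

	var/fade_timer = addtimer(CALLBACK(src, .proc/fade_out), 3 SECONDS, TIMER_STOPPABLE)
	...
	deltimer(fade_timer) // cancel if the fade is no longer wanted
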
// SSair run section
|
// SSair run section
|
||||||
#define SSAIR_PIPENETS 1
|
#define SSAIR_PIPENETS 1
|
||||||
#define SSAIR_ATMOSMACHINERY 2
|
#define SSAIR_ATMOSMACHINERY 2
|
||||||
@@ -148,19 +239,3 @@
|
|||||||
#define SSAIR_REBUILD_PIPENETS 7
|
#define SSAIR_REBUILD_PIPENETS 7
|
||||||
#define SSAIR_EQUALIZE 8
|
#define SSAIR_EQUALIZE 8
|
||||||
#define SSAIR_ACTIVETURFS 9
|
#define SSAIR_ACTIVETURFS 9
|
||||||
|
|
||||||
// |= on overlays is not actually guaranteed to not add same appearances but we're optimistically using it anyway.
|
|
||||||
#define COMPILE_OVERLAYS(A)\
|
|
||||||
if (TRUE) {\
|
|
||||||
var/list/ad = A.add_overlays;\
|
|
||||||
var/list/rm = A.remove_overlays;\
|
|
||||||
if(LAZYLEN(rm)){\
|
|
||||||
A.overlays -= rm;\
|
|
||||||
A.remove_overlays = null;\
|
|
||||||
}\
|
|
||||||
if(LAZYLEN(ad)){\
|
|
||||||
A.overlays |= ad;\
|
|
||||||
A.add_overlays = null;\
|
|
||||||
}\
|
|
||||||
A.flags_1 &= ~OVERLAY_QUEUED_1;\
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -37,7 +37,7 @@
|
|||||||
* TYPECONT: The typepath of the contents of the list
|
* TYPECONT: The typepath of the contents of the list
|
||||||
* COMPARE: The object to compare against, usually the same as INPUT
|
* COMPARE: The object to compare against, usually the same as INPUT
|
||||||
* COMPARISON: The variable on the objects to compare
|
* COMPARISON: The variable on the objects to compare
|
||||||
* COMPTYPE: How the current bin item to compare against COMPARE is fetched. By key or value.
|
* COMPTYPE: How should the values be compared? Either COMPARE_KEY or COMPARE_VALUE.
|
||||||
*/
|
*/
|
||||||
#define BINARY_INSERT(INPUT, LIST, TYPECONT, COMPARE, COMPARISON, COMPTYPE) \
|
#define BINARY_INSERT(INPUT, LIST, TYPECONT, COMPARE, COMPARISON, COMPTYPE) \
|
||||||
do {\
|
do {\
|
||||||
@@ -49,7 +49,7 @@
|
|||||||
var/__BIN_LEFT = 1;\
|
var/__BIN_LEFT = 1;\
|
||||||
var/__BIN_RIGHT = __BIN_CTTL;\
|
var/__BIN_RIGHT = __BIN_CTTL;\
|
||||||
var/__BIN_MID = (__BIN_LEFT + __BIN_RIGHT) >> 1;\
|
var/__BIN_MID = (__BIN_LEFT + __BIN_RIGHT) >> 1;\
|
||||||
var/##TYPECONT/__BIN_ITEM;\
|
var ##TYPECONT/__BIN_ITEM;\
|
||||||
while(__BIN_LEFT < __BIN_RIGHT) {\
|
while(__BIN_LEFT < __BIN_RIGHT) {\
|
||||||
__BIN_ITEM = COMPTYPE;\
|
__BIN_ITEM = COMPTYPE;\
|
||||||
if(__BIN_ITEM.##COMPARISON <= COMPARE.##COMPARISON) {\
|
if(__BIN_ITEM.##COMPARISON <= COMPARE.##COMPARISON) {\
|
||||||
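A usage sketch for the BINARY_INSERT macro documented above (assumed; it mirrors how the timer subsystem keeps its second_queue sorted, but this exact call site is not part of this hunk):

	// Insert src into the sorted list, ordered by each /datum/timedevent's timeToRun, comparing by key.
	BINARY_INSERT(src, SStimer.second_queue, /datum/timedevent, src, timeToRun, COMPARE_KEY)
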
|
|||||||
@@ -3,7 +3,7 @@
|
|||||||
name = "Initializing..."
|
name = "Initializing..."
|
||||||
var/target
|
var/target
|
||||||
|
|
||||||
INITIALIZE_IMMEDIATE(/obj/effect/statclick) //it's new, but rebranded.
|
INITIALIZE_IMMEDIATE(/obj/effect/statclick)
|
||||||
|
|
||||||
/obj/effect/statclick/Initialize(mapload, text, target) //Don't port this to Initialize it's too critical
|
/obj/effect/statclick/Initialize(mapload, text, target) //Don't port this to Initialize it's too critical
|
||||||
. = ..()
|
. = ..()
|
||||||
@@ -33,14 +33,6 @@ INITIALIZE_IMMEDIATE(/obj/effect/statclick) //it's new, but rebranded.
|
|||||||
usr.client.debug_variables(target)
|
usr.client.debug_variables(target)
|
||||||
message_admins("Admin [key_name_admin(usr)] is debugging the [target] [class].")
|
message_admins("Admin [key_name_admin(usr)] is debugging the [target] [class].")
|
||||||
|
|
||||||
/obj/effect/statclick/misc_subsystems/Click()
|
|
||||||
if(!usr.client.holder)
|
|
||||||
return
|
|
||||||
var/subsystem = input(usr, "Debug which subsystem?", "Debug nonprocessing subsystem") as null|anything in (Master.subsystems - Master.statworthy_subsystems)
|
|
||||||
if(!subsystem)
|
|
||||||
return
|
|
||||||
usr.client.debug_variables(subsystem)
|
|
||||||
message_admins("Admin [key_name_admin(usr)] is debugging the [subsystem] subsystem.")
|
|
||||||
|
|
||||||
// Debug verbs.
|
// Debug verbs.
|
||||||
/client/proc/restart_controller(controller in list("Master", "Failsafe"))
|
/client/proc/restart_controller(controller in list("Master", "Failsafe"))
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
/**
|
/**
|
||||||
* Failsafe
|
* Failsafe
|
||||||
*
|
*
|
||||||
* Pretty much pokes the MC to make sure it's still alive.
|
* Pretty much pokes the MC to make sure it's still alive.
|
||||||
**/
|
**/
|
||||||
|
|
||||||
GLOBAL_REAL(Failsafe, /datum/controller/failsafe)
|
GLOBAL_REAL(Failsafe, /datum/controller/failsafe)
|
||||||
|
|||||||
@@ -1,10 +1,10 @@
|
|||||||
/**
|
/**
|
||||||
* StonedMC
|
* StonedMC
|
||||||
*
|
*
|
||||||
* Designed to properly split up a given tick among subsystems
|
* Designed to properly split up a given tick among subsystems
|
||||||
* Note: if you read parts of this code and think "why is it doing it that way"
|
* Note: if you read parts of this code and think "why is it doing it that way"
|
||||||
* Odds are, there is a reason
|
* Odds are, there is a reason
|
||||||
*
|
*
|
||||||
**/
|
**/
|
||||||
|
|
||||||
//This is the ABSOLUTE ONLY THING that should init globally like this
|
//This is the ABSOLUTE ONLY THING that should init globally like this
|
||||||
@@ -28,8 +28,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
|
|||||||
|
|
||||||
// List of subsystems to process().
|
// List of subsystems to process().
|
||||||
var/list/subsystems
|
var/list/subsystems
|
||||||
/// List of subsystems to include in the MC stat panel.
|
|
||||||
var/list/statworthy_subsystems
|
|
||||||
|
|
||||||
// Vars for keeping track of tick drift.
|
// Vars for keeping track of tick drift.
|
||||||
var/init_timeofday
|
var/init_timeofday
|
||||||
@@ -41,7 +39,7 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
|
|||||||
///Only run ticker subsystems for the next n ticks.
|
///Only run ticker subsystems for the next n ticks.
|
||||||
var/skip_ticks = 0
|
var/skip_ticks = 0
|
||||||
|
|
||||||
var/make_runtime = 0
|
var/make_runtime = FALSE
|
||||||
|
|
||||||
var/initializations_finished_with_no_players_logged_in //I wonder what this could be?
|
var/initializations_finished_with_no_players_logged_in //I wonder what this could be?
|
||||||
|
|
||||||
@@ -67,9 +65,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
|
|||||||
//used by CHECK_TICK as well so that the procs subsystems call can obey that SS's tick limits
|
//used by CHECK_TICK as well so that the procs subsystems call can obey that SS's tick limits
|
||||||
var/static/current_ticklimit = TICK_LIMIT_RUNNING
|
var/static/current_ticklimit = TICK_LIMIT_RUNNING
|
||||||
|
|
||||||
/// Statclick for misc subsystems
|
|
||||||
var/obj/effect/statclick/misc_subsystems/misc_statclick
|
|
||||||
|
|
||||||
/datum/controller/master/New()
|
/datum/controller/master/New()
|
||||||
if(!config)
|
if(!config)
|
||||||
config = new
|
config = new
|
||||||
@@ -96,11 +91,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
|
|||||||
_subsystems += new I
|
_subsystems += new I
|
||||||
Master = src
|
Master = src
|
||||||
|
|
||||||
// We want to see all subsystems during init.
|
|
||||||
statworthy_subsystems = subsystems.Copy()
|
|
||||||
|
|
||||||
misc_statclick = new(null, "Debug")
|
|
||||||
|
|
||||||
if(!GLOB)
|
if(!GLOB)
|
||||||
new /datum/controller/global_vars
|
new /datum/controller/global_vars
|
||||||
|
|
||||||
@@ -217,7 +207,7 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
|
|||||||
// Sort subsystems by display setting for easy access.
|
// Sort subsystems by display setting for easy access.
|
||||||
sortTim(subsystems, /proc/cmp_subsystem_display)
|
sortTim(subsystems, /proc/cmp_subsystem_display)
|
||||||
// Set world options.
|
// Set world options.
|
||||||
world.fps = CONFIG_GET(number/fps)
|
world.change_fps(CONFIG_GET(number/fps))
|
||||||
var/initialized_tod = REALTIMEOFDAY
|
var/initialized_tod = REALTIMEOFDAY
|
||||||
|
|
||||||
if(tgs_prime)
|
if(tgs_prime)
|
||||||
@@ -271,14 +261,10 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
|
|||||||
var/list/tickersubsystems = list()
|
var/list/tickersubsystems = list()
|
||||||
var/list/runlevel_sorted_subsystems = list(list()) //ensure we always have at least one runlevel
|
var/list/runlevel_sorted_subsystems = list(list()) //ensure we always have at least one runlevel
|
||||||
var/timer = world.time
|
var/timer = world.time
|
||||||
statworthy_subsystems = list()
|
|
||||||
for (var/thing in subsystems)
|
for (var/thing in subsystems)
|
||||||
var/datum/controller/subsystem/SS = thing
|
var/datum/controller/subsystem/SS = thing
|
||||||
if (SS.flags & SS_NO_FIRE)
|
if (SS.flags & SS_NO_FIRE)
|
||||||
if(SS.flags & SS_ALWAYS_SHOW_STAT)
|
|
||||||
statworthy_subsystems += SS
|
|
||||||
continue
|
continue
|
||||||
statworthy_subsystems += SS
|
|
||||||
SS.queued_time = 0
|
SS.queued_time = 0
|
||||||
SS.queue_next = null
|
SS.queue_next = null
|
||||||
SS.queue_prev = null
|
SS.queue_prev = null
|
||||||
|
|||||||
@@ -23,7 +23,7 @@
|
|||||||
var/priority = FIRE_PRIORITY_DEFAULT
|
var/priority = FIRE_PRIORITY_DEFAULT
|
||||||
|
|
||||||
/// [Subsystem Flags][SS_NO_INIT] to control binary behavior. Flags must be set at compile time or before preinit finishes to take full effect. (You can also restart the mc to force them to process again)
|
/// [Subsystem Flags][SS_NO_INIT] to control binary behavior. Flags must be set at compile time or before preinit finishes to take full effect. (You can also restart the mc to force them to process again)
|
||||||
var/flags = 0
|
var/flags = NONE
|
||||||
|
|
||||||
/// This var is set to TRUE after the subsystem has been initialized.
|
/// This var is set to TRUE after the subsystem has been initialized.
|
||||||
var/initialized = FALSE
|
var/initialized = FALSE
|
||||||
@@ -114,7 +114,7 @@
|
|||||||
//previously, this would have been named 'process()' but that name is used everywhere for different things!
|
//previously, this would have been named 'process()' but that name is used everywhere for different things!
|
||||||
//fire() seems more suitable. This is the procedure that gets called every 'wait' deciseconds.
|
//fire() seems more suitable. This is the procedure that gets called every 'wait' deciseconds.
|
||||||
//Sleeping in here prevents future fires until returned.
|
//Sleeping in here prevents future fires until returned.
|
||||||
/datum/controller/subsystem/proc/fire(resumed = 0)
|
/datum/controller/subsystem/proc/fire(resumed = FALSE)
|
||||||
flags |= SS_NO_FIRE
|
flags |= SS_NO_FIRE
|
||||||
CRASH("Subsystem [src]([type]) does not fire() but did not set the SS_NO_FIRE flag. Please add the SS_NO_FIRE flag to any subsystem that doesn't fire so it doesn't get added to the processing list and waste cpu.")
|
CRASH("Subsystem [src]([type]) does not fire() but did not set the SS_NO_FIRE flag. Please add the SS_NO_FIRE flag to any subsystem that doesn't fire so it doesn't get added to the processing list and waste cpu.")
|
||||||
|
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ SUBSYSTEM_DEF(assets)
|
|||||||
switch (CONFIG_GET(string/asset_transport))
|
switch (CONFIG_GET(string/asset_transport))
|
||||||
if ("webroot")
|
if ("webroot")
|
||||||
newtransporttype = /datum/asset_transport/webroot
|
newtransporttype = /datum/asset_transport/webroot
|
||||||
|
|
||||||
if (newtransporttype == transport.type)
|
if (newtransporttype == transport.type)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|||||||
@@ -10,25 +10,28 @@ SUBSYSTEM_DEF(atoms)
|
|||||||
|
|
||||||
var/old_initialized
|
var/old_initialized
|
||||||
|
|
||||||
var/list/late_loaders
|
var/list/late_loaders = list()
|
||||||
|
|
||||||
var/list/BadInitializeCalls = list()
|
var/list/BadInitializeCalls = list()
|
||||||
|
|
||||||
|
initialized = INITIALIZATION_INSSATOMS
|
||||||
|
|
||||||
/datum/controller/subsystem/atoms/Initialize(timeofday)
|
/datum/controller/subsystem/atoms/Initialize(timeofday)
|
||||||
GLOB.fire_overlay.appearance_flags = RESET_COLOR
|
GLOB.fire_overlay.appearance_flags = RESET_COLOR
|
||||||
setupGenetics()
|
setupGenetics() //to set the mutations' sequence
|
||||||
|
|
||||||
initialized = INITIALIZATION_INNEW_MAPLOAD
|
initialized = INITIALIZATION_INNEW_MAPLOAD
|
||||||
InitializeAtoms()
|
InitializeAtoms()
|
||||||
|
initialized = INITIALIZATION_INNEW_REGULAR
|
||||||
return ..()
|
return ..()
|
||||||
|
|
||||||
/datum/controller/subsystem/atoms/proc/InitializeAtoms(list/atoms)
|
/datum/controller/subsystem/atoms/proc/InitializeAtoms(list/atoms)
|
||||||
if(initialized == INITIALIZATION_INSSATOMS)
|
if(initialized == INITIALIZATION_INSSATOMS)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
old_initialized = initialized
|
||||||
initialized = INITIALIZATION_INNEW_MAPLOAD
|
initialized = INITIALIZATION_INNEW_MAPLOAD
|
||||||
|
|
||||||
LAZYINITLIST(late_loaders)
|
|
||||||
|
|
||||||
var/count
|
var/count
|
||||||
var/list/mapload_arg = list(TRUE)
|
var/list/mapload_arg = list(TRUE)
|
||||||
if(atoms)
|
if(atoms)
|
||||||
@@ -49,7 +52,7 @@ SUBSYSTEM_DEF(atoms)
|
|||||||
testing("Initialized [count] atoms")
|
testing("Initialized [count] atoms")
|
||||||
pass(count)
|
pass(count)
|
||||||
|
|
||||||
initialized = INITIALIZATION_INNEW_REGULAR
|
initialized = old_initialized
|
||||||
|
|
||||||
if(late_loaders.len)
|
if(late_loaders.len)
|
||||||
for(var/I in late_loaders)
|
for(var/I in late_loaders)
|
||||||
@@ -58,6 +61,7 @@ SUBSYSTEM_DEF(atoms)
|
|||||||
testing("Late initialized [late_loaders.len] atoms")
|
testing("Late initialized [late_loaders.len] atoms")
|
||||||
late_loaders.Cut()
|
late_loaders.Cut()
|
||||||
|
|
||||||
|
/// Init this specific atom
|
||||||
/datum/controller/subsystem/atoms/proc/InitAtom(atom/A, list/arguments)
|
/datum/controller/subsystem/atoms/proc/InitAtom(atom/A, list/arguments)
|
||||||
var/the_type = A.type
|
var/the_type = A.type
|
||||||
if(QDELING(A))
|
if(QDELING(A))
|
||||||
@@ -150,8 +154,3 @@ SUBSYSTEM_DEF(atoms)
|
|||||||
var/initlog = InitLog()
|
var/initlog = InitLog()
|
||||||
if(initlog)
|
if(initlog)
|
||||||
text2file(initlog, "[GLOB.log_directory]/initialize.log")
|
text2file(initlog, "[GLOB.log_directory]/initialize.log")
|
||||||
|
|
||||||
#undef BAD_INIT_QDEL_BEFORE
|
|
||||||
#undef BAD_INIT_DIDNT_INIT
|
|
||||||
#undef BAD_INIT_SLEPT
|
|
||||||
#undef BAD_INIT_NO_HINT
|
|
||||||
|
|||||||
@@ -14,12 +14,14 @@ SUBSYSTEM_DEF(blackbox)
|
|||||||
"explosion" = 2,
|
"explosion" = 2,
|
||||||
"time_dilation_current" = 3,
|
"time_dilation_current" = 3,
|
||||||
"science_techweb_unlock" = 2,
|
"science_techweb_unlock" = 2,
|
||||||
"round_end_stats" = 2) //associative list of any feedback variables that have had their format changed since creation and their current version, remember to update this
|
"round_end_stats" = 2,
|
||||||
|
"testmerged_prs" = 2) //associative list of any feedback variables that have had their format changed since creation and their current version, remember to update this
|
||||||
|
|
||||||
/datum/controller/subsystem/blackbox/Initialize()
|
/datum/controller/subsystem/blackbox/Initialize()
|
||||||
triggertime = world.time
|
triggertime = world.time
|
||||||
record_feedback("amount", "random_seed", Master.random_seed)
|
record_feedback("amount", "random_seed", Master.random_seed)
|
||||||
record_feedback("amount", "dm_version", DM_VERSION)
|
record_feedback("amount", "dm_version", DM_VERSION)
|
||||||
|
record_feedback("amount", "dm_build", DM_BUILD)
|
||||||
record_feedback("amount", "byond_version", world.byond_version)
|
record_feedback("amount", "byond_version", world.byond_version)
|
||||||
record_feedback("amount", "byond_build", world.byond_build)
|
record_feedback("amount", "byond_build", world.byond_build)
|
||||||
. = ..()
|
. = ..()
|
||||||
@@ -39,10 +41,7 @@ SUBSYSTEM_DEF(blackbox)
|
|||||||
|
|
||||||
if(!SSdbcore.Connect())
|
if(!SSdbcore.Connect())
|
||||||
return
|
return
|
||||||
var/playercount = 0
|
var/playercount = LAZYLEN(GLOB.player_list)
|
||||||
for(var/mob/M in GLOB.player_list)
|
|
||||||
if(M.client)
|
|
||||||
playercount += 1
|
|
||||||
var/admincount = GLOB.admins.len
|
var/admincount = GLOB.admins.len
|
||||||
var/datum/DBQuery/query_record_playercount = SSdbcore.NewQuery("INSERT INTO [format_table_name("legacy_population")] (playercount, admincount, time, server_ip, server_port, round_id) VALUES ([playercount], [admincount], '[SQLtime()]', INET_ATON(IF('[world.internet_address]' LIKE '', '0', '[world.internet_address]')), '[world.port]', '[GLOB.round_id]')")
|
var/datum/DBQuery/query_record_playercount = SSdbcore.NewQuery("INSERT INTO [format_table_name("legacy_population")] (playercount, admincount, time, server_ip, server_port, round_id) VALUES ([playercount], [admincount], '[SQLtime()]', INET_ATON(IF('[world.internet_address]' LIKE '', '0', '[world.internet_address]')), '[world.port]', '[GLOB.round_id]')")
|
||||||
query_record_playercount.Execute()
|
query_record_playercount.Execute()
|
||||||
@@ -88,18 +87,24 @@ SUBSYSTEM_DEF(blackbox)
|
|||||||
if (!SSdbcore.Connect())
|
if (!SSdbcore.Connect())
|
||||||
return
|
return
|
||||||
|
|
||||||
|
// var/list/special_columns = list(
|
||||||
|
// "datetime" = "NOW()"
|
||||||
|
// )
|
||||||
var/list/sqlrowlist = list()
|
var/list/sqlrowlist = list()
|
||||||
|
|
||||||
for (var/datum/feedback_variable/FV in feedback)
|
for (var/datum/feedback_variable/FV in feedback)
|
||||||
var/sqlversion = 1
|
sqlrowlist += list(list(
|
||||||
if(FV.key in versions)
|
"datetime" = "Now()", //legacy
|
||||||
sqlversion = versions[FV.key]
|
"round_id" = GLOB.round_id,
|
||||||
sqlrowlist += list(list("datetime" = "Now()", "round_id" = GLOB.round_id, "key_name" = "'[sanitizeSQL(FV.key)]'", "key_type" = "'[FV.key_type]'", "version" = "[sqlversion]", "json" = "'[sanitizeSQL(json_encode(FV.json))]'"))
|
"key_name" = sanitizeSQL(FV.key),
|
||||||
|
"key_type" = FV.key_type,
|
||||||
|
"version" = versions[FV.key] || 1,
|
||||||
|
"json" = sanitizeSQL(json_encode(FV.json))
|
||||||
|
))
|
||||||
|
|
||||||
if (!length(sqlrowlist))
|
if (!length(sqlrowlist))
|
||||||
return
|
return
|
||||||
|
|
||||||
SSdbcore.MassInsert(format_table_name("feedback"), sqlrowlist, ignore_errors = TRUE, delayed = TRUE)
|
SSdbcore.MassInsert(format_table_name("feedback"), sqlrowlist, ignore_errors = TRUE, delayed = TRUE)//, special_columns = special_columns)
|
||||||
|
|
||||||
/datum/controller/subsystem/blackbox/proc/Seal()
|
/datum/controller/subsystem/blackbox/proc/Seal()
|
||||||
if(sealed)
|
if(sealed)
|
||||||
@@ -169,7 +174,7 @@ feedback data can be recorded in 5 formats:
|
|||||||
"tally"
|
"tally"
|
||||||
used to track the number of occurrences of multiple related values i.e. how many times each type of gun is fired
|
used to track the number of occurrences of multiple related values i.e. how many times each type of gun is fired
|
||||||
further calls to the same key will:
|
further calls to the same key will:
|
||||||
add or subtract from the saved value of the data key if it already exists
|
add or subtract from the saved value of the data key if it already exists
|
||||||
append the key and it's value if it doesn't exist
|
append the key and it's value if it doesn't exist
|
||||||
calls: SSblackbox.record_feedback("tally", "example", 1, "sample data")
|
calls: SSblackbox.record_feedback("tally", "example", 1, "sample data")
|
||||||
SSblackbox.record_feedback("tally", "example", 4, "sample data")
|
SSblackbox.record_feedback("tally", "example", 4, "sample data")
|
||||||
@@ -181,7 +186,7 @@ feedback data can be recorded in 5 formats:
|
|||||||
the final element in the data list is used as the tracking key, all prior elements are used for nesting
|
the final element in the data list is used as the tracking key, all prior elements are used for nesting
|
||||||
all data list elements must be strings
|
all data list elements must be strings
|
||||||
further calls to the same key will:
|
further calls to the same key will:
|
||||||
add or subtract from the saved value of the data key if it already exists in the same multi-dimensional position
|
add or subtract from the saved value of the data key if it already exists in the same multi-dimensional position
|
||||||
append the key and its value if it doesn't exist
|
append the key and its value if it doesn't exist
|
||||||
calls: SSblackbox.record_feedback("nested tally", "example", 1, list("fruit", "orange", "apricot"))
|
calls: SSblackbox.record_feedback("nested tally", "example", 1, list("fruit", "orange", "apricot"))
|
||||||
SSblackbox.record_feedback("nested tally", "example", 2, list("fruit", "orange", "orange"))
|
SSblackbox.record_feedback("nested tally", "example", 2, list("fruit", "orange", "orange"))
|
||||||
@@ -270,6 +275,18 @@ Versioning
|
|||||||
/datum/feedback_variable/New(new_key, new_key_type)
|
/datum/feedback_variable/New(new_key, new_key_type)
|
||||||
key = new_key
|
key = new_key
|
||||||
key_type = new_key_type
|
key_type = new_key_type
|
||||||
|
/*
|
||||||
|
/datum/controller/subsystem/blackbox/proc/LogAhelp(ticket, action, message, recipient, sender)
|
||||||
|
if(!SSdbcore.Connect())
|
||||||
|
return
|
||||||
|
|
||||||
|
var/datum/db_query/query_log_ahelp = SSdbcore.NewQuery({"
|
||||||
|
INSERT INTO [format_table_name("ticket")] (ticket, action, message, recipient, sender, server_ip, server_port, round_id, timestamp)
|
||||||
|
VALUES (:ticket, :action, :message, :recipient, :sender, INET_ATON(:server_ip), :server_port, :round_id, :time)
|
||||||
|
"}, list("ticket" = ticket, "action" = action, "message" = message, "recipient" = recipient, "sender" = sender, "server_ip" = world.internet_address || "0", "server_port" = world.port, "round_id" = GLOB.round_id, "time" = SQLtime()))
|
||||||
|
query_log_ahelp.Execute()
|
||||||
|
qdel(query_log_ahelp)
|
||||||
|
*/
|
||||||
|
|
||||||
/datum/controller/subsystem/blackbox/proc/ReportDeath(mob/living/L)
|
/datum/controller/subsystem/blackbox/proc/ReportDeath(mob/living/L)
|
||||||
set waitfor = FALSE
|
set waitfor = FALSE
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
/**
|
/*!
|
||||||
* Copyright (c) 2020 Aleksej Komarov
|
* Copyright (c) 2020 Aleksej Komarov
|
||||||
* SPDX-License-Identifier: MIT
|
* SPDX-License-Identifier: MIT
|
||||||
*/
|
*/
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ SUBSYSTEM_DEF(events)
|
|||||||
return ..()
|
return ..()
|
||||||
|
|
||||||
|
|
||||||
/datum/controller/subsystem/events/fire(resumed = 0)
|
/datum/controller/subsystem/events/fire(resumed = FALSE)
|
||||||
if(!resumed)
|
if(!resumed)
|
||||||
checkEvent() //only check these if we aren't resuming a paused fire
|
checkEvent() //only check these if we aren't resuming a paused fire
|
||||||
src.currentrun = running.Copy()
|
src.currentrun = running.Copy()
|
||||||
@@ -37,7 +37,7 @@ SUBSYSTEM_DEF(events)
|
|||||||
var/datum/thing = currentrun[currentrun.len]
|
var/datum/thing = currentrun[currentrun.len]
|
||||||
currentrun.len--
|
currentrun.len--
|
||||||
if(thing)
|
if(thing)
|
||||||
thing.process()
|
thing.process(wait * 0.1)
|
||||||
else
|
else
|
||||||
running.Remove(thing)
|
running.Remove(thing)
|
||||||
if (MC_TICK_CHECK)
|
if (MC_TICK_CHECK)
|
||||||
@@ -91,13 +91,13 @@ SUBSYSTEM_DEF(events)
|
|||||||
if(. == EVENT_CANT_RUN)//we couldn't run this event for some reason, set its max_occurrences to 0
|
if(. == EVENT_CANT_RUN)//we couldn't run this event for some reason, set its max_occurrences to 0
|
||||||
E.max_occurrences = 0
|
E.max_occurrences = 0
|
||||||
else if(. == EVENT_READY)
|
else if(. == EVENT_READY)
|
||||||
E.random = TRUE
|
E.runEvent(random = TRUE)
|
||||||
E.runEvent(TRUE)
|
|
||||||
|
|
||||||
//allows a client to trigger an event
|
//allows a client to trigger an event
|
||||||
//aka Badmin Central
|
//aka Badmin Central
|
||||||
// > Not in modules/admin
|
// > Not in modules/admin
|
||||||
// REEEEEEEEE
|
// REEEEEEEEE
|
||||||
|
// Why the heck is this here! Took me so damn long to find!
|
||||||
/client/proc/forceEvent()
|
/client/proc/forceEvent()
|
||||||
set name = "Trigger Event"
|
set name = "Trigger Event"
|
||||||
set category = "Admin.Events"
|
set category = "Admin.Events"
|
||||||
|
|||||||
@@ -1,3 +1,26 @@
|
|||||||
|
/*!
|
||||||
|
## Debugging GC issues
|
||||||
|
|
||||||
|
In order to debug `qdel()` failures, there are several tools available.
|
||||||
|
To enable these tools, define `TESTING` in [_compile_options.dm](https://github.com/tgstation/-tg-station/blob/master/code/_compile_options.dm).
|
||||||
|
|
||||||
|
First is a verb called "Find References", which lists **every** reference to an object in the world. This allows you to track down any indirect or obfuscated references that you might have missed.
|
||||||
|
|
||||||
|
Complementing this is another verb, "qdel() then Find References".
|
||||||
|
This does exactly what you'd expect; it calls `qdel()` on the object and then it finds all references remaining.
|
||||||
|
This is great, because it means that `Destroy()` will have been called before it starts to find references,
|
||||||
|
so the only references you'll find will be the ones preventing the object from `qdel()`ing gracefully.
|
||||||
|
|
||||||
|
If you have a datum or something you are not destroying directly (say via the singulo),
|
||||||
|
the next tool is `QDEL_HINT_FINDREFERENCE`. You can return this in `Destroy()` (where you would normally `return ..()`),
|
||||||
|
to print a list of references once it enters the GC queue.
|
||||||
|
|
||||||
|
Finally is a verb, "Show qdel() Log", which shows the deletion log that the garbage subsystem keeps. This is helpful if you are having race conditions or need to review the order of deletions.
|
||||||
|
|
||||||
|
Note that for any of these tools to work `TESTING` must be defined.
|
||||||
|
By using these methods of finding references, you can make your life far, far easier when dealing with `qdel()` failures.
|
||||||
|
*/
|
||||||
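A minimal sketch of the QDEL_HINT_FINDREFERENCE workflow described above (assumed example; /datum/example_thing and its linked_machine var are hypothetical, and TESTING must be defined for the hint to do anything):

/datum/example_thing/Destroy()
	linked_machine = null // clear our own references as usual
	. = ..()
	return QDEL_HINT_FINDREFERENCE // debugging only: print every remaining reference once this datum enters the GC queue
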
|
|
||||||
SUBSYSTEM_DEF(garbage)
|
SUBSYSTEM_DEF(garbage)
|
||||||
name = "Garbage"
|
name = "Garbage"
|
||||||
priority = FIRE_PRIORITY_GARBAGE
|
priority = FIRE_PRIORITY_GARBAGE
|
||||||
@@ -6,7 +29,7 @@ SUBSYSTEM_DEF(garbage)
|
|||||||
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
|
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
|
||||||
init_order = INIT_ORDER_GARBAGE
|
init_order = INIT_ORDER_GARBAGE
|
||||||
|
|
||||||
var/list/collection_timeout = list(15 SECONDS, 30 SECONDS) // deciseconds to wait before moving something up in the queue to the next level
|
var/list/collection_timeout = list(2 MINUTES, 10 SECONDS) // deciseconds to wait before moving something up in the queue to the next level
|
||||||
|
|
||||||
//Stat tracking
|
//Stat tracking
|
||||||
var/delslasttick = 0 // number of del()'s we've done this tick
|
var/delslasttick = 0 // number of del()'s we've done this tick
|
||||||
@@ -24,10 +47,8 @@ SUBSYSTEM_DEF(garbage)
|
|||||||
|
|
||||||
//Queue
|
//Queue
|
||||||
var/list/queues
|
var/list/queues
|
||||||
|
|
||||||
#ifdef LEGACY_REFERENCE_TRACKING
|
#ifdef LEGACY_REFERENCE_TRACKING
|
||||||
var/list/reference_find_on_fail = list()
|
var/list/reference_find_on_fail = list()
|
||||||
var/list/reference_find_on_fail_types = list()
|
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
|
||||||
@@ -99,6 +120,9 @@ SUBSYSTEM_DEF(garbage)
|
|||||||
state = SS_RUNNING
|
state = SS_RUNNING
|
||||||
break
|
break
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/datum/controller/subsystem/garbage/proc/HandleQueue(level = GC_QUEUE_CHECK)
|
/datum/controller/subsystem/garbage/proc/HandleQueue(level = GC_QUEUE_CHECK)
|
||||||
if (level == GC_QUEUE_CHECK)
|
if (level == GC_QUEUE_CHECK)
|
||||||
delslasttick = 0
|
delslasttick = 0
|
||||||
@@ -135,7 +159,7 @@ SUBSYSTEM_DEF(garbage)
|
|||||||
++totalgcs
|
++totalgcs
|
||||||
pass_counts[level]++
|
pass_counts[level]++
|
||||||
#ifdef LEGACY_REFERENCE_TRACKING
|
#ifdef LEGACY_REFERENCE_TRACKING
|
||||||
reference_find_on_fail -= refID //It's deleted we don't care anymore.
|
reference_find_on_fail -= refID //It's deleted we don't care anymore.
|
||||||
#endif
|
#endif
|
||||||
if (MC_TICK_CHECK)
|
if (MC_TICK_CHECK)
|
||||||
return
|
return
|
||||||
@@ -149,10 +173,10 @@ SUBSYSTEM_DEF(garbage)
|
|||||||
D.find_references()
|
D.find_references()
|
||||||
#elif defined(LEGACY_REFERENCE_TRACKING)
|
#elif defined(LEGACY_REFERENCE_TRACKING)
|
||||||
if(reference_find_on_fail[refID])
|
if(reference_find_on_fail[refID])
|
||||||
D.find_references()
|
D.find_references_legacy()
|
||||||
#ifdef GC_FAILURE_HARD_LOOKUP
|
#ifdef GC_FAILURE_HARD_LOOKUP
|
||||||
else
|
else
|
||||||
D.find_references()
|
D.find_references_legacy()
|
||||||
#endif
|
#endif
|
||||||
reference_find_on_fail -= refID
|
reference_find_on_fail -= refID
|
||||||
#endif
|
#endif
|
||||||
@@ -195,11 +219,6 @@ SUBSYSTEM_DEF(garbage)
|
|||||||
var/gctime = world.time
|
var/gctime = world.time
|
||||||
var/refid = "\ref[D]"
|
var/refid = "\ref[D]"
|
||||||
|
|
||||||
#ifdef LEGACY_REFERENCE_TRACKING
|
|
||||||
if(reference_find_on_fail_types[D.type])
|
|
||||||
reference_find_on_fail["\ref[D]"] = TRUE
|
|
||||||
#endif
|
|
||||||
|
|
||||||
D.gc_destroyed = gctime
|
D.gc_destroyed = gctime
|
||||||
var/list/queue = queues[level]
|
var/list/queue = queues[level]
|
||||||
if (queue[refid])
|
if (queue[refid])
|
||||||
@@ -207,21 +226,6 @@ SUBSYSTEM_DEF(garbage)
|
|||||||
|
|
||||||
queue[refid] = gctime
|
queue[refid] = gctime
|
||||||
|
|
||||||
#ifdef LEGACY_REFERENCE_TRACKING
|
|
||||||
/datum/controller/subsystem/garbage/proc/add_type_to_findref(type)
|
|
||||||
if(!ispath(type))
|
|
||||||
return "NOT A VAILD PATH"
|
|
||||||
reference_find_on_fail_types |= typecacheof(type)
|
|
||||||
|
|
||||||
/datum/controller/subsystem/garbage/proc/remove_type_from_findref(type)
|
|
||||||
if(!ispath(type))
|
|
||||||
return "NOT A VALID PATH"
|
|
||||||
reference_find_on_fail_types -= typesof(type)
|
|
||||||
|
|
||||||
/datum/controller/subsystem/garbage/proc/clear_findref_types()
|
|
||||||
reference_find_on_fail_types = list()
|
|
||||||
#endif
|
|
||||||
|
|
||||||
//this is mainly to separate things profile wise.
|
//this is mainly to separate things profile wise.
|
||||||
/datum/controller/subsystem/garbage/proc/HardDelete(datum/D)
|
/datum/controller/subsystem/garbage/proc/HardDelete(datum/D)
|
||||||
var/time = world.timeofday
|
var/time = world.timeofday
|
||||||
@@ -274,8 +278,10 @@ SUBSYSTEM_DEF(garbage)
|
|||||||
/datum/qdel_item/New(mytype)
|
/datum/qdel_item/New(mytype)
|
||||||
name = "[mytype]"
|
name = "[mytype]"
|
||||||
|
|
||||||
// Should be treated as a replacement for the 'del' keyword.
|
|
||||||
// Datums passed to this will be given a chance to clean up references to allow the GC to collect them.
|
/// Should be treated as a replacement for the 'del' keyword.
|
||||||
|
///
|
||||||
|
/// Datums passed to this will be given a chance to clean up references to allow the GC to collect them.
|
||||||
/proc/qdel(datum/D, force=FALSE, ...)
|
/proc/qdel(datum/D, force=FALSE, ...)
|
||||||
if(!istype(D))
|
if(!istype(D))
|
||||||
del(D)
|
del(D)
|
||||||
@@ -330,9 +336,10 @@ SUBSYSTEM_DEF(garbage)
|
|||||||
#ifdef LEGACY_REFERENCE_TRACKING
|
#ifdef LEGACY_REFERENCE_TRACKING
|
||||||
if (QDEL_HINT_FINDREFERENCE) //qdel will, if LEGACY_REFERENCE_TRACKING is enabled, display all references to this object, then queue the object for deletion.
|
if (QDEL_HINT_FINDREFERENCE) //qdel will, if LEGACY_REFERENCE_TRACKING is enabled, display all references to this object, then queue the object for deletion.
|
||||||
SSgarbage.Queue(D)
|
SSgarbage.Queue(D)
|
||||||
|
D.find_references_legacy()
|
||||||
if (QDEL_HINT_IFFAIL_FINDREFERENCE)
|
if (QDEL_HINT_IFFAIL_FINDREFERENCE)
|
||||||
SSgarbage.Queue(D)
|
SSgarbage.Queue(D)
|
||||||
SSgarbage.reference_find_on_fail["\ref[D]"] = TRUE
|
SSgarbage.reference_find_on_fail[REF(D)] = TRUE
|
||||||
#endif
|
#endif
|
||||||
else
|
else
|
||||||
#ifdef TESTING
|
#ifdef TESTING
|
||||||
@@ -343,18 +350,3 @@ SUBSYSTEM_DEF(garbage)
|
|||||||
SSgarbage.Queue(D)
|
SSgarbage.Queue(D)
|
||||||
else if(D.gc_destroyed == GC_CURRENTLY_BEING_QDELETED)
|
else if(D.gc_destroyed == GC_CURRENTLY_BEING_QDELETED)
|
||||||
CRASH("[D.type] destroy proc was called multiple times, likely due to a qdel loop in the Destroy logic")
|
CRASH("[D.type] destroy proc was called multiple times, likely due to a qdel loop in the Destroy logic")
|
||||||
|
|
||||||
#ifdef TESTING
|
|
||||||
/proc/writeDatumCount()
|
|
||||||
var/list/datums = list()
|
|
||||||
for(var/datum/D in world)
|
|
||||||
datums[D.type] += 1
|
|
||||||
for(var/datum/D)
|
|
||||||
datums[D.type] += 1
|
|
||||||
datums = sortTim(datums, /proc/cmp_numeric_dsc, associative = TRUE)
|
|
||||||
if(fexists("data/DATUMCOUNT.txt"))
|
|
||||||
fdel("data/DATUMCOUNT.txt")
|
|
||||||
var/outfile = file("data/DATUMCOUNT.txt")
|
|
||||||
for(var/path in datums)
|
|
||||||
outfile << "[datums[path]]\t\t\t\t\t[path]"
|
|
||||||
#endif
|
|
||||||
|
|||||||
@@ -33,8 +33,9 @@ SUBSYSTEM_DEF(idlenpcpool)
|
|||||||
while(currentrun.len)
|
while(currentrun.len)
|
||||||
var/mob/living/simple_animal/SA = currentrun[currentrun.len]
|
var/mob/living/simple_animal/SA = currentrun[currentrun.len]
|
||||||
--currentrun.len
|
--currentrun.len
|
||||||
if (!SA)
|
if (QDELETED(SA))
|
||||||
GLOB.simple_animals[AI_IDLE] -= SA
|
GLOB.simple_animals[AI_IDLE] -= SA
|
||||||
|
log_world("Found a null in simple_animals list!")
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if(!SA.ckey)
|
if(!SA.ckey)
|
||||||
|
|||||||
@@ -2,13 +2,13 @@ SUBSYSTEM_DEF(ipintel)
|
|||||||
name = "XKeyScore"
|
name = "XKeyScore"
|
||||||
init_order = INIT_ORDER_XKEYSCORE
|
init_order = INIT_ORDER_XKEYSCORE
|
||||||
flags = SS_NO_FIRE
|
flags = SS_NO_FIRE
|
||||||
var/enabled = 0 //disable at round start to avoid checking reconnects
|
var/enabled = FALSE //disable at round start to avoid checking reconnects
|
||||||
var/throttle = 0
|
var/throttle = 0
|
||||||
var/errors = 0
|
var/errors = 0
|
||||||
|
|
||||||
var/list/cache = list()
|
var/list/cache = list()
|
||||||
|
|
||||||
/datum/controller/subsystem/ipintel/Initialize(timeofday, zlevel)
|
/datum/controller/subsystem/ipintel/Initialize(timeofday, zlevel)
|
||||||
enabled = 1
|
enabled = TRUE
|
||||||
. = ..()
|
. = ..()
|
||||||
|
|
||||||
|
|||||||
@@ -1,14 +1,14 @@
|
|||||||
GLOBAL_LIST_EMPTY(lighting_update_lights) // List of lighting sources queued for update.
|
|
||||||
GLOBAL_LIST_EMPTY(lighting_update_corners) // List of lighting corners queued for update.
|
|
||||||
GLOBAL_LIST_EMPTY(lighting_update_objects) // List of lighting objects queued for update.
|
|
||||||
|
|
||||||
SUBSYSTEM_DEF(lighting)
|
SUBSYSTEM_DEF(lighting)
|
||||||
name = "Lighting"
|
name = "Lighting"
|
||||||
wait = 2
|
wait = 2
|
||||||
init_order = INIT_ORDER_LIGHTING
|
init_order = INIT_ORDER_LIGHTING
|
||||||
|
flags = SS_TICKER
|
||||||
|
var/static/list/sources_queue = list() // List of lighting sources queued for update.
|
||||||
|
var/static/list/corners_queue = list() // List of lighting corners queued for update.
|
||||||
|
var/static/list/objects_queue = list() // List of lighting objects queued for update.
|
||||||
|
|
||||||
/datum/controller/subsystem/lighting/stat_entry(msg)
|
/datum/controller/subsystem/lighting/stat_entry(msg)
|
||||||
msg = "L:[length(GLOB.lighting_update_lights)]|C:[length(GLOB.lighting_update_corners)]|O:[length(GLOB.lighting_update_objects)]"
|
msg = "L:[length(sources_queue)]|C:[length(corners_queue)]|O:[length(objects_queue)]"
|
||||||
return ..()
|
return ..()
|
||||||
|
|
||||||
|
|
||||||
@@ -31,9 +31,10 @@ SUBSYSTEM_DEF(lighting)
|
|||||||
MC_SPLIT_TICK_INIT(3)
|
MC_SPLIT_TICK_INIT(3)
|
||||||
if(!init_tick_checks)
|
if(!init_tick_checks)
|
||||||
MC_SPLIT_TICK
|
MC_SPLIT_TICK
|
||||||
|
var/list/queue = sources_queue
|
||||||
var/i = 0
|
var/i = 0
|
||||||
for (i in 1 to GLOB.lighting_update_lights.len)
|
for (i in 1 to length(queue))
|
||||||
var/datum/light_source/L = GLOB.lighting_update_lights[i]
|
var/datum/light_source/L = queue[i]
|
||||||
|
|
||||||
L.update_corners()
|
L.update_corners()
|
||||||
|
|
||||||
@@ -44,14 +45,15 @@ SUBSYSTEM_DEF(lighting)
|
|||||||
else if (MC_TICK_CHECK)
|
else if (MC_TICK_CHECK)
|
||||||
break
|
break
|
||||||
if (i)
|
if (i)
|
||||||
GLOB.lighting_update_lights.Cut(1, i+1)
|
queue.Cut(1, i+1)
|
||||||
i = 0
|
i = 0
|
||||||
|
|
||||||
if(!init_tick_checks)
|
if(!init_tick_checks)
|
||||||
MC_SPLIT_TICK
|
MC_SPLIT_TICK
|
||||||
|
|
||||||
for (i in 1 to GLOB.lighting_update_corners.len)
|
queue = corners_queue
|
||||||
var/datum/lighting_corner/C = GLOB.lighting_update_corners[i]
|
for (i in 1 to length(queue))
|
||||||
|
var/datum/lighting_corner/C = queue[i]
|
||||||
|
|
||||||
C.update_objects()
|
C.update_objects()
|
||||||
C.needs_update = FALSE
|
C.needs_update = FALSE
|
||||||
@@ -60,15 +62,16 @@ SUBSYSTEM_DEF(lighting)
|
|||||||
else if (MC_TICK_CHECK)
|
else if (MC_TICK_CHECK)
|
||||||
break
|
break
|
||||||
if (i)
|
if (i)
|
||||||
GLOB.lighting_update_corners.Cut(1, i+1)
|
queue.Cut(1, i+1)
|
||||||
i = 0
|
i = 0
|
||||||
|
|
||||||
|
|
||||||
if(!init_tick_checks)
|
if(!init_tick_checks)
|
||||||
MC_SPLIT_TICK
|
MC_SPLIT_TICK
|
||||||
|
|
||||||
for (i in 1 to GLOB.lighting_update_objects.len)
|
queue = objects_queue
|
||||||
var/atom/movable/lighting_object/O = GLOB.lighting_update_objects[i]
|
for (i in 1 to length(queue))
|
||||||
|
var/atom/movable/lighting_object/O = queue[i]
|
||||||
|
|
||||||
if (QDELETED(O))
|
if (QDELETED(O))
|
||||||
continue
|
continue
|
||||||
@@ -80,7 +83,7 @@ SUBSYSTEM_DEF(lighting)
|
|||||||
else if (MC_TICK_CHECK)
|
else if (MC_TICK_CHECK)
|
||||||
break
|
break
|
||||||
if (i)
|
if (i)
|
||||||
GLOB.lighting_update_objects.Cut(1, i+1)
|
queue.Cut(1, i+1)
|
||||||
|
|
||||||
|
|
||||||
/datum/controller/subsystem/lighting/Recover()
|
/datum/controller/subsystem/lighting/Recover()
|
||||||
|
|||||||
@@ -1,6 +1,4 @@
|
|||||||
//Fires five times every second.
|
|
||||||
|
|
||||||
PROCESSING_SUBSYSTEM_DEF(fastprocess)
|
PROCESSING_SUBSYSTEM_DEF(fastprocess)
|
||||||
name = "Fast Processing"
|
name = "Fast Processing"
|
||||||
wait = 2
|
wait = 0.2 SECONDS
|
||||||
stat_tag = "FP"
|
stat_tag = "FP"
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
PROCESSING_SUBSYSTEM_DEF(nanites)
|
PROCESSING_SUBSYSTEM_DEF(nanites)
|
||||||
name = "Nanites"
|
name = "Nanites"
|
||||||
flags = SS_BACKGROUND|SS_POST_FIRE_TIMING|SS_NO_INIT
|
flags = SS_BACKGROUND|SS_POST_FIRE_TIMING|SS_NO_INIT
|
||||||
wait = 10
|
wait = 1 SECONDS
|
||||||
|
|
||||||
var/list/datum/nanite_cloud_backup/cloud_backups = list()
|
var/list/datum/nanite_cloud_backup/cloud_backups = list()
|
||||||
var/list/mob/living/nanite_monitored_mobs = list()
|
var/list/mob/living/nanite_monitored_mobs = list()
|
||||||
|
|||||||
@@ -2,4 +2,4 @@ PROCESSING_SUBSYSTEM_DEF(obj)
|
|||||||
name = "Objects"
|
name = "Objects"
|
||||||
priority = FIRE_PRIORITY_OBJ
|
priority = FIRE_PRIORITY_OBJ
|
||||||
flags = SS_NO_INIT
|
flags = SS_NO_INIT
|
||||||
wait = 20
|
wait = 2 SECONDS
|
||||||
|
|||||||
@@ -1,10 +1,10 @@
|
|||||||
//Used to process objects. Fires once every second.
|
//Used to process objects.
|
||||||
|
|
||||||
SUBSYSTEM_DEF(processing)
|
SUBSYSTEM_DEF(processing)
|
||||||
name = "Processing"
|
name = "Processing"
|
||||||
priority = FIRE_PRIORITY_PROCESS
|
priority = FIRE_PRIORITY_PROCESS
|
||||||
flags = SS_BACKGROUND|SS_POST_FIRE_TIMING|SS_NO_INIT
|
flags = SS_BACKGROUND|SS_POST_FIRE_TIMING|SS_NO_INIT
|
||||||
wait = 10
|
wait = 1 SECONDS
|
||||||
|
|
||||||
var/stat_tag = "P" //Used for logging
|
var/stat_tag = "P" //Used for logging
|
||||||
var/list/processing = list()
|
var/list/processing = list()
|
||||||
@@ -14,7 +14,7 @@ SUBSYSTEM_DEF(processing)
|
|||||||
msg = "[stat_tag]:[length(processing)]"
|
msg = "[stat_tag]:[length(processing)]"
|
||||||
return ..()
|
return ..()
|
||||||
|
|
||||||
/datum/controller/subsystem/processing/fire(resumed = 0)
|
/datum/controller/subsystem/processing/fire(resumed = FALSE)
|
||||||
if (!resumed)
|
if (!resumed)
|
||||||
currentrun = processing.Copy()
|
currentrun = processing.Copy()
|
||||||
//cache for sanic speed (lists are references anyways)
|
//cache for sanic speed (lists are references anyways)
|
||||||
@@ -25,12 +25,26 @@ SUBSYSTEM_DEF(processing)
current_run.len--
if(QDELETED(thing))
processing -= thing
-else if(thing.process(wait) == PROCESS_KILL)
+else if(thing.process(wait * 0.1) == PROCESS_KILL)
// fully stop so that a future START_PROCESSING will work
STOP_PROCESSING(src, thing)
if (MC_TICK_CHECK)
return

-/datum/proc/process()
-set waitfor = 0
+/**
+* This proc is called on a datum on every "cycle" if it is being processed by a subsystem. The time between each cycle is determined by the subsystem's "wait" setting.
+* You can start and stop processing a datum using the START_PROCESSING and STOP_PROCESSING defines.
+*
+* Since the wait setting of a subsystem can be changed at any time, it is important that any rate-of-change that you implement in this proc is multiplied by the delta_time that is sent as a parameter,
+* Additionally, any "prob" you use in this proc should instead use the DT_PROB define to make sure that the final probability per second stays the same even if the subsystem's wait is altered.
+* Examples where this must be considered:
+* - Implementing a cooldown timer, use `mytimer -= delta_time`, not `mytimer -= 1`. This way, `mytimer` will always have the unit of seconds
+* - Damaging a mob, do `L.adjustFireLoss(20 * delta_time)`, not `L.adjustFireLoss(20)`. This way, the damage per second stays constant even if the wait of the subsystem is changed
+* - Probability of something happening, do `if(DT_PROB(25, delta_time))`, not `if(prob(25))`. This way, if the subsystem wait is e.g. lowered, there won't be a higher chance of this event happening per second
+*
+* If you override this do not call parent, as it will return PROCESS_KILL. This is done to prevent objects that dont override process() from staying in the processing list
+*/
+/datum/proc/process(delta_time)
+set waitfor = FALSE
return PROCESS_KILL
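As an illustrative aside (not part of the commit): a minimal sketch of a datum written against the process(delta_time) contract documented above. The /datum/fire_handler type, its vars and the numbers are invented for the example; START_PROCESSING, STOP_PROCESSING, SSprocessing, DT_PROB and PROCESS_KILL are the real macros and subsystem the doc comment refers to.

/datum/fire_handler
	var/stacks = 10

/datum/fire_handler/proc/ignite()
	START_PROCESSING(SSprocessing, src)

/datum/fire_handler/process(delta_time)
	// burn down at 0.5 stacks per second, independent of the subsystem's wait
	stacks -= 0.5 * delta_time
	// roughly a 10% chance per second to flare up, also wait-independent
	if(DT_PROB(10, delta_time))
		flare()
	if(stacks <= 0)
		return PROCESS_KILL // leave the processing list until ignite() is called again

/datum/fire_handler/proc/flare()
	return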
@@ -5,8 +5,8 @@ PROCESSING_SUBSYSTEM_DEF(quirks)
name = "Quirks"
init_order = INIT_ORDER_QUIRKS
flags = SS_BACKGROUND
-wait = 10
runlevels = RUNLEVEL_GAME
+wait = 1 SECONDS

var/list/quirks = list() //Assoc. list of all roundstart quirk datum types; "name" = /path/
var/list/quirk_names_by_path = list()
@@ -6,15 +6,15 @@
#define BUCKET_LIMIT (world.time + TICKS2DS(min(BUCKET_LEN - (SSrunechat.practical_offset - DS2TICKS(world.time - SSrunechat.head_offset)) - 1, BUCKET_LEN - 1)))

/**
* # Runechat Subsystem
*
* Maintains a timer-like system to handle destruction of runechat messages. Much of this code is modeled
* after or adapted from the timer subsystem.
*
* Note that this has the same structure for storing and queueing messages as the timer subsystem does
* for handling timers: the bucket_list is a list of chatmessage datums, each of which are the head
* of a circularly linked list. Any given index in bucket_list could be null, representing an empty bucket.
*/
SUBSYSTEM_DEF(runechat)
name = "Runechat"
flags = SS_TICKER | SS_NO_INIT
@@ -131,14 +131,14 @@ SUBSYSTEM_DEF(runechat)
bucket_resolution = world.tick_lag

/**
* Enters the runechat subsystem with this chatmessage, inserting it into the end-of-life queue
*
* This will also account for a chatmessage already being registered, and in which case
* the position will be updated to remove it from the previous location if necessary
*
* Arguments:
* * new_sched_destruction Optional, when provided is used to update an existing message with the new specified time
*/
/datum/chatmessage/proc/enter_subsystem(new_sched_destruction = 0)
// Get local references from subsystem as they are faster to access than the datum references
var/list/bucket_list = SSrunechat.bucket_list
@@ -169,7 +169,7 @@ SUBSYSTEM_DEF(runechat)

// Handle insertion into the secondary queue if the required time is outside our tracked amounts
if (scheduled_destruction >= BUCKET_LIMIT)
-BINARY_INSERT(src, SSrunechat.second_queue, datum/chatmessage, src, scheduled_destruction, COMPARE_KEY)
+BINARY_INSERT(src, SSrunechat.second_queue, /datum/chatmessage, src, scheduled_destruction, COMPARE_KEY)
return

// Get bucket position and a local reference to the datum var, it's faster to access this way
@@ -194,8 +194,8 @@ SUBSYSTEM_DEF(runechat)

/**
* Removes this chatmessage datum from the runechat subsystem
*/
/datum/chatmessage/proc/leave_subsystem()
// Attempt to find the bucket that contains this chat message
var/bucket_pos = BUCKET_POS(scheduled_destruction)
@@ -1,31 +1,51 @@
-#define BUCKET_LEN (world.fps*1*60) //how many ticks should we keep in the bucket. (1 minutes worth)
+/// Controls how many buckets should be kept, each representing a tick. (1 minutes worth)
+#define BUCKET_LEN (world.fps*1*60)
+/// Helper for getting the correct bucket for a given timer
#define BUCKET_POS(timer) (((round((timer.timeToRun - SStimer.head_offset) / world.tick_lag)+1) % BUCKET_LEN)||BUCKET_LEN)
+/// Gets the maximum time at which timers will be invoked from buckets, used for deferring to secondary queue
#define TIMER_MAX (world.time + TICKS2DS(min(BUCKET_LEN-(SStimer.practical_offset-DS2TICKS(world.time - SStimer.head_offset))-1, BUCKET_LEN-1)))
-#define TIMER_ID_MAX (2**24) //max float with integer precision
+/// Max float with integer precision
+#define TIMER_ID_MAX (2**24)

+/**
+* # Timer Subsystem
+*
+* Handles creation, callbacks, and destruction of timed events.
+*
+* It is important to understand the buckets used in the timer subsystem are just a series of circular doubly-linked
+* lists. The object at a given index in bucket_list is a /datum/timedevent, the head of a circular list, which has prev
+* and next references for the respective elements in that bucket's circular list.
+*/
SUBSYSTEM_DEF(timer)
name = "Timer"
-wait = 1 //SS_TICKER subsystem, so wait is in ticks
+wait = 1 // SS_TICKER subsystem, so wait is in ticks
init_order = INIT_ORDER_TIMER
+priority = FIRE_PRIORITY_TIMER
flags = SS_TICKER|SS_NO_INIT

-var/list/datum/timedevent/second_queue = list() //awe, yes, you've had first queue, but what about second queue?
+/// Queue used for storing timers that do not fit into the current buckets
+var/list/datum/timedevent/second_queue = list()
+/// A hashlist dictionary used for storing unique timers
var/list/hashes = list()
-var/head_offset = 0 //world.time of the first entry in the the bucket.
-var/practical_offset = 1 //index of the first non-empty item in the bucket.
-var/bucket_resolution = 0 //world.tick_lag the bucket was designed for
-var/bucket_count = 0 //how many timers are in the buckets
-var/list/bucket_list = list() //list of buckets, each bucket holds every timer that has to run that byond tick.
-var/list/timer_id_dict = list() //list of all active timers assoicated to their timer id (for easy lookup)
-var/list/clienttime_timers = list() //special snowflake timers that run on fancy pansy "client time"
+/// world.time of the first entry in the bucket list, effectively the 'start time' of the current buckets
+var/head_offset = 0
+/// Index of the wrap around pivot for buckets. buckets before this are later running buckets wrapped around from the end of the bucket list.
+var/practical_offset = 1
+/// world.tick_lag the bucket was designed for
+var/bucket_resolution = 0
+/// How many timers are in the buckets
+var/bucket_count = 0
+/// List of buckets, each bucket holds every timer that has to run that byond tick
+var/list/bucket_list = list()
+/// List of all active timers associated to their timer ID (for easy lookup)
+var/list/timer_id_dict = list()
+/// Special timers that run in real-time, not BYOND time; these are more expensive to run and maintain
+var/list/clienttime_timers = list()
+/// Contains the last time that a timer's callback was invoked, or the last tick the SS fired if no timers are being processed
var/last_invoke_tick = 0
+/// Contains the last time that a warning was issued for not invoking callbacks
var/static/last_invoke_warning = 0
+/// Boolean operator controlling if the timer SS will automatically reset buckets if it fails to invoke callbacks for an extended period of time
var/static/bucket_auto_reset = TRUE

/datum/controller/subsystem/timer/PreInit()
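To make the defines above concrete, a small worked example, assuming the BYOND defaults of world.fps = 20 and world.tick_lag = 0.5 (these numbers are illustrative only, not part of the commit):

// BUCKET_LEN  = 20 * 1 * 60 = 1200 buckets, i.e. one minute's worth of ticks.
// A timer scheduled 3 SECONDS (30 deciseconds) after head_offset lands at
// BUCKET_POS = (round(30 / 0.5) + 1) % 1200 = 61, so it is linked into bucket 61 of bucket_list.
// A timer whose timeToRun is at or beyond TIMER_MAX (roughly a minute out) skips the buckets and
// waits in second_queue until the bucket window has advanced far enough to hold it.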
@@ -38,44 +58,53 @@ SUBSYSTEM_DEF(timer)
return ..()

/datum/controller/subsystem/timer/fire(resumed = FALSE)
+// Store local references to datum vars as it is faster to access them
var/lit = last_invoke_tick
-var/last_check = world.time - TICKS2DS(BUCKET_LEN*1.5)
var/list/bucket_list = src.bucket_list
+var/last_check = world.time - TICKS2DS(BUCKET_LEN * 1.5)

+// If there are no timers being tracked, then consider now to be the last invoked time
if(!bucket_count)
last_invoke_tick = world.time

+// Check that we have invoked a callback in the last 1.5 minutes of BYOND time,
+// and throw a warning and reset buckets if this is true
if(lit && lit < last_check && head_offset < last_check && last_invoke_warning < last_check)
last_invoke_warning = world.time
-var/msg = "No regular timers processed in the last [BUCKET_LEN*1.5] ticks[bucket_auto_reset ? ", resetting buckets" : ""]!"
+var/msg = "No regular timers processed in the last [BUCKET_LEN * 1.5] ticks[bucket_auto_reset ? ", resetting buckets" : ""]!"
message_admins(msg)
WARNING(msg)
if(bucket_auto_reset)
bucket_resolution = 0

-log_world("Timer bucket reset. world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
+var/list/to_log = list("Timer bucket reset. world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
for (var/i in 1 to length(bucket_list))
var/datum/timedevent/bucket_head = bucket_list[i]
if (!bucket_head)
continue

-log_world("Active timers at index [i]:")
+to_log += "Active timers at index [i]:"

var/datum/timedevent/bucket_node = bucket_head
var/anti_loop_check = 1000
do
-log_world(get_timer_debug_string(bucket_node))
+to_log += get_timer_debug_string(bucket_node)
bucket_node = bucket_node.next
anti_loop_check--
while(bucket_node && bucket_node != bucket_head && anti_loop_check)
-log_world("Active timers in the second_queue queue:")
+to_log += "Active timers in the second_queue queue:"
for(var/I in second_queue)
-log_world(get_timer_debug_string(I))
+to_log += get_timer_debug_string(I)

-var/next_clienttime_timer_index = 0
-var/len = length(clienttime_timers)
-for (next_clienttime_timer_index in 1 to len)
+// Dump all the logged data to the world log
+log_world(to_log.Join("\n"))
+
+// Process client-time timers
+var/static/next_clienttime_timer_index = 0
+if (next_clienttime_timer_index)
+clienttime_timers.Cut(1, next_clienttime_timer_index+1)
+next_clienttime_timer_index = 0
+for (next_clienttime_timer_index in 1 to length(clienttime_timers))
if (MC_TICK_CHECK)
next_clienttime_timer_index--
break
@@ -86,8 +115,8 @@ SUBSYSTEM_DEF(timer)

var/datum/callback/callBack = ctime_timer.callBack
if (!callBack)
-clienttime_timers.Cut(next_clienttime_timer_index,next_clienttime_timer_index+1)
-CRASH("Invalid timer: [get_timer_debug_string(ctime_timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset], REALTIMEOFDAY: [REALTIMEOFDAY]")
+CRASH("Invalid timer: [get_timer_debug_string(ctime_timer)] world.time: [world.time], \
+head_offset: [head_offset], practical_offset: [practical_offset], REALTIMEOFDAY: [REALTIMEOFDAY]")

ctime_timer.spent = REALTIMEOFDAY
callBack.InvokeAsync()
@@ -95,135 +124,93 @@ SUBSYSTEM_DEF(timer)
if(ctime_timer.flags & TIMER_LOOP)
ctime_timer.spent = 0
ctime_timer.timeToRun = REALTIMEOFDAY + ctime_timer.wait
-BINARY_INSERT(ctime_timer, clienttime_timers, datum/timedevent, ctime_timer, timeToRun, COMPARE_KEY)
+BINARY_INSERT(ctime_timer, clienttime_timers, /datum/timedevent, ctime_timer, timeToRun, COMPARE_KEY)
else
qdel(ctime_timer)

+// Remove invoked client-time timers
if (next_clienttime_timer_index)
clienttime_timers.Cut(1, next_clienttime_timer_index+1)
+next_clienttime_timer_index = 0

if (MC_TICK_CHECK)
return

-var/static/list/spent = list()
-var/static/datum/timedevent/timer
+// Check for when we need to loop the buckets, this occurs when
+// the head_offset is approaching BUCKET_LEN ticks in the past
if (practical_offset > BUCKET_LEN)
head_offset += TICKS2DS(BUCKET_LEN)
practical_offset = 1
resumed = FALSE

+// Check for when we have to reset buckets, typically from auto-reset
if ((length(bucket_list) != BUCKET_LEN) || (world.tick_lag != bucket_resolution))
reset_buckets()
bucket_list = src.bucket_list
resumed = FALSE

-if (!resumed)
-timer = null
-while (practical_offset <= BUCKET_LEN && head_offset + ((practical_offset-1)*world.tick_lag) <= world.time)
-var/datum/timedevent/head = bucket_list[practical_offset]
-if (!timer || !head || timer == head)
-head = bucket_list[practical_offset]
-timer = head
-while (timer)
+// Iterate through each bucket starting from the practical offset
+while (practical_offset <= BUCKET_LEN && head_offset + ((practical_offset - 1) * world.tick_lag) <= world.time)
+var/datum/timedevent/timer
+while ((timer = bucket_list[practical_offset]))
var/datum/callback/callBack = timer.callBack
if (!callBack)
-bucket_resolution = null //force bucket recreation
-CRASH("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
+bucket_resolution = null // force bucket recreation
+CRASH("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], \
+head_offset: [head_offset], practical_offset: [practical_offset]")

+timer.bucketEject() //pop the timer off of the bucket list.

+// Invoke callback if possible
if (!timer.spent)
-spent += timer
timer.spent = world.time
callBack.InvokeAsync()
last_invoke_tick = world.time

-if (MC_TICK_CHECK)
-return
-timer = timer.next
-if (timer == head)
-break
-
-bucket_list[practical_offset++] = null
-
-//we freed up a bucket, lets see if anything in second_queue needs to be shifted to that bucket.
-var/i = 0
-var/L = length(second_queue)
-for (i in 1 to L)
-timer = second_queue[i]
-if (timer.timeToRun >= TIMER_MAX)
-i--
-break
-
-if (timer.timeToRun < head_offset)
-bucket_resolution = null //force bucket recreation
-stack_trace("[i] Invalid timer state: Timer in long run queue with a time to run less then head_offset. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
-
-if (timer.callBack && !timer.spent)
-timer.callBack.InvokeAsync()
-spent += timer
-bucket_count++
-else if(!QDELETED(timer))
-qdel(timer)
-continue
-
-if (timer.timeToRun < head_offset + TICKS2DS(practical_offset-1))
-bucket_resolution = null //force bucket recreation
-stack_trace("[i] Invalid timer state: Timer in long run queue that would require a backtrack to transfer to short run queue. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
-if (timer.callBack && !timer.spent)
-timer.callBack.InvokeAsync()
-spent += timer
-bucket_count++
-else if(!QDELETED(timer))
-qdel(timer)
-continue
-
-bucket_count++
-var/bucket_pos = max(1, BUCKET_POS(timer))
-
-var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
-if (!bucket_head)
-bucket_list[bucket_pos] = timer
-timer.next = null
-timer.prev = null
-continue
-
-if (!bucket_head.prev)
-bucket_head.prev = bucket_head
-timer.next = bucket_head
-timer.prev = bucket_head.prev
-timer.next.prev = timer
-timer.prev.next = timer
-if (i)
-second_queue.Cut(1, i+1)
-
-timer = null
-
-bucket_count -= length(spent)
-
-for (var/i in spent)
-var/datum/timedevent/qtimer = i
-if(QDELETED(qtimer))
-bucket_count++
-continue
-if(!(qtimer.flags & TIMER_LOOP))
-qdel(qtimer)
-else
-bucket_count++
-qtimer.spent = 0
-qtimer.bucketEject()
-if(qtimer.flags & TIMER_CLIENT_TIME)
-qtimer.timeToRun = REALTIMEOFDAY + qtimer.wait
+if (timer.flags & TIMER_LOOP) // Prepare looping timers to re-enter the queue
+timer.spent = 0
+timer.timeToRun = world.time + timer.wait
+timer.bucketJoin()
else
-qtimer.timeToRun = world.time + qtimer.wait
-qtimer.bucketJoin()
+qdel(timer)

-spent.len = 0
+if (MC_TICK_CHECK)
+break

-//formated this way to be runtime resistant
+if (!bucket_list[practical_offset])
+// Empty the bucket, check if anything in the secondary queue should be shifted to this bucket
+bucket_list[practical_offset++] = null
+var/i = 0
+for (i in 1 to length(second_queue))
+timer = second_queue[i]
+if (timer.timeToRun >= TIMER_MAX)
+i--
+break
+
+// Check for timers that are scheduled to run in the past
+if (timer.timeToRun < head_offset)
+bucket_resolution = null // force bucket recreation
+stack_trace("[i] Invalid timer state: Timer in long run queue with a time to run less then head_offset. \
+[get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
+break
+
+// Check for timers that are not capable of being scheduled to run without rebuilding buckets
+if (timer.timeToRun < head_offset + TICKS2DS(practical_offset - 1))
+bucket_resolution = null // force bucket recreation
+stack_trace("[i] Invalid timer state: Timer in long run queue that would require a backtrack to transfer to \
+short run queue. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
+break
+
+timer.bucketJoin()
+if (i)
+second_queue.Cut(1, i+1)
+if (MC_TICK_CHECK)
+break

+/**
+* Generates a string with details about the timed event for debugging purposes
+*/
/datum/controller/subsystem/timer/proc/get_timer_debug_string(datum/timedevent/TE)
. = "Timer: [TE]"
. += "Prev: [TE.prev ? TE.prev : "NULL"], Next: [TE.next ? TE.next : "NULL"]"
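An illustrative aside (not part of the commit): the TIMER_LOOP branch in the fire() rework above is what keeps repeating timers alive; they are reset and re-join a bucket instead of being qdel'd. A usage sketch with an invented type, using the real addtimer/deltimer API and flags:

/obj/machinery/demo_pinger
	var/ping_timer_id

/obj/machinery/demo_pinger/proc/start_pinging()
	// repeats every 5 seconds until explicitly stopped
	ping_timer_id = addtimer(CALLBACK(src, .proc/ping), 5 SECONDS, TIMER_LOOP | TIMER_STOPPABLE)

/obj/machinery/demo_pinger/proc/stop_pinging()
	deltimer(ping_timer_id)
	ping_timer_id = null

/obj/machinery/demo_pinger/proc/ping()
	visible_message("[src] pings.")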
@@ -234,12 +221,16 @@ SUBSYSTEM_DEF(timer)
if(!TE.callBack)
. += ", NO CALLBACK"

+/**
+* Destroys the existing buckets and creates new buckets from the existing timed events
+*/
/datum/controller/subsystem/timer/proc/reset_buckets()
-var/list/bucket_list = src.bucket_list
+var/list/bucket_list = src.bucket_list // Store local reference to datum var, this is faster
var/list/alltimers = list()
-//collect the timers currently in the bucket
+// Get all timers currently in the buckets
for (var/bucket_head in bucket_list)
-if (!bucket_head)
+if (!bucket_head) // if bucket is empty for this tick
continue
var/datum/timedevent/bucket_node = bucket_head
do
@@ -247,25 +238,38 @@ SUBSYSTEM_DEF(timer)
bucket_node = bucket_node.next
while(bucket_node && bucket_node != bucket_head)

+// Empty the list by zeroing and re-assigning the length
bucket_list.len = 0
bucket_list.len = BUCKET_LEN

+// Reset values for the subsystem to their initial values
practical_offset = 1
bucket_count = 0
head_offset = world.time
bucket_resolution = world.tick_lag

+// Add all timed events from the secondary queue as well
alltimers += second_queue

+// If there are no timers being tracked by the subsystem,
+// there is no need to do any further rebuilding
if (!length(alltimers))
return

+// Sort all timers by time to run
sortTim(alltimers, .proc/cmp_timer)

+// Get the earliest timer, and if the TTR is earlier than the current world.time,
+// then set the head offset appropriately to be the earliest time tracked by the
+// current set of buckets
var/datum/timedevent/head = alltimers[1]

if (head.timeToRun < head_offset)
head_offset = head.timeToRun

+// Iterate through each timed event and insert it into an appropriate bucket,
+// up unto the point that we can no longer insert into buckets as the TTR
+// is outside the range we are tracking, then insert the remainder into the
+// secondary queue
var/new_bucket_count
var/i = 1
for (i in 1 to length(alltimers))
@@ -273,34 +277,38 @@ SUBSYSTEM_DEF(timer)
if (!timer)
continue

-var/bucket_pos = BUCKET_POS(timer)
+// Check that the TTR is within the range covered by buckets, when exceeded we've finished
if (timer.timeToRun >= TIMER_MAX)
i--
break

+// Check that timer has a valid callback and hasn't been invoked
if (!timer.callBack || timer.spent)
-WARNING("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
+WARNING("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], \
+head_offset: [head_offset], practical_offset: [practical_offset]")
if (timer.callBack)
qdel(timer)
continue

+// Insert the timer into the bucket, and perform necessary circular doubly-linked list operations
new_bucket_count++
+var/bucket_pos = BUCKET_POS(timer)
var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
if (!bucket_head)
bucket_list[bucket_pos] = timer
timer.next = null
timer.prev = null
continue

if (!bucket_head.prev)
bucket_head.prev = bucket_head
timer.next = bucket_head
timer.prev = bucket_head.prev
timer.next.prev = timer
timer.prev.next = timer

+// Cut the timers that are tracked by the buckets from the secondary queue
if (i)
-alltimers.Cut(1, i+1)
+alltimers.Cut(1, i + 1)
second_queue = alltimers
bucket_count = new_bucket_count
@@ -311,45 +319,64 @@ SUBSYSTEM_DEF(timer)
timer_id_dict |= SStimer.timer_id_dict
bucket_list |= SStimer.bucket_list

+/**
+* # Timed Event
+*
+* This is the actual timer, it contains the callback and necessary data to maintain
+* the timer.
+*
+* See the documentation for the timer subsystem for an explanation of the buckets referenced
+* below in next and prev
+*/
/datum/timedevent
+/// ID used for timers when the TIMER_STOPPABLE flag is present
var/id
+/// The callback to invoke after the timer completes
var/datum/callback/callBack
+/// The time at which the callback should be invoked at
var/timeToRun
+/// The length of the timer
var/wait
+/// Unique hash generated when TIMER_UNIQUE flag is present
var/hash
+/// The source of the timedevent, whatever called addtimer
+var/source
+/// Flags associated with the timer, see _DEFINES/subsystems.dm
var/list/flags
-var/spent = 0 //time we ran the timer.
-var/name //for easy debugging.
-//cicular doublely linked list
+/// Time at which the timer was invoked or destroyed
+var/spent = 0
+/// An informative name generated for the timer as its representation in strings, useful for debugging
+var/name
+/// Next timed event in the bucket
var/datum/timedevent/next
+/// Previous timed event in the bucket
var/datum/timedevent/prev

-/datum/timedevent/New(datum/callback/callBack, wait, flags, hash)
+/datum/timedevent/New(datum/callback/callBack, wait, flags, hash, source)
var/static/nextid = 1
id = TIMER_ID_NULL
src.callBack = callBack
src.wait = wait
src.flags = flags
src.hash = hash
+src.source = source

-if (flags & TIMER_CLIENT_TIME)
-timeToRun = REALTIMEOFDAY + wait
-else
-timeToRun = world.time + wait
+// Determine time at which the timer's callback should be invoked
+timeToRun = (flags & TIMER_CLIENT_TIME ? REALTIMEOFDAY : world.time) + wait

+// Include the timer in the hash table if the timer is unique
if (flags & TIMER_UNIQUE)
SStimer.hashes[hash] = src

+// Generate ID for the timer if the timer is stoppable, include in the timer id dictionary
if (flags & TIMER_STOPPABLE)
id = num2text(nextid, 100)
if (nextid >= SHORT_REAL_LIMIT)
-nextid += min(1, 2**round(nextid/SHORT_REAL_LIMIT))
+nextid += min(1, 2 ** round(nextid / SHORT_REAL_LIMIT))
else
nextid++
SStimer.timer_id_dict[id] = src

-name = "Timer: [id] (\ref[src]), TTR: [timeToRun], Flags: [jointext(bitfield2list(flags, list("TIMER_UNIQUE", "TIMER_OVERRIDE", "TIMER_CLIENT_TIME", "TIMER_STOPPABLE", "TIMER_NO_HASH_WAIT", "TIMER_LOOP")), ", ")], callBack: \ref[callBack], callBack.object: [callBack.object]\ref[callBack.object]([getcallingtype()]), callBack.delegate:[callBack.delegate]([callBack.arguments ? callBack.arguments.Join(", ") : ""])"

if ((timeToRun < world.time || timeToRun < SStimer.head_offset) && !(flags & TIMER_CLIENT_TIME))
CRASH("Invalid timer state: Timer created that would require a backtrack to run (addtimer would never let this happen): [SStimer.get_timer_debug_string(src)]")

@@ -390,23 +417,39 @@ SUBSYSTEM_DEF(timer)
prev = null
return QDEL_HINT_IWILLGC

+/**
+* Removes this timed event from any relevant buckets, or the secondary queue
+*/
/datum/timedevent/proc/bucketEject()
+// Attempt to find bucket that contains this timed event
var/bucketpos = BUCKET_POS(src)

+// Store local references for the bucket list and secondary queue
+// This is faster than referencing them from the datum itself
var/list/bucket_list = SStimer.bucket_list
var/list/second_queue = SStimer.second_queue

+// Attempt to get the head of the bucket
var/datum/timedevent/buckethead
if(bucketpos > 0)
buckethead = bucket_list[bucketpos]

+// Decrement the number of timers in buckets if the timed event is
+// the head of the bucket, or has a TTR less than TIMER_MAX implying it fits
+// into an existing bucket, or is otherwise not present in the secondary queue
if(buckethead == src)
bucket_list[bucketpos] = next
SStimer.bucket_count--
-else if(timeToRun < TIMER_MAX || next || prev)
+else if(timeToRun < TIMER_MAX)
SStimer.bucket_count--
else
var/l = length(second_queue)
second_queue -= src
if(l == length(second_queue))
SStimer.bucket_count--

+// Remove the timed event from the bucket, ensuring to maintain
+// the integrity of the bucket's list if relevant
if(prev != next)
prev.next = next
next.prev = prev
@@ -415,32 +458,47 @@ SUBSYSTEM_DEF(timer)
next?.prev = null
prev = next = null

+/**
+* Attempts to add this timed event to a bucket, will enter the secondary queue
+* if there are no appropriate buckets at this time.
+*
+* Secondary queueing of timed events will occur when the timespan covered by the existing
+* buckets is exceeded by the time at which this timed event is scheduled to be invoked.
+* If the timed event is tracking client time, it will be added to a special bucket.
+*/
/datum/timedevent/proc/bucketJoin()
-var/list/L
+// Generate debug-friendly name for timer
+var/static/list/bitfield_flags = list("TIMER_UNIQUE", "TIMER_OVERRIDE", "TIMER_CLIENT_TIME", "TIMER_STOPPABLE", "TIMER_NO_HASH_WAIT", "TIMER_LOOP")
+name = "Timer: [id] (\ref[src]), TTR: [timeToRun], wait:[wait] Flags: [jointext(bitfield2list(flags, bitfield_flags), ", ")], \
+callBack: \ref[callBack], callBack.object: [callBack.object]\ref[callBack.object]([getcallingtype()]), \
+callBack.delegate:[callBack.delegate]([callBack.arguments ? callBack.arguments.Join(", ") : ""]), source: [source]"

+// Check if this timed event should be diverted to the client time bucket, or the secondary queue
+var/list/L
if (flags & TIMER_CLIENT_TIME)
L = SStimer.clienttime_timers
else if (timeToRun >= TIMER_MAX)
L = SStimer.second_queue

if(L)
-BINARY_INSERT(src, L, datum/timedevent, src, timeToRun, COMPARE_KEY)
+BINARY_INSERT(src, L, /datum/timedevent, src, timeToRun, COMPARE_KEY)
return

-//get the list of buckets
+// Get a local reference to the bucket list, this is faster than referencing the datum
var/list/bucket_list = SStimer.bucket_list

-//calculate our place in the bucket list
+// Find the correct bucket for this timed event
var/bucket_pos = BUCKET_POS(src)

-//get the bucket for our tick
var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
SStimer.bucket_count++

-//empty bucket, we will just add ourselves
+// If there is no timed event at this position, then the bucket is 'empty'
+// and we can just set this event to that position
if (!bucket_head)
bucket_list[bucket_pos] = src
return

-//other wise, lets do a simplified linked list add.
+// Otherwise, we merely add this timed event into the bucket, which is a
+// circularly doubly-linked list
if (!bucket_head.prev)
bucket_head.prev = bucket_head
next = bucket_head
@@ -448,7 +506,9 @@ SUBSYSTEM_DEF(timer)
next.prev = src
prev.next = src

-///Returns a string of the type of the callback for this timer
+/**
+* Returns a string of the type of the callback for this timer
+*/
/datum/timedevent/proc/getcallingtype()
. = "ERROR"
if (callBack.object == GLOBAL_PROC)
@@ -457,14 +517,15 @@ SUBSYSTEM_DEF(timer)
. = "[callBack.object.type]"

/**
-* Create a new timer and insert it in the queue
+* Create a new timer and insert it in the queue.
+* You should not call this directly, and should instead use the addtimer macro, which includes source information.
*
* Arguments:
* * callback the callback to call on timer finish
* * wait deciseconds to run the timer for
* * flags flags for this timer, see: code\__DEFINES\subsystems.dm
*/
-/proc/addtimer(datum/callback/callback, wait = 0, flags = 0)
+/proc/_addtimer(datum/callback/callback, wait = 0, flags = 0, file, line)
if (!callback)
CRASH("addtimer called without a callback")

@@ -472,31 +533,30 @@ SUBSYSTEM_DEF(timer)
stack_trace("addtimer called with a negative wait. Converting to [world.tick_lag]")

if (callback.object != GLOBAL_PROC && QDELETED(callback.object) && !QDESTROYING(callback.object))
-stack_trace("addtimer called with a callback assigned to a qdeleted object. In the future such timers will not be supported and may refuse to run or run with a 0 wait")
+stack_trace("addtimer called with a callback assigned to a qdeleted object. In the future such timers will not \
+be supported and may refuse to run or run with a 0 wait")

wait = max(CEILING(wait, world.tick_lag), world.tick_lag)

if(wait >= INFINITY)
CRASH("Attempted to create timer with INFINITY delay")

+// Generate hash if relevant for timed events with the TIMER_UNIQUE flag
var/hash

if (flags & TIMER_UNIQUE)
-var/list/hashlist
-if(flags & TIMER_NO_HASH_WAIT)
-hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, flags & TIMER_CLIENT_TIME)
-else
-hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, wait, flags & TIMER_CLIENT_TIME)
+var/list/hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, flags & TIMER_CLIENT_TIME)
+if(!(flags & TIMER_NO_HASH_WAIT))
+hashlist += wait
hashlist += callback.arguments
hash = hashlist.Join("|||||||")

var/datum/timedevent/hash_timer = SStimer.hashes[hash]
if(hash_timer)
-if (hash_timer.spent) //it's pending deletion, pretend it doesn't exist.
-hash_timer.hash = null //but keep it from accidentally deleting us
+if (hash_timer.spent) // it's pending deletion, pretend it doesn't exist.
+hash_timer.hash = null // but keep it from accidentally deleting us
else
if (flags & TIMER_OVERRIDE)
-hash_timer.hash = null //no need having it delete it's hash if we are going to replace it
+hash_timer.hash = null // no need having it delete it's hash if we are going to replace it
qdel(hash_timer)
else
if (hash_timer.flags & TIMER_STOPPABLE)
@@ -505,24 +565,23 @@ SUBSYSTEM_DEF(timer)
else if(flags & TIMER_OVERRIDE)
stack_trace("TIMER_OVERRIDE used without TIMER_UNIQUE")

-var/datum/timedevent/timer = new(callback, wait, flags, hash)
+var/datum/timedevent/timer = new(callback, wait, flags, hash, file && "[file]:[line]")
return timer.id

/**
* Delete a timer
*
* Arguments:
* * id a timerid or a /datum/timedevent
*/
/proc/deltimer(id)
if (!id)
return FALSE
if (id == TIMER_ID_NULL)
CRASH("Tried to delete a null timerid. Use TIMER_STOPPABLE flag")
-if (!istext(id))
-if (istype(id, /datum/timedevent))
-qdel(id)
-return TRUE
+if (istype(id, /datum/timedevent))
+qdel(id)
+return TRUE
//id is string
var/datum/timedevent/timer = SStimer.timer_id_dict[id]
if (timer && !timer.spent)
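An illustrative aside (not part of the commit): call sites keep using the addtimer macro, which per the doc comment above forwards source information to _addtimer. A sketch of the TIMER_UNIQUE / TIMER_OVERRIDE behaviour documented by the hashing code, using an invented type:

/obj/machinery/demo_reactor/proc/warn_meltdown()
	// TIMER_UNIQUE: at most one pending timer per object/proc/wait/arguments combination
	// (wait is excluded from the hash if TIMER_NO_HASH_WAIT is passed);
	// TIMER_OVERRIDE: a newer call replaces the pending timer instead of being dropped.
	addtimer(CALLBACK(src, .proc/meltdown), 30 SECONDS, TIMER_UNIQUE | TIMER_OVERRIDE)

/obj/machinery/demo_reactor/proc/meltdown()
	explosion(get_turf(src), 1, 2, 3)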
@@ -531,25 +590,22 @@ SUBSYSTEM_DEF(timer)
return FALSE

/**
* Get the remaining deciseconds on a timer
*
* Arguments:
* * id a timerid or a /datum/timedevent
*/
/proc/timeleft(id)
if (!id)
return null
if (id == TIMER_ID_NULL)
CRASH("Tried to get timeleft of a null timerid. Use TIMER_STOPPABLE flag")
-if (!istext(id))
-if (istype(id, /datum/timedevent))
-var/datum/timedevent/timer = id
-return timer.timeToRun - world.time
+if (istype(id, /datum/timedevent))
+var/datum/timedevent/timer = id
+return timer.timeToRun - world.time
//id is string
var/datum/timedevent/timer = SStimer.timer_id_dict[id]
-if (timer && !timer.spent)
-return timer.timeToRun - world.time
-return null
+return (timer && !timer.spent) ? timer.timeToRun - world.time : null

#undef BUCKET_LEN
#undef BUCKET_POS
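A companion sketch for the stoppable-timer helpers above (again illustrative, not part of the commit): deltimer and timeleft accept either the id returned when TIMER_STOPPABLE was passed or a /datum/timedevent directly; timeleft() returns deciseconds or null. Type and proc names below are invented.

/obj/machinery/demo_door
	var/autoclose_id

/obj/machinery/demo_door/proc/schedule_autoclose()
	autoclose_id = addtimer(CALLBACK(src, .proc/shut), 15 SECONDS, TIMER_STOPPABLE)

/obj/machinery/demo_door/proc/cancel_autoclose()
	if(deltimer(autoclose_id))
		autoclose_id = null

/obj/machinery/demo_door/proc/seconds_until_close()
	var/left = timeleft(autoclose_id)
	return isnull(left) ? null : left / 10 // convert deciseconds to seconds

/obj/machinery/demo_door/proc/shut()
	return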
@@ -47,7 +47,7 @@ GLOBAL_LIST_EMPTY(potential_mods_per_skill)
if(!mod_L)
mod_L = GLOB.potential_mods_per_skill[target_skills] = list()
else
-BINARY_INSERT(identifier, mod_L, datum/skill_modifier, src, priority, COMPARE_VALUE)
+BINARY_INSERT(identifier, mod_L, /datum/skill_modifier, src, priority, COMPARE_VALUE)
mod_L[identifier] = src
GLOB.potential_skills_per_mod[target_skills_key] = list(target_skills)
else //Should be a list.
@@ -66,7 +66,7 @@ GLOBAL_LIST_EMPTY(potential_mods_per_skill)
if(!mod_L)
mod_L = GLOB.potential_mods_per_skill[path] = list()
else
-BINARY_INSERT(identifier, mod_L, datum/skill_modifier, src, priority, COMPARE_VALUE)
+BINARY_INSERT(identifier, mod_L, /datum/skill_modifier, src, priority, COMPARE_VALUE)
mod_L[identifier] = src

/datum/skill_modifier/Destroy()

@@ -15,5 +15,5 @@
message_admins("An alien egg has been delivered to [ADMIN_VERBOSEJMP(T)].")
log_game("An alien egg has been delivered to [AREACOORD(T)]")
var/message = "Attention [station_name()], we have entrusted you with a research specimen in [get_area_name(T, TRUE)]. Remember to follow all safety precautions when dealing with the specimen."
-SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/addtimer, CALLBACK(GLOBAL_PROC, /proc/print_command_report, message), announcement_time))
+SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/_addtimer, CALLBACK(GLOBAL_PROC, /proc/print_command_report, message), announcement_time))
return INITIALIZE_HINT_QDEL
@@ -1,20 +1,46 @@
|
|||||||
#define RESTART_COUNTER_PATH "data/round_counter.txt"
|
#define RESTART_COUNTER_PATH "data/round_counter.txt"
|
||||||
|
|
||||||
GLOBAL_VAR(restart_counter)
|
GLOBAL_VAR(restart_counter)
|
||||||
|
|
||||||
GLOBAL_VAR_INIT(tgs_initialized, FALSE)
|
GLOBAL_VAR_INIT(tgs_initialized, FALSE)
|
||||||
|
|
||||||
GLOBAL_VAR(topic_status_lastcache)
|
GLOBAL_VAR(topic_status_lastcache)
|
||||||
GLOBAL_LIST(topic_status_cache)
|
GLOBAL_LIST(topic_status_cache)
|
||||||
|
|
||||||
//This happens after the Master subsystem new(s) (it's a global datum)
|
/**
|
||||||
//So subsystems globals exist, but are not initialised
|
* World creation
|
||||||
|
*
|
||||||
|
* Here is where a round itself is actually begun and setup.
|
||||||
|
* * db connection setup
|
||||||
|
* * config loaded from files
|
||||||
|
* * loads admins
|
||||||
|
* * Sets up the dynamic menu system
|
||||||
|
* * and most importantly, calls initialize on the master subsystem, starting the game loop that causes the rest of the game to begin processing and setting up
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* Nothing happens until something moves. ~Albert Einstein
|
||||||
|
*
|
||||||
|
* For clarity, this proc gets triggered later in the initialization pipeline, it is not the first thing to happen, as it might seem.
|
||||||
|
*
|
||||||
|
* Initialization Pipeline:
|
||||||
|
* Global vars are new()'ed, (including config, glob, and the master controller will also new and preinit all subsystems when it gets new()ed)
|
||||||
|
* Compiled in maps are loaded (mainly centcom). all areas/turfs/objs/mobs(ATOMs) in these maps will be new()ed
|
||||||
|
* world/New() (You are here)
|
||||||
|
* Once world/New() returns, client's can connect.
|
||||||
|
* 1 second sleep
|
||||||
|
* Master Controller initialization.
|
||||||
|
* Subsystem initialization.
|
||||||
|
* Non-compiled-in maps are maploaded, all atoms are new()ed
|
||||||
|
* All atoms in both compiled and uncompiled maps are initialized()
|
||||||
|
*/
|
||||||
/world/New()
|
/world/New()
|
||||||
if (fexists(EXTOOLS))
|
var/extools = world.GetConfig("env", "EXTOOLS_DLL") || (world.system_type == MS_WINDOWS ? "./byond-extools.dll" : "./libbyond-extools.so")
|
||||||
call(EXTOOLS, "maptick_initialize")()
|
if (fexists(extools))
|
||||||
|
call(extools, "maptick_initialize")()
|
||||||
#ifdef EXTOOLS_LOGGING
|
#ifdef EXTOOLS_LOGGING
|
||||||
call(EXTOOLS, "init_logging")()
|
call(extools, "init_logging")()
|
||||||
else
|
else
|
||||||
CRASH("[EXTOOLS] does not exist!")
|
CRASH("[extools] does not exist!")
|
||||||
#endif
|
#endif
|
||||||
enable_debugger()
|
enable_debugger()
|
||||||
#ifdef REFERENCE_TRACKING
|
#ifdef REFERENCE_TRACKING
|
||||||
@@ -25,10 +51,9 @@ GLOBAL_LIST(topic_status_cache)
|
|||||||
|
|
||||||
log_world("World loaded at [TIME_STAMP("hh:mm:ss", FALSE)]!")
|
log_world("World loaded at [TIME_STAMP("hh:mm:ss", FALSE)]!")
|
||||||
|
|
||||||
GLOB.config_error_log = GLOB.world_manifest_log = GLOB.world_pda_log = GLOB.world_job_debug_log = GLOB.sql_error_log = GLOB.world_href_log = GLOB.world_runtime_log = GLOB.world_attack_log = GLOB.world_game_log = "data/logs/config_error.[GUID()].log" //temporary file used to record errors with loading config, moved to log directory once logging is set bl
|
|
||||||
|
|
||||||
make_datum_references_lists() //initialises global lists for referencing frequently used datums (so that we only ever do it once)
|
make_datum_references_lists() //initialises global lists for referencing frequently used datums (so that we only ever do it once)
|
||||||
|
|
||||||
|
GLOB.config_error_log = GLOB.world_manifest_log = GLOB.world_pda_log = GLOB.world_job_debug_log = GLOB.sql_error_log = GLOB.world_href_log = GLOB.world_runtime_log = GLOB.world_attack_log = GLOB.world_game_log = "data/logs/config_error.[GUID()].log" //temporary file used to record errors with loading config, moved to log directory once logging is set bl
|
||||||
|
|
||||||
GLOB.revdata = new
|
GLOB.revdata = new
|
||||||
|
|
||||||
@@ -36,6 +61,9 @@ GLOBAL_LIST(topic_status_cache)
|
|||||||
|
|
||||||
config.Load(params[OVERRIDE_CONFIG_DIRECTORY_PARAMETER])
|
config.Load(params[OVERRIDE_CONFIG_DIRECTORY_PARAMETER])
|
||||||
|
|
||||||
|
load_admins()
|
||||||
|
load_mentors()
|
||||||
|
|
||||||
//SetupLogs depends on the RoundID, so lets check
|
//SetupLogs depends on the RoundID, so lets check
|
||||||
//DB schema and set RoundID if we can
|
//DB schema and set RoundID if we can
|
||||||
SSdbcore.CheckSchemaVersion()
|
SSdbcore.CheckSchemaVersion()
|
||||||
@@ -49,14 +77,9 @@ GLOBAL_LIST(topic_status_cache)
|
|||||||
world.log = file("[GLOB.log_directory]/dd.log") //not all runtimes trigger world/Error, so this is the only way to ensure we can see all of them.
|
world.log = file("[GLOB.log_directory]/dd.log") //not all runtimes trigger world/Error, so this is the only way to ensure we can see all of them.
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
load_admins()
|
|
||||||
load_mentors()
|
|
||||||
LoadVerbs(/datum/verbs/menu)
|
LoadVerbs(/datum/verbs/menu)
|
||||||
if(CONFIG_GET(flag/usewhitelist))
|
if(CONFIG_GET(flag/usewhitelist))
|
||||||
load_whitelist()
|
load_whitelist()
|
||||||
LoadBans()
|
|
||||||
initialize_global_loadout_items()
|
|
||||||
reload_custom_roundstart_items_list()//Cit change - loads donator items. Remind me to remove when I port over bay's loadout system
|
|
||||||
|
|
||||||
GLOB.timezoneOffset = text2num(time2text(0,"hh")) * 36000
|
GLOB.timezoneOffset = text2num(time2text(0,"hh")) * 36000
|
||||||
|
|
||||||
@@ -67,6 +90,10 @@ GLOBAL_LIST(topic_status_cache)
|
|||||||
if(NO_INIT_PARAMETER in params)
|
if(NO_INIT_PARAMETER in params)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
LoadBans()
|
||||||
|
initialize_global_loadout_items()
|
||||||
|
reload_custom_roundstart_items_list()//Cit change - loads donator items. Remind me to remove when I port over bay's loadout system
|
||||||
|
|
||||||
Master.Initialize(10, FALSE, TRUE)
|
Master.Initialize(10, FALSE, TRUE)
|
||||||
|
|
||||||
if(TEST_RUN_PARAMETER in params)
|
if(TEST_RUN_PARAMETER in params)
|
||||||
@@ -88,7 +115,7 @@ GLOBAL_LIST(topic_status_cache)
|
|||||||
#else
|
#else
|
||||||
cb = VARSET_CALLBACK(SSticker, force_ending, TRUE)
|
cb = VARSET_CALLBACK(SSticker, force_ending, TRUE)
|
||||||
#endif
|
#endif
|
||||||
SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/addtimer, cb, 10 SECONDS))
|
SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/_addtimer, cb, 10 SECONDS))
|
||||||
|
|
||||||
/world/proc/SetupLogs()
|
/world/proc/SetupLogs()
|
||||||
var/override_dir = params[OVERRIDE_LOG_DIRECTORY_PARAMETER]
|
var/override_dir = params[OVERRIDE_LOG_DIRECTORY_PARAMETER]
|
||||||
@@ -136,7 +163,7 @@ GLOBAL_LIST(topic_status_cache)
|
|||||||
|
|
||||||
|
|
||||||
#ifdef UNIT_TESTS
|
#ifdef UNIT_TESTS
|
||||||
GLOB.test_log = file("[GLOB.log_directory]/tests.log")
|
GLOB.test_log = "[GLOB.log_directory]/tests.log"
|
||||||
start_log(GLOB.test_log)
|
start_log(GLOB.test_log)
|
||||||
#endif
|
#endif
|
||||||
start_log(GLOB.world_game_log)
|
start_log(GLOB.world_game_log)
|
||||||
@@ -170,14 +197,6 @@ GLOBAL_LIST(topic_status_cache)
/world/Topic(T, addr, master, key)
TGS_TOPIC //redirect to server tools if necessary

-if(!SSfail2topic)
-return "Server not initialized."
-else if(SSfail2topic.IsRateLimited(addr))
-return "Rate limited."

-if(length(T) > CONFIG_GET(number/topic_max_size))
-return "Payload too large!"

var/static/list/topic_handlers = TopicHandlers()

var/list/input = params2list(T)
@@ -194,7 +213,7 @@ GLOBAL_LIST(topic_status_cache)
return

handler = new handler()
-return handler.TryRun(input, addr)
+return handler.TryRun(input)

/world/proc/AnnouncePR(announcement, list/payload)
var/static/list/PRcounts = list() //PR id -> number of times announced this round
@@ -280,11 +299,11 @@ GLOBAL_LIST(topic_status_cache)

var/list/features = list()

-/*if(GLOB.master_mode) CIT CHANGE - hides the gamemode from the hub entry, removes some useless info from the hub entry
-features += GLOB.master_mode
+// if(GLOB.master_mode)
+// features += GLOB.master_mode

-if (!GLOB.enter_allowed)
-features += "closed"*/
+// if (!GLOB.enter_allowed)
+// features += "closed"

var/s = ""
var/hostedby
@@ -292,25 +311,22 @@ GLOBAL_LIST(topic_status_cache)
var/server_name = CONFIG_GET(string/servername)
if (server_name)
s += "<b>[server_name]</b> — "
-/*features += "[CONFIG_GET(flag/norespawn) ? "no " : ""]respawn" CIT CHANGE - removes some useless info from the hub entry
-if(CONFIG_GET(flag/allow_vote_mode))
-features += "vote"
-if(CONFIG_GET(flag/allow_ai))
-features += "AI allowed"*/
+// features += "[CONFIG_GET(flag/norespawn) ? "no " : ""]respawn"
+// if(CONFIG_GET(flag/allow_vote_mode))
+// features += "vote"
+// if(CONFIG_GET(flag/allow_ai))
+// features += "AI allowed"
hostedby = CONFIG_GET(string/hostedby)

s += "<b>[station_name()]</b>";
s += " ("
-s += "<a href=\"https://citadel-station.net/home/\">" //Change this to wherever you want the hub to link to. CIT CHANGE - links to cit's website on the hub
-s += "Citadel" //Replace this with something else. Or ever better, delete it and uncomment the game version. CIT CHANGE - modifies the hub entry link
+s += "<a href=\"https://citadel-station.net/home\">" //Change this to wherever you want the hub to link to.
+s += "Citadel" //Replace this with something else. Or ever better, delete it and uncomment the game version.
s += "</a>"
s += ")\]" //CIT CHANGE - encloses the server title in brackets to make the hub entry fancier
s += "<br>[CONFIG_GET(string/servertagline)]<br>" //CIT CHANGE - adds a tagline!

-var/n = 0
-for (var/mob/M in GLOB.player_list)
-if (M.client)
-n++
+var/players = GLOB.clients.len

if(SSmapping.config) // this just stops the runtime, honk.
features += "[SSmapping.config.map_name]" //CIT CHANGE - makes the hub entry display the current map
@@ -318,16 +334,23 @@ GLOBAL_LIST(topic_status_cache)
if(NUM2SECLEVEL(GLOB.security_level))//CIT CHANGE - makes the hub entry show the security level
features += "[NUM2SECLEVEL(GLOB.security_level)] alert"

-if (n > 1)
-features += "~[n] players"
-else if (n > 0)
-features += "~[n] player"
+var/popcaptext = ""
+var/popcap = max(CONFIG_GET(number/extreme_popcap), CONFIG_GET(number/hard_popcap), CONFIG_GET(number/soft_popcap))
+if (popcap)
+popcaptext = "/[popcap]"

+if (players > 1)
+features += "[players][popcaptext] players"
+else if (players > 0)
+features += "[players][popcaptext] player"

+game_state = (CONFIG_GET(number/extreme_popcap) && players >= CONFIG_GET(number/extreme_popcap)) //tells the hub if we are full

if (!host && hostedby)
features += "hosted by <b>[hostedby]</b>"

if (features)
-s += "\[[jointext(features, ", ")]" //CIT CHANGE - replaces the colon here with a left bracket
+s += "\[[jointext(features, ", ")]"

status = s

@@ -344,6 +367,26 @@ GLOBAL_LIST(topic_status_cache)
maxz++
SSmobs.MaxZChanged()
SSidlenpcpool.MaxZChanged()
-world.refresh_atmos_grid()

-/world/proc/refresh_atmos_grid()
+/world/proc/change_fps(new_value = 20)
+if(new_value <= 0)
+CRASH("change_fps() called with [new_value] new_value.")
+if(fps == new_value)
+return //No change required.

+fps = new_value
+on_tickrate_change()


+/world/proc/change_tick_lag(new_value = 0.5)
+if(new_value <= 0)
+CRASH("change_tick_lag() called with [new_value] new_value.")
+if(tick_lag == new_value)
+return //No change required.

+tick_lag = new_value
+on_tickrate_change()


+/world/proc/on_tickrate_change()
+SStimer?.reset_buckets()
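
A quick usage sketch for the two setters added above (illustrative, not part of the commit): in BYOND, fps and tick_lag describe the same tick rate, with tick_lag measured in tenths of a second, so tick_lag = 10 / fps; that is why both procs funnel into on_tickrate_change(), which rebuilds SStimer's buckets.

	world.change_fps(40)        // tick_lag becomes 0.25 (10 / 40)
	world.change_tick_lag(0.5)  // equivalent to 20 fps, the default used elsewhere in this commit
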
@@ -69,7 +69,7 @@ GLOBAL_LIST_EMPTY(actionspeed_modification_cache)
return TRUE
remove_actionspeed_modifier(existing, FALSE)
if(length(actionspeed_modification))
-BINARY_INSERT(type_or_datum.id, actionspeed_modification, datum/actionspeed_modifier, type_or_datum, priority, COMPARE_VALUE)
+BINARY_INSERT(type_or_datum.id, actionspeed_modification, /datum/actionspeed_modifier, type_or_datum, priority, COMPARE_VALUE)
LAZYSET(actionspeed_modification, type_or_datum.id, type_or_datum)
if(update)
update_actionspeed()
@@ -3,8 +3,9 @@
GLOBAL_LIST_EMPTY(deletion_failures)

/world/proc/enable_reference_tracking()
-if (fexists(EXTOOLS))
-call(EXTOOLS, "ref_tracking_initialize")()
+var/extools = world.GetConfig("env", "EXTOOLS_DLL") || (world.system_type == MS_WINDOWS ? "./byond-extools.dll" : "./libbyond-extools.so")
+if (fexists(extools))
+call(extools, "ref_tracking_initialize")()

/proc/get_back_references(datum/D)
CRASH("/proc/get_back_references not hooked by extools, reference tracking will not function!")
@@ -109,7 +110,7 @@ GLOBAL_LIST_EMPTY(deletion_failures)
set name = "Find References"
set src in world

-find_references(FALSE)
+find_references_legacy(FALSE)


/datum/proc/find_references_legacy(skip_alert)
@@ -164,7 +165,7 @@ GLOBAL_LIST_EMPTY(deletion_failures)

qdel(src, TRUE) //force a qdel
if(!running_find_references)
-find_references(TRUE)
+find_references_legacy(TRUE)


/datum/verb/qdel_then_if_fail_find_references()
@@ -1,5 +1,5 @@

-/// Process asset cache client topic calls for "asset_cache_confirm_arrival=[INT]"
+/// Process asset cache client topic calls for `"asset_cache_confirm_arrival=[INT]"`
/client/proc/asset_cache_confirm_arrival(job_id)
var/asset_cache_job = round(text2num(job_id))
//because we skip the limiter, we have to make sure this is a valid arrival and not somebody tricking us into letting them append to a list without limit.
@@ -10,7 +10,7 @@
return asset_cache_job || TRUE


-/// Process asset cache client topic calls for "asset_cache_preload_data=[HTML+JSON_STRING]
+/// Process asset cache client topic calls for `"asset_cache_preload_data=[HTML+JSON_STRING]"`
/client/proc/asset_cache_preload_data(data)
var/json = data
var/list/preloaded_assets = json_decode(json)
@@ -47,8 +47,9 @@
"smmon_3.gif" = 'icons/program_icons/smmon_3.gif',
"smmon_4.gif" = 'icons/program_icons/smmon_4.gif',
"smmon_5.gif" = 'icons/program_icons/smmon_5.gif',
-"smmon_6.gif" = 'icons/program_icons/smmon_6.gif',
-"borg_mon.gif" = 'icons/program_icons/borg_mon.gif'
+"smmon_6.gif" = 'icons/program_icons/smmon_6.gif'
+// "borg_mon.gif" = 'icons/program_icons/borg_mon.gif',
+// "robotact.gif" = 'icons/program_icons/robotact.gif'
)

/datum/asset/simple/radar_assets
@@ -156,7 +157,6 @@
)

/datum/asset/simple/namespaced/fontawesome
-legacy = TRUE
assets = list(
"fa-regular-400.eot" = 'html/font-awesome/webfonts/fa-regular-400.eot',
"fa-regular-400.woff" = 'html/font-awesome/webfonts/fa-regular-400.woff',
@@ -248,6 +248,10 @@
"rule8" = 'icons/UI_Icons/Achievements/Misc/rule8.png',
"snail" = 'icons/UI_Icons/Achievements/Misc/snail.png',
"ascension" = 'icons/UI_Icons/Achievements/Misc/ascension.png',
+"ashascend" = 'icons/UI_Icons/Achievements/Misc/ashascend.png',
+"fleshascend" = 'icons/UI_Icons/Achievements/Misc/fleshascend.png',
+"rustascend" = 'icons/UI_Icons/Achievements/Misc/rustascend.png',
+"voidascend" = 'icons/UI_Icons/Achievements/Misc/voidascend.png',
"mining" = 'icons/UI_Icons/Achievements/Skills/mining.png',
"assistant" = 'icons/UI_Icons/Achievements/Mafia/assistant.png',
"changeling" = 'icons/UI_Icons/Achievements/Mafia/changeling.png',
@@ -288,7 +292,7 @@
)

/datum/asset/spritesheet/simple/pills
-name ="pills"
+name = "pills"
assets = list(
"pill1" = 'icons/UI_Icons/Pills/pill1.png',
"pill2" = 'icons/UI_Icons/Pills/pill2.png',
@@ -313,7 +317,28 @@
"pill21" = 'icons/UI_Icons/Pills/pill21.png',
"pill22" = 'icons/UI_Icons/Pills/pill22.png',
)
+/*
+/datum/asset/spritesheet/simple/condiments
+name = "condiments"
+assets = list(
+CONDIMASTER_STYLE_FALLBACK = 'icons/UI_Icons/Condiments/emptycondiment.png',
+"enzyme" = 'icons/UI_Icons/Condiments/enzyme.png',
+"flour" = 'icons/UI_Icons/Condiments/flour.png',
+"mayonnaise" = 'icons/UI_Icons/Condiments/mayonnaise.png',
+"milk" = 'icons/UI_Icons/Condiments/milk.png',
+"blackpepper" = 'icons/UI_Icons/Condiments/peppermillsmall.png',
+"rice" = 'icons/UI_Icons/Condiments/rice.png',
+"sodiumchloride" = 'icons/UI_Icons/Condiments/saltshakersmall.png',
+"soymilk" = 'icons/UI_Icons/Condiments/soymilk.png',
+"soysauce" = 'icons/UI_Icons/Condiments/soysauce.png',
+"sugar" = 'icons/UI_Icons/Condiments/sugar.png',
+"ketchup" = 'icons/UI_Icons/Condiments/ketchup.png',
+"capsaicin" = 'icons/UI_Icons/Condiments/hotsauce.png',
+"frostoil" = 'icons/UI_Icons/Condiments/coldsauce.png',
+"bbqsauce" = 'icons/UI_Icons/Condiments/bbqsauce.png',
+"cornoil" = 'icons/UI_Icons/Condiments/oliveoil.png',
+)
+*/
//this exists purely to avoid meta by pre-loading all language icons.
/datum/asset/language/register()
for(var/path in typesof(/datum/language))
@@ -484,3 +509,31 @@
/datum/asset/spritesheet/mafia/register()
InsertAll("", 'icons/obj/mafia.dmi')
..()

+/datum/asset/simple/portraits
+var/tab = "use subtypes of this please"
+assets = list()

+/datum/asset/simple/portraits/New()
+if(!SSpersistence.paintings || !SSpersistence.paintings[tab] || !length(SSpersistence.paintings[tab]))
+return
+for(var/p in SSpersistence.paintings[tab])
+var/list/portrait = p
+var/png = "data/paintings/[tab]/[portrait["md5"]].png"
+if(fexists(png))
+assets[portrait["title"]] = png
+..() //this is where it registers all these assets we added to the list

+/datum/asset/simple/portraits/library
+tab = "library"

+/datum/asset/simple/portraits/library_secure
+tab = "library_secure"

+/datum/asset/simple/portraits/library_private
+tab = "library_private"

+/datum/asset/simple/safe
+assets = list(
+"safe_dial.png" = 'html/safe_dial.png'
+)
@@ -24,7 +24,7 @@ Call .get_url_mappings() to get an associated list with the urls your assets can

See the documentation for `/datum/asset_transport` for the backend api the asset datums utilize.

The global variable `SSassets.transport` contains the currently configured transport.


@@ -32,6 +32,6 @@ The global variable `SSassets.transport` contains the currently configured trans

Because byond browse() calls use non-blocking queues, if your code uses output() (which bypasses all of these queues) to invoke javascript functions you will need to first have the javascript announce to the server it has loaded before trying to invoke js functions.

To make your code work with any CDNs configured by the server, you must make sure assets are referenced from the url returned by `get_url_mappings()` or by asset_transport's `get_asset_url()`. (TGUI also has helpers for this.) If this can not be easily done, you can bypass the cdn using legacy assets, see the simple asset datum for details.

CSS files that use url() can be made to use the CDN without needing to rewrite all url() calls in code by using the namespaced helper datum. See the documentation for `/datum/asset/simple/namespaced` for details.
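
A minimal sketch of the CDN-aware pattern described above, modelled on the asset datums added in this commit (the demo names are illustrative and not part of the codebase; only get_asset_url(), SSassets.transport and the assets list format come from the documentation and datums in this diff):

	/datum/asset/simple/safe_dial_demo	// hypothetical asset datum, mirroring /datum/asset/simple/safe above
		assets = list(
			"safe_dial.png" = 'html/safe_dial.png',
		)

	/proc/safe_dial_img_html()
		// Resolve the file through the configured transport so any CDN the server sets up is respected.
		var/url = SSassets.transport.get_asset_url("safe_dial.png")
		return "<img src='[url]'>"

If an asset can only be served the old way, the documentation above notes that legacy/simple assets can bypass the CDN instead.
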
@@ -92,7 +92,7 @@
log_admin_private("[key_name(usr)] cancelled event [name].")
SSblackbox.record_feedback("tally", "event_admin_cancelled", 1, typepath)

-/datum/round_event_control/proc/runEvent()
+/datum/round_event_control/proc/runEvent(random = FALSE)
var/datum/round_event/E = new typepath()
E.current_players = get_active_player_count(alive_check = 1, afk_check = 1, human_check = 1)
E.control = src
@@ -175,7 +175,7 @@
/datum/job/proc/announce_head(var/mob/living/carbon/human/H, var/channels) //tells the given channel that the given mob is the new department head. See communications.dm for valid channels.
if(H && GLOB.announcement_systems.len)
//timer because these should come after the captain announcement
-SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, .proc/addtimer, CALLBACK(pick(GLOB.announcement_systems), /obj/machinery/announcement_system/proc/announce, "NEWHEAD", H.real_name, H.job, channels), 1))
+SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, .proc/_addtimer, CALLBACK(pick(GLOB.announcement_systems), /obj/machinery/announcement_system/proc/announce, "NEWHEAD", H.real_name, H.job, channels), 1))

//If the configuration option is set to require players to be logged as old enough to play certain jobs, then this proc checks that they are, otherwise it just returns 1
/datum/job/proc/player_old_enough(client/C)
@@ -135,7 +135,7 @@
var/curr_z = text2num(dmmRegex.group[5])
if(curr_z < z_lower || curr_z > z_upper)
continue

var/curr_x = text2num(dmmRegex.group[3])
var/curr_y = text2num(dmmRegex.group[4])

@@ -171,7 +171,7 @@
if(width > right_width)
for(var/i in 1 to lines)
gridLines[i] = copytext(gridLines[i], 1, key_len * right_width)

// during the actual load we're starting at the top and working our way down
gridSet.ycrd += lines - 1

@@ -305,9 +305,6 @@
testing("Skipped loading [turfsSkipped] default turfs")
#endif

-if(did_expand)
-world.refresh_atmos_grid()

return TRUE

/datum/parsed_map/proc/build_cache(no_changeturf, bad_paths=null)
@@ -1,12 +1,22 @@
//This file is just for the necessary /world definition
//Try looking in game/world.dm

+/**
+* # World
+*
+* Two possibilities exist: either we are alone in the Universe or we are not. Both are equally terrifying. ~ Arthur C. Clarke
+*
+* The byond world object stores some basic byond level config, and has a few hub specific procs for managing hub visiblity
+*
+* The world /New() is the root of where a round itself begins
+*/
/world
mob = /mob/dead/new_player
turf = /turf/open/space/basic
area = /area/space
view = "15x15"
hub = "Exadv1.spacestation13"
+hub_password = "kMZy3U5jJHSiBQjr"
name = "/tg/ Station 13"
fps = 20
#ifdef FIND_REF_NO_CHECK_TICK
BIN html/safe_dial.png (new file, binary content not shown; 131 KiB)
@@ -17,7 +17,6 @@
#include "_maps\_basemap.dm"
#include "code\_compile_options.dm"
#include "code\world.dm"
-#include "code\__DEFINES\_extools.dm"
#include "code\__DEFINES\_globals.dm"
#include "code\__DEFINES\_protect.dm"
#include "code\__DEFINES\_tick.dm"