Mirror of https://github.com/yogstation13/Yogstation.git (synced 2025-02-26 09:04:50 +00:00)
@@ -2,12 +2,12 @@
#define MC_SPLIT_TICK_INIT(phase_count) var/original_tick_limit = Master.current_ticklimit; var/split_tick_phases = ##phase_count

#define MC_SPLIT_TICK \
	if(split_tick_phases > 1){\
		Master.current_ticklimit = ((original_tick_limit - TICK_USAGE) / split_tick_phases) + TICK_USAGE;\
		--split_tick_phases;\
	} else {\
		Master.current_ticklimit = original_tick_limit;\
	}
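// Illustrative sketch (not part of the diff): how a subsystem's fire() might split
// its tick budget across two phases with these macros. The subsystem type and the
// two phase helpers are hypothetical.
/datum/controller/subsystem/example_split/fire(resumed)
	MC_SPLIT_TICK_INIT(2)
	MC_SPLIT_TICK
	process_phase_one() // hypothetical helper, gets half the remaining tick
	MC_SPLIT_TICK
	process_phase_two() // hypothetical helper, gets the rest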
// Used to smooth out costs to try and avoid oscillation.
#define MC_AVERAGE_FAST(average, current) (0.7 * (average) + 0.3 * (current))
@@ -17,43 +17,65 @@
#define MC_AVG_FAST_UP_SLOW_DOWN(average, current) (average > current ? MC_AVERAGE_SLOW(average, current) : MC_AVERAGE_FAST(average, current))
#define MC_AVG_SLOW_UP_FAST_DOWN(average, current) (average < current ? MC_AVERAGE_SLOW(average, current) : MC_AVERAGE_FAST(average, current))

///Creates a running average of "things elapsed" per time period when you need to count via a smaller time period.
///E.g. you want an average number of things happening per second, but you measure the event every tick (50 milliseconds).
///Make sure both time intervals are in the same units. Doesn't work if current_duration > total_duration or if total_duration == 0.
#define MC_AVG_OVER_TIME(average, current, total_duration, current_duration) ((((total_duration) - (current_duration)) / (total_duration)) * (average) + (current))

#define MC_AVG_MINUTES(average, current, current_duration) (MC_AVG_OVER_TIME(average, current, 1 MINUTES, current_duration))

#define MC_AVG_SECONDS(average, current, current_duration) (MC_AVG_OVER_TIME(average, current, 1 SECONDS, current_duration))

#define NEW_SS_GLOBAL(varname) if(varname != src){if(istype(varname)){Recover();qdel(varname);}varname = src;}
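// Illustrative sketch (not part of the diff): keeping a per-second running average
// from per-fire measurements. The subsystem, var and count_events() are hypothetical;
// wait is in deciseconds, the same unit as 1 SECONDS.
/datum/controller/subsystem/example_avg
	wait = 5 // fires every 5 deciseconds
	var/avg_per_second = 0

/datum/controller/subsystem/example_avg/fire(resumed)
	var/events_this_fire = count_events() // hypothetical helper
	avg_per_second = MC_AVG_SECONDS(avg_per_second, events_this_fire, wait)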
#define START_PROCESSING(Processor, Datum) if (!(Datum.datum_flags & DF_ISPROCESSING)) {Datum.datum_flags |= DF_ISPROCESSING;Processor.processing += Datum}
#define STOP_PROCESSING(Processor, Datum) Datum.datum_flags &= ~DF_ISPROCESSING;Processor.processing -= Datum
#define STOP_PROCESSING(Processor, Datum) Datum.datum_flags &= ~DF_ISPROCESSING;Processor.processing -= Datum;Processor.currentrun -= Datum

//legacy, do not use. for dumb 'processing'-like subsystems that don't actually queue
#define STOP_PROCESSING_DUMB(Processor, Datum) Datum.datum_flags &= ~DF_ISPROCESSING;Processor.processing -= Datum
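// Illustrative sketch (not part of the diff): an item registering with a processing
// subsystem on creation and deregistering on Destroy(). The item type is hypothetical.
/obj/item/example_ticker/Initialize(mapload)
	. = ..()
	START_PROCESSING(SSobj, src)

/obj/item/example_ticker/Destroy()
	STOP_PROCESSING(SSobj, src)
	return ..()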
/// Returns true if the MC is initialized and running.
/// Optional argument init_stage controls what init stage the MC must have completed to count as initialized. Defaults to INITSTAGE_MAX if not specified.
#define MC_RUNNING(INIT_STAGE...) (Master && Master.processing > 0 && Master.current_runlevel && Master.init_stage_completed == (max(min(INITSTAGE_MAX, ##INIT_STAGE), 1)))

#define MC_LOOP_RTN_NEWSTAGES 1
#define MC_LOOP_RTN_GRACEFUL_EXIT 2
//! SubSystem flags (Please design any new flags so that the default is off, to make adding flags to subsystems easier)

/// subsystem does not initialize.
#define SS_NO_INIT 1
#define SS_NO_INIT (1 << 0)

/** subsystem does not fire. */
/// (like can_fire = 0, but keeps it from getting added to the processing subsystems list)
/// (Requires a MC restart to change)
#define SS_NO_FIRE 2
#define SS_NO_FIRE (1 << 1)

/** subsystem only runs on spare cpu (after all non-background subsystems have run that tick) */
/// SS_BACKGROUND has its own priority bracket
#define SS_BACKGROUND 4

/// subsystem does not tick check, and should not run unless there is enough time (or it's running behind (unless background))
#define SS_NO_TICK_CHECK 8

/** Subsystem only runs on spare cpu (after all non-background subsystems have run that tick) */
/// SS_BACKGROUND has its own priority bracket, this overrides SS_TICKER's priority bump
#define SS_BACKGROUND (1 << 2)

/** Treat wait as a tick count, not DS, run every wait ticks. */
/// (also forces it to run first in the tick, above even SS_NO_TICK_CHECK subsystems)
/// (also forces it to run first in the tick (unless SS_BACKGROUND))
/// (We don't want to be choked out by other subsystems queuing into us)
/// (implies all runlevels because of how it works)
/// (overrides SS_BACKGROUND)
/// This is designed for basically anything that works as a mini-mc (like SStimer)
#define SS_TICKER 16
#define SS_TICKER (1 << 3)

/** keep the subsystem's timing on point by firing early if it fired late last fire because of lag */
/// i.e.: if a 20ds subsystem fires, say, 5ds late due to lag or whatnot, its next fire would be in 15ds, not 20ds.
#define SS_KEEP_TIMING 32
#define SS_KEEP_TIMING (1 << 4)

/** Calculate its next fire after it's fired. */
/// (i.e.: if a 5ds wait SS takes 2ds to run, its next fire should be 5ds away, not 3ds like it normally would be)
/// This flag overrides SS_KEEP_TIMING
#define SS_POST_FIRE_TIMING 64
#define SS_POST_FIRE_TIMING (1 << 5)

/// If this subsystem doesn't initialize, it should not report as a hard error in CI.
/// This should be used for subsystems that are flaky for complicated reasons, such as
/// the Lua subsystem, which relies on auxtools, which is unstable.
/// It should not be used simply to silence CI.
#define SS_OK_TO_FAIL_INIT (1 << 6)
//! SUBSYSTEM STATES
#define SS_IDLE 0 /// aint doing shit.
@@ -63,6 +85,11 @@
#define SS_SLEEPING 4 /// fire() slept.
#define SS_PAUSING 5 /// in the middle of pausing

// Subsystem init stages
#define INITSTAGE_EARLY 1 //! Early init stuff that doesn't need to wait for mapload
#define INITSTAGE_MAIN 2 //! Main init stage
#define INITSTAGE_MAX 2 //! Highest initstage.
#define SUBSYSTEM_DEF(X) GLOBAL_REAL(SS##X, /datum/controller/subsystem/##X);\
/datum/controller/subsystem/##X/New(){\
	NEW_SS_GLOBAL(SS##X);\
code/__DEFINES/_helpers.dm (new file, 6 lines)
@@ -0,0 +1,6 @@
/// Takes a datum as input, returns its ref string, or a cached version of it
/// This allows us to cache \ref creation, which ensures it'll only ever happen once per datum, saving string tree time
/// It is slightly less optimal than a []'d datum, but the cost is massively outweighed by the potential savings
/// It will only work for datums mind, for datum reasons: because of the embedded typecheck
#define text_ref(datum) (isdatum(datum) ? (datum:cached_ref ||= "\ref[datum]") : ("\ref[datum]"))
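// Illustrative sketch (not part of the diff): using text_ref() instead of a raw
// "\ref[thing]" so the ref string is built once per datum and reused afterwards.
// The menu datum and proc are hypothetical.
/datum/example_menu/proc/entry_for(datum/thing)
	return list("name" = "[thing]", "ref" = text_ref(thing))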
@@ -1,7 +1,3 @@
#define SEND_SIGNAL(target, sigtype, arguments...) ( !target.comp_lookup || !target.comp_lookup[sigtype] ? NONE : target._SendSignal(sigtype, list(target, ##arguments)) )

#define SEND_GLOBAL_SIGNAL(sigtype, arguments...) ( SEND_SIGNAL(SSdcs, sigtype, ##arguments) )

#define COMPONENT_INCOMPATIBLE 1
#define COMPONENT_NOTRANSFER 2
code/__DEFINES/dcs/helpers.dm (new file, 11 lines)
@@ -0,0 +1,11 @@
/// Used to trigger signals and call procs registered for that signal
/// The datum hosting the signal is automatically added as the first argument
/// Returns a bitfield gathered from all registered procs
/// Arguments given here are packaged in a list and given to _SendSignal
#define SEND_SIGNAL(target, sigtype, arguments...) ( !target.comp_lookup || !target.comp_lookup[sigtype] ? NONE : target._SendSignal(sigtype, list(target, ##arguments)) )

#define SEND_GLOBAL_SIGNAL(sigtype, arguments...) ( SEND_SIGNAL(SSdcs, sigtype, ##arguments) )

/// Signifies that this proc is used to handle signals.
/// Every proc you pass to RegisterSignal must have this.
#define SIGNAL_HANDLER SHOULD_NOT_SLEEP(TRUE)
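// Illustrative sketch (not part of the diff): a SIGNAL_HANDLER proc wired up with
// RegisterSignal. The listener type and COMSIG_EXAMPLE_POKED are hypothetical.
/datum/example_listener/proc/watch(datum/target)
	RegisterSignal(target, COMSIG_EXAMPLE_POKED, PROC_REF(on_poked))

/datum/example_listener/proc/on_poked(datum/source)
	SIGNAL_HANDLER
	return NONE // must not sleep; defer real work to a timer or subsystem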
@@ -6,6 +6,8 @@
#define isweakref(D) (istype(D, /datum/weakref))

#define isdatum(thing) (istype(thing, /datum))

#define isappearance(A) (!isnum(A) && copytext("\ref[A]", 4, 6) == "3a")

#define isnan(x) ( isnum((x)) && ((x) != (x)) )
@@ -1,15 +1,28 @@
//defines that give qdel hints. these can be given as a return in Destroy() or by calling
//! Defines that give qdel hints.
//!
//! These can be given as a return in [/atom/proc/Destroy] or by calling [/proc/qdel].

/// `qdel` should queue the object for deletion.
#define QDEL_HINT_QUEUE 0
/// `qdel` should let the object live after calling [/atom/proc/Destroy].
#define QDEL_HINT_LETMELIVE 1
/// Functionally the same as the above. `qdel` should assume the object will gc on its own, and not check it.
#define QDEL_HINT_IWILLGC 2
/// Qdel should assume this object won't GC, and queue a hard delete using a hard reference.
#define QDEL_HINT_HARDDEL 3
// Qdel should assume this object won't gc, and hard delete it posthaste.
#define QDEL_HINT_HARDDEL_NOW 4

#define QDEL_HINT_QUEUE 0 //qdel should queue the object for deletion.
#define QDEL_HINT_LETMELIVE 1 //qdel should let the object live after calling Destroy.
#define QDEL_HINT_IWILLGC 2 //functionally the same as the above. qdel should assume the object will gc on its own, and not check it.
#define QDEL_HINT_HARDDEL 3 //qdel should assume this object won't gc, and queue a hard delete using a hard reference.
#define QDEL_HINT_HARDDEL_NOW 4 //qdel should assume this object won't gc, and hard del it posthaste.
#define QDEL_HINT_FINDREFERENCE 5 //functionally identical to QDEL_HINT_QUEUE if TESTING is not enabled in _compiler_options.dm.
	//if TESTING is enabled, qdel will call this object's find_references() verb.
#define QDEL_HINT_IFFAIL_FINDREFERENCE 6 //Above, but only if gc fails.
//defines for the gc_destroyed var
#ifdef REFERENCE_TRACKING
/** If REFERENCE_TRACKING is enabled, qdel will call this object's find_references() verb.
 *
 * Functionally identical to [QDEL_HINT_QUEUE] if [GC_FAILURE_HARD_LOOKUP] is not enabled in _compiler_options.dm.
 */
#define QDEL_HINT_FINDREFERENCE 5
/// Same behavior as [QDEL_HINT_FINDREFERENCE], but only if the GC fails and a hard delete is forced.
#define QDEL_HINT_IFFAIL_FINDREFERENCE 6
#endif
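// Illustrative sketch (not part of the diff): a Destroy() override returning a
// qdel hint, so qdel() leaves the object alive unless the deletion was forced.
// The item type is hypothetical.
/obj/item/example_persistent/Destroy(force)
	if(!force)
		return QDEL_HINT_LETMELIVE
	return ..()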
// Defines for the ssgarbage queues
#define GC_QUEUE_FILTER 1 //! short queue to filter out quick gc successes so they don't hang around in the main queue for 5 minutes
@@ -18,14 +31,18 @@
#define GC_QUEUE_COUNT 3 //! Number of queues, used for allocating the nested lists. Don't forget to increase this if you add a new queue stage

// Defines for the time an item has to get its reference cleaned before it fails the queue and moves to the next.
#define GC_FILTER_QUEUE 1 SECONDS
#define GC_CHECK_QUEUE 5 MINUTES
#define GC_DEL_QUEUE 10 SECONDS
#define GC_FILTER_QUEUE (1 SECONDS)
#define GC_CHECK_QUEUE (5 MINUTES)
#define GC_DEL_QUEUE (10 SECONDS)

#define GC_QUEUED_FOR_QUEUING -1

#define QDEL_ITEM_ADMINS_WARNED (1<<0) //! Set when admins are told about lag causing qdels in this type.
#define QDEL_ITEM_SUSPENDED_FOR_LAG (1<<1) //! Set when a type can no longer be hard deleted on failure because of lag it causes while this happens.

// Defines for the [gc_destroyed][/datum/var/gc_destroyed] var.
#define GC_CURRENTLY_BEING_QDELETED -2

#define QDELING(X) (X.gc_destroyed)
#define QDELETED(X) (!X || QDELING(X))
#define QDELETED(X) (isnull(X) || QDELING(X))
#define QDESTROYING(X) (!X || X.gc_destroyed == GC_CURRENTLY_BEING_QDELETED)
@@ -38,53 +38,159 @@
#define RUST_G (__rust_g || __detect_rust_g())
#endif

// Handle 515 call() -> call_ext() changes
#if DM_VERSION >= 515
#define RUSTG_CALL call_ext
#else
#define RUSTG_CALL call
#endif

/// Gets the version of rust_g
/proc/rustg_get_version() return RUSTG_CALL(RUST_G, "get_version")()
/**
 * Sets up the Aho-Corasick automaton with its default options.
 *
 * The search patterns list and the replacements must be of the same length when replace is run, but an empty replacements list is allowed if replacements are supplied with the replace call.
 * Arguments:
 * * key - The key for the automaton, to be used with subsequent rustg_acreplace/rustg_acreplace_with_replacements calls
 * * patterns - A non-associative list of strings to search for
 * * replacements - Default replacements for this automaton, used with rustg_acreplace
 */
#define rustg_setup_acreplace(key, patterns, replacements) RUSTG_CALL(RUST_G, "setup_acreplace")(key, json_encode(patterns), json_encode(replacements))

/**
 * Sets up the Aho-Corasick automaton using supplied options.
 *
 * The search patterns list and the replacements must be of the same length when replace is run, but an empty replacements list is allowed if replacements are supplied with the replace call.
 * Arguments:
 * * key - The key for the automaton, to be used with subsequent rustg_acreplace/rustg_acreplace_with_replacements calls
 * * options - An associative list like list("anchored" = 0, "ascii_case_insensitive" = 0, "match_kind" = "Standard"). The values shown in the example are the defaults, and default values may be omitted. See the identically named methods at https://docs.rs/aho-corasick/latest/aho_corasick/struct.AhoCorasickBuilder.html to see what the options do.
 * * patterns - A non-associative list of strings to search for
 * * replacements - Default replacements for this automaton, used with rustg_acreplace
 */
#define rustg_setup_acreplace_with_options(key, options, patterns, replacements) RUSTG_CALL(RUST_G, "setup_acreplace")(key, json_encode(options), json_encode(patterns), json_encode(replacements))

/**
 * Run the specified replacement engine with the provided haystack text to replace, returning replaced text.
 *
 * Arguments:
 * * key - The key for the automaton
 * * text - Text to run replacements on
 */
#define rustg_acreplace(key, text) RUSTG_CALL(RUST_G, "acreplace")(key, text)

/**
 * Run the specified replacement engine with the provided haystack text to replace, returning replaced text.
 *
 * Arguments:
 * * key - The key for the automaton
 * * text - Text to run replacements on
 * * replacements - Replacements for this call. Must be the same length as the set-up patterns
 */
#define rustg_acreplace_with_replacements(key, text, replacements) RUSTG_CALL(RUST_G, "acreplace_with_replacements")(key, text, json_encode(replacements))
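// Illustrative sketch (not part of the diff): one-time automaton setup, then
// reuse on every call. The key string and word lists are hypothetical.
/proc/example_censor(text)
	var/static/automaton_ready = FALSE
	if(!automaton_ready)
		rustg_setup_acreplace("censor", list("foo", "bar"), list("***", "***"))
		automaton_ready = TRUE
	return rustg_acreplace("censor", text)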
/**
 * This proc generates a cellular automata noise grid which can be used in procedural generation methods.
 *
 * Returns a single string that goes row by row, with values of 1 representing an alive cell, and a value of 0 representing a dead cell.
 *
 * Arguments:
 * * percentage: The chance of a turf starting closed
 * * smoothing_iterations: The amount of iterations the cellular automata simulates before returning the results
 * * birth_limit: If the number of neighboring cells is higher than this amount, a cell is born
 * * death_limit: If the number of neighboring cells is lower than this amount, a cell dies
 * * width: The width of the grid.
 * * height: The height of the grid.
 */
#define rustg_cnoise_generate(percentage, smoothing_iterations, birth_limit, death_limit, width, height) \
	RUSTG_CALL(RUST_G, "cnoise_generate")(percentage, smoothing_iterations, birth_limit, death_limit, width, height)

#define rustg_dmi_strip_metadata(fname) RUSTG_CALL(RUST_G, "dmi_strip_metadata")(fname)
#define rustg_dmi_create_png(path, width, height, data) RUSTG_CALL(RUST_G, "dmi_create_png")(path, width, height, data)
#define rustg_dmi_resize_png(path, width, height, resizetype) RUSTG_CALL(RUST_G, "dmi_resize_png")(path, width, height, resizetype)
#define rustg_file_read(fname) RUSTG_CALL(RUST_G, "file_read")(fname)
#define rustg_file_exists(fname) RUSTG_CALL(RUST_G, "file_exists")(fname)
#define rustg_file_write(text, fname) RUSTG_CALL(RUST_G, "file_write")(text, fname)
#define rustg_file_append(text, fname) RUSTG_CALL(RUST_G, "file_append")(text, fname)
#define rustg_file_get_line_count(fname) text2num(RUSTG_CALL(RUST_G, "file_get_line_count")(fname))
#define rustg_file_seek_line(fname, line) RUSTG_CALL(RUST_G, "file_seek_line")(fname, "[line]")

#ifdef RUSTG_OVERRIDE_BUILTINS
#define file2text(fname) rustg_file_read("[fname]")
#define text2file(text, fname) rustg_file_append(text, "[fname]")
#endif

#define rustg_git_revparse(rev) RUSTG_CALL(RUST_G, "rg_git_revparse")(rev)
#define rustg_git_commit_date(rev) RUSTG_CALL(RUST_G, "rg_git_commit_date")(rev)
#define RUSTG_HTTP_METHOD_GET "get"
#define RUSTG_HTTP_METHOD_PUT "put"
#define RUSTG_HTTP_METHOD_DELETE "delete"
#define RUSTG_HTTP_METHOD_PATCH "patch"
#define RUSTG_HTTP_METHOD_HEAD "head"
#define RUSTG_HTTP_METHOD_POST "post"
#define rustg_http_request_blocking(method, url, body, headers, options) RUSTG_CALL(RUST_G, "http_request_blocking")(method, url, body, headers, options)
#define rustg_http_request_async(method, url, body, headers, options) RUSTG_CALL(RUST_G, "http_request_async")(method, url, body, headers, options)
#define rustg_http_check_request(req_id) RUSTG_CALL(RUST_G, "http_check_request")(req_id)

#define RUSTG_JOB_NO_RESULTS_YET "NO RESULTS YET"
#define RUSTG_JOB_NO_SUCH_JOB "NO SUCH JOB"
#define RUSTG_JOB_ERROR "JOB PANICKED"
#define rustg_cnoise_generate(percentage, smoothing_iterations, birth_limit, death_limit, width, height) \
	call(RUST_G, "cnoise_generate")(percentage, smoothing_iterations, birth_limit, death_limit, width, height)
#define rustg_json_is_valid(text) (RUSTG_CALL(RUST_G, "json_is_valid")(text) == "true")

#define rustg_dmi_strip_metadata(fname) call(RUST_G, "dmi_strip_metadata")(fname)
#define rustg_dmi_create_png(path, width, height, data) call(RUST_G, "dmi_create_png")(path, width, height, data)
#define rustg_log_write(fname, text, format) RUSTG_CALL(RUST_G, "log_write")(fname, text, format)
/proc/rustg_log_close_all() return RUSTG_CALL(RUST_G, "log_close_all")()

#define rustg_noise_get_at_coordinates(seed, x, y) call(RUST_G, "noise_get_at_coordinates")(seed, x, y)
#define rustg_noise_get_at_coordinates(seed, x, y) RUSTG_CALL(RUST_G, "noise_get_at_coordinates")(seed, x, y)

#define rustg_git_revparse(rev) call(RUST_G, "rg_git_revparse")(rev)
#define rustg_git_commit_date(rev) call(RUST_G, "rg_git_commit_date")(rev)
#define rustg_sql_connect_pool(options) RUSTG_CALL(RUST_G, "sql_connect_pool")(options)
#define rustg_sql_query_async(handle, query, params) RUSTG_CALL(RUST_G, "sql_query_async")(handle, query, params)
#define rustg_sql_query_blocking(handle, query, params) RUSTG_CALL(RUST_G, "sql_query_blocking")(handle, query, params)
#define rustg_sql_connected(handle) RUSTG_CALL(RUST_G, "sql_connected")(handle)
#define rustg_sql_disconnect_pool(handle) RUSTG_CALL(RUST_G, "sql_disconnect_pool")(handle)
#define rustg_sql_check_query(job_id) RUSTG_CALL(RUST_G, "sql_check_query")("[job_id]")

#define rustg_log_write(fname, text, format) call(RUST_G, "log_write")(fname, text, format)
/proc/rustg_log_close_all() return call(RUST_G, "log_close_all")()
#define rustg_time_microseconds(id) text2num(RUSTG_CALL(RUST_G, "time_microseconds")(id))
#define rustg_time_milliseconds(id) text2num(RUSTG_CALL(RUST_G, "time_milliseconds")(id))
#define rustg_time_reset(id) RUSTG_CALL(RUST_G, "time_reset")(id)
// RUST-G defines & procs for HTTP component
#define RUSTG_HTTP_METHOD_GET "get"
#define RUSTG_HTTP_METHOD_POST "post"
#define RUSTG_HTTP_METHOD_PUT "put"
#define RUSTG_HTTP_METHOD_DELETE "delete"
#define RUSTG_HTTP_METHOD_PATCH "patch"
#define RUSTG_HTTP_METHOD_HEAD "head"
/proc/rustg_unix_timestamp()
	return text2num(RUSTG_CALL(RUST_G, "unix_timestamp")())

#define rustg_file_read(fname) call(RUST_G, "file_read")("[fname]")
#define rustg_file_exists(fname) call(RUST_G, "file_exists")("[fname]")
#define rustg_file_write(text, fname) call(RUST_G, "file_write")(text, "[fname]")
#define rustg_file_append(text, fname) call(RUST_G, "file_append")(text, "[fname]")
#define rustg_raw_read_toml_file(path) json_decode(RUSTG_CALL(RUST_G, "toml_file_to_json")(path) || "null")
#define rustg_sql_connect_pool(options) call(RUST_G, "sql_connect_pool")(options)
#define rustg_sql_query_async(handle, query, params) call(RUST_G, "sql_query_async")(handle, query, params)
#define rustg_sql_query_blocking(handle, query, params) call(RUST_G, "sql_query_blocking")(handle, query, params)
#define rustg_sql_connected(handle) call(RUST_G, "sql_connected")(handle)
#define rustg_sql_disconnect_pool(handle) call(RUST_G, "sql_disconnect_pool")(handle)
#define rustg_sql_check_query(job_id) call(RUST_G, "sql_check_query")("[job_id]")
/proc/rustg_read_toml_file(path)
	var/list/output = rustg_raw_read_toml_file(path)
	if (output["success"])
		return json_decode(output["content"])
	else
		CRASH(output["content"])
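// Illustrative sketch (not part of the diff): loading a TOML config during
// subsystem init. The subsystem, file path and key are hypothetical.
/datum/controller/subsystem/example_config/Initialize()
	var/list/cfg = rustg_read_toml_file("config/example.toml")
	log_world("example max_items = [cfg["max_items"]]")
	return SS_INIT_SUCCESS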
#define rustg_http_request_blocking(method, url, body, headers) call(RUST_G, "http_request_blocking")(method, url, body, headers)
#define rustg_http_request_async(method, url, body, headers) call(RUST_G, "http_request_async")(method, url, body, headers)
#define rustg_http_check_request(req_id) call(RUST_G, "http_check_request")(req_id)
#define rustg_raw_toml_encode(value) json_decode(RUSTG_CALL(RUST_G, "toml_encode")(json_encode(value)))

#define rustg_hash_string(algorithm, text) call(RUST_G, "hash_string")(algorithm, text)
#define rustg_hash_file(algorithm, fname) call(RUST_G, "hash_file")(algorithm, fname)
#define rustg_hash_generate_totp(seed) call(RUST_G, "generate_totp")(seed)
#define rustg_hash_generate_totp_tolerance(seed, tolerance) call(RUST_G, "generate_totp_tolerance")(seed, "[tolerance]")
/proc/rustg_toml_encode(value)
	var/list/output = rustg_raw_toml_encode(value)
	if (output["success"])
		return output["content"]
	else
		CRASH(output["content"])
#define rustg_url_encode(text) RUSTG_CALL(RUST_G, "url_encode")("[text]")
#define rustg_url_decode(text) RUSTG_CALL(RUST_G, "url_decode")(text)

#ifdef RUSTG_OVERRIDE_BUILTINS
#define url_encode(text) rustg_url_encode(text)
#define url_decode(text) rustg_url_decode(text)
#endif

#define rustg_hash_string(algorithm, text) RUSTG_CALL(RUST_G, "hash_string")(algorithm, text)
#define rustg_hash_file(algorithm, fname) RUSTG_CALL(RUST_G, "hash_file")(algorithm, fname)
#define rustg_hash_generate_totp(seed) RUSTG_CALL(RUST_G, "generate_totp")(seed)
#define rustg_hash_generate_totp_tolerance(seed, tolerance) RUSTG_CALL(RUST_G, "generate_totp_tolerance")(seed, tolerance)

#define RUSTG_HASH_MD5 "md5"
#define RUSTG_HASH_SHA1 "sha1"
@@ -25,37 +25,40 @@
//! ## Timing subsystem

/**
 * Don't run if there is an identical unique timer active
 *
 * if the arguments to addtimer are the same as an existing timer, it doesn't create a new timer,
 * and returns the id of the existing timer
 */
#define TIMER_UNIQUE (1<<0)

///For unique timers: Replace the old timer rather than not start this one
#define TIMER_OVERRIDE (1<<1)

/**
 * Timing should be based on how timing progresses on clients, not the server.
 *
 * Tracking this is more expensive,
 * should only be used in conjunction with things that have to progress client side, such as
 * animate() or sound()
 */
#define TIMER_CLIENT_TIME (1<<2)

///Timer can be stopped using deltimer()
#define TIMER_STOPPABLE (1<<3)

///prevents distinguishing identical timers with the wait variable
///
///To be used with TIMER_UNIQUE
#define TIMER_NO_HASH_WAIT (1<<4)

///Loops the timer repeatedly until qdeleted
///
///In most cases you want a subsystem instead, so don't use this unless you have a good reason
#define TIMER_LOOP (1<<5)

///Delete the timer on parent datum Destroy() and when deltimer'd
#define TIMER_DELETE_ME (1<<6)

///Empty ID define
#define TIMER_ID_NULL -1
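// Illustrative sketch (not part of the diff): combining the flags with addtimer.
// The beacon type, its ping_timer var and ping() proc are all hypothetical.
/obj/machinery/example_beacon/proc/start_pinging()
	// one looping, stoppable timer; re-issuing it replaces the old one
	ping_timer = addtimer(CALLBACK(src, PROC_REF(ping)), 2 SECONDS, TIMER_UNIQUE | TIMER_OVERRIDE | TIMER_LOOP | TIMER_STOPPABLE)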
@@ -94,6 +97,25 @@
	}\
}

//! ### SS initialization hints
/**
 * Negative values indicate a failure or warning of some kind, positive are good.
 * 0 and 1 are unused so that TRUE and FALSE are guaranteed to be invalid values.
 */

/// Subsystem failed to initialize entirely. Print a warning, log, and disable firing.
#define SS_INIT_FAILURE -2

/// The default return value which must be overridden. Will succeed with a warning.
#define SS_INIT_NONE -1

/// Subsystem initialized successfully.
#define SS_INIT_SUCCESS 2

/// Successful, but don't print anything. Useful if subsystem was disabled.
#define SS_INIT_NO_NEED 3

//! ### SS initialization load orders
// Subsystem init_order, from highest priority to lowest priority
// Subsystems shut down in the reverse of the order they initialize in
// The numbers just define the ordering, they are meaningless otherwise.
@@ -173,6 +195,7 @@
#define FIRE_PRIORITY_RUNECHAT 410
#define FIRE_PRIORITY_OVERLAYS 500
#define FIRE_PRIORITY_EXPLOSIONS 666
#define FIRE_PRIORITY_TIMER 700
#define FIRE_PRIORITY_INPUT 1000 // This must always be the highest priority. Player input must never be lost.

// SS runlevels
@@ -217,6 +240,15 @@
	if(isobj(A) || ismob(A)){SSdemo.mark_dirty(A);}\
} while (FALSE)
/**
 * Create a new timer and add it to the queue.
 * Arguments:
 * * callback the callback to call on timer finish
 * * wait deciseconds to run the timer for
 * * flags flags for this timer, see: code\__DEFINES\subsystems.dm
 * * timer_subsystem the subsystem to insert this timer into
 */
#define addtimer(args...) _addtimer(args, file = __FILE__, line = __LINE__)
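// Illustrative sketch (not part of the diff): a one-shot timer. The grenade type
// and explode() proc are hypothetical.
/obj/item/example_grenade/proc/prime()
	addtimer(CALLBACK(src, PROC_REF(explode)), 5 SECONDS)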
// Air subsystem subtasks
#define SSAIR_PIPENETS 1
@@ -234,3 +266,6 @@
#define SSEXPLOSIONS_MOVABLES 1
#define SSEXPLOSIONS_TURFS 2
#define SSEXPLOSIONS_THROWS 3

/// The timer key used to know how long subsystem initialization takes
#define SS_INIT_TIMER_KEY "ss_init"
@@ -38,6 +38,10 @@ When using time2text(), please use "DDD" to find the weekday. Refrain from using
#define SATURDAY "Sat"
#define SUNDAY "Sun"

#define MILLISECONDS *0.01

#define DECISECONDS *1 //the base unit all of these defines are scaled by, because byond uses that as a unit of measurement for some fucking reason

#define SECONDS *10

#define MINUTES SECONDS*60
@@ -49,3 +53,7 @@ When using time2text(), please use "DDD" to find the weekday. Refrain from using
#define DS2TICKS(DS) ((DS)/world.tick_lag)

#define TICKS2DS(T) ((T) TICKS)

#define MS2DS(T) ((T) MILLISECONDS)

#define DS2MS(T) ((T) * 100)
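// Illustrative sketch (not part of the diff): the unit macros multiply into
// deciseconds, so 3 SECONDS evaluates to 30 and 1 MINUTES to 600.
/proc/example_delays()
	sleep(3 SECONDS) // sleeps 30 deciseconds
	return DS2MS(5) // 5 deciseconds -> 500 milliseconds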
@@ -37,6 +37,7 @@
 * TYPECONT: The typepath of the contents of the list
 * COMPARE: The object to compare against, usually the same as INPUT
 * COMPARISON: The variable on the objects to compare
 * COMPTYPE: How should the values be compared? Either COMPARE_KEY or COMPARE_VALUE.
 */
#define BINARY_INSERT(INPUT, LIST, TYPECONT, COMPARE, COMPARISON, COMPTYPE) \
	do {\
@@ -48,7 +49,7 @@
		var/__BIN_LEFT = 1;\
		var/__BIN_RIGHT = __BIN_CTTL;\
		var/__BIN_MID = (__BIN_LEFT + __BIN_RIGHT) >> 1;\
		var/##TYPECONT/__BIN_ITEM;\
		var ##TYPECONT/__BIN_ITEM;\
		while(__BIN_LEFT < __BIN_RIGHT) {\
			__BIN_ITEM = COMPTYPE;\
			if(__BIN_ITEM.##COMPARISON <= COMPARE.##COMPARISON) {\
@@ -64,6 +65,79 @@
		};\
	} while(FALSE)
/**
 * Custom binary search sorted insert utilising comparison procs instead of vars.
 * INPUT: Object to be inserted
 * LIST: List to insert object into
 * TYPECONT: The typepath of the contents of the list
 * COMPARE: The object to compare against, usually the same as INPUT
 * COMPARISON: The plaintext name of a proc on INPUT that takes a single argument to accept a single element from LIST and returns a positive, negative or zero number to perform a comparison.
 * COMPTYPE: How should the values be compared? Either COMPARE_KEY or COMPARE_VALUE.
 */
#define BINARY_INSERT_PROC_COMPARE(INPUT, LIST, TYPECONT, COMPARE, COMPARISON, COMPTYPE) \
	do {\
		var/list/__BIN_LIST = LIST;\
		var/__BIN_CTTL = length(__BIN_LIST);\
		if(!__BIN_CTTL) {\
			__BIN_LIST += INPUT;\
		} else {\
			var/__BIN_LEFT = 1;\
			var/__BIN_RIGHT = __BIN_CTTL;\
			var/__BIN_MID = (__BIN_LEFT + __BIN_RIGHT) >> 1;\
			var ##TYPECONT/__BIN_ITEM;\
			while(__BIN_LEFT < __BIN_RIGHT) {\
				__BIN_ITEM = COMPTYPE;\
				if(__BIN_ITEM.##COMPARISON(COMPARE) <= 0) {\
					__BIN_LEFT = __BIN_MID + 1;\
				} else {\
					__BIN_RIGHT = __BIN_MID;\
				};\
				__BIN_MID = (__BIN_LEFT + __BIN_RIGHT) >> 1;\
			};\
			__BIN_ITEM = COMPTYPE;\
			__BIN_MID = __BIN_ITEM.##COMPARISON(COMPARE) > 0 ? __BIN_MID : __BIN_MID + 1;\
			__BIN_LIST.Insert(__BIN_MID, INPUT);\
		};\
	} while(FALSE)
#define SORT_FIRST_INDEX(list) (list[1])
#define SORT_COMPARE_DIRECTLY(thing) (thing)
#define SORT_VAR_NO_TYPE(varname) var/varname
/****
 * Even more custom binary search sorted insert, using defines instead of vars
 * INPUT: Item to be inserted
 * LIST: List to insert INPUT into
 * TYPECONT: A define setting the var to the typepath of the contents of the list
 * COMPARE: The item to compare against, usually the same as INPUT
 * COMPARISON: A define that takes an item to compare as input, and returns their comparable value
 * COMPTYPE: How should the list be compared? Either COMPARE_KEY or COMPARE_VALUE.
 */
#define BINARY_INSERT_DEFINE(INPUT, LIST, TYPECONT, COMPARE, COMPARISON, COMPTYPE) \
	do {\
		var/list/__BIN_LIST = LIST;\
		var/__BIN_CTTL = length(__BIN_LIST);\
		if(!__BIN_CTTL) {\
			__BIN_LIST += INPUT;\
		} else {\
			var/__BIN_LEFT = 1;\
			var/__BIN_RIGHT = __BIN_CTTL;\
			var/__BIN_MID = (__BIN_LEFT + __BIN_RIGHT) >> 1;\
			##TYPECONT(__BIN_ITEM);\
			while(__BIN_LEFT < __BIN_RIGHT) {\
				__BIN_ITEM = COMPTYPE;\
				if(##COMPARISON(__BIN_ITEM) <= ##COMPARISON(COMPARE)) {\
					__BIN_LEFT = __BIN_MID + 1;\
				} else {\
					__BIN_RIGHT = __BIN_MID;\
				};\
				__BIN_MID = (__BIN_LEFT + __BIN_RIGHT) >> 1;\
			};\
			__BIN_ITEM = COMPTYPE;\
			__BIN_MID = ##COMPARISON(__BIN_ITEM) > ##COMPARISON(COMPARE) ? __BIN_MID : __BIN_MID + 1;\
			__BIN_LIST.Insert(__BIN_MID, INPUT);\
		};\
	} while(FALSE)
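// Illustrative sketch (not part of the diff): keeping a queue sorted by timeToRun,
// mirroring how the timer subsystem inserts /datum/timedevent entries. The
// scheduler datum and its queue var are hypothetical.
/datum/example_scheduler/proc/enqueue(datum/timedevent/event)
	BINARY_INSERT(event, queue, /datum/timedevent, event, timeToRun, COMPARE_KEY)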
/// Returns a list in plain English as a string
/proc/english_list(list/input, nothing_text = "nothing", and_text = " and ", comma_text = ", ", final_comma_text = "" )
	var/total = input.len
@@ -390,6 +464,9 @@
	return r

//tg compat
#define bitfield_to_list(args...) bitfield2list(args)

/// Returns the key based on the index
#define KEYBYINDEX(L, index) (((index <= length(L)) && (index > 0)) ? L[index] : null)
@@ -488,6 +565,8 @@
	return L

// /tg/ compat
#define reverse_range(args...) reverseRange(args)

//return first thing in L which has var/varname == value
//this is typecast as list/L, but you could actually feed it an atom instead.
@@ -752,3 +752,7 @@
			continue

		C.energy_fail(rand(duration_min, duration_max))

/// For legacy procs using addtimer in callbacks. Don't use this.
/proc/_addtimer_here(callback, time)
	addtimer(callback, time)
code/__HELPERS/nameof.dm (new file, 15 lines)
@@ -0,0 +1,15 @@
/**
 * NAMEOF: Compile time checked variable name to string conversion
 * evaluates to a string equal to "X", but compile errors if X isn't a var on datum.
 * datum may be null, but it does need to be a typed var.
 **/
#define NAMEOF(datum, X) (#X || ##datum.##X)

/**
 * NAMEOF that actually works in static definitions because src::type requires src to be defined
 */
#if DM_VERSION >= 515
#define NAMEOF_STATIC(datum, X) (nameof(type::##X))
#else
#define NAMEOF_STATIC(datum, X) (#X || ##datum.##X)
#endif
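// Illustrative sketch (not part of the diff): NAMEOF yields the string "health"
// but breaks the build if the var is renamed, unlike a bare string literal.
/mob/living/proc/example_nameof()
	return NAMEOF(src, health) // compile-checked "health"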
@@ -1459,9 +1459,6 @@ GLOBAL_DATUM_INIT(dview_mob, /mob/dview, new)
	if(is_servant_of_ratvar(V) || isobserver(V))
		. += V

//datum may be null, but it does need to be a typed var
#define NAMEOF(datum, X) (#X || ##datum.##X)

#define VARSET_LIST_CALLBACK(target, var_name, var_value) CALLBACK(GLOBAL_PROC, /proc/___callbackvarset, ##target, ##var_name, ##var_value)
//dupe code because dm can't handle 3 level deep macros
#define VARSET_CALLBACK(datum, var, var_value) CALLBACK(GLOBAL_PROC, /proc/___callbackvarset, ##datum, NAMEOF(##datum, ##var), ##var_value)
code/__byond_version_compat.dm (new file, 48 lines)
@@ -0,0 +1,48 @@
// This file contains defines allowing targeting of BYOND versions newer than the supported one

//Update this whenever you need to take advantage of more recent byond features
#define MIN_COMPILER_VERSION 514
#define MIN_COMPILER_BUILD 1556
#if (DM_VERSION < MIN_COMPILER_VERSION || DM_BUILD < MIN_COMPILER_BUILD) && !defined(SPACEMAN_DMM)
//Don't forget to update this part
#error Your version of BYOND is too out-of-date to compile this project. Go to https://secure.byond.com/download and update.
#error You need version 514.1556 or higher
#endif

//If you update these values, update the message in the #error
#define MAX_BYOND_MAJOR 514
#define MAX_BYOND_MINOR 1589

// You can define IGNORE_MAX_BYOND_VERSION to bypass the max version check.
// Note: This will likely break the game, especially any extools/auxtools linkage. Only use if you know what you're doing!
#ifdef OPENDREAM // Thanks, Altoids!
#define IGNORE_MAX_BYOND_VERSION
#endif

#if ((DM_VERSION > MAX_BYOND_MAJOR) || (DM_BUILD > MAX_BYOND_MINOR)) && !defined(IGNORE_MAX_BYOND_VERSION)
#error Your version of BYOND is too new to compile this project. Download version 514.1589 at www.byond.com/download/build/514/514.1589_byond.exe
#endif

// 515 split call for external libraries into call_ext
#if DM_VERSION < 515
#define LIBCALL call
#else
#define LIBCALL call_ext
#endif
// We want compile-time guarantees that these procs exist on the local type; unfortunately 515 killed the .proc/procname syntax, so we have to use nameof()
#if DM_VERSION < 515
/// Call by name proc reference, checks if the proc exists on this type or as a global proc
#define PROC_REF(X) (.proc/##X)
/// Call by name proc reference, checks if the proc exists on given type or as a global proc
#define TYPE_PROC_REF(TYPE, X) (##TYPE.proc/##X)
/// Call by name proc reference, checks if the proc is an existing global proc
#define GLOBAL_PROC_REF(X) (/proc/##X)
#else
/// Call by name proc reference, checks if the proc exists on this type or as a global proc
#define PROC_REF(X) (nameof(.proc/##X))
/// Call by name proc reference, checks if the proc exists on given type or as a global proc
#define TYPE_PROC_REF(TYPE, X) (nameof(##TYPE.proc/##X))
/// Call by name proc reference, checks if the proc is an existing global proc
#define GLOBAL_PROC_REF(X) (/proc/##X)
#endif
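// Illustrative sketch (not part of the diff): compile-time checked proc references
// in callbacks; the datum, local_tick() and example_global_tick() are hypothetical.
/datum/example_user/proc/demo()
	addtimer(CALLBACK(src, PROC_REF(local_tick)), 1 SECONDS)
	addtimer(CALLBACK(GLOBAL_PROC, GLOBAL_PROC_REF(example_global_tick)), 1 SECONDS)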
@@ -31,14 +31,6 @@
#define FORCE_MAP "_maps/runtimestation.json"
#endif

//Update this whenever you need to take advantage of more recent byond features
#define MIN_COMPILER_VERSION 514
#if DM_VERSION < MIN_COMPILER_VERSION
//Don't forget to update this part
#error Your version of BYOND is too out-of-date to compile this project. Go to https://secure.byond.com/download and update.
#error You need version 514 or higher
#endif

//Additional code for the above flags.
#ifdef TESTING
#warn compiling in TESTING mode. testing() debug messages will be visible.
@@ -58,20 +50,6 @@
#define EXTOOLS (world.system_type == MS_WINDOWS ? "byond-extools.dll" : "libbyond-extools.so")

//If you update these values, update the message in the #error
#define MAX_BYOND_MAJOR 514
#define MAX_BYOND_MINOR 1589

// You can define IGNORE_MAX_BYOND_VERSION to bypass the max version check.
// Note: This will likely break the game, especially any extools/auxtools linkage. Only use if you know what you're doing!
#ifdef OPENDREAM // Thanks, Altoids!
#define IGNORE_MAX_BYOND_VERSION
#endif

#if ((DM_VERSION > MAX_BYOND_MAJOR) || (DM_BUILD > MAX_BYOND_MINOR)) && !defined(IGNORE_MAX_BYOND_VERSION)
#error Your version of BYOND is too new to compile this project. Download version 514.1589 at www.byond.com/download/build/514/514.1589_byond.exe
#endif

#ifdef TRAVISBUILDING
// Turdis is special :)
#define CBT
@@ -1,14 +1,25 @@
// Clickable stat() button.
/obj/effect/statclick
	name = "Initializing..."
	blocks_emissive = NONE
	var/target

INITIALIZE_IMMEDIATE(/obj/effect/statclick)

/obj/effect/statclick/Initialize(mapload, text, target) //Don't port this to Initialize it's too critical
/obj/effect/statclick/Initialize(mapload, text, target)
	. = ..()
	name = text
	src.target = target
	if(isdatum(target)) //Harddel man bad
		RegisterSignal(target, COMSIG_PARENT_QDELETING, PROC_REF(cleanup))

/obj/effect/statclick/Destroy()
	target = null
	return ..()

/obj/effect/statclick/proc/cleanup()
	SIGNAL_HANDLER
	qdel(src)

/obj/effect/statclick/proc/update(text)
	name = text
@@ -25,7 +36,7 @@ INITIALIZE_IMMEDIATE(/obj/effect/statclick)
		class = "subsystem"
	else if(istype(target, /datum/controller))
		class = "controller"
	else if(istype(target, /datum))
	else if(isdatum(target))
		class = "datum"
	else
		class = "unknown"
@@ -51,3 +62,30 @@ INITIALIZE_IMMEDIATE(/obj/effect/statclick)
	SSblackbox.record_feedback("tally", "admin_verb", 1, "Restart Failsafe Controller")
	message_admins("Admin [key_name_admin(usr)] has restarted the [controller] controller.")

/client/proc/debug_controller()
	set category = "Misc.Server Debug"
	set name = "Debug Controller"
	set desc = "Debug the various periodic loop controllers for the game (be careful!)"

	if(!holder)
		return

	var/list/controllers = list()
	var/list/controller_choices = list()

	for (var/datum/controller/controller in world)
		if (istype(controller, /datum/controller/subsystem))
			continue
		controllers["[controller] ([controller.type])"] = controller //we use an associative list to ensure clients can't hold references to controllers
		controller_choices += "[controller] ([controller.type])"

	var/datum/controller/controller_string = input("Select controller to debug", "Debug Controller") as null|anything in controller_choices
	var/datum/controller/controller = controllers[controller_string]

	if (!istype(controller))
		return
	debug_variables(controller)

	//SSblackbox.record_feedback("tally", "admin_verb", 1, "Restart Failsafe Controller")
	message_admins("Admin [key_name_admin(usr)] is debugging the [controller] controller.")
@@ -519,3 +519,15 @@
/datum/config_entry/flag/disable_gc_failure_hard_deletes

/datum/config_entry/flag/disable_all_hard_deletes

/datum/config_entry/number/hard_deletes_overrun_threshold
	integer = FALSE
	min_val = 0
	default = 0.5

/datum/config_entry/number/hard_deletes_overrun_limit
	default = 0
	min_val = 0

/// logs all timers in buckets on automatic bucket reset (Useful for timer debugging)
/datum/config_entry/flag/log_timers_on_bucket_reset
@@ -1,7 +1,7 @@
/**
 * Failsafe
 *
 * Pretty much pokes the MC to make sure it's still alive.
 **/

GLOBAL_REAL(Failsafe, /datum/controller/failsafe)
@@ -15,7 +15,7 @@ GLOBAL_REAL(Failsafe, /datum/controller/failsafe)
	// The alert level. For every failed poke, we drop a DEFCON level. Once we hit DEFCON 1, restart the MC.
	var/defcon = 5
	//the world.time of the last check, so the MC can restart US if we hang.
	// (Real friends look out for *each other*)
	var/lasttick = 0

	// Track the MC iteration to make sure it's still on track.
@@ -31,8 +31,24 @@ GLOBAL_REAL(Failsafe, /datum/controller/failsafe)
	Initialize()

/datum/controller/failsafe/Initialize()
	set waitfor = 0
	set waitfor = FALSE
	Failsafe.Loop()
	if (!Master || defcon == 0) //Master is gone/not responding and Failsafe just exited its loop
		defcon = 3 //Reset defcon level as it's used inside the emergency loop
		while (defcon > 0)
			var/recovery_result = emergency_loop()
			if (recovery_result == 1) //Exit emergency loop and delete self if it was able to recover MC
				break
			else if (defcon == 1) //Exit Failsafe if we weren't able to recover the MC in the last stage
				log_game("FailSafe: Failed to recover MC while in emergency state. Failsafe exiting.")
				message_admins(span_boldannounce("Failsafe failed critically while trying to recreate broken MC. Please manually fix the MC or reboot the server. Failsafe exiting now."))
				message_admins(span_boldannounce("You can try manually calling these two procs:"))
				message_admins(span_boldannounce("/proc/recover_all_SS_and_recreate_master: Most stuff should still function but expect instability/runtimes/broken stuff."))
				message_admins(span_boldannounce("/proc/delete_all_SS_and_recreate_master: Most stuff will be broken but basic stuff like movement and chat should still work."))
			else if (recovery_result == -1) //Failed to recreate MC
				defcon--
			sleep(initial(processing_interval)) //Wait a bit until the next try

	if(!QDELETED(src))
		qdel(src) //when Loop() returns, we delete ourselves and let the MC recreate us
@@ -45,24 +61,37 @@ GLOBAL_REAL(Failsafe, /datum/controller/failsafe)
	while(running)
		lasttick = world.time
		if(!Master)
			// Replace the missing Master! This should never, ever happen.
			new /datum/controller/master()
			// Break out of the main loop so we go into emergency state
			break
		// Only poke it if overrides are not in effect.
		if(processing_interval > 0)
			if(Master.processing && Master.iteration)
				if (defcon > 1 && (!Master.stack_end_detector || !Master.stack_end_detector.check()))
					to_chat(GLOB.permissions.admins, span_boldannounce("ERROR: The Master Controller code stack has exited unexpectedly, Restarting..."))
					defcon = 0
					var/rtn = Recreate_MC()
					if(rtn > 0)
						master_iteration = 0
						to_chat(GLOB.permissions.admins, span_adminnotice("MC restarted successfully"))
					else if(rtn < 0)
						log_game("FailSafe: Could not restart MC, runtime encountered. Entering defcon 0")
						to_chat(GLOB.permissions.admins, span_boldannounce("ERROR: DEFCON [defcon_pretty()]. Could not restart MC, runtime encountered. I will silently keep retrying."))
				// Check if processing is done yet.
				if(Master.iteration == master_iteration)
					switch(defcon)
						if(4, 5)
							--defcon
						if(3)
							message_admins(span_adminnotice("Notice: DEFCON [defcon_pretty()]. The Master Controller has not fired in the last [(5-defcon) * processing_interval] ticks."))
							--defcon
						if(2)
							to_chat(GLOB.permissions.admins, span_boldannounce("Warning: DEFCON [defcon_pretty()]. The Master Controller has not fired in the last [(5-defcon) * processing_interval] ticks. Automatic restart in [processing_interval] ticks."))
							--defcon
						if(1)
							to_chat(GLOB.permissions.admins, span_boldannounce("Warning: DEFCON [defcon_pretty()]. The Master Controller has still not fired within the last [(5-defcon) * processing_interval] ticks. Killing and restarting..."))
							--defcon
							var/rtn = Recreate_MC()
@@ -75,6 +104,7 @@ GLOBAL_REAL(Failsafe, /datum/controller/failsafe)
							to_chat(GLOB.permissions.admins, span_boldannounce("ERROR: DEFCON [defcon_pretty()]. Could not restart MC, runtime encountered. I will silently keep retrying."))
							//if the return number was 0, it just means the mc was restarted too recently, and it just needs some time before we try again
							//no need to handle that specially when defcon 0 can handle it
						if(0) //DEFCON 0! (mc failed to restart)
							var/rtn = Recreate_MC()
							if(rtn > 0)
@@ -92,6 +122,57 @@ GLOBAL_REAL(Failsafe, /datum/controller/failsafe)
		defcon = 5
	sleep(initial(processing_interval))

//Emergency loop used when Master got deleted or the main loop exited while defcon == 0
//Loop is driven externally so runtimes only cancel the current recovery attempt
/datum/controller/failsafe/proc/emergency_loop()
	//The code in this proc should be kept as simple as possible; anything complicated like to_chat might rely on Master existing and runtime
	//The goal should always be to get a new Master up and running before anything else
	. = -1
	switch (defcon) //The lower defcon goes, the harder we try to fix the MC
		if (2 to 3) //Try to normally recreate the MC two times
			. = Recreate_MC()
		if (1) //Delete the old MC first so we don't transfer any info, in case that caused any issues
			del(Master)
			. = Recreate_MC()

	if (. == 1) //We were able to create a new master
		master_iteration = 0
		SSticker.Recover() //Recover the ticker system so the Master's runlevel gets set
		Master.Initialize(10, FALSE, TRUE) //Need to manually start the MC, normally world.new would do this
		to_chat(GLOB.permissions.admins, span_adminnotice("Failsafe recovered MC while in emergency state [defcon_pretty()]"))
	else
		log_game("FailSafe: Failsafe in emergency state and was unable to recreate MC while in defcon state [defcon_pretty()].")
		message_admins(span_boldannounce("Failsafe in emergency state and master down, trying to recreate MC while in defcon level [defcon_pretty()] failed."))

///Recreate all SSs, whose data will still survive due to Recover(); the new Master will then find and take them from global.vars
/proc/recover_all_SS_and_recreate_master()
	del(Master)
	var/list/subsystem_types = subtypesof(/datum/controller/subsystem)
	sortTim(subsystem_types, GLOBAL_PROC_REF(cmp_subsystem_init))
	for(var/I in subsystem_types)
		new I
	. = Recreate_MC()
	if (. == 1) //We were able to create a new master
		SSticker.Recover() //Recover the ticker system so the Master's runlevel gets set
		Master.Initialize(10, FALSE, TRUE) //Need to manually start the MC, normally world.new would do this
		to_chat(GLOB.permissions.admins, span_adminnotice("MC successfully recreated after recovering all subsystems!"))
	else
		message_admins(span_boldannounce("Failed to create new MC!"))

///Delete all existing SS to basically start over
/proc/delete_all_SS_and_recreate_master()
	del(Master)
	for(var/global_var in global.vars)
		if (istype(global.vars[global_var], /datum/controller/subsystem))
			del(global.vars[global_var])
	. = Recreate_MC()
	if (. == 1) //We were able to create a new master
		SSticker.Recover() //Recover the ticker system so the Master's runlevel gets set
		Master.Initialize(10, FALSE, TRUE) //Need to manually start the MC, normally world.new would do this
		to_chat(GLOB.permissions.admins, span_adminnotice("MC successfully recreated after deleting and recreating all subsystems!"))
	else
		message_admins(span_boldannounce("Failed to create new MC!"))

/datum/controller/failsafe/proc/defcon_pretty()
	return defcon
@@ -14,7 +14,7 @@ GLOBAL_REAL(GLOB, /datum/controller/global_vars)
	var/datum/controller/exclude_these = new
	gvars_datum_in_built_vars = exclude_these.vars + list(NAMEOF(src, gvars_datum_protected_varlist), NAMEOF(src, gvars_datum_in_built_vars), NAMEOF(src, gvars_datum_init_order))
	QDEL_IN(exclude_these, 0) //signal logging isn't ready

	log_world("[vars.len - gvars_datum_in_built_vars.len] global variables")
@@ -1,12 +1,18 @@
/**
 * StonedMC
 *
 * Designed to properly split up a given tick among subsystems
 * Note: if you read parts of this code and think "why is it doing it that way?"
 * Odds are, there is a reason
 *
 **/

//Init the debugger datum first so we can debug Master
//You might wonder why not just create the debugger datum global in its own file, since it's loaded way earlier than this DM file
//Well, for whatever reason, the Master gets created first and then the debugger when doing that
//So that's why this code lives here now, until someone finds out how Byond inits globals
//GLOBAL_REAL(Debugger, /datum/debugger) = new

//This is the ABSOLUTE ONLY THING that should init globally like this
//2019 update: the failsafe, config and Global controllers also do it
GLOBAL_REAL(Master, /datum/controller/master) = new
@@ -18,51 +24,62 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
/datum/controller/master
	name = "Master"

	// Are we processing (higher values increase the processing delay by n ticks)
	/// Are we processing (higher values increase the processing delay by n ticks)
	var/processing = TRUE
	// How many times have we run
	/// How many times have we run
	var/iteration = 0
	/// Stack end detector to detect stack overflows that kill the MC's main loop
	var/datum/stack_end_detector/stack_end_detector

	// world.time of last fire, for tracking lag outside of the mc
	/// world.time of last fire, for tracking lag outside of the mc
	var/last_run

	// List of subsystems to process().
	/// List of subsystems to process().
	var/list/subsystems

	///Most recent init stage to complete init.
	var/static/init_stage_completed

	// Vars for keeping track of tick drift.
	var/init_timeofday
	var/init_time
	var/tickdrift = 0

	/// How long is the MC sleeping between runs, read only (set by Loop() based off of anti-tick-contention heuristics)
	var/sleep_delta = 1

	///Only run ticker subsystems for the next n ticks.
	/// Only run ticker subsystems for the next n ticks.
	var/skip_ticks = 0

	var/make_runtime = 0
	/// makes the mc main loop runtime
	var/make_runtime = FALSE

	var/initializations_finished_with_no_players_logged_in //I wonder what this could be?

	// The type of the last subsystem to be process()'d.
	/// The type of the last subsystem to be fire()'d.
	var/last_type_processed

	var/datum/controller/subsystem/queue_head //Start of queue linked list
	var/datum/controller/subsystem/queue_tail //End of queue linked list (used for appending to the list)
	var/datum/controller/subsystem/queue_head //!Start of queue linked list
	var/datum/controller/subsystem/queue_tail //!End of queue linked list (used for appending to the list)
	var/queue_priority_count = 0 //Running total so that we don't have to loop through the queue each run to split up the tick
	var/queue_priority_count_bg = 0 //Same, but for background subsystems
	var/map_loading = FALSE //Are we loading in a new map?
	var/map_loading = FALSE //!Are we loading in a new map?

	var/current_runlevel //for scheduling different subsystems for different stages of the round
	var/current_runlevel //!for scheduling different subsystems for different stages of the round
	var/sleep_offline_after_initializations = TRUE

	/// During initialization, will be the instanced subsystem that is currently initializing.
	/// Outside of initialization, returns null.
	var/current_initializing_subsystem = null

	var/static/restart_clear = 0
	var/static/restart_timeout = 0
	var/static/restart_count = 0

	var/static/random_seed

	//current tick limit, assigned before running a subsystem.
	//used by CHECK_TICK as well so that the procs subsystems call can obey that SS's tick limits
	///current tick limit, assigned before running a subsystem.
	///used by CHECK_TICK as well so that the procs subsystems call can obey that SS's tick limits
	var/static/current_ticklimit = TICK_LIMIT_RUNNING

/datum/controller/master/New()
@@ -71,21 +88,39 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
	// Highlander-style: there can only be one! Kill off the old and replace it with the new.

	if(!random_seed)
		random_seed = (TEST_RUN_PARAMETER in world.params) ? 29051994 : rand(1, 1e9)
#ifdef UNIT_TESTS
		random_seed = 29051994
#else
		random_seed = rand(1, 1e9)
#endif
		rand_seed(random_seed)

	var/list/_subsystems = list()
	subsystems = _subsystems
	if (Master != src)
		if (istype(Master))
		if (istype(Master)) //If there is an existing MC, take over its stuff and delete it
			Recover()
			qdel(Master)
			Master = src
		else
			var/list/subsytem_types = subtypesof(/datum/controller/subsystem)
			sortTim(subsytem_types, /proc/cmp_subsystem_init)
			for(var/I in subsytem_types)
				_subsystems += new I
			Master = src
	//Code used for the first master on game boot, or if the existing master got deleted
	Master = src
	var/list/subsystem_types = subtypesof(/datum/controller/subsystem)
	sortTim(subsystem_types, GLOBAL_PROC_REF(cmp_subsystem_init))

	//Find any abandoned subsystems from the previous master (if there were any)
	var/list/existing_subsystems = list()
	for(var/global_var in global.vars)
		if (istype(global.vars[global_var], /datum/controller/subsystem))
			existing_subsystems += global.vars[global_var]

	//Either init a new SS, or if an existing one was found, use that
	for(var/I in subsystem_types)
		var/ss_idx = existing_subsystems.Find(I)
		if (ss_idx)
			_subsystems += existing_subsystems[ss_idx]
		else
			_subsystems += new I

	if(!GLOB)
		new /datum/controller/global_vars
@@ -97,15 +132,15 @@ GLOBAL_REAL(Master, /datum/controller/master) = new

/datum/controller/master/Shutdown()
	processing = FALSE
	sortTim(subsystems, /proc/cmp_subsystem_init)
	reverseRange(subsystems)
	sortTim(subsystems, GLOBAL_PROC_REF(cmp_subsystem_init))
	reverse_range(subsystems)
	for(var/datum/controller/subsystem/ss in subsystems)
		log_world("Shutting down [ss.name] subsystem...")
		ss.Shutdown()
	log_world("Shutdown complete")

// Returns 1 if we created a new mc, 0 if we couldn't due to a recent restart,
// -1 if we encountered a runtime trying to recreate it
// -1 if we encountered a runtime trying to recreate it
/proc/Recreate_MC()
	. = -1 //so if we runtime, things know we failed
	if (world.time < Master.restart_timeout)
@@ -116,7 +151,8 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
	var/delay = 50 * ++Master.restart_count
	Master.restart_timeout = world.time + delay
	Master.restart_clear = world.time + (delay * 2)
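	//A worked example (not from the original source): as restart_count goes 1, 2, 3...
	//the delay becomes 50, 100, 150 deciseconds, i.e. 5s, 10s, 15s between permitted MC restarts,
	//and restart_clear presumably lets the strike count decay once twice that long passes quietly.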
	Master.processing = FALSE //stop ticking this one
	if (Master) //Can only do this if the master hasn't been deleted
		Master.processing = FALSE //stop ticking this one
	try
		new/datum/controller/master()
	catch
@@ -126,17 +162,22 @@ GLOBAL_REAL(Master, /datum/controller/master) = new

/datum/controller/master/Recover()
	var/msg = "## DEBUG: [time2text(world.timeofday)] MC restarted. Reports:\n"
	for (var/varname in Master.vars)
		switch (varname)
			if("name", "tag", "bestF", "type", "parent_type", "vars", "statclick") // Built-in junk.
				continue
			else
				var/varval = Master.vars[varname]
				if (istype(varval, /datum)) // Check if it has a type var.
					var/datum/D = varval
					msg += "\t [varname] = [D]([D.type])\n"
				else
					msg += "\t [varname] = [varval]\n"
	var/list/master_attributes = Master.vars
	var/list/filtered_variables = list(
		NAMEOF(src, name),
		NAMEOF(src, parent_type),
		NAMEOF(src, statclick),
		NAMEOF(src, tag),
		NAMEOF(src, type),
		NAMEOF(src, vars),
	)
	for (var/varname in master_attributes - filtered_variables)
		var/varval = master_attributes[varname]
		if (isdatum(varval)) // Check if it has a type var.
			var/datum/D = varval
			msg += "\t [varname] = [D]([D.type])\n"
		else
			msg += "\t [varname] = [varval]\n"
	log_world(msg)

	var/datum/controller/subsystem/BadBoy = Master.last_type_processed
@@ -149,7 +190,7 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
			msg = "The [BadBoy.name] subsystem was the last to fire for 2 controller restarts. It will be recovered now and disabled if it happens again."
			FireHim = TRUE
		if(3)
			msg = "The [BadBoy.name] subsystem seems to be destabilizing the MC and will be offlined."
			msg = "The [BadBoy.name] subsystem seems to be destabilizing the MC and will be put offline."
			BadBoy.flags |= SS_NO_FIRE
	if(msg)
		to_chat(GLOB.permissions.admins, span_boldannounce("[msg]"))
@@ -157,7 +198,7 @@ GLOBAL_REAL(Master, /datum/controller/master) = new

	if (istype(Master.subsystems))
		if(FireHim)
			Master.subsystems += new BadBoy.type //NEW_SS_GLOBAL will remove the old one
			Master.subsystems += new BadBoy.type //NEW_SS_GLOBAL will remove the old one
		subsystems = Master.subsystems
		current_runlevel = Master.current_runlevel
		StartProcessing(10)
@@ -165,42 +206,62 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
		to_chat(world, span_boldannounce("The Master Controller is having some issues, we will need to re-initialize EVERYTHING"))
		Initialize(20, TRUE)


// Please don't stuff random bullshit here,
// Make a subsystem, give it the SS_NO_FIRE flag, and do your work in its Initialize()
// Make a subsystem, give it the SS_NO_FIRE flag, and do your work in its Initialize()
/datum/controller/master/Initialize(delay, init_sss, tgs_prime)
	set waitfor = 0

	if(delay)
		sleep(delay)

	if(tgs_prime)
		world.TgsInitializationComplete()

	if(init_sss)
		init_subtypes(/datum/controller/subsystem, subsystems)

	init_stage_completed = 0
	var/mc_started = FALSE

	to_chat(world, span_boldannounce("Initializing subsystems..."))

	// Sort subsystems by init_order, so they initialize in the correct order.
	sortTim(subsystems, /proc/cmp_subsystem_init)
	//yogs -- loading progress stuff; have to initialize this static
	for(var/s in subsystems)
		var/datum/controller/subsystem/SS = s
		SS.total_loading_points += SS.loading_points
	//yogs end
	var/list/stage_sorted_subsystems = new(INITSTAGE_MAX)
	for (var/i in 1 to INITSTAGE_MAX)
		stage_sorted_subsystems[i] = list()

	// Sort subsystems by init_order, so they initialize in the correct order.
	sortTim(subsystems, GLOBAL_PROC_REF(cmp_subsystem_init))

	for (var/datum/controller/subsystem/subsystem as anything in subsystems)
		//yogs -- loading progress stuff; have to initialize this static
		subsystem.total_loading_points += subsystem.loading_points
		//yogs end
		var/subsystem_init_stage = subsystem.init_stage
		if (!isnum(subsystem_init_stage) || subsystem_init_stage < 1 || subsystem_init_stage > INITSTAGE_MAX || round(subsystem_init_stage) != subsystem_init_stage)
			stack_trace("ERROR: MC: subsystem `[subsystem.type]` has an invalid init_stage: `[subsystem_init_stage]`. Setting to `[INITSTAGE_MAX]`")
			subsystem_init_stage = subsystem.init_stage = INITSTAGE_MAX
		stage_sorted_subsystems[subsystem_init_stage] += subsystem

	// Sort subsystems by display setting for easy access.
	sortTim(subsystems, GLOBAL_PROC_REF(cmp_subsystem_display))
	var/start_timeofday = REALTIMEOFDAY
	// Initialize subsystems.
	current_ticklimit = CONFIG_GET(number/tick_limit_mc_init)
	for (var/datum/controller/subsystem/SS in subsystems)
		if (SS.flags & SS_NO_INIT)
			continue
		SS.Initialize(REALTIMEOFDAY)
		CHECK_TICK
	current_ticklimit = TICK_LIMIT_RUNNING
	for (var/current_init_stage in 1 to INITSTAGE_MAX)

		// Initialize subsystems.
		for (var/datum/controller/subsystem/subsystem in stage_sorted_subsystems[current_init_stage])
			init_subsystem(subsystem)

			CHECK_TICK
		current_initializing_subsystem = null
		init_stage_completed = current_init_stage
		if (!mc_started)
			mc_started = TRUE
			if (!current_runlevel)
				SetRunLevel(1)
			// Loop.
			Master.StartProcessing(0)

	var/time = (REALTIMEOFDAY - start_timeofday) / 10



	var/msg = "Initializations complete within [time] second[time == 1 ? "" : "s"]!"
	to_chat(world, span_boldannounce("[msg]"))
	log_world(msg)
@@ -212,24 +273,101 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
		"changelog_hash" = GLOB.changelog_hash)
	webhook_send_roundstatus("lobby", webhookData) //yogs end -webhook support

	if (!current_runlevel)
		SetRunLevel(1)

	// Sort subsystems by display setting for easy access.
	sortTim(subsystems, /proc/cmp_subsystem_display)
	// Set world options.
	world.change_fps(CONFIG_GET(number/fps))
	var/initialized_tod = REALTIMEOFDAY

	if(tgs_prime)
		world.TgsInitializationComplete()

	if(sleep_offline_after_initializations)
		world.sleep_offline = TRUE
	sleep(0.1 SECONDS)
	sleep(1 TICKS)

	if(sleep_offline_after_initializations && CONFIG_GET(flag/resume_after_initializations))
		world.sleep_offline = FALSE
	initializations_finished_with_no_players_logged_in = initialized_tod < REALTIMEOFDAY - 10
	// Loop.
	Master.StartProcessing(0)

/**
 * Initialize a given subsystem and handle the results.
 *
 * Arguments:
 * * subsystem - the subsystem to initialize.
 */
/datum/controller/master/proc/init_subsystem(datum/controller/subsystem/subsystem)
	var/static/list/valid_results = list(
		SS_INIT_FAILURE,
		SS_INIT_NONE,
		SS_INIT_SUCCESS,
		SS_INIT_NO_NEED,
	)

	if (subsystem.flags & SS_NO_INIT || subsystem.initialized) //Don't init SSs with the corresponding flag, or ones that are already initialized
		return

	current_initializing_subsystem = subsystem
	rustg_time_reset(SS_INIT_TIMER_KEY)

	var/result = subsystem.Initialize()

	// Capture end time
	var/time = rustg_time_milliseconds(SS_INIT_TIMER_KEY)
	var/seconds = round(time / 1000, 0.01)

	// Always update the blackbox tally regardless.
	SSblackbox.record_feedback("tally", "subsystem_initialize", time, subsystem.name)

	// Gave an invalid return value.
	if(result && !(result in valid_results))
		warning("[subsystem.name] subsystem initialized, returning invalid result [result]. This is a bug.")

	// just returned ..() or didn't implement Initialize() at all
	if(result == SS_INIT_NONE)
		warning("[subsystem.name] subsystem does not implement Initialize() or it returns ..(). If the former is true, the SS_NO_IN IT flag should be set for this subsystem.")

	if(result != SS_INIT_FAILURE)
		// Some form of success, implicit failure, or the SS is unused.
		subsystem.initialized = TRUE

		SEND_SIGNAL(subsystem, COMSIG_SUBSYSTEM_POST_INITIALIZE)
	else
		// The subsystem officially reports that it failed to init and wishes to be treated as such.
		subsystem.initialized = FALSE
		subsystem.can_fire = FALSE

	// The rest of this proc is printing the world log and chat message.
	var/message_prefix

	// If true, print the chat message with boldwarning text.
	var/chat_warning = FALSE

	switch(result)
		if(SS_INIT_FAILURE)
			message_prefix = "Failed to initialize [subsystem.name] subsystem after"
			chat_warning = TRUE
		if(SS_INIT_SUCCESS)
			message_prefix = "Initialized [subsystem.name] subsystem within"
		if(SS_INIT_NO_NEED)
			// This SS is disabled or is otherwise shy.
			return
		else
			// SS_INIT_NONE or an invalid value.
			message_prefix = "Initialized [subsystem.name] subsystem with errors within"
			chat_warning = TRUE

	var/message = "[message_prefix] [seconds] second[seconds == 1 ? "" : "s"]!"
	var/chat_message = chat_warning ? span_boldwarning(message) : span_boldannounce(message)

	to_chat(world, chat_message)
	log_world(message)

	//yogs loading points
	if(subsystem.loading_points) //Skip subsystems that contribute no loading points
		subsystem.total_loading_points_progress += subsystem.loading_points
		var/percent = round(subsystem.total_loading_points_progress / subsystem.total_loading_points * 100)
		to_chat(world, span_boldnotice("Subsystem initialization at [percent]%..."))
	// Yogs end

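//A minimal subsystem-side sketch (hypothetical subsystem, not part of this diff): returning
//SS_INIT_FAILURE makes init_subsystem() above leave the subsystem uninitialized and set
//can_fire = FALSE, while SS_INIT_SUCCESS marks it initialized. The Connect()-style dependency
//check is a stand-in for whatever the subsystem actually needs.
/datum/controller/subsystem/example_db_user/Initialize()
	if (!SSdbcore.Connect())
		return SS_INIT_FAILURE //dependency unavailable; the MC will offline us
	return SS_INIT_SUCCESS
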
/datum/controller/master/proc/SetRunLevel(new_runlevel)
	var/old_runlevel = current_runlevel
@@ -247,8 +385,14 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
	if(delay)
		sleep(delay)
	testing("Master starting processing")
	var/rtn = Loop()
	if (rtn > 0 || processing < 0)
	var/started_stage
	var/rtn = -2
	do
		started_stage = init_stage_completed
		rtn = Loop(started_stage)
	while (rtn == MC_LOOP_RTN_NEWSTAGES && processing > 0 && started_stage < init_stage_completed)

	if (rtn >= MC_LOOP_RTN_GRACEFUL_EXIT || processing < 0)
		return //this was supposed to happen.
	//loop ended, restart the mc
	log_game("MC crashed or runtimed, restarting")
@@ -260,24 +404,26 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
	Failsafe.defcon = 2

// Main loop.
/datum/controller/master/proc/Loop()
/datum/controller/master/proc/Loop(init_stage)
	. = -1
	//Prep the loop (most of this is because we want MC restarts to reset as much state as we can, and because
	// local vars rock
	// local vars rock

	//all this shit is here so that flag edits can be refreshed by restarting the MC. (and for speed)
	var/list/tickersubsystems = list()
	var/list/runlevel_sorted_subsystems = list(list()) //ensure we always have at least one runlevel
	var/list/runlevel_sorted_subsystems = list(list()) //ensure we always have at least one runlevel
	var/timer = world.time
	for (var/thing in subsystems)
		var/datum/controller/subsystem/SS = thing
		if (SS.flags & SS_NO_FIRE)
			continue
		if (SS.init_stage > init_stage)
			continue
		SS.queued_time = 0
		SS.queue_next = null
		SS.queue_prev = null
		SS.state = SS_IDLE
		if (SS.flags & SS_TICKER)
		if ((SS.flags & (SS_TICKER|SS_BACKGROUND)) == SS_TICKER)
			tickersubsystems += SS
			timer += world.tick_lag * rand(1, 5)
			SS.next_fire = timer
@@ -298,9 +444,9 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
	queue_tail = null
	//these sort by lower priorities first to reduce the number of loops needed to add subsequent SS's to the queue
	//(higher subsystems will be sooner in the queue, adding them later in the loop means we don't have to loop thru them next queue add)
	sortTim(tickersubsystems, /proc/cmp_subsystem_priority)
	sortTim(tickersubsystems, GLOBAL_PROC_REF(cmp_subsystem_priority))
	for(var/I in runlevel_sorted_subsystems)
		sortTim(runlevel_sorted_subsystems, /proc/cmp_subsystem_priority)
		sortTim(I, GLOBAL_PROC_REF(cmp_subsystem_priority))
		I += tickersubsystems

	var/cached_runlevel = current_runlevel
@@ -313,33 +459,43 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
	var/error_level = 0
	var/sleep_delta = 1
	var/list/subsystems_to_check
	//the actual loop.

	//setup the stack overflow detector
	stack_end_detector = new()
	var/datum/stack_canary/canary = stack_end_detector.prime_canary()
	canary.use_variable()
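	//The canary presumably sits at the base of this proc's call stack: if a stack overflow kills
	//the main loop, the canary var is wiped along with the rest of the stack, and the
	//stack_end_detector can notice its loss and report the overflow instead of the MC dying silently.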
	//the actual loop.
	while (1)
		tickdrift = max(0, MC_AVERAGE_FAST(tickdrift, (((REALTIMEOFDAY - init_timeofday) - (world.time - init_time)) / world.tick_lag)))
		var/starting_tick_usage = TICK_USAGE

		if (init_stage != init_stage_completed)
			return MC_LOOP_RTN_NEWSTAGES
		if (processing <= 0)
			current_ticklimit = TICK_LIMIT_RUNNING
			sleep(1 SECONDS)
			continue

		//Anti-tick-contention heuristics:
		//if there are multiple sleeping procs running before us hogging the cpu, we have to run later.
		// (because sleeps are processed in the order received, longer sleeps are more likely to run first)
		if (starting_tick_usage > TICK_LIMIT_MC) //if there isn't enough time to bother doing anything this tick, sleep a bit.
			sleep_delta *= 2
			current_ticklimit = TICK_LIMIT_RUNNING * 0.5
			sleep(world.tick_lag * (processing * sleep_delta))
			continue
		if (init_stage == INITSTAGE_MAX)
			//if there are multiple sleeping procs running before us hogging the cpu, we have to run later.
			// (because sleeps are processed in the order received, longer sleeps are more likely to run first)
			if (starting_tick_usage > TICK_LIMIT_MC) //if there isn't enough time to bother doing anything this tick, sleep a bit.
				sleep_delta *= 2
				current_ticklimit = TICK_LIMIT_RUNNING * 0.5
				sleep(world.tick_lag * (processing * sleep_delta))
				continue

		//Byond resumed us late. assume it might have to do the same next tick
		if (last_run + CEILING(world.tick_lag * (processing * sleep_delta), world.tick_lag) < world.time)
			sleep_delta += 1
			//Byond resumed us late. assume it might have to do the same next tick
			if (last_run + CEILING(world.tick_lag * (processing * sleep_delta), world.tick_lag) < world.time)
				sleep_delta += 1

		sleep_delta = MC_AVERAGE_FAST(sleep_delta, 1) //decay sleep_delta
			sleep_delta = MC_AVERAGE_FAST(sleep_delta, 1) //decay sleep_delta

		if (starting_tick_usage > (TICK_LIMIT_MC*0.75)) //we ran 3/4 of the way into the tick
			sleep_delta += 1
			if (starting_tick_usage > (TICK_LIMIT_MC*0.75)) //we ran 3/4 of the way into the tick
				sleep_delta += 1
		else
			sleep_delta = 1

		//debug
		if (make_runtime)
@@ -367,29 +523,40 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
		else
			subsystems_to_check = tickersubsystems

		if (CheckQueue(subsystems_to_check) <= 0)
		if (CheckQueue(subsystems_to_check) <= 0) //error processing the queue
			stack_trace("MC: CheckQueue failed. Current error_level is [round(error_level, 0.25)]")
			if (!SoftReset(tickersubsystems, runlevel_sorted_subsystems))
				log_world("MC: SoftReset() failed, crashing")
				return
			if (!error_level)
				error_level++
				CRASH("MC: SoftReset() failed, exiting loop()")

			if (error_level < 2) //except for the first strike, stop incrementing our iteration so the failsafe enters defcon
				iteration++
				error_level++
			else
				cached_runlevel = null //3 strikes, let's reset the runlevel lists
			current_ticklimit = TICK_LIMIT_RUNNING
			sleep(1 SECONDS)
			sleep((1 SECONDS) * error_level)
			error_level++
			continue

		if (queue_head)
			if (RunQueue() <= 0)
				if (!SoftReset(tickersubsystems, runlevel_sorted_subsystems))
					log_world("MC: SoftReset() failed, crashing")
					return
				if (!error_level)
					iteration++
			if (RunQueue() <= 0) //error running the queue
				stack_trace("MC: RunQueue failed. Current error_level is [round(error_level, 0.25)]")
				if (error_level > 1) //skip the first error,
					if (!SoftReset(tickersubsystems, runlevel_sorted_subsystems))
						error_level++
						CRASH("MC: SoftReset() failed, exiting loop()")

					if (error_level <= 2) //after 3 strikes stop incrementing our iteration so the failsafe enters defcon
						iteration++
					else
						cached_runlevel = null //3 strikes, let's also reset the runlevel lists
					current_ticklimit = TICK_LIMIT_RUNNING
					sleep((1 SECONDS) * error_level)
					error_level++
					continue
				error_level++
				current_ticklimit = TICK_LIMIT_RUNNING
				sleep(1 SECONDS)
				continue
		error_level--
		if (error_level > 0)
			error_level = max(MC_AVERAGE_SLOW(error_level-1, error_level), 0)
		if (!queue_head) //reset the counts if the queue is empty, in the off chance they get out of sync
			queue_priority_count = 0
			queue_priority_count_bg = 0
@@ -399,9 +566,27 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
		if (skip_ticks)
			skip_ticks--
		src.sleep_delta = MC_AVERAGE_FAST(src.sleep_delta, sleep_delta)
		current_ticklimit = TICK_LIMIT_RUNNING
		if (processing * sleep_delta <= world.tick_lag)
			current_ticklimit -= (TICK_LIMIT_RUNNING * 0.25) //reserve the tail 1/4 of the next tick for the mc if we plan on running next tick

		// Force any verbs into overtime, to test how they perform under load
		// For local ONLY
#ifdef VERB_STRESS_TEST
		/// Target enough tick usage to only allow time for our maptick estimate and verb processing, and nothing else
		var/overtime_target = TICK_LIMIT_RUNNING
		// This will leave just enough cpu time for maptick, forcing verbs to run into overtime
		// Use this for testing the worst case scenario, when maptick is spiking and usage is otherwise completely consumed
#ifdef FORCE_VERB_OVERTIME
		overtime_target += TICK_BYOND_RESERVE
#endif
		CONSUME_UNTIL(overtime_target)
#endif

		if (init_stage != INITSTAGE_MAX)
			current_ticklimit = TICK_LIMIT_RUNNING * 2
		else
			current_ticklimit = TICK_LIMIT_RUNNING
			if (processing * sleep_delta <= world.tick_lag)
				current_ticklimit -= (TICK_LIMIT_RUNNING * 0.25) //reserve the tail 1/4 of the next tick for the mc if we plan on running next tick

		sleep(world.tick_lag * (processing * sleep_delta))


@@ -431,11 +616,16 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
			continue
		if ((SS_flags & (SS_TICKER|SS_KEEP_TIMING)) == SS_KEEP_TIMING && SS.last_fire + (SS.wait * 0.75) > world.time)
			continue
		if (SS.postponed_fires >= 1)
			SS.postponed_fires--
			SS.update_nextfire()
			continue
		SS.enqueue()
		. = 1


// Run thru the queue of subsystems to run, running them while balancing out their allocated tick percentage
/// RunQueue - Run thru the queue of subsystems to run, running them while balancing out their allocated tick percentage
/// Returns 0 if runtimed, a negative number for logic errors, and a positive number if the operation completed without errors
/datum/controller/master/proc/RunQueue()
	. = 0
	var/datum/controller/subsystem/queue_node
@@ -447,12 +637,11 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
	var/tick_precentage
	var/tick_remaining
	var/ran = TRUE //this is right
	var/ran_non_ticker = FALSE
	var/bg_calc //have we switched current_tick_budget to background mode yet?
	var/tick_usage

	//keep running while we have stuff to run and we haven't gone over a tick
	// this is so subsystems paused earlier can use tick time that later subsystems never used
	// this is so subsystems paused earlier can use tick time that later subsystems never used
	while (ran && queue_head && TICK_USAGE < TICK_LIMIT_MC)
		ran = FALSE
		bg_calc = FALSE
@@ -461,7 +650,6 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
		while (queue_node)
			if (ran && TICK_USAGE > TICK_LIMIT_RUNNING)
				break

			queue_node_flags = queue_node.flags
			queue_node_priority = queue_node.queued_priority

@@ -469,37 +657,35 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
				queue_node = queue_node.queue_next
				continue

			//super special case, subsystems where we can't make them pause midway through
			//if we can't run them this tick (without going over a tick)
			//we bump up their priority and attempt to run them next tick
			//(unless we haven't even run anything this tick, since it's unlikely they will ever be able to run
			// in those cases, so we just let them run)
			if (queue_node_flags & SS_NO_TICK_CHECK)
				if (queue_node.tick_usage > TICK_LIMIT_RUNNING - TICK_USAGE && ran_non_ticker)
					queue_node.queued_priority += queue_priority_count * 0.1
					queue_priority_count -= queue_node_priority
					queue_priority_count += queue_node.queued_priority
					current_tick_budget -= queue_node_priority
					queue_node = queue_node.queue_next
					continue
			if ((queue_node_flags & SS_BACKGROUND))
				if (!bg_calc)
					current_tick_budget = queue_priority_count_bg
					bg_calc = TRUE
			else if (bg_calc)
				//error state, do sane fallback behavior
				if (. == 0)
					log_world("MC: Queue logic failure, non-background subsystem queued to run after a background subsystem: [queue_node] queue_prev:[queue_node.queue_prev]")
				. = -1
				current_tick_budget = queue_priority_count //this won't even be right, but it is the best we have.
				bg_calc = FALSE

			if ((queue_node_flags & SS_BACKGROUND) && !bg_calc)
				current_tick_budget = queue_priority_count_bg
				bg_calc = TRUE

			tick_remaining = TICK_LIMIT_RUNNING - TICK_USAGE

			if (current_tick_budget > 0 && queue_node_priority > 0)
				tick_precentage = tick_remaining / (current_tick_budget / queue_node_priority)
			if (queue_node_priority >= 0 && current_tick_budget > 0 && current_tick_budget >= queue_node_priority)
				//Give the subsystem a percentage of the remaining tick based on the remaining priority
				tick_precentage = tick_remaining * (queue_node_priority / current_tick_budget)
			else
				tick_precentage = tick_remaining
				//error state
				if (. == 0)
					log_world("MC: tick_budget sync error. [json_encode(list(current_tick_budget, queue_priority_count, queue_priority_count_bg, bg_calc, queue_node, queue_node_priority))]")
				. = -1
				tick_precentage = tick_remaining //just because we lost track of priority calculations doesn't mean we can't try to finish off the run; if the error state persists, we don't want to stop ticks from happening

			tick_precentage = max(tick_precentage*0.5, tick_precentage-queue_node.tick_overrun)

			current_ticklimit = round(TICK_USAGE + tick_precentage)
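			//A worked example (not from the original source): with 60% of the tick remaining,
			//a remaining priority budget of 15, and this subsystem queued at priority 5,
			//tick_precentage = 60 * (5 / 15) = 20% of the tick; an average tick_overrun of 4
			//then trims that to max(10, 16) = 16%, so current_ticklimit lands 16 points above TICK_USAGE.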

			if (!(queue_node_flags & SS_TICKER))
				ran_non_ticker = TRUE
			ran = TRUE

			queue_node_paused = (queue_node.state == SS_PAUSED || queue_node.state == SS_PAUSING)
@@ -536,7 +722,7 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
			queue_node.paused_ticks = 0
			queue_node.paused_tick_usage = 0

			if (queue_node_flags & SS_BACKGROUND) //update our running total
			if (bg_calc) //update our running total
				queue_priority_count_bg -= queue_node_priority
			else
				queue_priority_count -= queue_node_priority
@@ -544,14 +730,7 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
			queue_node.last_fire = world.time
			queue_node.times_fired++

			if (queue_node_flags & SS_TICKER)
				queue_node.next_fire = world.time + (world.tick_lag * queue_node.wait)
			else if (queue_node_flags & SS_POST_FIRE_TIMING)
				queue_node.next_fire = world.time + queue_node.wait + (world.tick_lag * (queue_node.tick_overrun/100))
			else if (queue_node_flags & SS_KEEP_TIMING)
				queue_node.next_fire += queue_node.wait
			else
				queue_node.next_fire = queue_node.queued_time + queue_node.wait + (world.tick_lag * (queue_node.tick_overrun/100))
			queue_node.update_nextfire()

			queue_node.queued_time = 0

@@ -560,17 +739,19 @@ GLOBAL_REAL(Master, /datum/controller/master) = new

		queue_node = queue_node.queue_next

	. = 1
	if (. == 0)
		. = 1

//resets the queue, and all subsystems, while filtering out the subsystem lists
// called if any of the mc's queue procs runtime or exit improperly.
// called if any of the mc's queue procs runtime or exit improperly.
/datum/controller/master/proc/SoftReset(list/ticker_SS, list/runlevel_SS)
	. = 0
	log_world("MC: SoftReset called, resetting MC queue state.")
	stack_trace("MC: SoftReset called, resetting MC queue state.")

	if (!istype(subsystems) || !istype(ticker_SS) || !istype(runlevel_SS))
		log_world("MC: SoftReset: Bad list contents: '[subsystems]' '[ticker_SS]' '[runlevel_SS]'")
		return
	var/subsystemstocheck = subsystems + ticker_SS
	var/subsystemstocheck = subsystems | ticker_SS
	for(var/I in runlevel_SS)
		subsystemstocheck |= I

@@ -604,14 +785,17 @@ GLOBAL_REAL(Master, /datum/controller/master) = new
	log_world("MC: SoftReset: Finished.")
	. = 1

/// Warns us that the end of tick byond map_update will be laggier than normal, so that we can just skip running subsystems this tick.
/datum/controller/master/proc/laggy_byond_map_update_incoming()
	if (!skip_ticks)
		skip_ticks = 1


/datum/controller/master/stat_entry(msg)
	msg = "(TickRate:[Master.processing]) (Iteration:[Master.iteration]) (TickLimit: [round(Master.current_ticklimit, 0.1)])"
	return msg


/datum/controller/master/StartLoadingMap()
	//disallow more than one map to load at once, multithreading it will just cause race conditions
	while(map_loading)

@@ -1,39 +1,102 @@
/**
 * # Subsystem base class
 *
 * Defines a subsystem to be managed by the [Master Controller][/datum/controller/master]
 *
 * Simply define a child of this subsystem, using the [SUBSYSTEM_DEF] macro, and the MC will handle registration.
 * Changing the name is required
 **/

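//A minimal usage sketch (hypothetical subsystem, not part of this diff); the fire() body and
//process_queued_work() are stand-ins for whatever per-fire work the subsystem actually does:
SUBSYSTEM_DEF(example)
	name = "Example"
	wait = 2 SECONDS
	flags = SS_BACKGROUND

/datum/controller/subsystem/example/fire(resumed)
	process_queued_work()
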
/datum/controller/subsystem
	// Metadata; you should define these.
	name = "fire coderbus" //name of the subsystem
	var/init_order = INIT_ORDER_DEFAULT //order of initialization. Higher numbers are initialized first, lower numbers later. Use defines in __DEFINES/subsystems.dm for easy understanding of order.
	var/wait = 20 //time to wait (in deciseconds) between each call to fire(). Must be a positive integer.
	var/priority = FIRE_PRIORITY_DEFAULT //When multiple subsystems need to run in the same tick, higher priority subsystems will run first and be given a higher share of the tick before MC_TICK_CHECK triggers a sleep

	var/flags = 0 //see MC.dm in __DEFINES Most flags must be set on world start to take full effect. (You can also restart the mc to force them to process again)
	/// Name of the subsystem - you must change this
	name = "fire coderbus"

	var/initialized = FALSE //set to TRUE after it has been initialized, will obviously never be set if the subsystem doesn't initialize
	/// Order of initialization. Higher numbers are initialized first, lower numbers later. Use or create defines such as [INIT_ORDER_DEFAULT] so we can see the order in one file.
	var/init_order = INIT_ORDER_DEFAULT

	//set to 0 to prevent fire() calls, mostly for admin use or subsystems that may be resumed later
	// use the SS_NO_FIRE flag instead for systems that never fire to keep it from even being added to the list
	/// Time to wait (in deciseconds) between each call to fire(). Must be a positive integer.
	var/wait = 20

	/// Priority Weight: When multiple subsystems need to run in the same tick, higher priority subsystems will be given a higher share of the tick before MC_TICK_CHECK triggers a sleep; higher priority subsystems also run before lower priority subsystems
	var/priority = FIRE_PRIORITY_DEFAULT

	/// [Subsystem Flags][SS_NO_INIT] to control binary behavior. Flags must be set at compile time or before preinit finishes to take full effect. (You can also restart the mc to force them to process again)
	var/flags = NONE

	/// Which stage does this subsystem init at. Earlier stages can fire while later stages init.
	var/init_stage = INITSTAGE_MAIN

	/// This var is set to TRUE after the subsystem has been initialized.
	var/initialized = FALSE

	/// Set to 0 to prevent fire() calls, mostly for admin use or subsystems that may be resumed later
	/// use the [SS_NO_FIRE] flag instead for systems that never fire, to keep them from even being added to the list that is checked every tick
	var/can_fire = TRUE

	// Bookkeeping variables; probably shouldn't mess with these.
	var/last_fire = 0 //last world.time we called fire()
	var/next_fire = 0 //scheduled world.time for next fire()
	var/cost = 0 //average time to execute
	var/tick_usage = 0 //average tick usage
	var/tick_overrun = 0 //average tick overrun
	var/state = SS_IDLE //tracks the current state of the ss, running, paused, etc.
	var/paused_ticks = 0 //ticks this ss is taking to run right now.
	var/paused_tick_usage //total tick_usage of all of our runs while pausing this run
	var/ticks = 1 //how many ticks does this ss take to run on avg.
	var/times_fired = 0 //number of times we have called fire()
	var/queued_time = 0 //time we entered the queue, (for timing and priority reasons)
	var/queued_priority //we keep a running total to make the math easier; if priority changed mid-fire that would break our running total, so we store it here
	//linked list stuff for the queue
	///Bitmap of what game states this subsystem can fire at. See [RUNLEVELS_DEFAULT] for more details.
	var/runlevels = RUNLEVELS_DEFAULT //points of the game at which the SS can fire

	/*
	 * The following variables are managed by the MC and should not be modified directly.
	 */

	/// Last world.time the subsystem completed a run (as in wasn't paused by [MC_TICK_CHECK])
	var/last_fire = 0

	/// Scheduled world.time for next fire()
	var/next_fire = 0

	/// Running average of the amount of milliseconds it takes the subsystem to complete a run (including all resumes but not the time spent paused)
	var/cost = 0

	/// Running average of the amount of tick usage, in percents of a tick, it takes the subsystem to complete a run
	var/tick_usage = 0

	/// Running average of the amount of tick usage (in percents of a game tick) the subsystem has spent past its allocated time without pausing
	var/tick_overrun = 0

	/// How much of a tick (in percents of a tick) were we allocated last fire.
	var/tick_allocation_last = 0

	/// How much of a tick (in percents of a tick) do we get allocated by the mc on avg.
	var/tick_allocation_avg = 0

	/// Tracks the current execution state of the subsystem. Used to handle subsystems that sleep in fire so the mc doesn't run them again while they are sleeping
	var/state = SS_IDLE

	/// Tracks how many fires the subsystem has consecutively paused on in the current run
	var/paused_ticks = 0

	/// Tracks how much of a tick the subsystem has consumed in the current run
	var/paused_tick_usage

	/// Tracks how many fires the subsystem takes to complete a run on average.
	var/ticks = 1

	/// Tracks the amount of completed runs for the subsystem
	var/times_fired = 0

	/// How many fires have we been requested to postpone
	var/postponed_fires = 0

	/// Time the subsystem entered the queue, (for timing and priority reasons)
	var/queued_time = 0

	/// Priority at the time the subsystem entered the queue. Needed to avoid changes in priority (by admins and the like) from breaking things.
	var/queued_priority

	/// How many times we suspect a subsystem type has crashed the MC; 3 strikes and you're out!
	var/static/list/failure_strikes

	/// Next subsystem in the queue of subsystems to run this tick
	var/datum/controller/subsystem/queue_next
	/// Previous subsystem in the queue of subsystems to run this tick
	var/datum/controller/subsystem/queue_prev

	var/runlevels = RUNLEVELS_DEFAULT //points of the game at which the SS can fire

	var/static/list/failure_strikes //How many times we suspect a subsystem type has crashed the MC, 3 strikes and you're out!
	//Do not blindly add vars here to the bottom, put them where they belong above
	//If your var only has two values, put it in as a flag.

	//yogs start -- loading time stuff
	var/static/total_loading_points_progress = 0 //! How much progress we've made in loading all the subsystems so far.
@@ -50,10 +113,15 @@
/datum/controller/subsystem/proc/PreInit()
	return

//This is used so the mc knows when the subsystem sleeps. do not override.
///This is used so the mc knows when the subsystem sleeps. do not override.
/datum/controller/subsystem/proc/ignite(resumed = FALSE)
	SHOULD_NOT_OVERRIDE(TRUE)
	set waitfor = FALSE
	. = SS_IDLE

	tick_allocation_last = Master.current_ticklimit-(TICK_USAGE)
	tick_allocation_avg = MC_AVERAGE(tick_allocation_avg, tick_allocation_last)

	. = SS_SLEEPING
	fire(resumed)
	. = state
@@ -65,9 +133,9 @@
		state = SS_PAUSED
		queued_time = QT

//previously, this would have been named 'process()' but that name is used everywhere for different things!
//fire() seems more suitable. This is the procedure that gets called every 'wait' deciseconds.
//Sleeping in here prevents future fires until returned.
///previously, this would have been named 'process()' but that name is used everywhere for different things!
///fire() seems more suitable. This is the procedure that gets called every 'wait' deciseconds.
///Sleeping in here prevents future fires until returned.
/datum/controller/subsystem/proc/fire(resumed = FALSE)
	flags |= SS_NO_FIRE
	CRASH("Subsystem [src]([type]) does not fire() but did not set the SS_NO_FIRE flag. Please add the SS_NO_FIRE flag to any subsystem that doesn't fire so it doesn't get added to the processing list and waste cpu.")
@@ -76,12 +144,38 @@
	dequeue()
	can_fire = 0
	flags |= SS_NO_FIRE
	Master.subsystems -= src
	if (Master)
		Master.subsystems -= src
	return ..()

//Queue it to run.
// (we loop thru a linked list until we get to the end or find the right point)
// (this lets us sort our run order correctly without having to re-sort the entire already sorted list)

/** Update next_fire for the next run.
 * reset_time (bool) - Ignore things that would normally alter the next fire, like tick_overrun and last_fire. (also resets postpone)
 */
/datum/controller/subsystem/proc/update_nextfire(reset_time = FALSE)
	var/queue_node_flags = flags

	if (reset_time)
		postponed_fires = 0
		if (queue_node_flags & SS_TICKER)
			next_fire = world.time + (world.tick_lag * wait)
		else
			next_fire = world.time + wait
		return

	if (queue_node_flags & SS_TICKER)
		next_fire = world.time + (world.tick_lag * wait)
	else if (queue_node_flags & SS_POST_FIRE_TIMING)
		next_fire = world.time + wait + (world.tick_lag * (tick_overrun/100))
	else if (queue_node_flags & SS_KEEP_TIMING)
		next_fire += wait
	else
		next_fire = queued_time + wait + (world.tick_lag * (tick_overrun/100))


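//A worked example (not from the original source): for an SS_TICKER subsystem, wait is measured
//in server ticks, so with wait = 2 and world.tick_lag = 0.5 the next fire lands 1 decisecond
//(2 ticks) out; a normal subsystem with wait = 20 is simply rescheduled 2 seconds from when it
//was queued, plus a small fudge factor derived from its average tick_overrun.
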
///Queue it to run.
/// (we loop thru a linked list until we get to the end or find the right point)
/// (this lets us sort our run order correctly without having to re-sort the entire already sorted list)
/datum/controller/subsystem/proc/enqueue()
	var/SS_priority = priority
	var/SS_flags = flags
@@ -94,7 +188,7 @@
		queue_node_flags = queue_node.flags

		if (queue_node_flags & (SS_TICKER|SS_BACKGROUND) == SS_TICKER)
			if (!(SS_flags & SS_TICKER))
			if ((SS_flags & (SS_TICKER|SS_BACKGROUND)) != SS_TICKER)
				continue
			if (queue_node_priority < SS_priority)
				break
@@ -145,9 +239,9 @@
		queue_next.queue_prev = queue_prev
	if (queue_prev)
		queue_prev.queue_next = queue_next
	if (src == Master.queue_tail)
	if (Master && (src == Master.queue_tail))
		Master.queue_tail = queue_prev
	if (src == Master.queue_head)
	if (Master && (src == Master.queue_head))
		Master.queue_head = queue_next
	queued_time = 0
	if (state == SS_QUEUED)
@@ -165,35 +259,14 @@
/// Called after the config has been loaded or reloaded.
/datum/controller/subsystem/proc/OnConfigLoad()

///used to initialize the subsystem AFTER the map has loaded
///This should be called by the derived subsystem class AFTER it has done its own initialization.
/datum/controller/subsystem/Initialize(start_timeofday)
	initialized = TRUE
	SEND_SIGNAL(src, COMSIG_SUBSYSTEM_POST_INITIALIZE, start_timeofday)
	var/time = (REALTIMEOFDAY - start_timeofday)/10
	var/msg = "Initialized [name] subsystem within [time] second[time == 1 ? "" : "s"]!" // Yogs -- quieter subsystem initialization
	to_chat(GLOB.permissions.admins,
		type = MESSAGE_TYPE_DEBUG,
		html = span_notice(msg),
		confidential = FALSE)
	log_world(msg)
	if(!loading_points) // We're probably one of those crappy subsystems that take 0 seconds, so return
		return time
	total_loading_points_progress += loading_points
	var/percent = round(total_loading_points_progress / total_loading_points * 100)
	to_chat(world, span_boldnotice("Subsystem initialization at [percent]%..."))
	return time // Yogs end

/**
 * Used to initialize the subsystem. This is expected to be overridden by subtypes.
 */
/datum/controller/subsystem/Initialize()
	return SS_INIT_NONE

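//A hedged sketch of the expected override shape (hypothetical subsystem, not part of this diff):
//subtypes do their one-time setup and return one of the SS_INIT_* values instead of ..(), since
//the base implementation above returns SS_INIT_NONE and the MC logs a warning for that.
/datum/controller/subsystem/example_registry/Initialize()
	build_registry_cache() //stand-in for whatever setup the subsystem needs
	return SS_INIT_SUCCESS
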
/datum/controller/subsystem/stat_entry(msg)
	if(can_fire && !(SS_NO_FIRE & flags))
		msg = "[round(cost,1)]ms|[round(tick_usage,1)]%([round(tick_overrun,1)]%)|[round(ticks,0.1)]\t[msg]"
	else
		msg = "OFFLINE\t[msg]"
	return msg

/datum/controller/subsystem/stat_entry(msg)
	if(can_fire && !(SS_NO_FIRE & flags))
	if(can_fire && !(SS_NO_FIRE & flags) && init_stage <= Master.init_stage_completed)
		msg = "[round(cost,1)]ms|[round(tick_usage,1)]%([round(tick_overrun,1)]%)|[round(ticks,0.1)]\t[msg]"
	else
		msg = "OFFLINE\t[msg]"
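//A worked example of the resulting stat line (illustrative values only): a subsystem averaging
//12ms per run, 3% of a tick with 1% average overrun, completing in ~1.5 fires renders as
//"12ms|3%(1%)|1.5", while one whose init_stage hasn't completed yet shows as "OFFLINE".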
@@ -212,11 +285,10 @@
		if (SS_IDLE)
			. = " "

//could be used to postpone a costly subsystem for (default one) var/cycles, cycles
//for instance, during cpu intensive operations like explosions
/// Causes the next "cycles" fires to be missed. The effect is cumulative, but can be reset by calling update_nextfire(reset_time = TRUE)
/datum/controller/subsystem/proc/postpone(cycles = 1)
	if(next_fire - world.time < wait)
		next_fire += (wait*cycles)
	if (can_fire && cycles >= 1)
		postponed_fires += cycles

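//A usage sketch (hypothetical call site, not part of this diff): something expensive like an
//explosion could call SSexample.postpone(2) so the subsystem skips its next two scheduled fires;
//the queue check in master.dm then decrements postponed_fires and reschedules instead of enqueueing.
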
//usually called via datum/controller/subsystem/New() when replacing a subsystem (i.e. due to a recurring crash)
//should attempt to salvage what it can from the old instance of the subsystem
@@ -224,10 +296,10 @@

/datum/controller/subsystem/vv_edit_var(var_name, var_value)
	switch (var_name)
		if ("can_fire")
		if (NAMEOF(src, can_fire))
			//this is so the subsystem doesn't rapid-fire to make up missed ticks, causing more lag
			if (var_value)
				next_fire = world.time + wait
		if ("queued_priority") //editing this breaks things.
			return 0
				update_nextfire(reset_time = TRUE)
		if (NAMEOF(src, queued_priority)) //editing this breaks things.
			return FALSE
	. = ..()

@@ -76,7 +76,7 @@ SUBSYSTEM_DEF(achievements)
	qdel(getRidOfOldStuff)

	qdel(ridOldChieves)
	return ..()
	return SS_INIT_SUCCESS

/**
 * Subsystem firing, checks solar panel achievement

@@ -20,7 +20,7 @@ SUBSYSTEM_DEF(adjacent_air)
/datum/controller/subsystem/adjacent_air/Initialize()
	while(length(queue))
		fire(mc_check = FALSE)
	return ..()
	return SS_INIT_SUCCESS

/datum/controller/subsystem/adjacent_air/fire(resumed = FALSE, mc_check = TRUE)


@@ -71,7 +71,7 @@ SUBSYSTEM_DEF(air)
	setup_atmos_machinery()
	setup_pipenets()
	gas_reactions = init_gas_reactions()
	return ..()
	return SS_INIT_SUCCESS

/datum/controller/subsystem/air/proc/extools_update_ssair()


@@ -33,4 +33,4 @@ SUBSYSTEM_DEF(assets)

	transport.Initialize(cache)

	..()
	return SS_INIT_SUCCESS

@@ -20,7 +20,7 @@ SUBSYSTEM_DEF(atoms)
	setupGenetics() //to set the mutations' sequence
	initialized = INITIALIZATION_INNEW_MAPLOAD
	InitializeAtoms()
	return ..()
	return SS_INIT_SUCCESS

/datum/controller/subsystem/atoms/proc/InitializeAtoms(list/atoms)
	if(initialized == INITIALIZATION_INSSATOMS)

@@ -1,7 +1,6 @@
SUBSYSTEM_DEF(blackbox)
	name = "Blackbox"
	wait = 6000
	flags = SS_NO_TICK_CHECK
	wait = 10 MINUTES
	runlevels = RUNLEVEL_GAME | RUNLEVEL_POSTGAME
	init_order = INIT_ORDER_BLACKBOX

@@ -23,7 +22,7 @@ SUBSYSTEM_DEF(blackbox)
	record_feedback("amount", "dm_version", DM_VERSION)
	record_feedback("amount", "byond_version", world.byond_version)
	record_feedback("amount", "byond_build", world.byond_build)
	. = ..()
	return SS_INIT_SUCCESS

//poll population
/datum/controller/subsystem/blackbox/fire()

@@ -12,6 +12,11 @@ SUBSYSTEM_DEF(chat)

	var/list/payload_by_client = list()

/datum/controller/subsystem/chat/Initialize()
	// Just used by the chat system to know that initialization is nearly finished.
	// The to_chat checks could probably check the runlevel instead, but that would require testing.
	return SS_INIT_SUCCESS

/datum/controller/subsystem/chat/fire()
	for(var/key in payload_by_client)
		var/client/client = key

@@ -25,7 +25,7 @@ SUBSYSTEM_DEF(dbcore)
		if(2)
			message_admins("Could not get schema version from database")

	return ..()
	return SS_INIT_SUCCESS

/datum/controller/subsystem/dbcore/fire()
	for(var/I in active_queries)
@@ -397,4 +397,4 @@ Delayed insert mode was removed in mysql 7 and only works with MyISAM type table

/datum/DBQuery/proc/Close()
	rows = null
	item = null
	item = null

@@ -147,7 +147,7 @@ SUBSYSTEM_DEF(demo)
	for(var/line in pre_init_lines)
		WRITE_LOG_NO_FORMAT(GLOB.demo_log, "[line]\n")

	return ..()
	return SS_INIT_SUCCESS

/datum/controller/subsystem/demo/fire()
	if(!src.marked_new.len && !src.marked_dirty.len && !src.marked_turfs.len && !src.del_list.len)

@@ -12,7 +12,7 @@ Then their Discord ID is linked to their Ckey in the database

SUBSYSTEM_DEF(discord)
	name = "Discord"
	flags = SS_NO_FIRE
	flags = SS_NO_FIRE|SS_NO_INIT

	var/notify_file = file("data/notify.json")

@@ -63,4 +63,4 @@ SUBSYSTEM_DEF(discord)
// Clean up a discord account mention
/datum/controller/subsystem/discord/proc/id_clean(input)
	var/regex/num_only = regex("\[^0-9\]", "g")
	return num_only.Replace(input, "")
	return num_only.Replace(input, "")

@@ -17,7 +17,7 @@ SUBSYSTEM_DEF(disease)
	for(var/common_disease_type in all_common_diseases)
		var/datum/disease/prototype = new common_disease_type()
		archive_diseases[prototype.GetDiseaseID()] = prototype
	return ..()
	return SS_INIT_SUCCESS

/datum/controller/subsystem/disease/stat_entry(msg)
	msg = "P:[length(active_diseases)]"

@@ -87,7 +87,7 @@ SUBSYSTEM_DEF(economy)
			continue
		else
			new /datum/bank_account/department(A, budget_starting_amt)
	return ..()
	return SS_INIT_SUCCESS

/datum/controller/subsystem/economy/fire(resumed = 0)
	tally_departments() //see how many staff are in each department

@@ -22,7 +22,7 @@ SUBSYSTEM_DEF(events)
		control += E //add it to the list of all events (controls)
	reschedule()
	getHoliday()
	return ..()
	return SS_INIT_SUCCESS


/datum/controller/subsystem/events/fire(resumed = 0)

@@ -1,3 +1,26 @@
/*!
## Debugging GC issues

In order to debug `qdel()` failures, there are several tools available.
To enable these tools, define `TESTING` in [_compile_options.dm](https://github.com/tgstation/-tg-station/blob/master/code/_compile_options.dm).

First is a verb called "Find References", which lists **every** reference to an object in the world. This allows you to track down any indirect or obfuscated references that you might have missed.

Complementing this is another verb, "qdel() then Find References".
This does exactly what you'd expect; it calls `qdel()` on the object and then it finds all references remaining.
This is great, because it means that `Destroy()` will have been called before it starts to find references,
so the only references you'll find will be the ones preventing the object from `qdel()`ing gracefully.

If you have a datum or something you are not destroying directly (say via the singulo),
the next tool is `QDEL_HINT_FINDREFERENCE`. You can return this in `Destroy()` (where you would normally `return ..()`),
to print a list of references once it enters the GC queue.

Finally, there is a verb, "Show qdel() Log", which shows the deletion log that the garbage subsystem keeps. This is helpful if you are having race conditions or need to review the order of deletions.

Note that for any of these tools to work `TESTING` must be defined.
By using these methods of finding references, you can make your life far, far easier when dealing with `qdel()` failures.
*/

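//A minimal sketch of the QDEL_HINT_FINDREFERENCE technique described above (hypothetical datum,
//not part of this diff; only useful when TESTING is defined):
/datum/suspected_leaker/Destroy()
	. = ..() //run normal teardown first
	return QDEL_HINT_FINDREFERENCE //instead of the parent's hint, dump whatever still references us
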
SUBSYSTEM_DEF(garbage)
|
||||
name = "Garbage"
|
||||
priority = FIRE_PRIORITY_GARBAGE
|
||||
@@ -5,39 +28,37 @@ SUBSYSTEM_DEF(garbage)
|
||||
flags = SS_POST_FIRE_TIMING|SS_BACKGROUND|SS_NO_INIT
|
||||
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
|
||||
init_order = INIT_ORDER_GARBAGE
|
||||
init_stage = INITSTAGE_EARLY
|
||||
|
||||
var/list/collection_timeout = list(GC_FILTER_QUEUE, GC_CHECK_QUEUE, GC_DEL_QUEUE) // deciseconds to wait before moving something up in the queue to the next level

//Stat tracking
var/delslasttick = 0 // number of del()'s we've done this tick
var/gcedlasttick = 0 // number of things that gc'ed last tick
var/delslasttick = 0 // number of del()'s we've done this tick
var/gcedlasttick = 0 // number of things that gc'ed last tick
var/totaldels = 0
var/totalgcs = 0

var/highest_del_time = 0
var/highest_del_tickusage = 0
var/highest_del_ms = 0
var/highest_del_type_string = ""

var/list/pass_counts
var/list/fail_counts

var/list/items = list() // Holds our qdel_item statistics datums
var/list/items = list() // Holds our qdel_item statistics datums

//Queue
var/list/queues

#ifdef TESTING
#ifdef REFERENCE_TRACKING
var/list/reference_find_on_fail = list()
#ifdef REFERENCE_TRACKING_DEBUG
//Should we save found refs. Used for unit testing
var/should_save_refs = FALSE
#endif
#endif


/datum/controller/subsystem/garbage/PreInit()
queues = new(GC_QUEUE_COUNT)
pass_counts = new(GC_QUEUE_COUNT)
fail_counts = new(GC_QUEUE_COUNT)
for(var/i in 1 to GC_QUEUE_COUNT)
queues[i] = list()
pass_counts[i] = 0
fail_counts[i] = 0
InitQueues()

/datum/controller/subsystem/garbage/stat_entry(msg)
var/list/counts = list()
@@ -57,7 +78,6 @@ SUBSYSTEM_DEF(garbage)
msg += "TGR:[round((totalgcs/(totaldels+totalgcs))*100, 0.01)]%"
msg += " P:[pass_counts.Join(",")]"
msg += "|F:[fail_counts.Join(",")]"

return ..()

/datum/controller/subsystem/garbage/Shutdown()
@@ -69,13 +89,18 @@ SUBSYSTEM_DEF(garbage)
for(var/path in items)
var/datum/qdel_item/I = items[path]
dellog += "Path: [path]"
if (I.qdel_flags & QDEL_ITEM_SUSPENDED_FOR_LAG)
dellog += "\tSUSPENDED FOR LAG"
if (I.failures)
dellog += "\tFailures: [I.failures]"
dellog += "\tqdel() Count: [I.qdels]"
dellog += "\tDestroy() Cost: [I.destroy_time]ms"
if (I.hard_deletes)
dellog += "\tTotal Hard Deletes [I.hard_deletes]"
dellog += "\tTotal Hard Deletes: [I.hard_deletes]"
dellog += "\tTime Spent Hard Deleting: [I.hard_delete_time]ms"
dellog += "\tHighest Time Spent Hard Deleting: [I.hard_delete_max]ms"
if (I.hard_deletes_over_threshold)
dellog += "\tHard Deletes Over Threshold: [I.hard_deletes_over_threshold]"
if (I.slept_destroy)
dellog += "\tSleeps: [I.slept_destroy]"
if (I.no_respect_force)
@@ -104,6 +129,15 @@ SUBSYSTEM_DEF(garbage)



/datum/controller/subsystem/garbage/proc/InitQueues()
if (isnull(queues)) // Only init the queues if they don't already exist, prevents overriding of recovered lists
queues = new(GC_QUEUE_COUNT)
pass_counts = new(GC_QUEUE_COUNT)
fail_counts = new(GC_QUEUE_COUNT)
for(var/i in 1 to GC_QUEUE_COUNT)
queues[i] = list()
pass_counts[i] = 0
fail_counts[i] = 0

/datum/controller/subsystem/garbage/proc/HandleQueue(level = GC_QUEUE_FILTER)
if (level == GC_QUEUE_FILTER)
@@ -121,18 +155,21 @@ SUBSYSTEM_DEF(garbage)

lastlevel = level

for (var/refID in queue)
if (!refID)
//We do this rather than for(var/refID in queue) because that sort of for loop copies the whole list.
//Normally this isn't expensive, but the gc queue can grow to 40k items, and that gets costly/causes overrun.
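// A minimal sketch of the contrast (illustrative only):
// for (var/entry in queue) // implicitly copies the list before iterating
// for (var/i in 1 to length(queue)) // indexes in place, no copy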
for (var/i in 1 to length(queue))
var/list/L = queue[i]
if (length(L) < 2)
count++
if (MC_TICK_CHECK)
return
continue

var/GCd_at_time = queue[refID]
var/GCd_at_time = L[1]
if(GCd_at_time > cut_off_time)
break // Everything else is newer, skip them
count++

var/refID = L[2]
var/datum/D
D = locate(refID)

@@ -140,8 +177,8 @@ SUBSYSTEM_DEF(garbage)
++gcedlasttick
++totalgcs
pass_counts[level]++
#ifdef TESTING
reference_find_on_fail -= refID //It's deleted we don't care anymore.
#ifdef REFERENCE_TRACKING
reference_find_on_fail -= refID //It's deleted we don't care anymore.
#endif
if (MC_TICK_CHECK)
return
@@ -149,21 +186,43 @@ SUBSYSTEM_DEF(garbage)

// Something's still referring to the qdel'd object.
fail_counts[level]++

#ifdef REFERENCE_TRACKING
var/ref_searching = FALSE
#endif

switch (level)
if (GC_QUEUE_CHECK)
#ifdef TESTING
#ifdef REFERENCE_TRACKING
if(reference_find_on_fail[refID])
D.find_references()
INVOKE_ASYNC(D, TYPE_PROC_REF(/datum,find_references))
ref_searching = TRUE
#ifdef GC_FAILURE_HARD_LOOKUP
else
D.find_references()
INVOKE_ASYNC(D, TYPE_PROC_REF(/datum,find_references))
ref_searching = TRUE
#endif
reference_find_on_fail -= refID
#endif
var/type = D.type
var/datum/qdel_item/I = items[type]
testing("GC: -- \ref[src] | [type] was unable to be GC'd --")

log_world("## TESTING: GC: -- [text_ref(D)] | [type] was unable to be GC'd --")
#ifdef TESTING
for(var/c in GLOB.permissions.admins) //Using testing() here would fill the logs with ADMIN_VV garbage
var/client/admin = c
if(!check_rights_for(admin, R_ADMIN))
continue
to_chat(admin, "## TESTING: GC: -- [ADMIN_VV(D)] | [type] was unable to be GC'd --")
#endif
I.failures++

if (I.qdel_flags & QDEL_ITEM_SUSPENDED_FOR_LAG)
#ifdef REFERENCE_TRACKING
if(ref_searching)
return //ref searching intentionally cancels all further fires while running so things that hold references don't end up getting deleted, so we want to return here instead of continue
#endif
continue
if (GC_QUEUE_HARDDELETE)
if (!CONFIG_GET(flag/disable_gc_failure_hard_deletes))
HardDelete(D)
@@ -173,6 +232,11 @@ SUBSYSTEM_DEF(garbage)

Queue(D, level+1)

#ifdef REFERENCE_TRACKING
if(ref_searching)
return
#endif

if (MC_TICK_CHECK)
return
if (count)
@@ -186,93 +250,94 @@ SUBSYSTEM_DEF(garbage)
HardDelete(D)
return
var/gctime = world.time
var/refid = "\ref[D]"
var/refid = text_ref(D)

D.gc_destroyed = gctime
var/list/queue = queues[level]
if (queue[refid])
queue -= refid // Removing any previous references that were GC'd so that the current object will be at the end of the list.

queue[refid] = gctime
queue[++queue.len] = list(gctime, refid) // not += for byond reasons

//this is mainly to separate things profile wise.
/datum/controller/subsystem/garbage/proc/HardDelete(datum/D)
var/time = world.timeofday
var/tick = TICK_USAGE
var/ticktime = world.time
++delslasttick
++totaldels
var/type = D.type
var/refID = "\ref[D]"
if (!CONFIG_GET(flag/disable_all_hard_deletes))
del(D)
var/refID = text_ref(D)

tick = (TICK_USAGE-tick+((world.time-ticktime)/world.tick_lag*100))
var/tick_usage = TICK_USAGE
del(D)
tick_usage = TICK_USAGE_TO_MS(tick_usage)

var/datum/qdel_item/I = items[type]

I.hard_deletes++
I.hard_delete_time += TICK_DELTA_TO_MS(tick)
I.hard_delete_time += tick_usage
if (tick_usage > I.hard_delete_max)
I.hard_delete_max = tick_usage
if (tick_usage > highest_del_ms)
highest_del_ms = tick_usage
highest_del_type_string = "[type]"

var/time = MS2DS(tick_usage)

if (tick > highest_del_tickusage)
highest_del_tickusage = tick
time = world.timeofday - time
if (!time && TICK_DELTA_TO_MS(tick) > 1)
time = TICK_DELTA_TO_MS(tick)/100
if (time > highest_del_time)
highest_del_time = time
if (time > 10)
log_game("Error: [type]([refID]) took longer than 1 second to delete (took [time/10] seconds to delete)")
message_admins("Error: [type]([refID]) took longer than 1 second to delete (took [time/10] seconds to delete).")
if (time > 0.1 SECONDS)
postpone(time)
var/threshold = CONFIG_GET(number/hard_deletes_overrun_threshold)
if (threshold && (time > threshold SECONDS))
if (!(I.qdel_flags & QDEL_ITEM_ADMINS_WARNED))
log_game("Error: [type]([refID]) took longer than [threshold] seconds to delete (took [round(time/10, 0.1)] seconds to delete)")
message_admins("Error: [type]([refID]) took longer than [threshold] seconds to delete (took [round(time/10, 0.1)] seconds to delete).")
I.qdel_flags |= QDEL_ITEM_ADMINS_WARNED
I.hard_deletes_over_threshold++
var/overrun_limit = CONFIG_GET(number/hard_deletes_overrun_limit)
if (overrun_limit && I.hard_deletes_over_threshold >= overrun_limit)
I.qdel_flags |= QDEL_ITEM_SUSPENDED_FOR_LAG

/datum/controller/subsystem/garbage/Recover()
InitQueues() //We first need to create the queues before recovering data
if (istype(SSgarbage.queues))
for (var/i in 1 to SSgarbage.queues.len)
queues[i] |= SSgarbage.queues[i]


/// Qdel Item: Holds statistics on each type that passes thru qdel
/datum/qdel_item
var/name = ""
var/qdels = 0 //Total number of times it's passed thru qdel.
var/destroy_time = 0 //Total amount of milliseconds spent processing this type's Destroy()
var/failures = 0 //Times it was queued for soft deletion but failed to soft delete.
var/hard_deletes = 0 //Different from failures because it also includes QDEL_HINT_HARDDEL deletions
var/hard_delete_time = 0//Total amount of milliseconds spent hard deleting this type.
var/no_respect_force = 0//Number of times it's not respected force=TRUE
var/no_hint = 0 //Number of times it's not even bother to give a qdel hint
var/slept_destroy = 0 //Number of times it's slept in its destroy
var/name = "" //!Holds the type as a string for this type
var/qdels = 0 //!Total number of times it's passed thru qdel.
var/destroy_time = 0 //!Total amount of milliseconds spent processing this type's Destroy()
var/failures = 0 //!Times it was queued for soft deletion but failed to soft delete.
var/hard_deletes = 0 //!Different from failures because it also includes QDEL_HINT_HARDDEL deletions
var/hard_delete_time = 0 //!Total amount of milliseconds spent hard deleting this type.
var/hard_delete_max = 0 //!Highest time spent hard_deleting this in ms.
var/hard_deletes_over_threshold = 0 //!Number of times hard deletes took longer than the configured threshold
var/no_respect_force = 0 //!Number of times it hasn't respected force=TRUE
var/no_hint = 0 //!Number of times it hasn't even bothered to give a qdel hint
var/slept_destroy = 0 //!Number of times it's slept in its destroy
var/qdel_flags = 0 //!Flags related to this type's trip thru qdel.

/datum/qdel_item/New(mytype)
name = "[mytype]"

#ifdef TESTING
/proc/qdel_and_find_ref_if_fail(datum/D, force = FALSE)
SSgarbage.reference_find_on_fail[REF(D)] = TRUE
qdel(D, force)
#endif

// Should be treated as a replacement for the 'del' keyword.
// Datums passed to this will be given a chance to clean up references to allow the GC to collect them.
/// Should be treated as a replacement for the 'del' keyword.
///
/// Datums passed to this will be given a chance to clean up references to allow the GC to collect them.
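///
/// A minimal usage sketch (illustrative; the widget var is hypothetical):
/// qdel(widget) // instead of del(widget); Destroy() runs and returns a QDEL_HINT_*
/// qdel(widget, TRUE) // force = TRUE overrides QDEL_HINT_LETMELIVE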
/proc/qdel(datum/D, force=FALSE, ...)
if(!istype(D))
del(D)
return

var/datum/qdel_item/I = SSgarbage.items[D.type]
if (!I)
I = SSgarbage.items[D.type] = new /datum/qdel_item(D.type)
I.qdels++


if(isnull(D.gc_destroyed))
if (SEND_SIGNAL(D, COMSIG_PARENT_PREQDELETED, force)) // Give the components a chance to prevent their parent from being deleted
return
D.gc_destroyed = GC_CURRENTLY_BEING_QDELETED
var/start_time = world.time
var/start_tick = world.tick_usage
SEND_SIGNAL(D, COMSIG_PARENT_QDELETING, force) // Let the (remaining) components know about the result of Destroy
var/hint = D.Destroy(arglist(args.Copy(2))) // Let our friend know they're about to get fucked up.
SEND_SIGNAL(D, COMSIG_PARENT_QDELETING, force, hint) // Let the (remaining) components know about the result of Destroy
if(world.time != start_time)
I.slept_destroy++
else
@@ -280,13 +345,13 @@ SUBSYSTEM_DEF(garbage)
if(!D)
return
switch(hint)
if (QDEL_HINT_QUEUE) //qdel should queue the object for deletion.
if (QDEL_HINT_QUEUE) //qdel should queue the object for deletion.
SSgarbage.Queue(D)
if (QDEL_HINT_IWILLGC)
D.gc_destroyed = world.time
SSdemo.mark_destroyed(D)
return
if (QDEL_HINT_LETMELIVE) //qdel should let the object live after calling destroy.
if (QDEL_HINT_LETMELIVE) //qdel should let the object live after calling destroy.
if(!force)
D.gc_destroyed = null //clear the gc variable (important!)
return
@@ -309,16 +374,14 @@ SUBSYSTEM_DEF(garbage)
if (QDEL_HINT_HARDDEL_NOW) //qdel should assume this object won't gc, and hard del it post haste.
SSdemo.mark_destroyed(D)
SSgarbage.HardDelete(D)
if (QDEL_HINT_FINDREFERENCE)//qdel will, if TESTING is enabled, display all references to this object, then queue the object for deletion.
#ifdef REFERENCE_TRACKING
if (QDEL_HINT_FINDREFERENCE) //qdel will, if REFERENCE_TRACKING is enabled, display all references to this object, then queue the object for deletion.
SSgarbage.Queue(D)
#ifdef TESTING
D.find_references()
#endif
if (QDEL_HINT_IFFAIL_FINDREFERENCE)
D.find_references() //This breaks ci. Consider it insurance against somehow proccing reftracking on accident
if (QDEL_HINT_IFFAIL_FINDREFERENCE) //qdel will, if REFERENCE_TRACKING is enabled and the object fails to collect, display all references to this object.
SSgarbage.Queue(D)
#ifdef TESTING
SSgarbage.reference_find_on_fail[REF(D)] = TRUE
#endif
SSgarbage.reference_find_on_fail[text_ref(D)] = TRUE
#endif
else
#ifdef TESTING
if(!I.no_hint)
@@ -330,116 +393,3 @@ SUBSYSTEM_DEF(garbage)
SSdemo.mark_destroyed(D)
else if(D.gc_destroyed == GC_CURRENTLY_BEING_QDELETED)
CRASH("[D.type] destroy proc was called multiple times, likely due to a qdel loop in the Destroy logic")

#ifdef TESTING

/datum/verb/find_refs()
set category = "Misc.Server Debug"
set name = "Find References"
set src in world

find_references(FALSE)

/datum/proc/find_references(skip_alert)
running_find_references = type
if(usr && usr.client)
if(usr.client.running_find_references)
testing("CANCELLED search for references to a [usr.client.running_find_references].")
usr.client.running_find_references = null
running_find_references = null
//restart the garbage collector
SSgarbage.can_fire = 1
SSgarbage.next_fire = world.time + world.tick_lag
return

if(!skip_alert)
if(alert("Running this will lock everything up for about 5 minutes. Would you like to begin the search?", "Find References", "Yes", "No") == "No")
running_find_references = null
return

//this keeps the garbage collector from failing to collect objects being searched for in here
SSgarbage.can_fire = 0

if(usr && usr.client)
usr.client.running_find_references = type

testing("Beginning search for references to a [type].")
last_find_references = world.time

DoSearchVar(GLOB) //globals
for(var/datum/thing in world) //atoms (don't beleive it's lies)
DoSearchVar(thing, "World -> [thing]")

for (var/datum/thing) //datums
DoSearchVar(thing, "World -> [thing]")

for (var/client/thing) //clients
DoSearchVar(thing, "World -> [thing]")

testing("Completed search for references to a [type].")
if(usr && usr.client)
usr.client.running_find_references = null
running_find_references = null

//restart the garbage collector
SSgarbage.can_fire = 1
SSgarbage.next_fire = world.time + world.tick_lag

/datum/verb/qdel_then_find_references()
set category = "Misc.Server Debug"
set name = "qdel() then Find References"
set src in world

qdel(src, TRUE) //Force.
if(!running_find_references)
find_references(TRUE)

/datum/verb/qdel_then_if_fail_find_references()
set category = "Misc.Server Debug"
set name = "qdel() then Find References if GC failure"
set src in world

qdel_and_find_ref_if_fail(src, TRUE)

/datum/proc/DoSearchVar(X, Xname, recursive_limit = 64)
if(usr && usr.client && !usr.client.running_find_references)
return
if (!recursive_limit)
return

if(istype(X, /datum))
var/datum/D = X
if(D.last_find_references == last_find_references)
return

D.last_find_references = last_find_references
var/list/L = D.vars

for(var/varname in L)
if (varname == "vars")
continue
var/variable = L[varname]

if(variable == src)
testing("Found [src.type] \ref[src] in [D.type]'s [varname] var. [Xname]")

else if(islist(variable))
DoSearchVar(variable, "[Xname] -> list", recursive_limit-1)

else if(islist(X))
var/normal = IS_NORMAL_LIST(X)
for(var/I in X)
if (I == src)
testing("Found [src.type] \ref[src] in list [Xname].")

else if (I && !isnum(I) && normal && X[I] == src)
testing("Found [src.type] \ref[src] in list [Xname]\[[I]\]")

else if (islist(I))
DoSearchVar(I, "[Xname] -> list", recursive_limit-1)

#ifndef FIND_REF_NO_CHECK_TICK
CHECK_TICK
#endif

#endif

@@ -41,4 +41,4 @@ SUBSYSTEM_DEF(icon_smooth)
smooth_icon(A)
CHECK_TICK

return ..()
return SS_INIT_SUCCESS

@@ -6,6 +6,7 @@ SUBSYSTEM_DEF(input)
flags = SS_TICKER
priority = FIRE_PRIORITY_INPUT
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
init_stage = INITSTAGE_EARLY

var/list/macro_sets
var/list/movement_keys
@@ -19,7 +20,7 @@ SUBSYSTEM_DEF(input)

refresh_client_macro_sets()

return ..()
return SS_INIT_SUCCESS

// This is for when macro sets are eventually datumized
/datum/controller/subsystem/input/proc/setup_default_macro_sets()

@@ -10,5 +10,5 @@ SUBSYSTEM_DEF(ipintel)

/datum/controller/subsystem/ipintel/Initialize(timeofday, zlevel)
enabled = 1
. = ..()
return SS_INIT_SUCCESS


@@ -24,7 +24,7 @@ SUBSYSTEM_DEF(job)
LoadJobs()
generate_selectable_species()
set_overflow_role(CONFIG_GET(string/overflow_job))
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/job/proc/set_overflow_role(new_overflow_role)
var/datum/job/new_overflow = GetJob(new_overflow_role)

@@ -15,4 +15,4 @@ SUBSYSTEM_DEF(language)

GLOB.language_datum_instances[language] = instance

return ..()
return SS_INIT_SUCCESS

@@ -28,7 +28,7 @@ SUBSYSTEM_DEF(lighting)

fire(FALSE, TRUE)

return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/lighting/fire(resumed, init_tick_checks)
MC_SPLIT_TICK_INIT(3)

@@ -9,7 +9,7 @@ SUBSYSTEM_DEF(machines)
/datum/controller/subsystem/machines/Initialize()
makepowernets()
fire()
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/machines/proc/makepowernets()
for(var/datum/powernet/PN in powernets)

@@ -141,7 +141,7 @@ SUBSYSTEM_DEF(mapping)
initialize_reserved_level()
// Build minimaps
build_minimaps()
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/mapping/proc/wipe_reservations(wipe_safety_delay = 100)
if(clearing_reserved_turfs || !initialized) //in either case this is just not needed.

@@ -14,7 +14,7 @@ SUBSYSTEM_DEF(materials)

/datum/controller/subsystem/materials/Initialize(timeofday)
InitializeMaterials()
return ..()
return SS_INIT_SUCCESS

///Ran on initialize; populates the materials and materials_by_category dictionaries with their appropriate vars (See these variables for more info)
/datum/controller/subsystem/materials/proc/InitializeMaterials(timeofday)
@@ -22,4 +22,4 @@ SUBSYSTEM_DEF(materials)
var/datum/material/ref = new type
materials[type] = ref
for(var/c in ref.categories)
materials_by_category[c] += list(ref)
materials_by_category[c] += list(ref)

@@ -8,7 +8,7 @@ SUBSYSTEM_DEF(minor_mapping)
/datum/controller/subsystem/minor_mapping/Initialize(timeofday)
trigger_migration(CONFIG_GET(number/mice_roundstart), FALSE) //we don't want roundstart special rats
place_satchels()
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/minor_mapping/proc/trigger_migration(num_mice = 10, special = TRUE)
var/list/exposed_wires = find_exposed_wires()

@@ -1,7 +1,6 @@
SUBSYSTEM_DEF(nightshift)
name = "Night Shift"
wait = 600
flags = SS_NO_TICK_CHECK
wait = 1 MINUTES

var/nightshift_active = FALSE
var/nightshift_start_time = 702000 //7:30 PM, station time
@@ -13,7 +12,8 @@ SUBSYSTEM_DEF(nightshift)
/datum/controller/subsystem/nightshift/Initialize()
if(!CONFIG_GET(flag/enable_night_shifts))
can_fire = FALSE
return ..()
return SS_INIT_NO_NEED
return SS_INIT_SUCCESS

/datum/controller/subsystem/nightshift/fire(resumed = FALSE)
if(world.time - SSticker.round_start_time < nightshift_first_check)

@@ -21,7 +21,7 @@ SUBSYSTEM_DEF(overlays)
/datum/controller/subsystem/overlays/Initialize()
initialized = TRUE
fire(mc_check = FALSE)
return ..()
return SS_INIT_SUCCESS


/datum/controller/subsystem/overlays/stat_entry(msg)

@@ -11,12 +11,12 @@ SUBSYSTEM_DEF(parallax)
var/random_parallax_color

/datum/controller/subsystem/parallax/Initialize(timeofday)
. = ..()
if(prob(70)) //70% chance to pick a special extra layer
random_layer = pick(/atom/movable/screen/parallax_layer/random/space_gas, /atom/movable/screen/parallax_layer/random/asteroids)
random_parallax_color = pick(COLOR_TEAL, COLOR_GREEN, COLOR_SILVER, COLOR_YELLOW, COLOR_CYAN, COLOR_ORANGE, COLOR_PURPLE)//Special color for random_layer1. Has to be done here so everyone sees the same color.
planet_y_offset = rand(100, 160)
planet_x_offset = rand(100, 160)
return SS_INIT_SUCCESS

/datum/controller/subsystem/parallax/fire(resumed = 0)
if (!resumed)

@@ -10,7 +10,7 @@ SUBSYSTEM_DEF(pathfinder)
space_type_cache = typecacheof(/turf/open/space)
mobs = new(10)
circuits = new(3)
return ..()
return SS_INIT_SUCCESS

/datum/flowcache
var/lcount

@@ -27,7 +27,7 @@ SUBSYSTEM_DEF(persistence)
LoadAntagReputation()
LoadRandomizedRecipes()
LoadPaintings()
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/persistence/proc/LoadPoly()
for(var/mob/living/simple_animal/parrot/Poly/P in GLOB.alive_mob_list)
@@ -358,4 +358,4 @@ SUBSYSTEM_DEF(persistence)
if(!original_human || original_human.stat == DEAD || !original_human.all_scars || original_human != ending_human)
original_human.save_persistent_scars(TRUE)
else
original_human.save_persistent_scars()
original_human.save_persistent_scars()

@@ -24,7 +24,7 @@ PROCESSING_SUBSYSTEM_DEF(instruments)
/datum/controller/subsystem/processing/instruments/Initialize()
initialize_instrument_data()
synthesizer_instrument_ids = get_allowed_instrument_ids()
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/processing/instruments/proc/on_song_new(datum/song/S)
songs += S

@@ -14,7 +14,7 @@ PROCESSING_SUBSYSTEM_DEF(networks)
/datum/controller/subsystem/processing/networks/Initialize()
station_network = new
station_network.register_map_supremecy()
. = ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/processing/networks/proc/register_network(datum/ntnet/network)
if(!networks_by_id[network.network_id])

@@ -31,7 +31,7 @@ PROCESSING_SUBSYSTEM_DEF(quirks)
list("Prosthetic Limb (Right Leg)","Paraplegic"),
list("Prosthetic Limb","Paraplegic")
)
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/processing/quirks/proc/SetupQuirks()
// Sort by Positive, Negative, Neutral; and then by name

@@ -27,7 +27,7 @@ PROCESSING_SUBSYSTEM_DEF(station)
announcer = new announcer() //Initialize the station's announcer datum
default_announcer = new default_announcer()

return ..()
return SS_INIT_SUCCESS

///Rolls for the amount of traits and adds them to the traits list
/datum/controller/subsystem/processing/station/proc/SetupTraits()

@@ -4,8 +4,7 @@ SUBSYSTEM_DEF(profiler)
name = "Profiler"
init_order = INIT_ORDER_PROFILER
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
wait = 3000
flags = SS_NO_TICK_CHECK
wait = 5 MINUTES
var/fetch_cost = 0
var/write_cost = 0

@@ -19,7 +18,7 @@ SUBSYSTEM_DEF(profiler)
StartProfiling()
else
StopProfiling() //Stop the early start from world/New
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/profiler/fire()
if(CONFIG_GET(flag/auto_profile))
@@ -61,4 +60,4 @@ SUBSYSTEM_DEF(profiler)
WRITE_FILE(json_file, current_profile_data)
write_cost = MC_AVERAGE(write_cost, TICK_DELTA_TO_MS(TICK_USAGE_REAL - timer))
WRITE_FILE(json_file, current_profile_data)
#endif
#endif

@@ -52,7 +52,7 @@ SUBSYSTEM_DEF(research)
autosort_categories()
error_design = new
error_node = new
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/research/fire()
handle_research_income()

@@ -169,7 +169,7 @@ SUBSYSTEM_DEF(runechat)

// Handle insertion into the secondary queue if the required time is outside our tracked amounts
if (scheduled_destruction >= BUCKET_LIMIT)
BINARY_INSERT(src, SSrunechat.second_queue, datum/chatmessage, src, scheduled_destruction, COMPARE_KEY)
BINARY_INSERT(src, SSrunechat.second_queue, /datum/chatmessage, src, scheduled_destruction, COMPARE_KEY)
return

// Get bucket position and a local reference to the datum var, it's faster to access this way

@@ -6,6 +6,7 @@ SUBSYSTEM_DEF(server_maint)
flags = SS_POST_FIRE_TIMING
priority = FIRE_PRIORITY_SERVER_MAINT
init_order = INIT_ORDER_SERVER_MAINT
init_stage = INITSTAGE_EARLY
runlevels = RUNLEVEL_LOBBY | RUNLEVELS_DEFAULT
var/list/currentrun
var/cleanup_ticker = 0
@@ -16,7 +17,7 @@ SUBSYSTEM_DEF(server_maint)
/datum/controller/subsystem/server_maint/Initialize(timeofday)
if (CONFIG_GET(flag/hub))
world.update_hub_visibility(TRUE)
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/server_maint/fire(resumed = FALSE)
if(!resumed)

@@ -4,7 +4,7 @@ SUBSYSTEM_DEF(shuttle)
name = "Shuttle"
wait = 10
init_order = INIT_ORDER_SHUTTLE
flags = SS_KEEP_TIMING|SS_NO_TICK_CHECK
flags = SS_KEEP_TIMING

loading_points = 4.9 SECONDS // Yogs -- loading times

@@ -81,7 +81,7 @@ SUBSYSTEM_DEF(shuttle)
WARNING("No /obj/docking_port/mobile/emergency/backup placed on the map!")
if(!supply)
WARNING("No /obj/docking_port/mobile/supply placed on the map!")
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/shuttle/proc/initial_load()
for(var/s in stationary)

@@ -25,7 +25,7 @@ SUBSYSTEM_DEF(sounds)

/datum/controller/subsystem/sounds/Initialize()
setup_available_channels()
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/sounds/proc/setup_available_channels()
channel_list = list()

@@ -2,7 +2,9 @@ SUBSYSTEM_DEF(statpanels)
name = "Stat Panels"
wait = 4
init_order = INIT_ORDER_STATPANELS
init_stage = INITSTAGE_EARLY
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
flags = SS_NO_INIT
var/list/currentrun = list()
var/encoded_global_data
var/mc_data_encoded

@@ -1,69 +0,0 @@
SUBSYSTEM_DEF(statpanels)
name = "Stat Panels"
wait = 4
init_order = INIT_ORDER_STATPANELS
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
var/list/currentrun = list()
var/encoded_global_data
var/mc_data_encoded

/datum/controller/subsystem/statpanels/fire(resumed = 0)
if (!resumed)
var/datum/map_config/cached = SSmapping.next_map_config
var/list/global_data = list(
"Map: [html_encode(SSmapping.config?.map_name || "Loading...")]",
cached ? "Next Map: [html_encode(cached.map_name)]" : null,
"Round ID: [GLOB.round_id ? GLOB.round_id : "NULL"]",
"Server Time: [time2text(world.timeofday, "YYYY-MM-DD hh:mm:ss")]",
"Round Time: [worldtime2text()]",
"Station Time: [station_time_timestamp()]",
"Security Alert Level: [get_security_level()]",
"Time Dilation: [round(SStime_track.time_dilation_current,1)]% AVG:([round(SStime_track.time_dilation_avg_fast,1)]%, [round(SStime_track.time_dilation_avg,1)]%, [round(SStime_track.time_dilation_avg_slow,1)]%)",
""
)

if(SSshuttle.emergency)
var/ETA = SSshuttle.emergency.getModeStr()
if(ETA)
global_data += "[ETA] [SSshuttle.emergency.getTimerStr()]"
encoded_global_data = url_encode(json_encode(global_data))

var/list/mc_data = list(
list("CPU:", world.cpu),
list("Instances:", "[num2text(world.contents.len, 10)]"),
list("World Time:", "[world.time]"),
list("Globals:", "Edit", "\ref[GLOB]"),
list("[config]:", "Edit", "\ref[config]"),
list("Byond:", "(FPS:[world.fps]) (TickCount:[world.time/world.tick_lag]) (TickDrift:[round(Master.tickdrift,1)]([round((Master.tickdrift/(world.time/world.tick_lag))*100,0.1)]%))"),
list("Master Controller:", Master ? "(TickRate:[Master.processing]) (Iteration:[Master.iteration])" : "ERROR", "\ref[Master]"),
list("Failsafe Controller:", Failsafe ? "Defcon: [Failsafe.defcon_pretty()] (Interval: [Failsafe.processing_interval] | Iteration: [Failsafe.master_iteration])" : "ERROR", "\ref[Failsafe]"),
list("","")
)
for(var/datum/controller/subsystem/SS in Master.subsystems)
mc_data[++mc_data.len] = list("\[[SS.state_letter()]][SS.name]", SS.stat_entry(), "\ref[SS]")
mc_data[++mc_data.len] = list("Camera Net", "Cameras: [GLOB.cameranet.cameras.len] | Chunks: [GLOB.cameranet.chunks.len]", "\ref[GLOB.cameranet]")
mc_data_encoded = url_encode(json_encode(mc_data))
src.currentrun = GLOB.clients.Copy()

var/list/currentrun = src.currentrun
while(currentrun.len)
var/client/C = currentrun[currentrun.len]
C << output(url_encode(C.statpanel), "statbrowser:tab_change") // work around desyncs
currentrun.len--
var/ping_str = url_encode("Ping: [round(C.lastping, 1)]ms (Average: [round(C.avgping, 1)]ms)")
var/other_str = url_encode(json_encode(C.mob.get_status_tab_items()))
C << output("[encoded_global_data];[ping_str];[other_str]", "statbrowser:update")
if(C.holder && C.statpanel == "MC")
var/turf/T = get_turf(C.eye)
var/coord_entry = url_encode(COORD(T))
C << output("[mc_data_encoded];[coord_entry];[url_encode(C.holder.href_token)]", "statbrowser:update_mc")
var/list/proc_holders = C.mob.get_proc_holders()
C.spell_tabs.Cut()
for(var/list/item in proc_holders)
C.spell_tabs |= item[1]
var/proc_holders_encoded = ""
if(C.statpanel in C.spell_tabs)
proc_holders_encoded = url_encode(json_encode(proc_holders))
C << output("[url_encode(json_encode(C.spell_tabs))];[proc_holders_encoded]", "statbrowser:update_spells")
if(MC_TICK_CHECK)
return
@@ -49,7 +49,7 @@ SUBSYSTEM_DEF(stickyban)
cache[ckey] = ban
world.SetConfig("ban", ckey, list2stickyban(ban))

return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/stickyban/proc/Populatedbcache()
var/newdbcache = list() //so if we runtime or the db connection dies we don't kill the existing cache

@@ -1,7 +1,6 @@
SUBSYSTEM_DEF(sun)
name = "Sun"
wait = 1 MINUTES
flags = SS_NO_TICK_CHECK

var/azimuth = 0 ///clockwise, top-down rotation from 0 (north) to 359
var/azimuth_mod = 1 ///multiplier against base_rotation
@@ -12,7 +11,7 @@ SUBSYSTEM_DEF(sun)
azimuth_mod = round(rand(50, 200)/100, 0.01) // 50% - 200% of standard rotation
if(prob(50))
azimuth_mod *= -1
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/sun/fire(resumed = FALSE)
azimuth += azimuth_mod * base_rotation
@@ -29,4 +28,4 @@ SUBSYSTEM_DEF(sun)
/datum/controller/subsystem/sun/vv_edit_var(var_name, var_value)
. = ..()
if(var_name == NAMEOF(src, azimuth))
complete_movement()
complete_movement()

@@ -142,7 +142,7 @@ SUBSYSTEM_DEF(ticker)
gametime_offset = rand(0, 23) HOURS
else if(CONFIG_GET(flag/shift_time_realtime))
gametime_offset = world.timeofday
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/ticker/fire()
if(seclevel2num(get_security_level()) < SEC_LEVEL_GAMMA && !GLOB.cryopods_enabled)

@@ -1,7 +1,7 @@
SUBSYSTEM_DEF(time_track)
name = "Time Tracking"
wait = 600
flags = SS_NO_INIT|SS_NO_TICK_CHECK
flags = SS_NO_INIT
runlevels = RUNLEVEL_LOBBY | RUNLEVELS_DEFAULT

var/time_dilation_current = 0

@@ -1,32 +1,56 @@
#define BUCKET_LEN (world.fps*1*60) //how many ticks should we keep in the bucket. (1 minutes worth)
#define BUCKET_POS(timer) (((round((timer.timeToRun - SStimer.head_offset) / world.tick_lag)+1) % BUCKET_LEN)||BUCKET_LEN)
#define TIMER_MAX (world.time + TICKS2DS(min(BUCKET_LEN-(SStimer.practical_offset-DS2TICKS(world.time - SStimer.head_offset))-1, BUCKET_LEN-1)))
#define TIMER_ID_MAX (2**24) //max float with integer precision
/// Controls how many buckets should be kept, each representing a tick. (1 minute's worth)
#define BUCKET_LEN (world.fps*1*60)
/// Helper for getting the correct bucket for a given timer
#define BUCKET_POS(timer) (((round((timer.timeToRun - timer.timer_subsystem.head_offset) / world.tick_lag)+1) % BUCKET_LEN)||BUCKET_LEN)
/// Gets the maximum time at which timers will be invoked from buckets, used for deferring to secondary queue
#define TIMER_MAX(timer_ss) (timer_ss.head_offset + TICKS2DS(BUCKET_LEN + timer_ss.practical_offset - 1))
/// Max float with integer precision
#define TIMER_ID_MAX (2**24)
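// Worked example of the defines above (a sketch assuming the common defaults of
// world.fps == 20 and world.tick_lag == 0.5):
// BUCKET_LEN = 20*1*60 = 1200 buckets, i.e. one minute of ticks
// a timer due 3 deciseconds after head_offset lands in bucket (round(3 / 0.5) + 1) = 7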

/**
* # Timer Subsystem
*
* Handles creation, callbacks, and destruction of timed events.
*
* It is important to understand the buckets used in the timer subsystem are just a series of doubly-linked
* lists. The object at a given index in bucket_list is a /datum/timedevent, the head of a list, which has prev
* and next references for the respective elements in that bucket's list.
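*
* A hedged illustration (names ours): if timers A, B and C are all due on the same
* tick, that tick's bucket holds A as its head with A.next == B and B.next == C, the
* prev references mirroring next, so unlinking one timer is a pair of pointer updates
* rather than a list search.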
*/
SUBSYSTEM_DEF(timer)
name = "Timer"
wait = 1 //SS_TICKER subsystem, so wait is in ticks
wait = 1 // SS_TICKER subsystem, so wait is in ticks
init_order = INIT_ORDER_TIMER

priority = FIRE_PRIORITY_TIMER
flags = SS_TICKER|SS_NO_INIT

var/list/datum/timedevent/second_queue = list() //awe, yes, you've had first queue, but what about second queue?
/// Queue used for storing timers that do not fit into the current buckets
var/list/datum/timedevent/second_queue = list()
/// A hashlist dictionary used for storing unique timers
var/list/hashes = list()

var/head_offset = 0 //world.time of the first entry in the the bucket.
var/practical_offset = 1 //index of the first non-empty item in the bucket.
var/bucket_resolution = 0 //world.tick_lag the bucket was designed for
var/bucket_count = 0 //how many timers are in the buckets

var/list/bucket_list = list() //list of buckets, each bucket holds every timer that has to run that byond tick.

var/list/timer_id_dict = list() //list of all active timers assoicated to their timer id (for easy lookup)

var/list/clienttime_timers = list() //special snowflake timers that run on fancy pansy "client time"

/// world.time of the first entry in the bucket list, effectively the 'start time' of the current buckets
var/head_offset = 0
/// Index of the wrap around pivot for buckets. buckets before this are later running buckets wrapped around from the end of the bucket list.
var/practical_offset = 1
/// world.tick_lag the bucket was designed for
var/bucket_resolution = 0
/// How many timers are in the buckets
var/bucket_count = 0
/// List of buckets, each bucket holds every timer that has to run that byond tick
var/list/bucket_list = list()
/// List of all active timers associated to their timer ID (for easy lookup)
var/list/timer_id_dict = list()
/// Special timers that run in real-time, not BYOND time; these are more expensive to run and maintain
var/list/clienttime_timers = list()
/// Contains the last time that a timer's callback was invoked, or the last tick the SS fired if no timers are being processed
var/last_invoke_tick = 0
/// Keeps track of the next index to work on for client timers
var/next_clienttime_timer_index = 0
/// Contains the last time that a warning was issued for not invoking callbacks
var/static/last_invoke_warning = 0
/// Boolean flag controlling whether the timer SS will automatically reset buckets if it fails to invoke callbacks for an extended period of time
var/static/bucket_auto_reset = TRUE
/// How many times bucket was reset
var/bucket_reset_count = 0

/datum/controller/subsystem/timer/PreInit()
bucket_list.len = BUCKET_LEN
@@ -34,48 +58,59 @@ SUBSYSTEM_DEF(timer)
bucket_resolution = world.tick_lag

/datum/controller/subsystem/timer/stat_entry(msg)
msg = "B:[bucket_count] P:[length(second_queue)] H:[length(hashes)] C:[length(clienttime_timers)] S:[length(timer_id_dict)]"
msg = "B:[bucket_count] P:[length(second_queue)] H:[length(hashes)] C:[length(clienttime_timers)] S:[length(timer_id_dict)] RST:[bucket_reset_count]"
return ..()

/datum/controller/subsystem/timer/fire(resumed = FALSE)
var/lit = last_invoke_tick
var/last_check = world.time - TICKS2DS(BUCKET_LEN*1.5)
var/list/bucket_list = src.bucket_list

if(!bucket_count)
last_invoke_tick = world.time

if(lit && lit < last_check && head_offset < last_check && last_invoke_warning < last_check)
last_invoke_warning = world.time
var/msg = "No regular timers processed in the last [BUCKET_LEN*1.5] ticks[bucket_auto_reset ? ", resetting buckets" : ""]!"
message_admins(msg)
WARNING(msg)
if(bucket_auto_reset)
bucket_resolution = 0

log_world("Timer bucket reset. world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
/datum/controller/subsystem/timer/proc/dump_timer_buckets(full = TRUE)
var/list/to_log = list("Timer bucket reset. world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
if (full)
for (var/i in 1 to length(bucket_list))
var/datum/timedevent/bucket_head = bucket_list[i]
if (!bucket_head)
continue

log_world("Active timers at index [i]:")

to_log += "Active timers at index [i]:"
var/datum/timedevent/bucket_node = bucket_head
var/anti_loop_check = 1000
var/anti_loop_check = 1
do
log_world(get_timer_debug_string(bucket_node))
to_log += get_timer_debug_string(bucket_node)
bucket_node = bucket_node.next
anti_loop_check--
while(bucket_node && bucket_node != bucket_head && anti_loop_check)
log_world("Active timers in the second_queue queue:")

to_log += "Active timers in the second_queue queue:"
for(var/I in second_queue)
log_world(get_timer_debug_string(I))
to_log += get_timer_debug_string(I)

var/next_clienttime_timer_index = 0
var/len = length(clienttime_timers)
// Dump all the logged data to the world log
log_world(to_log.Join("\n"))

for (next_clienttime_timer_index in 1 to len)
/datum/controller/subsystem/timer/fire(resumed = FALSE)
// Store local references to datum vars as it is faster to access them
var/lit = last_invoke_tick
var/list/bucket_list = src.bucket_list
var/last_check = world.time - TICKS2DS(BUCKET_LEN * 1.5)

// If there are no timers being tracked, then consider now to be the last invoked time
if(!bucket_count)
last_invoke_tick = world.time

// Check that we have invoked a callback in the last 1.5 minutes of BYOND time,
// and throw a warning and reset buckets if this is true
if(lit && lit < last_check && head_offset < last_check && last_invoke_warning < last_check)
last_invoke_warning = world.time
var/msg = "No regular timers processed in the last [BUCKET_LEN * 1.5] ticks[bucket_auto_reset ? ", resetting buckets" : ""]!"
message_admins(msg)
WARNING(msg)
if(bucket_auto_reset)
bucket_resolution = 0
dump_timer_buckets(CONFIG_GET(flag/log_timers_on_bucket_reset))

// Process client-time timers
if (next_clienttime_timer_index)
clienttime_timers.Cut(1, next_clienttime_timer_index+1)
next_clienttime_timer_index = 0
for (next_clienttime_timer_index in 1 to length(clienttime_timers))
if (MC_TICK_CHECK)
next_clienttime_timer_index--
break
@@ -86,144 +121,106 @@ SUBSYSTEM_DEF(timer)

var/datum/callback/callBack = ctime_timer.callBack
if (!callBack)
clienttime_timers.Cut(next_clienttime_timer_index,next_clienttime_timer_index+1)
CRASH("Invalid timer: [get_timer_debug_string(ctime_timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset], REALTIMEOFDAY: [REALTIMEOFDAY]")
CRASH("Invalid timer: [get_timer_debug_string(ctime_timer)] world.time: [world.time], \
head_offset: [head_offset], practical_offset: [practical_offset], REALTIMEOFDAY: [REALTIMEOFDAY]")

ctime_timer.spent = REALTIMEOFDAY
callBack.InvokeAsync()

if(ctime_timer.flags & TIMER_LOOP)
if(ctime_timer.flags & TIMER_LOOP) // Re-insert valid looping client timers into the client timer list.
if (QDELETED(ctime_timer)) // Don't re-insert timers deleted inside their callbacks.
continue
ctime_timer.spent = 0
ctime_timer.timeToRun = REALTIMEOFDAY + ctime_timer.wait
BINARY_INSERT(ctime_timer, clienttime_timers, datum/timedevent, ctime_timer, timeToRun, COMPARE_KEY)
BINARY_INSERT(ctime_timer, clienttime_timers, /datum/timedevent, ctime_timer, timeToRun, COMPARE_KEY)
else
qdel(ctime_timer)


// Remove invoked client-time timers
if (next_clienttime_timer_index)
clienttime_timers.Cut(1, next_clienttime_timer_index+1)
next_clienttime_timer_index = 0

if (MC_TICK_CHECK)
return

var/static/list/spent = list()
var/static/datum/timedevent/timer
// Check for when we need to loop the buckets, this occurs when
// the head_offset is approaching BUCKET_LEN ticks in the past
if (practical_offset > BUCKET_LEN)
head_offset += TICKS2DS(BUCKET_LEN)
practical_offset = 1
resumed = FALSE

// Check for when we have to reset buckets, typically from auto-reset
if ((length(bucket_list) != BUCKET_LEN) || (world.tick_lag != bucket_resolution))
reset_buckets()
bucket_list = src.bucket_list
resumed = FALSE


if (!resumed)
timer = null

while (practical_offset <= BUCKET_LEN && head_offset + ((practical_offset-1)*world.tick_lag) <= world.time)
var/datum/timedevent/head = bucket_list[practical_offset]
if (!timer || !head || timer == head)
head = bucket_list[practical_offset]
timer = head
while (timer)
// Iterate through each bucket starting from the practical offset
while (practical_offset <= BUCKET_LEN && head_offset + ((practical_offset - 1) * world.tick_lag) <= world.time)
var/datum/timedevent/timer
while ((timer = bucket_list[practical_offset]))
var/datum/callback/callBack = timer.callBack
if (!callBack)
bucket_resolution = null //force bucket recreation
CRASH("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
stack_trace("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], \
head_offset: [head_offset], practical_offset: [practical_offset], bucket_joined: [timer.bucket_joined]")
if (!timer.spent)
bucket_resolution = null // force bucket recreation
return

timer.bucketEject() //pop the timer off of the bucket list.

// Invoke callback if possible
if (!timer.spent)
spent += timer
timer.spent = world.time
callBack.InvokeAsync()
last_invoke_tick = world.time

if (MC_TICK_CHECK)
return

timer = timer.next
if (timer == head)
break


bucket_list[practical_offset++] = null

//we freed up a bucket, lets see if anything in second_queue needs to be shifted to that bucket.
var/i = 0
var/L = length(second_queue)
for (i in 1 to L)
timer = second_queue[i]
if (timer.timeToRun >= TIMER_MAX)
i--
break

if (timer.timeToRun < head_offset)
bucket_resolution = null //force bucket recreation
stack_trace("[i] Invalid timer state: Timer in long run queue with a time to run less then head_offset. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")

if (timer.callBack && !timer.spent)
timer.callBack.InvokeAsync()
spent += timer
bucket_count++
else if(!QDELETED(timer))
qdel(timer)
continue

if (timer.timeToRun < head_offset + TICKS2DS(practical_offset-1))
bucket_resolution = null //force bucket recreation
stack_trace("[i] Invalid timer state: Timer in long run queue that would require a backtrack to transfer to short run queue. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
if (timer.callBack && !timer.spent)
timer.callBack.InvokeAsync()
spent += timer
bucket_count++
else if(!QDELETED(timer))
qdel(timer)
continue

bucket_count++
var/bucket_pos = max(1, BUCKET_POS(timer))

var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
if (!bucket_head)
bucket_list[bucket_pos] = timer
timer.next = null
timer.prev = null
continue

if (!bucket_head.prev)
bucket_head.prev = bucket_head
timer.next = bucket_head
timer.prev = bucket_head.prev
timer.next.prev = timer
timer.prev.next = timer
if (i)
second_queue.Cut(1, i+1)

timer = null

bucket_count -= length(spent)

for (var/i in spent)
var/datum/timedevent/qtimer = i
if(QDELETED(qtimer))
bucket_count++
continue
if(!(qtimer.flags & TIMER_LOOP))
qdel(qtimer)
else
bucket_count++
qtimer.spent = 0
qtimer.bucketEject()
if(qtimer.flags & TIMER_CLIENT_TIME)
qtimer.timeToRun = REALTIMEOFDAY + qtimer.wait
if (timer.flags & TIMER_LOOP) // Prepare valid looping timers to re-enter the queue
if(QDELETED(timer)) // If a loop is deleted in its callback, we need to avoid re-inserting it.
continue
timer.spent = 0
timer.timeToRun = world.time + timer.wait
timer.bucketJoin()
else
qtimer.timeToRun = world.time + qtimer.wait
qtimer.bucketJoin()
qdel(timer)

spent.len = 0
if (MC_TICK_CHECK)
break

//formated this way to be runtime resistant
if (!bucket_list[practical_offset])
// Empty the bucket, check if anything in the secondary queue should be shifted to this bucket
bucket_list[practical_offset] = null // Just in case
practical_offset++
var/i = 0
for (i in 1 to length(second_queue))
timer = second_queue[i]
if (timer.timeToRun >= TIMER_MAX(src))
i--
break

// Check for timers that are scheduled to run in the past
if (timer.timeToRun < head_offset)
bucket_resolution = null // force bucket recreation
stack_trace("[i] Invalid timer state: Timer in long run queue with a time to run less than head_offset. \
[get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
break

// Check for timers that are not capable of being scheduled to run without rebuilding buckets
if (timer.timeToRun < head_offset + TICKS2DS(practical_offset - 1))
bucket_resolution = null // force bucket recreation
stack_trace("[i] Invalid timer state: Timer in long run queue that would require a backtrack to transfer to \
short run queue. [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
break

timer.bucketJoin()
if (i)
second_queue.Cut(1, i+1)
if (MC_TICK_CHECK)
break

/**
* Generates a string with details about the timed event for debugging purposes
*/
/datum/controller/subsystem/timer/proc/get_timer_debug_string(datum/timedevent/TE)
. = "Timer: [TE]"
. += "Prev: [TE.prev ? TE.prev : "NULL"], Next: [TE.next ? TE.next : "NULL"]"
@@ -234,12 +231,19 @@ SUBSYSTEM_DEF(timer)
if(!TE.callBack)
. += ", NO CALLBACK"

/**
* Destroys the existing buckets and creates new buckets from the existing timed events
*/
/datum/controller/subsystem/timer/proc/reset_buckets()
var/list/bucket_list = src.bucket_list
WARNING("Timer buckets have been reset, this may cause timers to lag")
bucket_reset_count++

var/list/bucket_list = src.bucket_list // Store local reference to datum var, this is faster
var/list/alltimers = list()
//collect the timers currently in the bucket

// Get all timers currently in the buckets
for (var/bucket_head in bucket_list)
if (!bucket_head)
if (!bucket_head) // if bucket is empty for this tick
continue
var/datum/timedevent/bucket_node = bucket_head
do
@@ -247,25 +251,45 @@ SUBSYSTEM_DEF(timer)
bucket_node = bucket_node.next
while(bucket_node && bucket_node != bucket_head)

// Empty the list by zeroing and re-assigning the length
bucket_list.len = 0
bucket_list.len = BUCKET_LEN

// Reset values for the subsystem to their initial values
practical_offset = 1
bucket_count = 0
head_offset = world.time
bucket_resolution = world.tick_lag

// Add all timed events from the secondary queue as well
alltimers += second_queue

for (var/datum/timedevent/t as anything in alltimers)
t.timer_subsystem = src // Recovered timers need to be reparented
t.bucket_joined = FALSE
t.bucket_pos = -1
t.prev = null
t.next = null

// If there are no timers being tracked by the subsystem,
// there is no need to do any further rebuilding
if (!length(alltimers))
return

sortTim(alltimers, .proc/cmp_timer)
// Sort all timers by time to run
sortTim(alltimers, GLOBAL_PROC_REF(cmp_timer))

// Get the earliest timer, and if the TTR is earlier than the current world.time,
// then set the head offset appropriately to be the earliest time tracked by the
// current set of buckets
var/datum/timedevent/head = alltimers[1]

if (head.timeToRun < head_offset)
head_offset = head.timeToRun

// Iterate through each timed event and insert it into an appropriate bucket,
// up until the point that we can no longer insert into buckets as the TTR
// is outside the range we are tracking, then insert the remainder into the
// secondary queue
var/new_bucket_count
var/i = 1
for (i in 1 to length(alltimers))
@@ -273,19 +297,25 @@ SUBSYSTEM_DEF(timer)
if (!timer)
continue

var/bucket_pos = BUCKET_POS(timer)
if (timer.timeToRun >= TIMER_MAX)
// Check that the TTR is within the range covered by buckets, when exceeded we've finished
if (timer.timeToRun >= TIMER_MAX(src))
i--
break


// Check that timer has a valid callback and hasn't been invoked
if (!timer.callBack || timer.spent)
WARNING("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
WARNING("Invalid timer: [get_timer_debug_string(timer)] world.time: [world.time], \
head_offset: [head_offset], practical_offset: [practical_offset]")
if (timer.callBack)
qdel(timer)
continue

// Insert the timer into the bucket, and perform necessary doubly-linked list operations
new_bucket_count++
var/bucket_pos = BUCKET_POS(timer)
timer.bucket_pos = bucket_pos
timer.bucket_joined = TRUE

var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
if (!bucket_head)
bucket_list[bucket_pos] = timer
@@ -293,64 +323,100 @@ SUBSYSTEM_DEF(timer)
timer.prev = null
continue

if (!bucket_head.prev)
bucket_head.prev = bucket_head
bucket_head.prev = timer
timer.next = bucket_head
timer.prev = bucket_head.prev
timer.next.prev = timer
timer.prev.next = timer
timer.prev = null
bucket_list[bucket_pos] = timer

// Cut the timers that are tracked by the buckets from the secondary queue
if (i)
alltimers.Cut(1, i+1)
alltimers.Cut(1, i + 1)
second_queue = alltimers
bucket_count = new_bucket_count


/datum/controller/subsystem/timer/Recover()
second_queue |= SStimer.second_queue
hashes |= SStimer.hashes
timer_id_dict |= SStimer.timer_id_dict
bucket_list |= SStimer.bucket_list
// Find the current timer sub-subsystem in global and recover its buckets etc
var/datum/controller/subsystem/timer/timerSS = null
for(var/global_var in global.vars)
if (istype(global.vars[global_var],src.type))
timerSS = global.vars[global_var]

hashes = timerSS.hashes
timer_id_dict = timerSS.timer_id_dict

bucket_list = timerSS.bucket_list
second_queue = timerSS.second_queue

// The buckets are FUBAR
reset_buckets()

/**
* # Timed Event
*
* This is the actual timer, it contains the callback and necessary data to maintain
* the timer.
*
* See the documentation for the timer subsystem for an explanation of the buckets referenced
* below in next and prev
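*
* A minimal stoppable-timer sketch (illustrative; addtimer/deltimer and the TIMER_*
* flags are the standard helpers, announce() is a hypothetical proc):
* var/timer_id = addtimer(CALLBACK(src, PROC_REF(announce)), 10 SECONDS, TIMER_STOPPABLE)
* deltimer(timer_id) // cancel it before it fires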
|
||||
*/
|
||||
/datum/timedevent
|
||||
/// ID used for timers when the TIMER_STOPPABLE flag is present
|
||||
var/id
|
||||
/// The callback to invoke after the timer completes
|
||||
var/datum/callback/callBack
|
||||
/// The time at which the callback should be invoked at
|
||||
var/timeToRun
|
||||
/// The length of the timer
|
||||
var/wait
|
||||
/// Unique hash generated when TIMER_UNIQUE flag is present
|
||||
var/hash
|
||||
/// The source of the timedevent, whatever called addtimer
|
||||
var/source
|
||||
/// Flags associated with the timer, see _DEFINES/subsystems.dm
|
||||
var/list/flags
|
||||
var/spent = 0 //time we ran the timer.
|
||||
var/name //for easy debugging.
|
||||
//cicular doublely linked list
/// Time at which the timer was invoked or destroyed
var/spent = 0
/// An informative name generated for the timer as its representation in strings, useful for debugging
var/name
/// Next timed event in the bucket
var/datum/timedevent/next
/// Previous timed event in the bucket
var/datum/timedevent/prev
/// The timer subsystem this event is associated with
var/datum/controller/subsystem/timer/timer_subsystem
/// Boolean indicating if timer joined into bucket
var/bucket_joined = FALSE
/// Initial bucket position
var/bucket_pos = -1

/datum/timedevent/New(datum/callback/callBack, wait, flags, hash)
/datum/timedevent/New(datum/callback/callBack, wait, flags, datum/controller/subsystem/timer/timer_subsystem, hash, source)
var/static/nextid = 1
id = TIMER_ID_NULL
src.callBack = callBack
src.wait = wait
src.flags = flags
src.hash = hash
src.source = source
src.timer_subsystem = timer_subsystem || SStimer

if (flags & TIMER_CLIENT_TIME)
timeToRun = REALTIMEOFDAY + wait
else
timeToRun = world.time + wait
// Determine time at which the timer's callback should be invoked
timeToRun = (flags & TIMER_CLIENT_TIME ? REALTIMEOFDAY : world.time) + wait

// Include the timer in the hash table if the timer is unique
if (flags & TIMER_UNIQUE)
SStimer.hashes[hash] = src
timer_subsystem.hashes[hash] = src

// Generate ID for the timer if the timer is stoppable, include in the timer id dictionary
if (flags & TIMER_STOPPABLE)
id = num2text(nextid, 100)
if (nextid >= SHORT_REAL_LIMIT)
nextid += min(1, 2**round(nextid/SHORT_REAL_LIMIT))
nextid += min(1, 2 ** round(nextid / SHORT_REAL_LIMIT))
else
nextid++
SStimer.timer_id_dict[id] = src
timer_subsystem.timer_id_dict[id] = src

name = "Timer: [id] (\ref[src]), TTR: [timeToRun], Flags: [jointext(bitfield2list(flags, list("TIMER_UNIQUE", "TIMER_OVERRIDE", "TIMER_CLIENT_TIME", "TIMER_STOPPABLE", "TIMER_NO_HASH_WAIT", "TIMER_LOOP")), ", ")], callBack: \ref[callBack], callBack.object: [callBack.object]\ref[callBack.object]([getcallingtype()]), callBack.delegate:[callBack.delegate]([callBack.arguments ? callBack.arguments.Join(", ") : ""])"

if ((timeToRun < world.time || timeToRun < SStimer.head_offset) && !(flags & TIMER_CLIENT_TIME))
if ((timeToRun < world.time || timeToRun < timer_subsystem.head_offset) && !(flags & TIMER_CLIENT_TIME))
CRASH("Invalid timer state: Timer created that would require a backtrack to run (addtimer would never let this happen): [SStimer.get_timer_debug_string(src)]")

if (callBack.object != GLOBAL_PROC && !QDESTROYING(callBack.object))
@@ -361,7 +427,7 @@ SUBSYSTEM_DEF(timer)
/datum/timedevent/Destroy()
..()
if (flags & TIMER_UNIQUE && hash)
SStimer.hashes -= hash
timer_subsystem.hashes -= hash

if (callBack && callBack.object && callBack.object != GLOBAL_PROC && callBack.object.active_timers)
callBack.object.active_timers -= src
@@ -370,12 +436,12 @@ SUBSYSTEM_DEF(timer)
callBack = null

if (flags & TIMER_STOPPABLE)
SStimer.timer_id_dict -= id
timer_subsystem.timer_id_dict -= id

if (flags & TIMER_CLIENT_TIME)
if (!spent)
spent = world.time
SStimer.clienttime_timers -= src
timer_subsystem.clienttime_timers -= src
return QDEL_HINT_IWILLGC

if (!spent)
@@ -390,65 +456,104 @@ SUBSYSTEM_DEF(timer)
prev = null
return QDEL_HINT_IWILLGC

/**
* Removes this timed event from any relevant buckets, or the secondary queue
*/
/datum/timedevent/proc/bucketEject()
var/bucketpos = BUCKET_POS(src)
var/list/bucket_list = SStimer.bucket_list
var/list/second_queue = SStimer.second_queue
// Store local references for the bucket list and secondary queue
// This is faster than referencing them from the datum itself
var/list/bucket_list = timer_subsystem.bucket_list
var/list/second_queue = timer_subsystem.second_queue

// Attempt to get the head of the bucket
var/datum/timedevent/buckethead
if(bucketpos > 0)
buckethead = bucket_list[bucketpos]
if(bucket_pos > 0)
buckethead = bucket_list[bucket_pos]

// Decrement the number of timers in buckets if the timed event is
// the head of the bucket, or has a TTR less than TIMER_MAX implying it fits
// into an existing bucket, or is otherwise not present in the secondary queue
if(buckethead == src)
bucket_list[bucketpos] = next
SStimer.bucket_count--
else if(timeToRun < TIMER_MAX || next || prev)
SStimer.bucket_count--
bucket_list[bucket_pos] = next
timer_subsystem.bucket_count--
else if(bucket_joined)
timer_subsystem.bucket_count--
else
var/l = length(second_queue)
second_queue -= src
if(l == length(second_queue))
SStimer.bucket_count--
if(prev != next)
timer_subsystem.bucket_count--

// Remove the timed event from the bucket, ensuring to maintain
// the integrity of the bucket's list if relevant
if (prev && prev.next == src)
prev.next = next
if (next && next.prev == src)
next.prev = prev
else
prev?.next = null
next?.prev = null
prev = next = null
bucket_pos = -1
bucket_joined = FALSE

/**
* Attempts to add this timed event to a bucket; it will enter the secondary queue
* if there are no appropriate buckets at this time.
*
* Secondary queueing of timed events will occur when the timespan covered by the existing
* buckets is exceeded by the time at which this timed event is scheduled to be invoked.
* If the timed event is tracking client time, it will be added to a special bucket.
*/
/datum/timedevent/proc/bucketJoin()
// Generate debug-friendly name for timer
var/static/list/bitfield_flags = list("TIMER_UNIQUE", "TIMER_OVERRIDE", "TIMER_CLIENT_TIME", "TIMER_STOPPABLE", "TIMER_NO_HASH_WAIT", "TIMER_LOOP")
name = "Timer: [id] ([text_ref(src)]), TTR: [timeToRun], wait:[wait] Flags: [jointext(bitfield_to_list(flags, bitfield_flags), ", ")], \
callBack: [text_ref(callBack)], callBack.object: [callBack.object][text_ref(callBack.object)]([getcallingtype()]), \
callBack.delegate:[callBack.delegate]([callBack.arguments ? callBack.arguments.Join(", ") : ""]), source: [source]"

if (bucket_joined)
stack_trace("Bucket already joined! [name]")

// Check if this timed event should be diverted to the client time bucket, or the secondary queue
var/list/L

if (flags & TIMER_CLIENT_TIME)
L = SStimer.clienttime_timers
else if (timeToRun >= TIMER_MAX)
L = SStimer.second_queue

L = timer_subsystem.clienttime_timers
else if (timeToRun >= TIMER_MAX(timer_subsystem))
L = timer_subsystem.second_queue
if(L)
BINARY_INSERT(src, L, datum/timedevent, src, timeToRun, COMPARE_KEY)
BINARY_INSERT(src, L, /datum/timedevent, src, timeToRun, COMPARE_KEY)
return

//get the list of buckets
var/list/bucket_list = SStimer.bucket_list
// Get a local reference to the bucket list, this is faster than referencing the datum
var/list/bucket_list = timer_subsystem.bucket_list

//calculate our place in the bucket list
var/bucket_pos = BUCKET_POS(src)
// Find the correct bucket for this timed event
bucket_pos = BUCKET_POS(src)

if (bucket_pos < timer_subsystem.practical_offset && timeToRun < (timer_subsystem.head_offset + TICKS2DS(BUCKET_LEN)))
WARNING("Bucket pos in past: bucket_pos = [bucket_pos] < practical_offset = [timer_subsystem.practical_offset] \
&& timeToRun = [timeToRun] < [timer_subsystem.head_offset + TICKS2DS(BUCKET_LEN)], Timer: [name]")
bucket_pos = timer_subsystem.practical_offset // Recover bucket_pos to avoid timer blocking queue

//get the bucket for our tick
var/datum/timedevent/bucket_head = bucket_list[bucket_pos]
SStimer.bucket_count++
//empty bucket, we will just add ourselves
timer_subsystem.bucket_count++

// If there is no timed event at this position, then the bucket is 'empty'
// and we can just set this event to that position
if (!bucket_head)
bucket_joined = TRUE
bucket_list[bucket_pos] = src
return
//otherwise, let's do a simplified linked list add.
if (!bucket_head.prev)
bucket_head.prev = bucket_head
next = bucket_head
prev = bucket_head.prev
next.prev = src
prev.next = src

///Returns a string of the type of the callback for this timer
// Otherwise, we merely add this timed event into the bucket, which is a
// doubly-linked list
bucket_joined = TRUE
bucket_head.prev = src
next = bucket_head
prev = null
bucket_list[bucket_pos] = src

/**
* Returns a string of the type of the callback for this timer
*/
/datum/timedevent/proc/getcallingtype()
. = "ERROR"
if (callBack.object == GLOBAL_PROC)
@@ -457,14 +562,16 @@ SUBSYSTEM_DEF(timer)
. = "[callBack.object.type]"

/**
* Create a new timer and insert it in the queue
*
* Arguments:
* * callback the callback to call on timer finish
* * wait deciseconds to run the timer for
* * flags flags for this timer, see: code\__DEFINES\subsystems.dm
*/
/proc/addtimer(datum/callback/callback, wait = 0, flags = 0)
* Create a new timer and insert it in the queue.
* You should not call this directly, and should instead use the addtimer macro, which includes source information.
*
* Arguments:
* * callback the callback to call on timer finish
* * wait deciseconds to run the timer for
* * flags flags for this timer, see: code\__DEFINES\subsystems.dm
* * timer_subsystem the subsystem to insert this timer into
*/
/proc/_addtimer(datum/callback/callback, wait = 0, flags = 0, datum/controller/subsystem/timer/timer_subsystem, file, line)
if (!callback)
CRASH("addtimer called without a callback")

@@ -472,84 +579,114 @@ SUBSYSTEM_DEF(timer)
stack_trace("addtimer called with a negative wait. Converting to [world.tick_lag]")

if (callback.object != GLOBAL_PROC && QDELETED(callback.object) && !QDESTROYING(callback.object))
stack_trace("addtimer called with a callback assigned to a qdeleted object. In the future such timers will not be supported and may refuse to run or run with a 0 wait")
stack_trace("addtimer called with a callback assigned to a qdeleted object. In the future such timers will not \
be supported and may refuse to run or run with a 0 wait")

wait = max(CEILING(wait, world.tick_lag), world.tick_lag)
if (flags & TIMER_CLIENT_TIME) // REALTIMEOFDAY has a resolution of 1 decisecond
wait = max(CEILING(wait, 1), 1) // so if we use tick_lag timers may be inserted in the "past"
else
wait = max(CEILING(wait, world.tick_lag), world.tick_lag)

if(wait >= INFINITY)
CRASH("Attempted to create timer with INFINITY delay")

var/hash
timer_subsystem = timer_subsystem || SStimer

// Generate hash if relevant for timed events with the TIMER_UNIQUE flag
var/hash
if (flags & TIMER_UNIQUE)
var/list/hashlist
if(flags & TIMER_NO_HASH_WAIT)
hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, flags & TIMER_CLIENT_TIME)
else
hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, wait, flags & TIMER_CLIENT_TIME)
var/list/hashlist = list(callback.object, "([REF(callback.object)])", callback.delegate, flags & TIMER_CLIENT_TIME)
if(!(flags & TIMER_NO_HASH_WAIT))
hashlist += wait
hashlist += callback.arguments
hash = hashlist.Join("|||||||")

var/datum/timedevent/hash_timer = SStimer.hashes[hash]
var/datum/timedevent/hash_timer = timer_subsystem.hashes[hash]
if(hash_timer)
if (hash_timer.spent) //it's pending deletion, pretend it doesn't exist.
hash_timer.hash = null //but keep it from accidentally deleting us
if (hash_timer.spent) // it's pending deletion, pretend it doesn't exist.
hash_timer.hash = null // but keep it from accidentally deleting us
else
if (flags & TIMER_OVERRIDE)
hash_timer.hash = null //no need having it delete its hash if we are going to replace it
hash_timer.hash = null // no need having it delete its hash if we are going to replace it
qdel(hash_timer)
else
if (hash_timer.flags & TIMER_STOPPABLE)
. = hash_timer.id
return
else if(flags & TIMER_OVERRIDE)
stack_trace("TIMER_OVERRIDE used without TIMER_UNIQUE")
stack_trace("TIMER_OVERRIDE used without TIMER_UNIQUE") //this is also caught by grep.

var/datum/timedevent/timer = new(callback, wait, flags, hash)
var/datum/timedevent/timer = new(callback, wait, flags, timer_subsystem, hash, file && "[file]:[line]")
return timer.id
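
// Editor's note: a minimal usage sketch of the addtimer macro documented above.
// The /obj/machinery/fabricator_sketch type and its finish_build() proc are hypothetical,
// invented for illustration; the flags and the returned id reflect the documented behaviour.
/obj/machinery/fabricator_sketch
    var/build_timer_id

/obj/machinery/fabricator_sketch/proc/start_build()
    // TIMER_UNIQUE collapses duplicate requests; TIMER_STOPPABLE makes addtimer return an id
    build_timer_id = addtimer(CALLBACK(src, .proc/finish_build), 10 SECONDS, TIMER_UNIQUE | TIMER_STOPPABLE)

/obj/machinery/fabricator_sketch/proc/finish_build()
    build_timer_id = null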

/**
* Delete a timer
*
* Arguments:
* * id a timerid or a /datum/timedevent
*/
/proc/deltimer(id)
* Delete a timer
*
* Arguments:
* * id a timerid or a /datum/timedevent
*/
/proc/deltimer(id, datum/controller/subsystem/timer/timer_subsystem)
if (!id)
return FALSE
if (id == TIMER_ID_NULL)
CRASH("Tried to delete a null timerid. Use TIMER_STOPPABLE flag")
if (!istext(id))
if (istype(id, /datum/timedevent))
qdel(id)
return TRUE
if (istype(id, /datum/timedevent))
qdel(id)
return TRUE
timer_subsystem = timer_subsystem || SStimer
//id is string
var/datum/timedevent/timer = SStimer.timer_id_dict[id]
if (timer && !timer.spent)
var/datum/timedevent/timer = timer_subsystem.timer_id_dict[id]
if (timer && (!timer.spent || timer.flags & TIMER_DELETE_ME))
qdel(timer)
return TRUE
return FALSE
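
// Editor's note: a hedged companion sketch to the hypothetical fabricator_sketch above,
// showing deltimer cancelling a stoppable timer by id.
/obj/machinery/fabricator_sketch/proc/cancel_build()
    // deltimer returns TRUE only if a live timer was actually found and deleted
    if (build_timer_id && deltimer(build_timer_id))
        build_timer_id = null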

/**
* Get the remaining deciseconds on a timer
*
* Arguments:
* * id a timerid or a /datum/timedevent
*/
/proc/timeleft(id)
* Get the remaining deciseconds on a timer
*
* Arguments:
* * id a timerid or a /datum/timedevent
*/
/proc/timeleft(id, datum/controller/subsystem/timer/timer_subsystem)
if (!id)
return null
if (id == TIMER_ID_NULL)
CRASH("Tried to get timeleft of a null timerid. Use TIMER_STOPPABLE flag")
if (!istext(id))
if (istype(id, /datum/timedevent))
var/datum/timedevent/timer = id
return timer.timeToRun - world.time
//id is string
var/datum/timedevent/timer = SStimer.timer_id_dict[id]
if (timer && !timer.spent)
if (istype(id, /datum/timedevent))
var/datum/timedevent/timer = id
return timer.timeToRun - world.time
return null
timer_subsystem = timer_subsystem || SStimer
//id is string
var/datum/timedevent/timer = timer_subsystem.timer_id_dict[id]
if(!timer || timer.spent)
return null
return timer.timeToRun - (timer.flags & TIMER_CLIENT_TIME ? REALTIMEOFDAY : world.time)
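
// Editor's note: a small hedged sketch of timeleft for user feedback, continuing the
// hypothetical fabricator_sketch; note the null return when no live timer exists for the id,
// and that the remaining time is in deciseconds.
/obj/machinery/fabricator_sketch/examine(mob/user)
    . = ..()
    var/remaining = timeleft(build_timer_id)
    if (!isnull(remaining))
        . += "The current build finishes in [remaining / 10] seconds."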

/**
* Update the delay on an existing LOOPING timer
* Will come into effect on the next process
*
* Arguments:
* * id a timerid or a /datum/timedevent
* * new_wait the new wait to give this looping timer
*/
/proc/updatetimedelay(id, new_wait, datum/controller/subsystem/timer/timer_subsystem)
if (!id)
return
if (id == TIMER_ID_NULL)
CRASH("Tried to update the wait of null timerid. Use TIMER_STOPPABLE flag")
if (istype(id, /datum/timedevent))
var/datum/timedevent/timer = id
timer.wait = new_wait
return
timer_subsystem = timer_subsystem || SStimer
//id is string
var/datum/timedevent/timer = timer_subsystem.timer_id_dict[id]
if(!timer || timer.spent)
return
if(!(timer.flags & TIMER_LOOP))
CRASH("Tried to update the wait of a non looping timer. This is not supported")
timer.wait = new_wait
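
// Editor's note: a hedged sketch of updatetimedelay on a looping timer. The conveyor_sketch
// type, its tick() proc, and the speed handling are hypothetical illustrations.
/obj/machinery/conveyor_sketch
    var/belt_timer_id

/obj/machinery/conveyor_sketch/Initialize(mapload)
    . = ..()
    // TIMER_LOOP refires automatically; TIMER_STOPPABLE gives us an id to retune later
    belt_timer_id = addtimer(CALLBACK(src, .proc/tick), 2 SECONDS, TIMER_LOOP | TIMER_STOPPABLE)

/obj/machinery/conveyor_sketch/proc/tick()
    return

/obj/machinery/conveyor_sketch/proc/set_speed(new_wait)
    // Takes effect on the next process, per the doc comment above
    updatetimedelay(belt_timer_id, new_wait)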

#undef BUCKET_LEN
#undef BUCKET_POS

@@ -2,6 +2,7 @@ SUBSYSTEM_DEF(title)
name = "Title Screen"
flags = SS_NO_FIRE
init_order = INIT_ORDER_TITLE
init_stage = INITSTAGE_EARLY

var/file_path
var/icon/icon
@@ -10,7 +11,7 @@ SUBSYSTEM_DEF(title)

/datum/controller/subsystem/title/Initialize()
if(file_path && icon)
return
return SS_INIT_NO_NEED

if(fexists("data/previous_title.dat"))
var/previous_path = file2text("data/previous_title.dat")
@@ -41,7 +42,7 @@ SUBSYSTEM_DEF(title)
if(splash_turf)
splash_turf.icon = icon

return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/title/vv_edit_var(var_name, var_value)
. = ..()

@@ -164,6 +164,6 @@ SUBSYSTEM_DEF(traumas)
"anime" = typecacheof(list(/datum/species/human/felinid))
)

return ..()
return SS_INIT_SUCCESS

#undef PHOBIA_FILE

@@ -3,16 +3,12 @@ SUBSYSTEM_DEF(vis_overlays)
wait = 1 MINUTES
priority = FIRE_PRIORITY_VIS
init_order = INIT_ORDER_VIS
flags = SS_NO_INIT

var/list/vis_overlay_cache
var/list/unique_vis_overlays
var/list/vis_overlay_cache = list()
var/list/unique_vis_overlays = list()
var/list/currentrun

/datum/controller/subsystem/vis_overlays/Initialize()
vis_overlay_cache = list()
unique_vis_overlays = list()
return ..()

/datum/controller/subsystem/vis_overlays/fire(resumed = FALSE)
if(!resumed)
currentrun = vis_overlay_cache.Copy()
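
// Editor's note: a hedged sketch of the resumable fire() pattern this hunk sets up.
// Copying the cache into currentrun lets a subsystem chew through entries across several
// MC time slices; the subsystem name and the loop body here are illustrative, not the
// actual vis_overlays per-entry work.
/datum/controller/subsystem/currentrun_sketch
    var/list/vis_overlay_cache = list()
    var/list/currentrun

/datum/controller/subsystem/currentrun_sketch/fire(resumed = FALSE)
    if(!resumed)
        currentrun = vis_overlay_cache.Copy()
    var/list/cut_until_done = currentrun
    while(cut_until_done.len)
        cut_until_done.len-- // pop one cached entry; per-entry expiry work would go here
        if(MC_TICK_CHECK) // out of time this tick; the MC calls us back with resumed = TRUE
            return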

@@ -45,7 +45,7 @@ SUBSYSTEM_DEF(weather)
for(var/z in SSmapping.levels_by_trait(target_trait))
LAZYINITLIST(eligible_zlevels["[z]"])
eligible_zlevels["[z]"][W] = probability
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/weather/proc/update_z_level(datum/space_level/level)
var/z = level.z_value

@@ -82,6 +82,7 @@
* Calls qdel on the chatmessage when its parent is deleted, used to register qdel signal
*/
/datum/chatmessage/proc/on_parent_qdel()
SIGNAL_HANDLER
qdel(src)

/**

@@ -311,6 +311,7 @@
RegisterSignal(screen_obj, COMSIG_CLICK, .proc/hud_click)

/datum/component/mood/proc/unmodify_hud(datum/source)
SIGNAL_HANDLER
if(!screen_obj)
return
var/mob/living/owner = parent

@@ -29,6 +29,7 @@


/datum/component/spawner/proc/stop_spawning(force, hint)
SIGNAL_HANDLER
STOP_PROCESSING(SSprocessing, src)
for(var/mob/living/simple_animal/L in spawned_mobs)
if(L.nest == src)

@@ -33,6 +33,11 @@
/// Datum level flags
var/datum_flags = NONE

/// A cached version of our \ref
/// The brunt of \ref costs are in creating entries in the string tree (a tree of immutable strings)
/// This avoids doing that more than once per datum by ensuring ref strings always have a reference to them after they're first pulled
var/cached_ref

/// A weak reference to another datum
var/datum/weakref/weak_reference

@@ -246,4 +251,4 @@
if(QDELETED(source))
return
SEND_SIGNAL(source, COMSIG_CD_RESET(index), S_TIMER_COOLDOWN_TIMELEFT(source, index))
TIMER_COOLDOWN_END(source, index)
TIMER_COOLDOWN_END(source, index)

code/datums/helper_datums/stack_end_detector.dm (new file, 32 lines)
@@ -0,0 +1,32 @@
/**
Stack End Detector.
Can detect if a given code stack has exited, used by the mc for stack overflow detection.

**/
/datum/stack_end_detector
var/datum/weakref/_WF
var/datum/stack_canary/_canary

/datum/stack_end_detector/New()
_canary = new()
_WF = WEAKREF(_canary)

/** Prime the stack overflow detector.
Store the return value of this proc call in a proc level var.
Can only be called once.
**/
/datum/stack_end_detector/proc/prime_canary()
if (!_canary)
CRASH("Prime_canary called twice")
. = _canary
_canary = null

/// Returns true if the stack is still going. Calling before the canary has been primed also returns true
/datum/stack_end_detector/proc/check()
return !!_WF.resolve()

/// Stack canary. Will go away if the stack it was primed by is ended by byond for return or stack overflow reasons.
/datum/stack_canary

/// empty proc to avoid warnings about unused variables. Call this proc on your canary in the stack it's watching.
/datum/stack_canary/proc/use_variable()
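
// Editor's note: a hedged sketch of the priming protocol described in the doc comments
// above. The watcher/worker procs are hypothetical; the key points are that prime_canary()
// is called exactly once, from inside the watched stack, and that check() is polled from
// a different stack.
/proc/watched_worker(datum/stack_end_detector/detector)
    set waitfor = FALSE
    var/datum/stack_canary/canary = detector.prime_canary()
    canary.use_variable()
    // ... long-running work; if this stack returns or overflows, the canary is garbage
    // collected and detector.check() starts returning FALSE ...

/proc/watch_example()
    var/datum/stack_end_detector/detector = new()
    watched_worker(detector)
    return detector.check() // TRUE while the watched stack is still alive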
@@ -6,10 +6,12 @@
var/body
var/headers
var/url
/// If present, the response body will be saved to this file.
var/output_file

var/_raw_response

/datum/http_request/proc/prepare(method, url, body = "", list/headers)
/datum/http_request/proc/prepare(method, url, body = "", list/headers, output_file)
if (!length(headers))
headers = ""
else
@@ -19,15 +21,16 @@
src.url = url
src.body = body
src.headers = headers
src.output_file = output_file

/datum/http_request/proc/execute_blocking()
_raw_response = rustg_http_request_blocking(method, url, body, headers)
_raw_response = rustg_http_request_blocking(method, url, body, headers, build_options())

/datum/http_request/proc/begin_async()
if (in_progress)
CRASH("Attempted to re-use a request object.")

id = rustg_http_request_async(method, url, body, headers)
id = rustg_http_request_async(method, url, body, headers, build_options())

if (isnull(text2num(id)))
stack_trace("Proc error: [id]")
@@ -35,6 +38,11 @@
else
in_progress = TRUE

/datum/http_request/proc/build_options()
if(output_file)
return json_encode(list("output_filename"=output_file,"body_filename"=null))
return null

/datum/http_request/proc/is_complete()
if (isnull(id))
return TRUE
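
// Editor's note: a hedged sketch of the new output_file argument, streaming a response
// body to disk instead of holding it in the response datum. The URL and path are
// illustrative; RUSTG_HTTP_METHOD_GET and the UNTIL macro are assumed to be defined
// elsewhere in the codebase (by rust-g and the global defines respectively).
/proc/fetch_to_disk_example()
    var/datum/http_request/request = new()
    request.prepare(RUSTG_HTTP_METHOD_GET, "https://example.com/data.json", "", null, "data/fetched.json")
    request.begin_async()
    UNTIL(request.is_complete())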

@@ -105,7 +105,7 @@
if(stage == END_STAGE)
return 1
stage = END_STAGE
STOP_PROCESSING(SSweather, src)
STOP_PROCESSING_DUMB(SSweather, src)
update_areas()

/datum/weather/proc/can_weather_act(mob/living/mob_to_check) // Can this weather impact a mob?

@@ -221,6 +221,7 @@
update_engine()

/obj/machinery/shuttle/engine/ion/proc/on_capacitor_deleted(datum/source, force)
SIGNAL_HANDLER
register_capacitor_bank(null)


@@ -15,5 +15,5 @@
message_admins("An alien egg has been delivered to [ADMIN_VERBOSEJMP(T)].")
log_game("An alien egg has been delivered to [AREACOORD(T)]")
var/message = "Attention [station_name()], we have entrusted you with a research specimen in [get_area_name(T, TRUE)]. Remember to follow all safety precautions when dealing with the specimen."
SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/addtimer, CALLBACK(GLOBAL_PROC, /proc/print_command_report, message), announcement_time))
SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/_addtimer_here, CALLBACK(GLOBAL_PROC, /proc/print_command_report, message), announcement_time))
return INITIALIZE_HINT_QDEL

@@ -87,7 +87,7 @@ GLOBAL_VAR(restart_counter)
#else
cb = VARSET_CALLBACK(SSticker, force_ending, TRUE)
#endif
SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/addtimer, cb, 10 SECONDS))
SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, /proc/_addtimer_here, cb, 10 SECONDS))


/world/proc/SetupLogs()

@@ -9,5 +9,6 @@
vv_update_display(D, "marked", VV_MSG_MARKED)

/datum/admins/proc/handle_marked_del(datum/source)
SIGNAL_HANDLER
UnregisterSignal(marked_datum, COMSIG_PARENT_QDELETING)
marked_datum = null

@@ -76,5 +76,6 @@
UnregisterSignal(removed_item, list(COMSIG_MOVABLE_MOVED, COMSIG_PARENT_QDELETING))

///This proc is called by signals that remove the food from the plate.
/obj/item/plate/proc/ItemMoved(obj/item/moved_item, atom/OldLoc, Dir, Forced)
/obj/item/plate/proc/ItemMoved(obj/item/moved_item, forced)
SIGNAL_HANDLER
ItemRemovedFromPlate(moved_item)

@@ -165,7 +165,7 @@
/datum/job/proc/announce_head(var/mob/living/carbon/human/H, var/channels) //tells the given channel that the given mob is the new department head. See communications.dm for valid channels.
if(H && GLOB.announcement_systems.len)
//timer because these should come after the captain announcement
SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, .proc/addtimer, CALLBACK(pick(GLOB.announcement_systems), /obj/machinery/announcement_system/proc/announce, "NEWHEAD", H.real_name, H.job, channels), 1))
SSticker.OnRoundstart(CALLBACK(GLOBAL_PROC, .proc/_addtimer_here, CALLBACK(pick(GLOB.announcement_systems), /obj/machinery/announcement_system/proc/announce, "NEWHEAD", H.real_name, H.job, channels), 1))

//If the configuration option is set to require players to be logged as old enough to play certain jobs, then this proc checks that they are, otherwise it just returns 1
/datum/job/proc/player_old_enough(client/C)

@@ -14,9 +14,11 @@

// BEGIN_INCLUDE
#include "_maps\_basemap.dm"
#include "code\__byond_version_compat.dm"
#include "code\_compile_options.dm"
#include "code\world.dm"
#include "code\__DEFINES\_globals.dm"
#include "code\__DEFINES\_helpers.dm"
#include "code\__DEFINES\_protect.dm"
#include "code\__DEFINES\_tick.dm"
#include "code\__DEFINES\access.dm"
@@ -124,6 +126,7 @@
#include "code\__DEFINES\wall_dents.dm"
#include "code\__DEFINES\wires.dm"
#include "code\__DEFINES\wounds.dm"
#include "code\__DEFINES\dcs\helpers.dm"
#include "code\__DEFINES\{yogs_defines}\admin.dm"
#include "code\__DEFINES\{yogs_defines}\antagonists.dm"
#include "code\__DEFINES\{yogs_defines}\atmospherics.dm"
@@ -166,6 +169,7 @@
#include "code\__HELPERS\matrices.dm"
#include "code\__HELPERS\mobs.dm"
#include "code\__HELPERS\mouse_control.dm"
#include "code\__HELPERS\nameof.dm"
#include "code\__HELPERS\names.dm"
#include "code\__HELPERS\priority_announce.dm"
#include "code\__HELPERS\pronouns.dm"
@@ -560,6 +564,7 @@
#include "code\datums\helper_datums\events.dm"
#include "code\datums\helper_datums\getrev.dm"
#include "code\datums\helper_datums\icon_snapshot.dm"
#include "code\datums\helper_datums\stack_end_detector.dm"
#include "code\datums\helper_datums\teleport.dm"
#include "code\datums\looping_sounds\_looping_sound.dm"
#include "code\datums\looping_sounds\item_sounds.dm"

@@ -8,7 +8,7 @@ SUBSYSTEM_DEF(bluespace_locker)
bluespaceify_random_locker()
if(external_locker)
external_locker.take_contents()
return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/bluespace_locker/proc/bluespaceify_random_locker()
if(external_locker)

@@ -141,7 +141,7 @@ SUBSYSTEM_DEF(Yogs)
for(var/path in subtypesof(/datum/corporation))
new path

return ..()
return SS_INIT_SUCCESS

/datum/controller/subsystem/Yogs/fire(resumed = 0)
//END OF SHIFT ANNOUNCER
Block a user