The 515 MegaPR early downport (#7783)

Co-authored-by: Selis <selis@xynolabs.com>
Co-authored-by: Selis <sirlionfur@hotmail.de>
Co-authored-by: Kashargul <144968721+Kashargul@users.noreply.github.com>
Co-authored-by: SatinIsle <thesatinisle@gmail.com>
Co-authored-by: Heroman <alesha3000@list.ru>
Co-authored-by: Casey <a.roaming.shadow@gmail.com>
Co-authored-by: Raeschen <rycoop29@gmail.com>
Authored by Cadyn on 2024-02-27 11:17:32 -08:00, committed by GitHub
parent 96a43a09c1
commit b90f7ec922
254 changed files with 2135 additions and 1576 deletions


@@ -0,0 +1,28 @@
/// Allows us to lazyload asset datums
/// Anything inserted here will still load fully if it is fetched directly,
/// so this just removes the requirement to fully load every asset during init
SUBSYSTEM_DEF(asset_loading)
name = "Asset Loading"
priority = FIRE_PRIORITY_ASSETS
flags = SS_NO_INIT
runlevels = RUNLEVEL_LOBBY|RUNLEVELS_DEFAULT
var/list/datum/asset/generate_queue = list()
/datum/controller/subsystem/asset_loading/fire(resumed)
while(length(generate_queue))
var/datum/asset/to_load = generate_queue[generate_queue.len]
to_load.queued_generation()
if(MC_TICK_CHECK)
return
generate_queue.len--
/datum/controller/subsystem/asset_loading/proc/queue_asset(datum/asset/queue)
#ifdef DO_NOT_DEFER_ASSETS
stack_trace("We queued an instance of [queue.type] for lateloading despite not allowing it")
#endif
generate_queue += queue
/datum/controller/subsystem/asset_loading/proc/dequeue_asset(datum/asset/queue)
generate_queue -= queue

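For context, a minimal usage sketch of the pattern this subsystem enables, hedged heavily: /datum/asset/example_lazy and its do_expensive_generation() helper are hypothetical, and queued_generation() is only assumed to exist on the /datum/asset base because fire() above calls it. The idea is that an asset datum queues itself here instead of generating during init, and the heavy work then runs post-init under MC_TICK_CHECK.

/datum/asset/example_lazy/New()
	SSasset_loading.queue_asset(src)	// defer: fire() will call queued_generation() on us later

/datum/asset/example_lazy/queued_generation()
	do_expensive_generation()	// the heavy step now runs outside init, tick-limited by the MC

/datum/asset/example_lazy/proc/do_expensive_generation()
	return	// placeholder for icon/spritesheet building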

@@ -2,17 +2,39 @@ SUBSYSTEM_DEF(assets)
name = "Assets"
init_order = INIT_ORDER_ASSETS
flags = SS_NO_FIRE
var/list/cache = list()
var/list/datum/asset_cache_item/cache = list()
var/list/preload = list()
var/datum/asset_transport/transport = new()
/datum/controller/subsystem/assets/Initialize(timeofday)
for(var/typepath in typesof(/datum/asset))
var/datum/asset/A = typepath
if (typepath != initial(A._abstract))
get_asset_datum(typepath)
/datum/controller/subsystem/assets/proc/OnConfigLoad()
var/newtransporttype = /datum/asset_transport
switch (config.asset_transport)
if ("webroot")
newtransporttype = /datum/asset_transport/webroot
preload = cache.Copy() //don't preload assets generated during the round
if (newtransporttype == transport.type)
return
for(var/client/C in GLOB.clients)
addtimer(CALLBACK(GLOBAL_PROC, GLOBAL_PROC_REF(getFilesSlow), C, preload, FALSE), 10)
return ..()
var/datum/asset_transport/newtransport = new newtransporttype ()
if (newtransport.validate_config())
transport = newtransport
transport.Load()
/datum/controller/subsystem/assets/Initialize()
OnConfigLoad()
for(var/type in typesof(/datum/asset))
var/datum/asset/A = type
if (type != initial(A._abstract))
load_asset_datum(type)
transport.Initialize(cache)
subsystem_initialized = TRUE
return SS_INIT_SUCCESS
/datum/controller/subsystem/assets/Recover()
cache = SSassets.cache
preload = SSassets.preload

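A minimal sketch of how interface code is expected to consume this cache (the asset type /datum/asset/simple/example_css is hypothetical, and send() is assumed to match the usual /datum/asset API on this codebase): get_asset_datum() returns the cached instance, and the configured transport decides whether send() pushes the files over the BYOND cache or hands out webroot URLs.

/proc/open_example_window(mob/user)
	var/datum/asset/styles = get_asset_datum(/datum/asset/simple/example_css)	// hypothetical asset type
	styles.send(user.client)	// the active transport decides how the files reach the client
	user << browse("<html>...</html>", "window=example_window")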

@@ -1,3 +1,26 @@
/*!
## Debugging GC issues
In order to debug `qdel()` failures, there are several tools available.
To enable these tools, define `TESTING` in [_compile_options.dm](https://github.com/tgstation/-tg-station/blob/master/code/_compile_options.dm).
First is a verb called "Find References", which lists **every** reference to an object in the world. This allows you to track down any indirect or obfuscated references that you might have missed.
Complementing this is another verb, "qdel() then Find References".
This does exactly what you'd expect: it calls `qdel()` on the object and then finds all remaining references.
This is great, because it means that `Destroy()` will have been called before it starts to find references,
so the only references you'll find will be the ones preventing the object from `qdel()`ing gracefully.
If you have a datum or something you are not destroying directly (say via the singulo),
the next tool is `QDEL_HINT_FINDREFERENCE`. You can return this in `Destroy()` (where you would normally `return ..()`),
to print a list of references once it enters the GC queue.
Finally, there is a verb, "Show qdel() Log", which shows the deletion log that the garbage subsystem keeps. This is helpful if you are chasing down race conditions or need to review the order of deletions.
Note that for any of these tools to work `TESTING` must be defined.
By using these methods of finding references, you can make your life far, far easier when dealing with `qdel()` failures.
*/
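// Illustrative sketch, not part of this diff: how the QDEL_HINT_FINDREFERENCE tool described above
// might be used. /obj/structure/example and its `owner` var are hypothetical, and the hint only has
// an effect when the TESTING/REFERENCE_TRACKING tooling mentioned above is compiled in.
/obj/structure/example
	var/datum/owner

/obj/structure/example/Destroy()
	owner = null
	..()	// still run the normal Destroy() chain
	return QDEL_HINT_FINDREFERENCE	// instead of `return ..()`, so remaining references get dumped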
SUBSYSTEM_DEF(garbage)
name = "Garbage"
priority = FIRE_PRIORITY_GARBAGE
@@ -5,8 +28,9 @@ SUBSYSTEM_DEF(garbage)
flags = SS_POST_FIRE_TIMING|SS_BACKGROUND|SS_NO_INIT
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
init_order = INIT_ORDER_GARBAGE
// init_stage = INITSTAGE_EARLY
var/list/collection_timeout = list(2 MINUTES, 10 SECONDS) // deciseconds to wait before moving something up in the queue to the next level
var/list/collection_timeout = list(GC_FILTER_QUEUE, GC_CHECK_QUEUE, GC_DEL_QUEUE) // deciseconds to wait before moving something up in the queue to the next level
//Stat tracking
var/delslasttick = 0 // number of del()'s we've done this tick
@@ -26,17 +50,15 @@ SUBSYSTEM_DEF(garbage)
var/list/queues
#ifdef REFERENCE_TRACKING
var/list/reference_find_on_fail = list()
#ifdef REFERENCE_TRACKING_DEBUG
//Should we save found refs. Used for unit testing
var/should_save_refs = FALSE
#endif
#endif
/datum/controller/subsystem/garbage/PreInit()
queues = new(GC_QUEUE_COUNT)
pass_counts = new(GC_QUEUE_COUNT)
fail_counts = new(GC_QUEUE_COUNT)
for(var/i in 1 to GC_QUEUE_COUNT)
queues[i] = list()
pass_counts[i] = 0
fail_counts[i] = 0
InitQueues()
/datum/controller/subsystem/garbage/stat_entry(msg)
var/list/counts = list()
@@ -60,39 +82,48 @@ SUBSYSTEM_DEF(garbage)
/datum/controller/subsystem/garbage/Shutdown()
//Adds the del() log to the qdel log file
var/list/dellog = list()
var/list/del_log = list()
//sort by how long it's wasted hard deleting
sortTim(items, cmp=/proc/cmp_qdel_item_time, associative = TRUE)
for(var/path in items)
var/datum/qdel_item/I = items[path]
dellog += "Path: [path]"
var/list/entry = list()
del_log[path] = entry
if (I.qdel_flags & QDEL_ITEM_SUSPENDED_FOR_LAG)
dellog += "\tSUSPENDED FOR LAG"
entry["SUSPENDED FOR LAG"] = TRUE
if (I.failures)
dellog += "\tFailures: [I.failures]"
dellog += "\tqdel() Count: [I.qdels]"
dellog += "\tDestroy() Cost: [I.destroy_time]ms"
entry["Failures"] = I.failures
entry["qdel() Count"] = I.qdels
entry["Destroy() Cost (ms)"] = I.destroy_time
if (I.hard_deletes)
dellog += "\tTotal Hard Deletes: [I.hard_deletes]"
dellog += "\tTime Spent Hard Deleting: [I.hard_delete_time]ms"
dellog += "\tHighest Time Spent Hard Deleting: [I.hard_delete_max]ms"
entry["Total Hard Deletes"] = I.hard_deletes
entry["Time Spent Hard Deleting (ms)"] = I.hard_delete_time
entry["Highest Time Spent Hard Deleting (ms)"] = I.hard_delete_max
if (I.hard_deletes_over_threshold)
dellog += "\tHard Deletes Over Threshold: [I.hard_deletes_over_threshold]"
entry["Hard Deletes Over Threshold"] = I.hard_deletes_over_threshold
if (I.slept_destroy)
dellog += "\tSleeps: [I.slept_destroy]"
entry["Total Sleeps"] = I.slept_destroy
if (I.no_respect_force)
dellog += "\tIgnored force: [I.no_respect_force] times"
entry["Total Ignored Force"] = I.no_respect_force
if (I.no_hint)
dellog += "\tNo hint: [I.no_hint] times"
text2file(dellog.Join(), "[log_path]-qdel.log")
entry["Total No Hint"] = I.no_hint
if(LAZYLEN(I.extra_details))
entry["Deleted Metadata"] = I.extra_details
log_debug("", del_log)
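// Illustrative sketch, not part of this diff: the shape of one structured entry built above,
// assuming /obj/item/example is a type that failed to GC twice. log_debug() is then handed the
// whole associative del_log list instead of the old flat text lines.
//	del_log[/obj/item/example] = list(
//		"Failures" = 2,
//		"qdel() Count" = 40,
//		"Destroy() Cost (ms)" = 1.5,
//		"Total Hard Deletes" = 2
//	)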
/datum/controller/subsystem/garbage/fire()
//the fact that this resets its processing each fire (rather than resuming where it left off) is intentional.
var/queue = GC_QUEUE_CHECK
var/queue = GC_QUEUE_FILTER
while (state == SS_RUNNING)
switch (queue)
if (GC_QUEUE_FILTER)
HandleQueue(GC_QUEUE_FILTER)
queue = GC_QUEUE_FILTER+1
if (GC_QUEUE_CHECK)
HandleQueue(GC_QUEUE_CHECK)
queue = GC_QUEUE_CHECK+1
@@ -102,8 +133,21 @@ SUBSYSTEM_DEF(garbage)
state = SS_RUNNING
break
/datum/controller/subsystem/garbage/proc/HandleQueue(level = GC_QUEUE_CHECK)
if (level == GC_QUEUE_CHECK)
/datum/controller/subsystem/garbage/proc/InitQueues()
if (isnull(queues)) // Only init the queues if they don't already exist, prevents overriding of recovered lists
queues = new(GC_QUEUE_COUNT)
pass_counts = new(GC_QUEUE_COUNT)
fail_counts = new(GC_QUEUE_COUNT)
for(var/i in 1 to GC_QUEUE_COUNT)
queues[i] = list()
pass_counts[i] = 0
fail_counts[i] = 0
/datum/controller/subsystem/garbage/proc/HandleQueue(level = GC_QUEUE_FILTER)
if (level == GC_QUEUE_FILTER)
delslasttick = 0
gcedlasttick = 0
var/cut_off_time = world.time - collection_timeout[level] //ignore entries newer than this
@@ -118,30 +162,33 @@ SUBSYSTEM_DEF(garbage)
lastlevel = level
//We do this rather than for(var/refID in queue) because that sort of for loop copies the whole list.
// 1 from the hard reference in the queue, and 1 from the variable used before this
#define REFS_WE_EXPECT 2
//We do this rather than for(var/list/ref_info in queue) because that sort of for loop copies the whole list.
//Normally this isn't expensive, but the gc queue can grow to 40k items, and that gets costly/causes overrun.
for (var/i in 1 to length(queue))
var/list/L = queue[i]
if (length(L) < 2)
if (length(L) < GC_QUEUE_ITEM_INDEX_COUNT)
count++
if (MC_TICK_CHECK)
return
continue
var/GCd_at_time = L[1]
if(GCd_at_time > cut_off_time)
var/queued_at_time = L[GC_QUEUE_ITEM_QUEUE_TIME]
if(queued_at_time > cut_off_time)
break // Everything else is newer, skip them
count++
var/refID = L[2]
var/datum/D
D = locate(refID)
if (!D || D.gc_destroyed != GCd_at_time) // So if something else coincidentally gets the same ref, it's not deleted by mistake
var/datum/D = L[GC_QUEUE_ITEM_REF]
// If that's all we've got, send er off
if (refcount(D) == REFS_WE_EXPECT)
++gcedlasttick
++totalgcs
pass_counts[level]++
#ifdef REFERENCE_TRACKING
reference_find_on_fail -= refID //It's deleted we don't care anymore.
reference_find_on_fail -= ref(D) //It's deleted we don't care anymore.
#endif
if (MC_TICK_CHECK)
return
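// Illustrative sketch, not part of this diff: why REFS_WE_EXPECT is 2. refcount() is the BYOND 515
// builtin this downport targets; refcount_example() and its locals are hypothetical.
/proc/refcount_example()
	var/datum/thing = new	// one reference held by the local var
	var/list/queue_like = list(thing)	// a second reference held by the list, mirroring the GC queue entry
	if(refcount(thing) == 2)	// nothing else in the world points at it
		queue_like.Cut()	// drop the queue's ref; once `thing` falls out of scope BYOND collects it without del()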
@@ -157,22 +204,33 @@ SUBSYSTEM_DEF(garbage)
switch (level)
if (GC_QUEUE_CHECK)
#ifdef REFERENCE_TRACKING
if(reference_find_on_fail[refID])
INVOKE_ASYNC(D, /datum/proc/find_references)
// Decides how many refs to look for (potentially),
// based on how many remain after subtracting the ones we can account for
var/remaining_refs = refcount(D) - REFS_WE_EXPECT
if(reference_find_on_fail[ref(D)])
INVOKE_ASYNC(D, TYPE_PROC_REF(/datum,find_references), remaining_refs)
ref_searching = TRUE
#ifdef GC_FAILURE_HARD_LOOKUP
else
INVOKE_ASYNC(D, /datum/proc/find_references)
INVOKE_ASYNC(D, TYPE_PROC_REF(/datum,find_references), remaining_refs)
ref_searching = TRUE
#endif
reference_find_on_fail -= refID
reference_find_on_fail -= ref(D)
#endif
var/type = D.type
var/datum/qdel_item/I = items[type]
log_world("## TESTING: GC: -- \ref[D] | [type] was unable to be GC'd --")
var/message = "## TESTING: GC: -- [ref(D)] | [type] was unable to be GC'd --"
message = "[message] (ref count of [refcount(D)])"
log_world(message)
/*var/detail = D.dump_harddel_info()
if(detail)
LAZYADD(I.extra_details, detail)*/
#ifdef TESTING
for(var/client/admin as anything in GLOB.admins) //Using testing() here would fill the logs with ADMIN_VV garbage
for(var/c in GLOB.admins) //Using testing() here would fill the logs with ADMIN_VV garbage
var/client/admin = c
if(!check_rights_for(admin, R_ADMIN))
continue
to_chat(admin, "## TESTING: GC: -- [ADMIN_VV(D)] | [type] was unable to be GC'd --")
@@ -204,36 +262,41 @@ SUBSYSTEM_DEF(garbage)
queue.Cut(1,count+1)
count = 0
/datum/controller/subsystem/garbage/proc/Queue(datum/D, level = GC_QUEUE_CHECK)
#undef REFS_WE_EXPECT
/datum/controller/subsystem/garbage/proc/Queue(datum/D, level = GC_QUEUE_FILTER)
if (isnull(D))
return
if (level > GC_QUEUE_COUNT)
HardDelete(D)
return
var/gctime = world.time
var/refid = "\ref[D]"
var/queue_time = world.time
if (D.gc_destroyed <= 0)
D.gc_destroyed = queue_time
D.gc_destroyed = gctime
var/list/queue = queues[level]
queue[++queue.len] = list(gctime, refid) // not += for byond reasons
queue[++queue.len] = list(queue_time, D, D.gc_destroyed) // not += for byond reasons
//this is mainly to separate things profile wise.
/datum/controller/subsystem/garbage/proc/HardDelete(datum/D)
++delslasttick
++totaldels
var/type = D.type
var/refID = "\ref[D]"
var/refID = ref(D)
var/datum/qdel_item/type_info = items[type]
/*var/detail = D.dump_harddel_info()
if(detail)
LAZYADD(type_info.extra_details, detail)*/
var/tick_usage = TICK_USAGE
del(D)
tick_usage = TICK_USAGE_TO_MS(tick_usage)
var/datum/qdel_item/I = items[type]
I.hard_deletes++
I.hard_delete_time += tick_usage
if (tick_usage > I.hard_delete_max)
I.hard_delete_max = tick_usage
type_info.hard_deletes++
type_info.hard_delete_time += tick_usage
if (tick_usage > type_info.hard_delete_max)
type_info.hard_delete_max = tick_usage
if (tick_usage > highest_del_ms)
highest_del_ms = tick_usage
highest_del_type_string = "[type]"
@@ -244,15 +307,17 @@ SUBSYSTEM_DEF(garbage)
postpone(time)
var/threshold = 0.5 // Default, make a config
if (threshold && (time > threshold SECONDS))
if (!(I.qdel_flags & QDEL_ITEM_ADMINS_WARNED))
log_and_message_admins("Error: [type]([refID]) took longer than [threshold] seconds to delete (took [round(time/10, 0.1)] seconds to delete)")
I.qdel_flags |= QDEL_ITEM_ADMINS_WARNED
I.hard_deletes_over_threshold++
if (!(type_info.qdel_flags & QDEL_ITEM_ADMINS_WARNED))
log_game("Error: [type]([refID]) took longer than [threshold] seconds to delete (took [round(time/10, 0.1)] seconds to delete)")
message_admins("Error: [type]([refID]) took longer than [threshold] seconds to delete (took [round(time/10, 0.1)] seconds to delete).")
type_info.qdel_flags |= QDEL_ITEM_ADMINS_WARNED
type_info.hard_deletes_over_threshold++
var/overrun_limit = 0 // Default, make a config
if (overrun_limit && I.hard_deletes_over_threshold >= overrun_limit)
I.qdel_flags |= QDEL_ITEM_SUSPENDED_FOR_LAG
if (overrun_limit && type_info.hard_deletes_over_threshold >= overrun_limit)
type_info.qdel_flags |= QDEL_ITEM_SUSPENDED_FOR_LAG
/datum/controller/subsystem/garbage/Recover()
InitQueues() //We first need to create the queues before recovering data
if (istype(SSgarbage.queues))
for (var/i in 1 to SSgarbage.queues.len)
queues[i] |= SSgarbage.queues[i]
@@ -271,79 +336,85 @@ SUBSYSTEM_DEF(garbage)
var/no_hint = 0 //!Number of times it didn't even bother to give a qdel hint
var/slept_destroy = 0 //!Number of times it's slept in its destroy
var/qdel_flags = 0 //!Flags related to this type's trip thru qdel.
var/list/extra_details //!Lazylist of string metadata about the deleted objects
/datum/qdel_item/New(mytype)
name = "[mytype]"
/// Should be treated as a replacement for the 'del' keyword.
///
/// Datums passed to this will be given a chance to clean up references to allow the GC to collect them.
/proc/qdel(datum/D, force=FALSE, ...)
if(!istype(D))
del(D)
/proc/qdel(datum/to_delete, force = FALSE)
if(!istype(to_delete))
del(to_delete)
return
var/datum/qdel_item/I = SSgarbage.items[D.type]
if (!I)
I = SSgarbage.items[D.type] = new /datum/qdel_item(D.type)
I.qdels++
var/datum/qdel_item/trash = SSgarbage.items[to_delete.type]
if (isnull(trash))
trash = SSgarbage.items[to_delete.type] = new /datum/qdel_item(to_delete.type)
trash.qdels++
if(isnull(D.gc_destroyed))
if (SEND_SIGNAL(D, COMSIG_PARENT_PREQDELETED, force)) // Give the components a chance to prevent their parent from being deleted
if(!isnull(to_delete.gc_destroyed))
if(to_delete.gc_destroyed == GC_CURRENTLY_BEING_QDELETED)
CRASH("[to_delete.type] destroy proc was called multiple times, likely due to a qdel loop in the Destroy logic")
return
if (SEND_SIGNAL(to_delete, COMSIG_PARENT_PREQDELETED, force)) // Give the components a chance to prevent their parent from being deleted
return
to_delete.gc_destroyed = GC_CURRENTLY_BEING_QDELETED
var/start_time = world.time
var/start_tick = world.tick_usage
SEND_SIGNAL(to_delete, COMSIG_PARENT_QDELETING, force) // Let the (remaining) components know about the result of Destroy
var/hint = to_delete.Destroy(force) // Let our friend know they're about to get fucked up.
if(world.time != start_time)
trash.slept_destroy++
else
trash.destroy_time += TICK_USAGE_TO_MS(start_tick)
if(isnull(to_delete))
return
switch(hint)
if (QDEL_HINT_QUEUE) //qdel should queue the object for deletion.
SSgarbage.Queue(to_delete)
if (QDEL_HINT_IWILLGC)
to_delete.gc_destroyed = world.time
return
D.gc_destroyed = GC_CURRENTLY_BEING_QDELETED
var/start_time = world.time
var/start_tick = world.tick_usage
SEND_SIGNAL(D, COMSIG_PARENT_QDELETING, force) // Let the (remaining) components know about the result of Destroy
var/hint = D.Destroy(arglist(args.Copy(2))) // Let our friend know they're about to get fucked up.
if(world.time != start_time)
I.slept_destroy++
else
I.destroy_time += TICK_USAGE_TO_MS(start_tick)
if(!D)
return
switch(hint)
if (QDEL_HINT_QUEUE) //qdel should queue the object for deletion.
SSgarbage.Queue(D)
if (QDEL_HINT_IWILLGC)
D.gc_destroyed = world.time
if (QDEL_HINT_LETMELIVE) //qdel should let the object live after calling Destroy().
if(!force)
to_delete.gc_destroyed = null //clear the gc variable (important!)
return
if (QDEL_HINT_LETMELIVE) //qdel should let the object live after calling Destroy().
if(!force)
D.gc_destroyed = null //clear the gc variable (important!)
return
// Returning LETMELIVE after being told to force destroy
// indicates the object's Destroy() does not respect force
#ifdef TESTING
if(!I.no_respect_force)
testing("WARNING: [D.type] has been force deleted, but is \
returning an immortal QDEL_HINT, indicating it does \
not respect the force flag for qdel(). It has been \
placed in the queue, further instances of this type \
will also be queued.")
#endif
I.no_respect_force++
SSgarbage.Queue(D)
if (QDEL_HINT_HARDDEL) //qdel should assume this object won't gc, and queue a hard delete
SSgarbage.Queue(D, GC_QUEUE_HARDDELETE)
if (QDEL_HINT_HARDDEL_NOW) //qdel should assume this object won't gc, and hard del it post haste.
SSgarbage.HardDelete(D)
#ifdef REFERENCE_TRACKING
if (QDEL_HINT_FINDREFERENCE) //qdel will, if REFERENCE_TRACKING is enabled, display all references to this object, then queue the object for deletion.
SSgarbage.Queue(D)
D.find_references()
if (QDEL_HINT_IFFAIL_FINDREFERENCE) //qdel will, if REFERENCE_TRACKING is enabled and the object fails to collect, display all references to this object.
SSgarbage.Queue(D)
SSgarbage.reference_find_on_fail["\ref[D]"] = TRUE
// Returning LETMELIVE after being told to force destroy
// indicates the object's Destroy() does not respect force
#ifdef TESTING
if(!trash.no_respect_force)
testing("WARNING: [to_delete.type] has been force deleted, but is \
returning an immortal QDEL_HINT, indicating it does \
not respect the force flag for qdel(). It has been \
placed in the queue, further instances of this type \
will also be queued.")
#endif
else
#ifdef TESTING
if(!I.no_hint)
testing("WARNING: [D.type] is not returning a qdel hint. It is being placed in the queue. Further instances of this type will also be queued.")
#endif
I.no_hint++
SSgarbage.Queue(D)
else if(D.gc_destroyed == GC_CURRENTLY_BEING_QDELETED)
CRASH("[D.type] destroy proc was called multiple times, likely due to a qdel loop in the Destroy logic")
trash.no_respect_force++
SSgarbage.Queue(to_delete)
if (QDEL_HINT_HARDDEL) //qdel should assume this object won't gc, and queue a hard delete
SSgarbage.Queue(to_delete, GC_QUEUE_HARDDELETE)
if (QDEL_HINT_HARDDEL_NOW) //qdel should assume this object won't gc, and hard del it post haste.
SSgarbage.HardDelete(to_delete)
#ifdef REFERENCE_TRACKING
if (QDEL_HINT_FINDREFERENCE) //qdel will, if REFERENCE_TRACKING is enabled, display all references to this object, then queue the object for deletion.
SSgarbage.Queue(to_delete)
INVOKE_ASYNC(to_delete, TYPE_PROC_REF(/datum, find_references))
if (QDEL_HINT_IFFAIL_FINDREFERENCE) //qdel will, if REFERENCE_TRACKING is enabled and the object fails to collect, display all references to this object.
SSgarbage.Queue(to_delete)
SSgarbage.reference_find_on_fail[ref(to_delete)] = TRUE
#endif
else
#ifdef TESTING
if(!trash.no_hint)
testing("WARNING: [to_delete.type] is not returning a qdel hint. It is being placed in the queue. Further instances of this type will also be queued.")
#endif
trash.no_hint++
SSgarbage.Queue(to_delete)

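Putting the qdel() flow above together, a minimal life-cycle sketch: the machinery type and its linked_console var are hypothetical, and the base /datum/Destroy() is assumed to return QDEL_HINT_QUEUE as upstream does. Destroy() drops the datum's own hard references, qdel() records stats and queues it, and the filter/check passes let BYOND collect it once refcount() is down to the two references the queue itself accounts for.

/obj/machinery/example
	var/obj/machinery/computer/linked_console	// a hard reference that would block GC if left set

/obj/machinery/example/Destroy()
	linked_console = null	// clear our own refs so only the GC queue entry and loop var remain
	return ..()	// the base hint hands us to SSgarbage.Queue()

/proc/cleanup_example(obj/machinery/example/machine)
	qdel(machine)	// never call del() directly; qdel() sends the signals, records stats and queues the datum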

@@ -77,23 +77,31 @@ SUBSYSTEM_DEF(overlays)
var/list/result = list()
var/icon/icon = subject.icon
for (var/atom/entry as anything in sources)
if (!entry)
continue
else if (istext(entry))
result += GetStateAppearance(icon, entry)
else if (isicon(entry))
result += GetIconAppearance(entry)
else
if (isloc(entry))
if (entry.flags & OVERLAY_QUEUED)
entry.ImmediateOverlayUpdate()
if (!ispath(entry))
result += entry.appearance
else
var/image/image = entry
result += image.appearance
AppearanceListEntry(entry, result, icon)
return result
//Fixes a runtime with overlays present in 515 by recursing into nested overlay source lists
/datum/controller/subsystem/overlays/proc/AppearanceListEntry(atom/entry, list/result, icon/icon)
if (!entry)
return
else if(islist(entry))
var/list/entry_list = entry
for(var/entry_item in entry_list)
AppearanceListEntry(entry_item, result, icon)
else if (istext(entry))
result += GetStateAppearance(icon, entry)
else if (isicon(entry))
result += GetIconAppearance(entry)
else
if (isloc(entry))
if (entry.flags & OVERLAY_QUEUED)
entry.ImmediateOverlayUpdate()
if (!ispath(entry))
if(entry.appearance)
result += entry.appearance
else
var/image/image = entry
result += image.appearance
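// Illustrative sketch, not part of this diff: the kind of mixed source list the recursion above now
// tolerates. build_example_overlays() is hypothetical, and add_overlay() is assumed to be the usual
// entry point that eventually feeds these sources through the procs above.
/atom/movable/proc/build_example_overlays()
	var/list/sources = list(
		"on_state",	// an icon_state resolved against our own icon
		icon(icon, "glow"),	// a raw /icon built from our own icon file
		list("layer_one", "layer_two")	// a nested list, now flattened by AppearanceListEntry()
	)
	add_overlay(sources)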
/// Enqueues the atom for an overlay update if not already queued
/atom/proc/QueueOverlayUpdate()