Mirror of https://github.com/PolarisSS13/Polaris.git (synced 2026-01-04 14:33:30 +00:00)
Merge branch 'master' of https://github.com/PolarisSS13/Polaris into 2/1/2018_make_the_sun_not_lag_again
@@ -1,9 +0,0 @@
|
||||
/datum/controller/process/Shuttle/setup()
|
||||
name = "shuttle controller"
|
||||
schedule_interval = 20 // every 2 seconds
|
||||
|
||||
if(!shuttle_controller)
|
||||
shuttle_controller = new
|
||||
|
||||
/datum/controller/process/Shuttle/doWork()
|
||||
shuttle_controller.process()
|
||||
@@ -7,8 +7,30 @@
|
||||
for(last_object in clients)
|
||||
var/client/C = last_object
|
||||
if(C.is_afk(config.kick_inactive MINUTES))
|
||||
if(!istype(C.mob, /mob/observer/dead))
|
||||
log_access("AFK: [key_name(C)]")
|
||||
C << "<SPAN CLASS='warning'>You have been inactive for more than [config.kick_inactive] minute\s and have been disconnected.</SPAN>"
|
||||
del(C) // Don't qdel, cannot override finalize_qdel behaviour for clients.
|
||||
to_chat(C,"<span class='warning'>You have been inactive for more than [config.kick_inactive] minute\s and have been disconnected.</span>")
|
||||
var/information
|
||||
|
||||
if(C.mob)
|
||||
if(ishuman(C.mob))
|
||||
var/job
|
||||
var/mob/living/carbon/human/H = C.mob
|
||||
var/datum/data/record/R = find_general_record("name", H.real_name)
|
||||
if(R)
|
||||
job = R.fields["real_rank"]
|
||||
if(!job && H.mind)
|
||||
job = H.mind.assigned_role
|
||||
if(!job && H.job)
|
||||
job = H.job
|
||||
if(job)
|
||||
information = " while [job]."
|
||||
|
||||
else if(issilicon(C.mob))
|
||||
information = " while a silicon."
|
||||
|
||||
var/adminlinks
|
||||
adminlinks = " (<A HREF='?_src_=holder;adminplayerobservecoodjump=1;X=[C.mob.x];Y=[C.mob.y];Z=[C.mob.z]'>JMP</a>|<A HREF='?_src_=holder;cryoplayer=\ref[C.mob]'>CRYO</a>)"
|
||||
|
||||
log_and_message_admins("being kicked for AFK[information][adminlinks]", C.mob)
|
||||
|
||||
qdel(C)
|
||||
SCHECK
|
||||
|
||||
@@ -47,7 +47,8 @@ var/datum/controller/process/planet/planet_controller = null
|
||||
//Weather style needs redrawing
|
||||
if(P.needs_work & PLANET_PROCESS_WEATHER)
|
||||
P.needs_work &= ~PLANET_PROCESS_WEATHER
|
||||
var/image/new_overlay = image(icon = P.weather_holder.current_weather.icon, icon_state = P.weather_holder.current_weather.icon_state, layer = LIGHTING_LAYER - 1)
|
||||
var/image/new_overlay = image(icon = P.weather_holder.current_weather.icon, icon_state = P.weather_holder.current_weather.icon_state)
|
||||
new_overlay.plane = PLANE_PLANETLIGHTING
|
||||
//Redraw weather icons
|
||||
for(var/T in P.planet_floors)
|
||||
var/turf/simulated/turf = T
|
||||
|
||||
@@ -8,19 +8,21 @@
|
||||
|
||||
/datum/controller/process/scheduler/setup()
|
||||
name = "scheduler"
|
||||
schedule_interval = 3 SECONDS
|
||||
schedule_interval = 1 SECOND
|
||||
scheduled_tasks = list()
|
||||
scheduler = src
|
||||
|
||||
/datum/controller/process/scheduler/doWork()
|
||||
var/world_time = world.time
|
||||
for(last_object in scheduled_tasks)
|
||||
var/datum/scheduled_task/scheduled_task = last_object
|
||||
if(world_time < scheduled_task.trigger_time)
|
||||
break // Too early for this one, and therefore too early for all remaining.
|
||||
try
|
||||
if(world.time > scheduled_task.trigger_time)
|
||||
unschedule(scheduled_task)
|
||||
scheduled_task.pre_process()
|
||||
scheduled_task.process()
|
||||
scheduled_task.post_process()
|
||||
unschedule(scheduled_task)
|
||||
scheduled_task.pre_process()
|
||||
scheduled_task.process()
|
||||
scheduled_task.post_process()
|
||||
catch(var/exception/e)
|
||||
catchException(e, last_object)
|
||||
SCHECK
|
||||
@@ -45,7 +47,7 @@
|
||||
stat(null, "[scheduled_tasks.len] task\s")
|
||||
|
||||
/datum/controller/process/scheduler/proc/schedule(var/datum/scheduled_task/st)
|
||||
scheduled_tasks += st
|
||||
dd_insertObjectList(scheduled_tasks, st)
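// Note (an assumption based on the dd_SortValue() override further down in this diff):
// dd_insertObjectList() is presumed to insert the task in trigger_time order, which is what lets
// doWork() above break out of its loop as soon as it reaches a task whose trigger_time is still
// in the future.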
|
||||
|
||||
/datum/controller/process/scheduler/proc/unschedule(var/datum/scheduled_task/st)
|
||||
scheduled_tasks -= st
|
||||
@@ -106,6 +108,9 @@
|
||||
task_after_process_args.Cut()
|
||||
return ..()
|
||||
|
||||
/datum/scheduled_task/dd_SortValue()
|
||||
return trigger_time
|
||||
|
||||
/datum/scheduled_task/proc/pre_process()
|
||||
task_triggered_event.raise_event(list(src))
|
||||
|
||||
|
||||
@@ -212,7 +212,7 @@ var/list/gamemode_cache = list()
|
||||
|
||||
var/starlight = 0 // Whether space turfs have ambient light or not
|
||||
|
||||
var/list/ert_species = list("Human")
|
||||
var/list/ert_species = list(SPECIES_HUMAN)
|
||||
|
||||
var/law_zero = "ERROR ER0RR $R0RRO$!R41.%%!!(%$^^__+ @#F0E4'ALL LAWS OVERRIDDEN#*?&110010"
|
||||
|
||||
@@ -721,7 +721,7 @@ var/list/gamemode_cache = list()
|
||||
if("ert_species")
|
||||
config.ert_species = splittext(value, ";")
|
||||
if(!config.ert_species.len)
|
||||
config.ert_species += "Human"
|
||||
config.ert_species += SPECIES_HUMAN
|
||||
|
||||
if("law_zero")
|
||||
law_zero = value
|
||||
|
||||
@@ -234,7 +234,8 @@ var/global/datum/emergency_shuttle_controller/emergency_shuttle
|
||||
name = "star"
|
||||
var/speed = 10
|
||||
var/direction = SOUTH
|
||||
layer = 2 // TURF_LAYER
|
||||
layer = TURF_LAYER
|
||||
plane = TURF_PLANE
|
||||
|
||||
/obj/effect/bgstar/New()
|
||||
..()
|
||||
|
||||
@@ -573,14 +573,11 @@ var/datum/controller/master/Master = new()
|
||||
var/datum/controller/subsystem/SS = S
|
||||
SS.StartLoadingMap()
|
||||
|
||||
// ZAS might displace objects as the map loads if an air tick is processed mid-load.
|
||||
air_processing_killed = TRUE
|
||||
map_loading = TRUE
|
||||
|
||||
/datum/controller/master/StopLoadingMap(var/quiet = TRUE)
|
||||
if(!quiet)
|
||||
admin_notice("<span class='danger'>Map is finished. Unlocking.</span>", R_DEBUG)
|
||||
air_processing_killed = FALSE
|
||||
map_loading = FALSE
|
||||
for(var/S in subsystems)
|
||||
var/datum/controller/subsystem/SS = S
|
||||
|
||||
@@ -11,7 +11,6 @@ var/global/datum/controller/game_controller/master_controller //Set in world.New
|
||||
var/global/controller_iteration = 0
|
||||
var/global/last_tick_duration = 0
|
||||
|
||||
var/global/air_processing_killed = 0
|
||||
var/global/pipe_processing_killed = 0
|
||||
|
||||
datum/controller/game_controller
|
||||
@@ -37,22 +36,22 @@ datum/controller/game_controller/New()
|
||||
datum/controller/game_controller/proc/setup()
|
||||
|
||||
setup_objects()
|
||||
setupgenetics()
|
||||
SetupXenoarch()
|
||||
// setupgenetics() Moved to SSatoms
|
||||
// SetupXenoarch() - Moved to SSxenoarch
|
||||
|
||||
transfer_controller = new
|
||||
admin_notice("<span class='danger'>Initializations complete.</span>", R_DEBUG)
|
||||
|
||||
#if UNIT_TEST
|
||||
#define CHECK_SLEEP_MASTER // For unit tests we don't care about a smooth lobby screen experience. We care about speed.
|
||||
#else
|
||||
#define CHECK_SLEEP_MASTER if(++initialized_objects > 500) { initialized_objects=0;sleep(world.tick_lag); }
|
||||
#endif
|
||||
// #if UNIT_TEST
|
||||
// #define CHECK_SLEEP_MASTER // For unit tests we don't care about a smooth lobby screen experience. We care about speed.
|
||||
// #else
|
||||
// #define CHECK_SLEEP_MASTER if(++initialized_objects > 500) { initialized_objects=0;sleep(world.tick_lag); }
|
||||
// #endif
|
||||
|
||||
datum/controller/game_controller/proc/setup_objects()
|
||||
#if !UNIT_TEST
|
||||
var/initialized_objects = 0
|
||||
#endif
|
||||
// #if !UNIT_TEST
|
||||
// var/initialized_objects = 0
|
||||
// #endif
|
||||
|
||||
// Set up antagonists.
|
||||
populate_antag_type_list()
|
||||
@@ -60,6 +59,7 @@ datum/controller/game_controller/proc/setup_objects()
|
||||
//Set up spawn points.
|
||||
populate_spawn_points()
|
||||
|
||||
/*
|
||||
admin_notice("<span class='danger'>Initializing Floor Decals</span>", R_DEBUG)
|
||||
var/list/turfs_with_decals = list()
|
||||
for(var/obj/effect/floor_decal/D in world)
|
||||
@@ -112,3 +112,4 @@ datum/controller/game_controller/proc/setup_objects()
|
||||
if(!QDELETED(lift))
|
||||
lift.initialize()
|
||||
CHECK_SLEEP_MASTER
|
||||
*/
|
||||
@@ -1,39 +0,0 @@
|
||||
|
||||
var/global/datum/shuttle_controller/shuttle_controller
|
||||
|
||||
|
||||
/datum/shuttle_controller
|
||||
var/list/shuttles //maps shuttle tags to shuttle datums, so that they can be looked up.
|
||||
var/list/process_shuttles //simple list of shuttles, for processing
|
||||
|
||||
/datum/shuttle_controller/proc/process()
|
||||
//process ferry shuttles
|
||||
for (var/datum/shuttle/shuttle in process_shuttles)
|
||||
if(istype(shuttle, /datum/shuttle/ferry))
|
||||
var/datum/shuttle/ferry/F = shuttle
|
||||
if(F.process_state || F.always_process)
|
||||
F.process()
|
||||
else
|
||||
shuttle.process()
|
||||
|
||||
|
||||
//This is called by gameticker after all the machines and radio frequencies have been properly initialized
|
||||
/datum/shuttle_controller/proc/setup_shuttle_docks()
|
||||
// for(var/shuttle_tag in shuttles)
|
||||
// var/datum/shuttle/shuttle = shuttles[shuttle_tag]
|
||||
for(var/shuttle_type in subtypesof(/datum/shuttle))
|
||||
var/datum/shuttle/shuttle = shuttle_type
|
||||
if(initial(shuttle.category) == shuttle_type)
|
||||
continue
|
||||
shuttle = new shuttle()
|
||||
shuttle.init_docking_controllers()
|
||||
shuttle.dock() //makes all shuttles docked to something at round start go into the docked state
|
||||
|
||||
for(var/obj/machinery/embedded_controller/C in machines)
|
||||
if(istype(C.program, /datum/computer/file/embedded_program/docking))
|
||||
C.program.tag = null //clear the tags, 'cause we don't need 'em anymore
|
||||
|
||||
/datum/shuttle_controller/New()
|
||||
shuttles = list()
|
||||
process_shuttles = list()
|
||||
|
||||
@@ -166,7 +166,7 @@
|
||||
|
||||
|
||||
|
||||
if(can_fire && !(SS_NO_FIRE in flags))
|
||||
if(can_fire && !(SS_NO_FIRE & flags))
|
||||
msg = "[round(cost,1)]ms|[round(tick_usage,1)]%|[round(ticks,0.1)]\t[msg]"
|
||||
else
|
||||
msg = "OFFLINE\t[msg]"
|
||||
|
||||
271
code/controllers/subsystems/air.dm
Normal file
@@ -0,0 +1,271 @@
|
||||
// Air update stages
|
||||
#define SSAIR_TURFS 1
|
||||
#define SSAIR_EDGES 2
|
||||
#define SSAIR_FIREZONES 3
|
||||
#define SSAIR_HOTSPOTS 4
|
||||
#define SSAIR_ZONES 5
|
||||
#define SSAIR_DONE 6
|
||||
|
||||
SUBSYSTEM_DEF(air)
|
||||
name = "Air"
|
||||
init_order = INIT_ORDER_AIR
|
||||
priority = 35
|
||||
wait = 2 SECONDS // seconds (We probably can speed this up actually)
|
||||
flags = SS_BACKGROUND // TODO - Should this really be background? It might be important.
|
||||
runlevels = RUNLEVEL_GAME | RUNLEVEL_POSTGAME
|
||||
var/static/list/part_names = list("turfs", "edges", "fire zones", "hotspots", "zones")
|
||||
|
||||
var/cost_turfs = 0
|
||||
var/cost_edges = 0
|
||||
var/cost_firezones = 0
|
||||
var/cost_hotspots = 0
|
||||
var/cost_zones = 0
|
||||
|
||||
var/list/currentrun = null
|
||||
var/current_step = null
|
||||
|
||||
// Updating zone tiles requires a temporary storage location for self-zone-blocked turfs across resumes. Used only by process_tiles_to_update.
|
||||
var/list/selfblock_deferred = null
|
||||
|
||||
/datum/controller/subsystem/air/PreInit()
|
||||
air_master = src
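// SUBSYSTEM_DEF(air) above is the Master Controller boilerplate macro; it is not defined in this
// file, and the sketch below is an assumption about its shape rather than the actual definition.
// It is presumed to declare a global SSair instance and open the type path this file extends:
//   var/datum/controller/subsystem/air/SSair
//   /datum/controller/subsystem/air/New()
//       NEW_SS_GLOBAL(SSair)
//       PreInit()
//   /datum/controller/subsystem/air   // ...followed by the vars and procs defined in this file
// PreInit() above then points the legacy air_master global at the new subsystem instance.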
|
||||
|
||||
/datum/controller/subsystem/air/Initialize(timeofday)
|
||||
report_progress("Processing Geometry...")
|
||||
|
||||
current_cycle = 0
|
||||
var/simulated_turf_count = 0
|
||||
for(var/turf/simulated/S in world)
|
||||
simulated_turf_count++
|
||||
S.update_air_properties()
|
||||
CHECK_TICK
|
||||
|
||||
admin_notice({"<span class='danger'>Geometry initialized in [round(0.1*(REALTIMEOFDAY-timeofday),0.1)] seconds.</span>
|
||||
<span class='info'>
|
||||
Total Simulated Turfs: [simulated_turf_count]
|
||||
Total Zones: [zones.len]
|
||||
Total Edges: [edges.len]
|
||||
Total Active Edges: [active_edges.len ? "<span class='danger'>[active_edges.len]</span>" : "None"]
|
||||
Total Unsimulated Turfs: [world.maxx*world.maxy*world.maxz - simulated_turf_count]
|
||||
</span>"}, R_DEBUG)
|
||||
|
||||
// Note - Baystation settles the air by running for one tick. We prefer to not have active edges.
|
||||
// Maps should not have active edges on boot. If we've got some, log it so it can get fixed.
|
||||
if(active_edges.len)
|
||||
var/list/edge_log = list()
|
||||
for(var/connection_edge/E in active_edges)
|
||||
edge_log += "Active Edge [E] ([E.type])"
|
||||
for(var/turf/T in E.connecting_turfs)
|
||||
edge_log += "+--- Connecting Turf [T] @ [T.x], [T.y], [T.z]"
|
||||
log_debug("Active Edges on ZAS Startup\n" + edge_log.Join("\n"))
|
||||
|
||||
..()
|
||||
|
||||
/datum/controller/subsystem/air/fire(resumed = 0)
|
||||
var/timer
|
||||
if(!resumed)
|
||||
ASSERT(LAZYLEN(currentrun) == 0) // Sanity check to make sure we don't somehow have items left over from last cycle
|
||||
ASSERT(current_step == null) // Or somehow didn't finish all the steps from last cycle
|
||||
current_cycle++ // Begin a new air_master cycle!
|
||||
current_step = SSAIR_TURFS // Start with Step 1 of course
|
||||
|
||||
INTERNAL_PROCESS_STEP(SSAIR_TURFS, TRUE, process_tiles_to_update, cost_turfs, SSAIR_EDGES)
|
||||
INTERNAL_PROCESS_STEP(SSAIR_EDGES, FALSE, process_active_edges, cost_edges, SSAIR_FIREZONES)
|
||||
INTERNAL_PROCESS_STEP(SSAIR_FIREZONES, FALSE, process_active_fire_zones, cost_firezones, SSAIR_HOTSPOTS)
|
||||
INTERNAL_PROCESS_STEP(SSAIR_HOTSPOTS, FALSE, process_active_hotspots, cost_hotspots, SSAIR_ZONES)
|
||||
INTERNAL_PROCESS_STEP(SSAIR_ZONES, FALSE, process_zones_to_update, cost_zones, SSAIR_DONE)
|
||||
|
||||
// Okay, we're done! Woo! Got thru a whole air_master cycle!
|
||||
ASSERT(LAZYLEN(currentrun) == 0) // Sanity checks to make sure there are really none left
|
||||
ASSERT(current_step == SSAIR_DONE) // And that we didn't somehow skip past the last step
|
||||
currentrun = null
|
||||
current_step = null
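// INTERNAL_PROCESS_STEP is not defined in this file; the sketch below is an assumption about its
// shape, not the actual macro. Each invocation is presumed to run its step proc only when
// current_step matches (or when it is the first step of a fresh, non-resumed fire), time it into
// the matching cost_* variable, return out of fire() if the proc had to yield, and otherwise
// advance current_step. Roughly, for the first invocation above:
//   if(current_step == SSAIR_TURFS || !resumed)
//       var/start = TICK_USAGE
//       process_tiles_to_update(resumed)
//       cost_turfs = MC_AVERAGE(cost_turfs, TICK_DELTA_TO_MS(TICK_USAGE - start))
//       if(state != SS_RUNNING)
//           return
//       resumed = 0
//       current_step = SSAIR_EDGES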
|
||||
|
||||
/datum/controller/subsystem/air/proc/process_tiles_to_update(resumed = 0)
|
||||
if (!resumed)
|
||||
// NOT a copy, because we are supposed to drain active turfs each cycle anyway, so just replace with empty list.
|
||||
// We still use a separate list tho, to ensure we don't process a turf twice during a single cycle!
|
||||
src.currentrun = tiles_to_update
|
||||
tiles_to_update = list()
|
||||
|
||||
//defer updating of self-zone-blocked turfs until after all other turfs have been updated.
|
||||
//this hopefully ensures that non-self-zone-blocked turfs adjacent to self-zone-blocked ones
|
||||
//have valid zones when the self-zone-blocked turfs update.
|
||||
//This ensures that doorways don't form their own single-turf zones, since doorways are self-zone-blocked and
|
||||
//can merge with an adjacent zone, whereas zones that are formed on adjacent turfs cannot merge with the doorway.
|
||||
ASSERT(src.selfblock_deferred == null) // Sanity check to make sure it was not remaining from last cycle somehow.
|
||||
src.selfblock_deferred = list()
|
||||
|
||||
//cache for sanic speed (lists are references anyways)
|
||||
var/list/currentrun = src.currentrun
|
||||
var/list/selfblock_deferred = src.selfblock_deferred
|
||||
|
||||
// Run thru the list, processing non-self-zone-blocked and deferring self-zone-blocked
|
||||
while(currentrun.len)
|
||||
var/turf/T = currentrun[currentrun.len]
|
||||
currentrun.len--
|
||||
//check if the turf is self-zone-blocked
|
||||
if(T.c_airblock(T) & ZONE_BLOCKED)
|
||||
selfblock_deferred += T
|
||||
if(MC_TICK_CHECK)
|
||||
return
|
||||
else
|
||||
continue
|
||||
T.update_air_properties()
|
||||
T.post_update_air_properties()
|
||||
T.needs_air_update = 0
|
||||
#ifdef ZASDBG
|
||||
T.overlays -= mark
|
||||
#endif
|
||||
if(MC_TICK_CHECK)
|
||||
return
|
||||
|
||||
ASSERT(LAZYLEN(currentrun) == 0)
|
||||
|
||||
// Run thru the deferred list and process them
|
||||
while(selfblock_deferred.len)
|
||||
var/turf/T = selfblock_deferred[selfblock_deferred.len]
|
||||
selfblock_deferred.len--
|
||||
T.update_air_properties()
|
||||
T.post_update_air_properties()
|
||||
T.needs_air_update = 0
|
||||
#ifdef ZASDBG
|
||||
T.overlays -= mark
|
||||
#endif
|
||||
if(MC_TICK_CHECK)
|
||||
return
|
||||
|
||||
ASSERT(LAZYLEN(selfblock_deferred) == 0)
|
||||
src.selfblock_deferred = null
|
||||
|
||||
/datum/controller/subsystem/air/proc/process_active_edges(resumed = 0)
|
||||
if (!resumed)
|
||||
src.currentrun = active_edges.Copy()
|
||||
//cache for sanic speed (lists are references anyways)
|
||||
var/list/currentrun = src.currentrun
|
||||
while(currentrun.len)
|
||||
var/connection_edge/edge = currentrun[currentrun.len]
|
||||
currentrun.len--
|
||||
if(edge) // TODO - Do we need to check this? Old one didn't, but old one was single-threaded.
|
||||
edge.tick()
|
||||
if(MC_TICK_CHECK)
|
||||
return
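// The loop above is the standard resumable-subsystem pattern: drain currentrun from the tail so no
// index bookkeeping has to survive a pause, and return whenever MC_TICK_CHECK reports that this
// subsystem's share of the tick is spent. MC_TICK_CHECK itself is defined elsewhere; it is assumed
// to be the usual MC macro, roughly:
//   #define MC_TICK_CHECK (TICK_USAGE > Master.current_ticklimit ? pause() : 0)
// so a truthy result means fire() will be re-entered later with resumed = TRUE.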
|
||||
|
||||
/datum/controller/subsystem/air/proc/process_active_fire_zones(resumed = 0)
|
||||
if (!resumed)
|
||||
src.currentrun = active_fire_zones.Copy()
|
||||
//cache for sanic speed (lists are references anyways)
|
||||
var/list/currentrun = src.currentrun
|
||||
while(currentrun.len)
|
||||
var/zone/Z = currentrun[currentrun.len]
|
||||
currentrun.len--
|
||||
if(Z) // TODO - Do we need to check this? Old one didn't, but old one was single-threaded.
|
||||
Z.process_fire()
|
||||
if(MC_TICK_CHECK)
|
||||
return
|
||||
|
||||
/datum/controller/subsystem/air/proc/process_active_hotspots(resumed = 0)
|
||||
if (!resumed)
|
||||
src.currentrun = active_hotspots.Copy()
|
||||
//cache for sanic speed (lists are references anyways)
|
||||
var/list/currentrun = src.currentrun
|
||||
while(currentrun.len)
|
||||
var/obj/fire/fire = currentrun[currentrun.len]
|
||||
currentrun.len--
|
||||
if(fire) // TODO - Do we need to check this? Old one didn't, but old one was single-threaded.
|
||||
fire.process()
|
||||
if(MC_TICK_CHECK)
|
||||
return
|
||||
|
||||
/datum/controller/subsystem/air/proc/process_zones_to_update(resumed = 0)
|
||||
if (!resumed)
|
||||
active_zones = zones_to_update.len // Save how many zones there were to update this cycle (used by some debugging stuff)
|
||||
if(!zones_to_update.len)
|
||||
return // Nothing to do here this cycle!
|
||||
// NOT a copy, because we are supposed to drain active turfs each cycle anyway, so just replace with empty list.
|
||||
// Blanking the public list means we actually are removing processed ones from the list! Maybe we could use zones_to_update directly?
|
||||
// But if we do, any zones added to zones_to_update DURING this step will get processed again during this step.
|
||||
// I don't know if that actually happens? But if it does, it could lead to an infinite loop. Better to preserve the original semantics.
|
||||
src.currentrun = zones_to_update
|
||||
zones_to_update = list()
|
||||
|
||||
//cache for sanic speed (lists are references anyways)
|
||||
var/list/currentrun = src.currentrun
|
||||
while(currentrun.len)
|
||||
var/zone/zone = currentrun[currentrun.len]
|
||||
currentrun.len--
|
||||
if(zone) // TODO - Do we need to check this? Old one didn't, but old one was single-threaded.
|
||||
zone.tick()
|
||||
zone.needs_update = 0
|
||||
if(MC_TICK_CHECK)
|
||||
return
|
||||
|
||||
/datum/controller/subsystem/air/stat_entry(msg_prefix)
|
||||
var/list/msg = list(msg_prefix)
|
||||
msg += "S:[current_step ? part_names[current_step] : ""] "
|
||||
msg += "C:{"
|
||||
msg += "T [round(cost_turfs, 1)] | "
|
||||
msg += "E [round(cost_edges, 1)] | "
|
||||
msg += "F [round(cost_firezones, 1)] | "
|
||||
msg += "H [round(cost_hotspots, 1)] | "
|
||||
msg += "Z [round(cost_zones, 1)] "
|
||||
msg += "}"
|
||||
msg += "Z: [zones.len] "
|
||||
msg += "E: [edges.len] "
|
||||
msg += "Cycle: [current_cycle] {"
|
||||
msg += "T [tiles_to_update.len] | "
|
||||
msg += "E [active_edges.len] | "
|
||||
msg += "F [active_fire_zones.len] | "
|
||||
msg += "H [active_hotspots.len] | "
|
||||
msg += "Z [zones_to_update.len] "
|
||||
msg += "}"
|
||||
..(msg.Join())
|
||||
|
||||
// ZAS might displace objects as the map loads if an air tick is processed mid-load.
|
||||
/datum/controller/subsystem/air/StartLoadingMap(var/quiet = TRUE)
|
||||
can_fire = FALSE
|
||||
. = ..()
|
||||
|
||||
/datum/controller/subsystem/air/StopLoadingMap(var/quiet = TRUE)
|
||||
can_fire = TRUE
|
||||
. = ..()
|
||||
|
||||
// Reboot the air master. A bit hacky right now, but sometimes necessary still.
|
||||
/datum/controller/subsystem/air/proc/RebootZAS()
|
||||
can_fire = FALSE // Pause processing while we reboot
|
||||
// If we should happen to be in the middle of processing... wait until that finishes.
|
||||
if (state != SS_IDLE)
|
||||
report_progress("ZAS Rebuild initiated. Waiting for current air tick to complete before continuing.")
|
||||
while (state != SS_IDLE)
|
||||
stoplag()
|
||||
|
||||
// Invalidate all zones
|
||||
for(var/zone/zone in zones)
|
||||
zone.c_invalidate()
|
||||
|
||||
// Reset all the lists
|
||||
zones.Cut()
|
||||
edges.Cut()
|
||||
tiles_to_update.Cut()
|
||||
zones_to_update.Cut()
|
||||
active_fire_zones.Cut()
|
||||
active_hotspots.Cut()
|
||||
active_edges.Cut()
|
||||
|
||||
// Start it up again
|
||||
Initialize(REALTIMEOFDAY)
|
||||
|
||||
// Update next_fire so the MC doesn't try to make up for missed ticks.
|
||||
next_fire = world.time + wait
|
||||
can_fire = TRUE // Unpause
|
||||
|
||||
//
|
||||
// The procs from the ZAS Air Controller are in ZAS/Controller.dm
|
||||
//
|
||||
|
||||
#undef SSAIR_TURFS
|
||||
#undef SSAIR_EDGES
|
||||
#undef SSAIR_FIREZONES
|
||||
#undef SSAIR_HOTSPOTS
|
||||
#undef SSAIR_ZONES
|
||||
#undef SSAIR_DONE
|
||||
163
code/controllers/subsystems/airflow.dm
Normal file
@@ -0,0 +1,163 @@
|
||||
#define CLEAR_OBJECT(TARGET) \
|
||||
processing -= TARGET; \
|
||||
TARGET.airflow_dest = null; \
|
||||
TARGET.airflow_speed = 0; \
|
||||
TARGET.airflow_time = 0; \
|
||||
TARGET.airflow_skip_speedcheck = FALSE; \
|
||||
if (TARGET.airflow_od) { \
|
||||
TARGET.density = 0; \
|
||||
}
|
||||
|
||||
// No point in making this a processing subsystem; it overrides fire() and handles its own processing list!
|
||||
SUBSYSTEM_DEF(airflow)
|
||||
name = "Airflow"
|
||||
wait = 2
|
||||
flags = SS_NO_INIT
|
||||
runlevels = RUNLEVEL_GAME | RUNLEVEL_POSTGAME
|
||||
priority = 30
|
||||
|
||||
var/list/processing = list()
|
||||
var/list/currentrun = list()
|
||||
|
||||
/datum/controller/subsystem/airflow/fire(resumed = FALSE)
|
||||
if (!resumed)
|
||||
currentrun = processing.Copy()
|
||||
|
||||
var/mywait = wait
|
||||
var/list/curr = currentrun // Cache for sanic speed
|
||||
while (curr.len)
|
||||
var/atom/movable/target = curr[curr.len]
|
||||
curr.len--
|
||||
if(QDELETED(target))
|
||||
processing -= target
|
||||
if (MC_TICK_CHECK)
|
||||
return
|
||||
continue
|
||||
|
||||
if (target.airflow_speed <= 0)
|
||||
CLEAR_OBJECT(target)
|
||||
if (MC_TICK_CHECK)
|
||||
return
|
||||
continue
|
||||
|
||||
if (target.airflow_process_delay > 0)
|
||||
target.airflow_process_delay -= mywait
|
||||
if (MC_TICK_CHECK)
|
||||
return
|
||||
continue
|
||||
else if (target.airflow_process_delay)
|
||||
target.airflow_process_delay = 0
|
||||
|
||||
target.airflow_speed = min(target.airflow_speed, 15)
|
||||
target.airflow_speed -= vsc.airflow_speed_decay
|
||||
if (!target.airflow_skip_speedcheck)
|
||||
if (target.airflow_speed > 7)
|
||||
if (target.airflow_time++ >= target.airflow_speed - 7)
|
||||
if (target.airflow_od)
|
||||
target.density = 0
|
||||
target.airflow_skip_speedcheck = TRUE
|
||||
|
||||
if (MC_TICK_CHECK)
|
||||
return
|
||||
continue
|
||||
else
|
||||
if (target.airflow_od)
|
||||
target.density = 0
|
||||
target.airflow_process_delay = max(1, 10 - (target.airflow_speed + 3))
|
||||
target.airflow_skip_speedcheck = TRUE
|
||||
|
||||
if (MC_TICK_CHECK)
|
||||
return
|
||||
continue
|
||||
|
||||
target.airflow_skip_speedcheck = FALSE
|
||||
|
||||
if (target.airflow_od)
|
||||
target.density = 1
|
||||
|
||||
if (!target.airflow_dest || target.loc == target.airflow_dest)
|
||||
target.airflow_dest = locate(min(max(target.x + target.airflow_xo, 1), world.maxx), min(max(target.y + target.airflow_yo, 1), world.maxy), target.z)
|
||||
|
||||
if ((target.x == 1) || (target.x == world.maxx) || (target.y == 1) || (target.y == world.maxy))
|
||||
CLEAR_OBJECT(target)
|
||||
if (MC_TICK_CHECK)
|
||||
return
|
||||
continue
|
||||
|
||||
if (!isturf(target.loc))
|
||||
CLEAR_OBJECT(target)
|
||||
if (MC_TICK_CHECK)
|
||||
return
|
||||
continue
|
||||
|
||||
step_towards(target, target.airflow_dest)
|
||||
var/mob/M = target
|
||||
if (ismob(target) && M.client)
|
||||
M.setMoveCooldown(vsc.airflow_mob_slowdown)
|
||||
|
||||
if (MC_TICK_CHECK)
|
||||
return
|
||||
|
||||
#undef CLEAR_OBJECT
|
||||
|
||||
/atom/movable
|
||||
var/tmp/airflow_xo
|
||||
var/tmp/airflow_yo
|
||||
var/tmp/airflow_od
|
||||
var/tmp/airflow_process_delay
|
||||
var/tmp/airflow_skip_speedcheck
|
||||
|
||||
/atom/movable/proc/prepare_airflow(n)
|
||||
if (!airflow_dest || airflow_speed < 0 || last_airflow > world.time - vsc.airflow_delay)
|
||||
return FALSE
|
||||
if (airflow_speed)
|
||||
airflow_speed = n / max(get_dist(src, airflow_dest), 1)
|
||||
return FALSE
|
||||
|
||||
if (airflow_dest == loc)
|
||||
step_away(src, loc)
|
||||
|
||||
if (!src.AirflowCanMove(n))
|
||||
return FALSE
|
||||
|
||||
if (ismob(src))
|
||||
to_chat(src,"<span class='danger'>You are pushed away by airflow!</span>")
|
||||
|
||||
last_airflow = world.time
|
||||
var/airflow_falloff = 9 - sqrt((x - airflow_dest.x) ** 2 + (y - airflow_dest.y) ** 2)
|
||||
|
||||
if (airflow_falloff < 1)
|
||||
airflow_dest = null
|
||||
return FALSE
|
||||
|
||||
airflow_speed = min(max(n * (9 / airflow_falloff), 1), 9)
|
||||
|
||||
airflow_od = 0
|
||||
|
||||
if (!density)
|
||||
density = 1
|
||||
airflow_od = 1
|
||||
|
||||
return TRUE
|
||||
|
||||
/atom/movable/proc/GotoAirflowDest(n)
|
||||
if (!prepare_airflow(n))
|
||||
return
|
||||
|
||||
airflow_xo = airflow_dest.x - src.x
|
||||
airflow_yo = airflow_dest.y - src.y
|
||||
|
||||
airflow_dest = null
|
||||
|
||||
SSairflow.processing += src
|
||||
|
||||
/atom/movable/proc/RepelAirflowDest(n)
|
||||
if (!prepare_airflow(n))
|
||||
return
|
||||
|
||||
airflow_xo = -(airflow_dest.x - src.x)
|
||||
airflow_yo = -(airflow_dest.y - src.y)
|
||||
|
||||
airflow_dest = null
|
||||
|
||||
SSairflow.processing += src
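// Callers of these procs live in the ZAS pressure-differential code, which is not part of this
// diff; the hypothetical sketch below only illustrates the presumed contract (airflow_dest must
// be set before the call, and n scales the resulting airflow_speed):
//   M.airflow_dest = get_turf(some_high_pressure_turf)   // hypothetical caller
//   M.RepelAirflowDest(pressure_differential)            // push away from the destination
// GotoAirflowDest() is the same entry point but pulls the object toward airflow_dest instead.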
|
||||
143
code/controllers/subsystems/atoms.dm
Normal file
@@ -0,0 +1,143 @@
|
||||
#define BAD_INIT_QDEL_BEFORE 1
|
||||
#define BAD_INIT_DIDNT_INIT 2
|
||||
#define BAD_INIT_SLEPT 4
|
||||
#define BAD_INIT_NO_HINT 8
|
||||
|
||||
SUBSYSTEM_DEF(atoms)
|
||||
name = "Atoms"
|
||||
init_order = INIT_ORDER_ATOMS
|
||||
flags = SS_NO_FIRE
|
||||
|
||||
var/initialized = INITIALIZATION_INSSATOMS
|
||||
// var/list/created_atoms // This is never used, so don't bother. ~Leshana
|
||||
var/old_initialized
|
||||
|
||||
var/list/late_loaders
|
||||
var/list/created_atoms
|
||||
|
||||
var/list/BadInitializeCalls = list()
|
||||
|
||||
/datum/controller/subsystem/atoms/Initialize(timeofday)
|
||||
setupgenetics() //to set the mutations' place in structural enzymes, so initializers know where to put mutations.
|
||||
initialized = INITIALIZATION_INNEW_MAPLOAD
|
||||
to_world_log("Initializing objects")
|
||||
admin_notice("<span class='danger'>Initializing objects</span>", R_DEBUG)
|
||||
InitializeAtoms()
|
||||
return ..()
|
||||
|
||||
/datum/controller/subsystem/atoms/proc/InitializeAtoms(list/atoms)
|
||||
if(initialized == INITIALIZATION_INSSATOMS)
|
||||
return
|
||||
|
||||
initialized = INITIALIZATION_INNEW_MAPLOAD
|
||||
|
||||
LAZYINITLIST(late_loaders)
|
||||
|
||||
var/count
|
||||
var/list/mapload_arg = list(TRUE)
|
||||
if(atoms)
|
||||
created_atoms = list()
|
||||
count = atoms.len
|
||||
for(var/I in atoms)
|
||||
var/atom/A = I
|
||||
if(!A.initialized)
|
||||
if(InitAtom(I, mapload_arg))
|
||||
atoms -= I
|
||||
CHECK_TICK
|
||||
else
|
||||
count = 0
|
||||
for(var/atom/A in world)
|
||||
if(!A.initialized)
|
||||
InitAtom(A, mapload_arg)
|
||||
++count
|
||||
CHECK_TICK
|
||||
|
||||
log_world("Initialized [count] atoms")
|
||||
|
||||
initialized = INITIALIZATION_INNEW_REGULAR
|
||||
|
||||
if(late_loaders.len)
|
||||
for(var/I in late_loaders)
|
||||
var/atom/A = I
|
||||
A.LateInitialize()
|
||||
CHECK_TICK
|
||||
testing("Late initialized [late_loaders.len] atoms")
|
||||
late_loaders.Cut()
|
||||
|
||||
// Nothing ever checks the return value of this proc, so don't bother. If this ever changes, fix the code in /atom/New() ~Leshana
|
||||
// if(atoms)
|
||||
// . = created_atoms + atoms
|
||||
// created_atoms = null
|
||||
|
||||
/datum/controller/subsystem/atoms/proc/InitAtom(atom/A, list/arguments)
|
||||
var/the_type = A.type
|
||||
if(QDELING(A))
|
||||
BadInitializeCalls[the_type] |= BAD_INIT_QDEL_BEFORE
|
||||
return TRUE
|
||||
|
||||
var/start_tick = world.time
|
||||
|
||||
var/result = A.initialize(arglist(arguments))
|
||||
|
||||
if(start_tick != world.time)
|
||||
BadInitializeCalls[the_type] |= BAD_INIT_SLEPT
|
||||
|
||||
var/qdeleted = FALSE
|
||||
|
||||
if(result != INITIALIZE_HINT_NORMAL)
|
||||
switch(result)
|
||||
if(INITIALIZE_HINT_LATELOAD)
|
||||
if(arguments[1]) //mapload
|
||||
late_loaders += A
|
||||
else
|
||||
A.LateInitialize()
|
||||
if(INITIALIZE_HINT_QDEL)
|
||||
qdel(A)
|
||||
qdeleted = TRUE
|
||||
else
|
||||
BadInitializeCalls[the_type] |= BAD_INIT_NO_HINT
|
||||
|
||||
if(!A) //possible harddel
|
||||
qdeleted = TRUE
|
||||
else if(!A.initialized)
|
||||
BadInitializeCalls[the_type] |= BAD_INIT_DIDNT_INIT
|
||||
|
||||
return qdeleted || QDELING(A)
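// Illustrative (hypothetical) consumer of the hint handling above, using the lowercase
// initialize() naming this codebase uses; the type and helper proc are made up for the example:
//   /obj/machinery/example/initialize(mapload)
//       . = ..()
//       if(mapload)
//           return INITIALIZE_HINT_LATELOAD   // defer to LateInitialize() until all atoms exist
//   /obj/machinery/example/LateInitialize()
//       find_neighbors()                      // hypothetical helper that needs other atoms initialized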
|
||||
|
||||
/datum/controller/subsystem/atoms/proc/map_loader_begin()
|
||||
old_initialized = initialized
|
||||
initialized = INITIALIZATION_INSSATOMS
|
||||
|
||||
/datum/controller/subsystem/atoms/proc/map_loader_stop()
|
||||
initialized = old_initialized
|
||||
|
||||
/datum/controller/subsystem/atoms/Recover()
|
||||
initialized = SSatoms.initialized
|
||||
if(initialized == INITIALIZATION_INNEW_MAPLOAD)
|
||||
InitializeAtoms()
|
||||
old_initialized = SSatoms.old_initialized
|
||||
BadInitializeCalls = SSatoms.BadInitializeCalls
|
||||
|
||||
/datum/controller/subsystem/atoms/proc/InitLog()
|
||||
. = ""
|
||||
for(var/path in BadInitializeCalls)
|
||||
. += "Path : [path] \n"
|
||||
var/fails = BadInitializeCalls[path]
|
||||
if(fails & BAD_INIT_DIDNT_INIT)
|
||||
. += "- Didn't call atom/Initialize()\n"
|
||||
if(fails & BAD_INIT_NO_HINT)
|
||||
. += "- Didn't return an Initialize hint\n"
|
||||
if(fails & BAD_INIT_QDEL_BEFORE)
|
||||
. += "- Qdel'd in New()\n"
|
||||
if(fails & BAD_INIT_SLEPT)
|
||||
. += "- Slept during Initialize()\n"
|
||||
|
||||
/datum/controller/subsystem/atoms/Shutdown()
|
||||
var/initlog = InitLog()
|
||||
if(initlog)
|
||||
text2file(initlog, "[log_path]-initialize.log")
|
||||
|
||||
#undef BAD_INIT_QDEL_BEFORE
|
||||
#undef BAD_INIT_DIDNT_INIT
|
||||
#undef BAD_INIT_SLEPT
|
||||
#undef BAD_INIT_NO_HINT
|
||||
@@ -81,7 +81,7 @@ SUBSYSTEM_DEF(garbage)
|
||||
dellog += "\tIgnored force: [I.no_respect_force] times"
|
||||
if (I.no_hint)
|
||||
dellog += "\tNo hint: [I.no_hint] times"
|
||||
log_misc(dellog.Join())
|
||||
text2file(dellog.Join(), "[log_path]-qdel.log")
|
||||
|
||||
/datum/controller/subsystem/garbage/fire()
|
||||
//the fact that this resets its processing each fire (rather than resuming where it left off) is intentional.
|
||||
@@ -169,7 +169,12 @@ SUBSYSTEM_DEF(garbage)
|
||||
#endif
|
||||
var/type = D.type
|
||||
var/datum/qdel_item/I = items[type]
|
||||
testing("GC: -- \ref[D] | [type] was unable to be GC'd --")
|
||||
var/extrainfo = "--"
|
||||
if(istype(D,/image))
|
||||
var/image/img = D
|
||||
var/icon/ico = img.icon
|
||||
extrainfo = "L:[img.loc] -- I:[ico] -- IS:[img.icon_state] --"
|
||||
testing("GC: -- \ref[D] | [type] was unable to be GC'd [extrainfo]")
|
||||
I.failures++
|
||||
if (GC_QUEUE_HARDDELETE)
|
||||
HardDelete(D)
|
||||
@@ -442,4 +447,5 @@ SUBSYSTEM_DEF(garbage)
|
||||
|
||||
/image/Destroy()
|
||||
..()
|
||||
return QDEL_HINT_HARDDEL_NOW
|
||||
loc = null
|
||||
return QDEL_HINT_QUEUE
|
||||
|
||||
@@ -33,8 +33,9 @@ SUBSYSTEM_DEF(machines)
|
||||
var/list/current_run = list()
|
||||
|
||||
/datum/controller/subsystem/machines/Initialize(timeofday)
|
||||
SSmachines.makepowernets()
|
||||
// TODO - Move world-creation time setup of atmos machinery and pipenets to here
|
||||
makepowernets()
|
||||
admin_notice("<span class='danger'>Initializing atmos machinery.</span>", R_DEBUG)
|
||||
setup_atmos_machinery(global.machines)
|
||||
fire()
|
||||
..()
|
||||
|
||||
@@ -53,13 +54,33 @@ SUBSYSTEM_DEF(machines)
|
||||
for(var/datum/powernet/PN in powernets)
|
||||
qdel(PN)
|
||||
powernets.Cut()
|
||||
setup_powernets_for_cables(cable_list)
|
||||
|
||||
for(var/obj/structure/cable/PC in cable_list)
|
||||
/datum/controller/subsystem/machines/proc/setup_powernets_for_cables(list/cables)
|
||||
for(var/obj/structure/cable/PC in cables)
|
||||
if(!PC.powernet)
|
||||
var/datum/powernet/NewPN = new()
|
||||
NewPN.add_cable(PC)
|
||||
propagate_network(PC,PC.powernet)
|
||||
|
||||
/datum/controller/subsystem/machines/proc/setup_atmos_machinery(list/atmos_machines)
|
||||
for(var/obj/machinery/atmospherics/machine in atmos_machines)
|
||||
machine.atmos_init()
|
||||
CHECK_TICK
|
||||
|
||||
for(var/obj/machinery/atmospherics/machine in atmos_machines)
|
||||
machine.build_network()
|
||||
CHECK_TICK
|
||||
|
||||
for(var/obj/machinery/atmospherics/unary/U in atmos_machines)
|
||||
if(istype(U, /obj/machinery/atmospherics/unary/vent_pump))
|
||||
var/obj/machinery/atmospherics/unary/vent_pump/T = U
|
||||
T.broadcast_status()
|
||||
else if(istype(U, /obj/machinery/atmospherics/unary/vent_scrubber))
|
||||
var/obj/machinery/atmospherics/unary/vent_scrubber/T = U
|
||||
T.broadcast_status()
|
||||
CHECK_TICK
|
||||
|
||||
/datum/controller/subsystem/machines/stat_entry()
|
||||
var/msg = list()
|
||||
msg += "C:{"
|
||||
|
||||
255
code/controllers/subsystems/overlays.dm
Normal file
@@ -0,0 +1,255 @@
|
||||
SUBSYSTEM_DEF(overlays)
|
||||
name = "Overlay"
|
||||
flags = SS_TICKER
|
||||
wait = 1
|
||||
priority = FIRE_PRIORITY_OVERLAYS
|
||||
init_order = INIT_ORDER_OVERLAY
|
||||
|
||||
var/initialized = FALSE
|
||||
var/list/queue // Queue of atoms needing overlay compiling (TODO-VERIFY!)
|
||||
var/list/stats
|
||||
var/list/overlay_icon_state_caches // Cache thing
|
||||
var/list/overlay_icon_cache // Cache thing
|
||||
|
||||
var/global/image/stringbro = new() // Temporarily super-global because of BYOND init order dumbness.
|
||||
var/global/image/iconbro = new() // Temporarily super-global because of BYOND init order dumbness.
|
||||
var/global/image/appearance_bro = new() // Temporarily super-global because of BYOND init order dumbness.
|
||||
|
||||
/datum/controller/subsystem/overlays/PreInit()
|
||||
overlay_icon_state_caches = list()
|
||||
overlay_icon_cache = list()
|
||||
queue = list()
|
||||
stats = list()
|
||||
|
||||
/datum/controller/subsystem/overlays/Initialize()
|
||||
initialized = TRUE
|
||||
fire(mc_check = FALSE)
|
||||
..()
|
||||
|
||||
/datum/controller/subsystem/overlays/stat_entry()
|
||||
..("Ov:[length(queue)]")
|
||||
|
||||
|
||||
/datum/controller/subsystem/overlays/Shutdown()
|
||||
text2file(render_stats(stats), "[log_path]-overlay.log")
|
||||
|
||||
/datum/controller/subsystem/overlays/Recover()
|
||||
overlay_icon_state_caches = SSoverlays.overlay_icon_state_caches
|
||||
overlay_icon_cache = SSoverlays.overlay_icon_cache
|
||||
queue = SSoverlays.queue
|
||||
|
||||
|
||||
/datum/controller/subsystem/overlays/fire(resumed = FALSE, mc_check = TRUE)
|
||||
var/list/queue = src.queue
|
||||
var/static/count = 0
|
||||
if (count)
|
||||
var/c = count
|
||||
count = 0 //so if we runtime on the Cut, we don't try again.
|
||||
queue.Cut(1,c+1)
|
||||
|
||||
for (var/thing in queue)
|
||||
count++
|
||||
if(thing)
|
||||
STAT_START_STOPWATCH
|
||||
var/atom/A = thing
|
||||
COMPILE_OVERLAYS(A)
|
||||
STAT_STOP_STOPWATCH
|
||||
STAT_LOG_ENTRY(stats, A.type)
|
||||
if(mc_check)
|
||||
if(MC_TICK_CHECK)
|
||||
break
|
||||
else
|
||||
CHECK_TICK
|
||||
|
||||
if (count)
|
||||
queue.Cut(1,count+1)
|
||||
count = 0
|
||||
|
||||
/proc/iconstate2appearance(icon, iconstate)
|
||||
// var/static/image/stringbro = new() // Moved to be superglobal due to BYOND insane init order stupidness.
|
||||
var/list/icon_states_cache = SSoverlays.overlay_icon_state_caches
|
||||
var/list/cached_icon = icon_states_cache[icon]
|
||||
if (cached_icon)
|
||||
var/cached_appearance = cached_icon["[iconstate]"]
|
||||
if (cached_appearance)
|
||||
return cached_appearance
|
||||
stringbro.icon = icon
|
||||
stringbro.icon_state = iconstate
|
||||
if (!cached_icon) //not using the macro to save an associated lookup
|
||||
cached_icon = list()
|
||||
icon_states_cache[icon] = cached_icon
|
||||
var/cached_appearance = stringbro.appearance
|
||||
cached_icon["[iconstate]"] = cached_appearance
|
||||
return cached_appearance
|
||||
|
||||
/proc/icon2appearance(icon)
|
||||
// var/static/image/iconbro = new() // Moved to be superglobal due to BYOND insane init order stupidness.
|
||||
var/list/icon_cache = SSoverlays.overlay_icon_cache
|
||||
. = icon_cache[icon]
|
||||
if (!.)
|
||||
iconbro.icon = icon
|
||||
. = iconbro.appearance
|
||||
icon_cache[icon] = .
|
||||
|
||||
/atom/proc/build_appearance_list(old_overlays)
|
||||
// var/static/image/appearance_bro = new() // Moved to be superglobal due to BYOND insane init order stupidness.
|
||||
var/list/new_overlays = list()
|
||||
if (!islist(old_overlays))
|
||||
old_overlays = list(old_overlays)
|
||||
for (var/overlay in old_overlays)
|
||||
if(!overlay)
|
||||
continue
|
||||
if (istext(overlay))
|
||||
new_overlays += iconstate2appearance(icon, overlay)
|
||||
else if(isicon(overlay))
|
||||
new_overlays += icon2appearance(overlay)
|
||||
else
|
||||
if(isloc(overlay))
|
||||
var/atom/A = overlay
|
||||
if (A.flags & OVERLAY_QUEUED)
|
||||
COMPILE_OVERLAYS(A)
|
||||
appearance_bro.appearance = overlay //this works for images and atoms too!
|
||||
if(!ispath(overlay))
|
||||
var/image/I = overlay
|
||||
appearance_bro.dir = I.dir
|
||||
new_overlays += appearance_bro.appearance
|
||||
return new_overlays
|
||||
|
||||
#define NOT_QUEUED_ALREADY (!(flags & OVERLAY_QUEUED))
|
||||
#define QUEUE_FOR_COMPILE flags |= OVERLAY_QUEUED; SSoverlays.queue += src;
|
||||
|
||||
/**
|
||||
 * Cut all of the atom's normal overlays. Leaves the "priority" overlays untouched unless priority is TRUE.
|
||||
*
|
||||
 * @param priority If TRUE, the priority overlays are also cut.
|
||||
*/
|
||||
/atom/proc/cut_overlays(priority = FALSE)
|
||||
var/list/cached_overlays = our_overlays
|
||||
var/list/cached_priority = priority_overlays
|
||||
|
||||
var/need_compile = FALSE
|
||||
|
||||
if(LAZYLEN(cached_overlays)) //don't queue empty lists, don't cut priority overlays
|
||||
cached_overlays.Cut() //clear regular overlays
|
||||
need_compile = TRUE
|
||||
|
||||
if(priority && LAZYLEN(cached_priority))
|
||||
cached_priority.Cut()
|
||||
need_compile = TRUE
|
||||
|
||||
if(NOT_QUEUED_ALREADY && need_compile)
|
||||
QUEUE_FOR_COMPILE
|
||||
|
||||
/**
|
||||
 * Removes specific overlay(s) from the atom. Does not touch the "priority" overlays unless priority is TRUE.
|
||||
*
|
||||
 * @param overlays The overlay(s) to remove; the type can be anything that add_overlay() accepts.
|
||||
 * @param priority If TRUE, they are also removed from the "priority" overlays.
|
||||
*/
|
||||
/atom/proc/cut_overlay(list/overlays, priority)
|
||||
if(!overlays)
|
||||
return
|
||||
|
||||
overlays = build_appearance_list(overlays)
|
||||
|
||||
var/list/cached_overlays = our_overlays //sanic
|
||||
var/list/cached_priority = priority_overlays
|
||||
var/init_o_len = LAZYLEN(cached_overlays)
|
||||
var/init_p_len = LAZYLEN(cached_priority) //starter pokemon
|
||||
|
||||
LAZYREMOVE(cached_overlays, overlays)
|
||||
if(priority)
|
||||
LAZYREMOVE(cached_priority, overlays)
|
||||
|
||||
if(NOT_QUEUED_ALREADY && ((init_o_len != LAZYLEN(cached_overlays)) || (init_p_len != LAZYLEN(cached_priority))))
|
||||
QUEUE_FOR_COMPILE
|
||||
|
||||
/**
|
||||
* Adds specific overlay(s) to the atom.
|
||||
* It is designed so any of the types allowed to be added to /atom/overlays can be added here too. More details below.
|
||||
*
|
||||
* @param overlays The overlay(s) to add. These may be
|
||||
* - A string: In which case it is treated as an icon_state of the atom's icon.
|
||||
* - An icon: It is treated as an icon.
|
||||
 * - An atom: Its own overlays are compiled and then its appearance is added. (Meaning its current appearance is frozen.)
|
||||
 * - An image: The image's appearance is added (i.e. subsequently editing the image will not edit the overlay).
|
||||
* - A type path: Added to overlays as is. Does whatever it is BYOND does when you add paths to overlays.
|
||||
* - Or a list containing any of the above.
|
||||
 * @param priority If TRUE, the overlays are added to the "priority" list instead of the normal one.
|
||||
*/
|
||||
/atom/proc/add_overlay(list/overlays, priority = FALSE)
|
||||
if(!overlays)
|
||||
return
|
||||
|
||||
overlays = build_appearance_list(overlays)
|
||||
|
||||
LAZYINITLIST(our_overlays) //always initialized after this point
|
||||
LAZYINITLIST(priority_overlays)
|
||||
|
||||
var/list/cached_overlays = our_overlays //sanic
|
||||
var/list/cached_priority = priority_overlays
|
||||
var/init_o_len = cached_overlays.len
|
||||
var/init_p_len = cached_priority.len //starter pokemon
|
||||
var/need_compile
|
||||
|
||||
if(priority)
|
||||
cached_priority += overlays //or in the image. Can we use [image] = image?
|
||||
need_compile = init_p_len != cached_priority.len
|
||||
else
|
||||
cached_overlays += overlays
|
||||
need_compile = init_o_len != cached_overlays.len
|
||||
|
||||
if(NOT_QUEUED_ALREADY && need_compile) //have we caught more pokemon?
|
||||
QUEUE_FOR_COMPILE
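// Usage sketch for the API above, on a hypothetical atom (the type, icon states and var are made
// up for the example):
//   /obj/machinery/example/update_icon()
//       cut_overlays()                            // drop the previous compile's normal overlays
//       if(powered)
//           add_overlay("example_lights")         // a string is looked up as an icon_state of our icon
//       add_overlay(image(icon, "example_glow"))  // an image contributes a frozen appearance
// The atom is queued at most once, and SSoverlays compiles the result on its next fire.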
|
||||
|
||||
/**
|
||||
* Copy the overlays from another atom, either replacing all of ours or appending to our existing overlays.
|
||||
* Note: This copies only the normal overlays, not the "priority" overlays.
|
||||
*
|
||||
* @param other The atom to copy overlays from.
|
||||
 * @param cut_old If TRUE, all of our overlays are *replaced* by the other's. If other is null, that simply cuts all of ours.
|
||||
*/
|
||||
/atom/proc/copy_overlays(atom/other, cut_old) //copies our_overlays from another atom
|
||||
if(!other)
|
||||
if(cut_old)
|
||||
cut_overlays()
|
||||
return
|
||||
|
||||
var/list/cached_other = other.our_overlays
|
||||
if(cached_other)
|
||||
if(cut_old || !LAZYLEN(our_overlays))
|
||||
our_overlays = cached_other.Copy()
|
||||
else
|
||||
our_overlays |= cached_other
|
||||
if(NOT_QUEUED_ALREADY)
|
||||
QUEUE_FOR_COMPILE
|
||||
else if(cut_old)
|
||||
cut_overlays()
|
||||
|
||||
#undef NOT_QUEUED_ALREADY
|
||||
#undef QUEUE_FOR_COMPILE
|
||||
|
||||
//TODO: Better solution for these?
|
||||
/image/proc/add_overlay(x)
|
||||
overlays += x
|
||||
|
||||
/image/proc/cut_overlay(x)
|
||||
overlays -= x
|
||||
|
||||
/image/proc/cut_overlays(x)
|
||||
overlays.Cut()
|
||||
|
||||
/image/proc/copy_overlays(atom/other, cut_old)
|
||||
if(!other)
|
||||
if(cut_old)
|
||||
cut_overlays()
|
||||
return
|
||||
|
||||
var/list/cached_other = other.our_overlays
|
||||
if(cached_other)
|
||||
if(cut_old || !overlays.len)
|
||||
overlays = cached_other.Copy()
|
||||
else
|
||||
overlays |= cached_other
|
||||
else if(cut_old)
|
||||
cut_overlays()
|
||||
83
code/controllers/subsystems/shuttles.dm
Normal file
@@ -0,0 +1,83 @@
|
||||
//
|
||||
// SSshuttles subsystem - Handles initialization and processing of shuttles.
|
||||
//
|
||||
|
||||
// This global variable exists for legacy support so we don't have to rename every shuttle_controller to SSshuttles yet.
|
||||
var/global/datum/controller/subsystem/shuttles/shuttle_controller
|
||||
|
||||
SUBSYSTEM_DEF(shuttles)
|
||||
name = "Shuttles"
|
||||
wait = 2 SECONDS
|
||||
priority = 5
|
||||
init_order = INIT_ORDER_SHUTTLES
|
||||
flags = SS_KEEP_TIMING|SS_NO_TICK_CHECK
|
||||
runlevels = RUNLEVEL_GAME|RUNLEVEL_POSTGAME
|
||||
|
||||
var/list/shuttles = list() // Maps shuttle tags to shuttle datums, so that they can be looked up.
|
||||
var/list/process_shuttles = list() // Simple list of shuttles, for processing
|
||||
var/list/current_run = list() // Shuttles remaining to process this fire() tick
|
||||
var/list/docks_init_callbacks // List of callbacks to run when we finish setting up shuttle docks.
|
||||
var/docks_initialized = FALSE
|
||||
|
||||
/datum/controller/subsystem/shuttles/Initialize(timeofday)
|
||||
global.shuttle_controller = src
|
||||
setup_shuttle_docks()
|
||||
for(var/I in docks_init_callbacks)
|
||||
var/datum/callback/cb = I
|
||||
cb.InvokeAsync()
|
||||
LAZYCLEARLIST(docks_init_callbacks)
|
||||
docks_init_callbacks = null
|
||||
return ..()
|
||||
|
||||
/datum/controller/subsystem/shuttles/fire(resumed = 0)
|
||||
do_process_shuttles(resumed)
|
||||
|
||||
/datum/controller/subsystem/shuttles/stat_entry()
|
||||
var/msg = list()
|
||||
msg += "AS:[shuttles.len]|"
|
||||
msg += "PS:[process_shuttles.len]|"
|
||||
..(jointext(msg, null))
|
||||
|
||||
/datum/controller/subsystem/shuttles/proc/do_process_shuttles(resumed = 0)
|
||||
if (!resumed)
|
||||
src.current_run = process_shuttles.Copy()
|
||||
|
||||
var/list/current_run = src.current_run // Cache for sanic speed
|
||||
while(current_run.len)
|
||||
var/datum/shuttle/S = current_run[current_run.len]
|
||||
current_run.len--
|
||||
if(istype(S) && !QDELETED(S))
|
||||
if(istype(S, /datum/shuttle/ferry)) // Ferry shuttles get special treatment
|
||||
var/datum/shuttle/ferry/F = S
|
||||
if(F.process_state || F.always_process)
|
||||
F.process()
|
||||
else
|
||||
S.process()
|
||||
else
|
||||
process_shuttles -= S
|
||||
if(MC_TICK_CHECK)
|
||||
return
|
||||
|
||||
// This should be called after all the machines and radio frequencies have been properly initialized
|
||||
/datum/controller/subsystem/shuttles/proc/setup_shuttle_docks()
|
||||
// Find all declared shuttle datums and initialize them.
|
||||
for(var/shuttle_type in subtypesof(/datum/shuttle))
|
||||
var/datum/shuttle/shuttle = shuttle_type
|
||||
if(initial(shuttle.category) == shuttle_type)
|
||||
continue
|
||||
shuttle = new shuttle()
|
||||
shuttle.init_docking_controllers()
|
||||
shuttle.dock() //makes all shuttles docked to something at round start go into the docked state
|
||||
CHECK_TICK
|
||||
|
||||
for(var/obj/machinery/embedded_controller/C in machines)
|
||||
if(istype(C.program, /datum/computer/file/embedded_program/docking))
|
||||
C.program.tag = null //clear the tags, 'cause we don't need 'em anymore
|
||||
docks_initialized = TRUE
|
||||
|
||||
// Register a callback that will be invoked once the shuttles have been initialized
|
||||
/datum/controller/subsystem/shuttles/proc/OnDocksInitialized(datum/callback/cb)
|
||||
if(!docks_initialized)
|
||||
LAZYADD(docks_init_callbacks, cb)
|
||||
else
|
||||
cb.InvokeAsync()
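// Usage sketch (hypothetical caller), assuming the usual CALLBACK() convenience macro for
// constructing a /datum/callback is available in this codebase:
//   SSshuttles.OnDocksInitialized(CALLBACK(src, .proc/link_to_dock))   // link_to_dock is made up
// If the docks are already set up the callback fires immediately; otherwise it is queued via
// LAZYADD and invoked once setup_shuttle_docks() completes during Initialize().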
|
||||
116
code/controllers/subsystems/xenoarch.dm
Normal file
@@ -0,0 +1,116 @@
|
||||
#define XENOARCH_SPAWN_CHANCE 0.5
|
||||
#define DIGSITESIZE_LOWER 4
|
||||
#define DIGSITESIZE_UPPER 12
|
||||
#define ARTIFACTSPAWNNUM_LOWER 6
|
||||
#define ARTIFACTSPAWNNUM_UPPER 12
|
||||
|
||||
//
|
||||
// Xenoarch subsystem handles initialization of Xenoarchaeology artifacts and digsites.
|
||||
//
|
||||
SUBSYSTEM_DEF(xenoarch)
|
||||
name = "Xenoarch"
|
||||
init_order = INIT_ORDER_XENOARCH
|
||||
flags = SS_NO_FIRE
|
||||
var/list/artifact_spawning_turfs = list()
|
||||
var/list/digsite_spawning_turfs = list()
|
||||
|
||||
/datum/controller/subsystem/xenoarch/Initialize(timeofday)
|
||||
SetupXenoarch()
|
||||
..()
|
||||
|
||||
/datum/controller/subsystem/xenoarch/Recover()
|
||||
if (istype(SSxenoarch.artifact_spawning_turfs))
|
||||
artifact_spawning_turfs = SSxenoarch.artifact_spawning_turfs
|
||||
if (istype(SSxenoarch.digsite_spawning_turfs))
|
||||
digsite_spawning_turfs = SSxenoarch.digsite_spawning_turfs
|
||||
|
||||
/datum/controller/subsystem/xenoarch/stat_entry(msg)
|
||||
if (!Debug2)
|
||||
return // Only show up in stat panel if debugging is enabled.
|
||||
. = ..()
|
||||
|
||||
/datum/controller/subsystem/xenoarch/proc/SetupXenoarch()
|
||||
for(var/turf/simulated/mineral/M in world)
|
||||
if(!M.density)
|
||||
continue
|
||||
|
||||
if(isnull(M.geologic_data))
|
||||
M.geologic_data = new /datum/geosample(M)
|
||||
|
||||
if(!prob(XENOARCH_SPAWN_CHANCE))
|
||||
continue
|
||||
|
||||
var/farEnough = 1
|
||||
for(var/A in digsite_spawning_turfs)
|
||||
var/turf/T = A
|
||||
if(T in range(5, M))
|
||||
farEnough = 0
|
||||
break
|
||||
if(!farEnough)
|
||||
continue
|
||||
|
||||
digsite_spawning_turfs.Add(M)
|
||||
|
||||
var/digsite = get_random_digsite_type()
|
||||
var/target_digsite_size = rand(DIGSITESIZE_LOWER, DIGSITESIZE_UPPER)
|
||||
|
||||
var/list/processed_turfs = list()
|
||||
var/list/turfs_to_process = list(M)
|
||||
|
||||
var/list/viable_adjacent_turfs = list()
|
||||
if(target_digsite_size > 1)
|
||||
for(var/turf/simulated/mineral/T in orange(2, M))
|
||||
if(!T.density)
|
||||
continue
|
||||
if(T.finds)
|
||||
continue
|
||||
if(T in processed_turfs)
|
||||
continue
|
||||
viable_adjacent_turfs.Add(T)
|
||||
|
||||
target_digsite_size = min(target_digsite_size, viable_adjacent_turfs.len)
|
||||
|
||||
for(var/i = 1 to target_digsite_size)
|
||||
turfs_to_process += pick_n_take(viable_adjacent_turfs)
|
||||
|
||||
while(turfs_to_process.len)
|
||||
var/turf/simulated/mineral/archeo_turf = pop(turfs_to_process)
|
||||
|
||||
processed_turfs.Add(archeo_turf)
|
||||
if(isnull(archeo_turf.finds))
|
||||
archeo_turf.finds = list()
|
||||
if(prob(50))
|
||||
archeo_turf.finds.Add(new /datum/find(digsite, rand(10, 190)))
|
||||
else if(prob(75))
|
||||
archeo_turf.finds.Add(new /datum/find(digsite, rand(10, 90)))
|
||||
archeo_turf.finds.Add(new /datum/find(digsite, rand(110, 190)))
|
||||
else
|
||||
archeo_turf.finds.Add(new /datum/find(digsite, rand(10, 50)))
|
||||
archeo_turf.finds.Add(new /datum/find(digsite, rand(60, 140)))
|
||||
archeo_turf.finds.Add(new /datum/find(digsite, rand(150, 190)))
|
||||
|
||||
//sometimes a find will be close enough to the surface to show
|
||||
var/datum/find/F = archeo_turf.finds[1]
|
||||
if(F.excavation_required <= F.view_range)
|
||||
archeo_turf.archaeo_overlay = "overlay_archaeo[rand(1,3)]"
|
||||
archeo_turf.update_icon()
|
||||
|
||||
//have a chance for an artifact to spawn here, but not in animal or plant digsites
|
||||
if(isnull(M.artifact_find) && digsite != DIGSITE_GARDEN && digsite != DIGSITE_ANIMAL)
|
||||
artifact_spawning_turfs.Add(archeo_turf)
|
||||
|
||||
//create artifact machinery
|
||||
var/num_artifacts_spawn = rand(ARTIFACTSPAWNNUM_LOWER, ARTIFACTSPAWNNUM_UPPER)
|
||||
while(artifact_spawning_turfs.len > num_artifacts_spawn)
|
||||
pick_n_take(artifact_spawning_turfs)
|
||||
|
||||
var/list/artifacts_spawnturf_temp = artifact_spawning_turfs.Copy()
|
||||
while(artifacts_spawnturf_temp.len > 0)
|
||||
var/turf/simulated/mineral/artifact_turf = pop(artifacts_spawnturf_temp)
|
||||
artifact_turf.artifact_find = new()
|
||||
|
||||
#undef XENOARCH_SPAWN_CHANCE
|
||||
#undef DIGSITESIZE_LOWER
|
||||
#undef DIGSITESIZE_UPPER
|
||||
#undef ARTIFACTSPAWNNUM_LOWER
|
||||
#undef ARTIFACTSPAWNNUM_UPPER
|
||||