mirror of https://github.com/PolarisSS13/Polaris.git
synced 2026-01-03 05:52:17 +00:00

Merge branch 'master' of https://github.com/PolarisSS13/Polaris into weapon_dulling
@@ -7,13 +7,12 @@
 #define PROCESS_STATUS_HUNG 6

 // Process time thresholds
 #define PROCESS_DEFAULT_HANG_WARNING_TIME 300 // 30 seconds
 #define PROCESS_DEFAULT_HANG_ALERT_TIME 600 // 60 seconds
 #define PROCESS_DEFAULT_HANG_RESTART_TIME 900 // 90 seconds
 #define PROCESS_DEFAULT_SCHEDULE_INTERVAL 50 // 50 ticks
-#define PROCESS_DEFAULT_SLEEP_INTERVAL 8 // 1/8th of a tick
+#define PROCESS_DEFAULT_SLEEP_INTERVAL 20 // 20% of a tick
+#define PROCESS_DEFAULT_DEFER_USAGE 90 // 90% of a tick

-// SCHECK macros
-// This references src directly to work around a weird bug with try/catch
-#define SCHECK_EVERY(this_many_calls) if(++src.calls_since_last_scheck >= this_many_calls) sleepCheck()
-#define SCHECK sleepCheck()
+// Sleep check macro
+#define SCHECK if(world.tick_usage >= next_sleep_usage) defer()
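A note on the rewritten macro: world.tick_usage reports how much of the current server tick has been consumed, as a percentage (100 = a full tick), so SCHECK now yields based on measured CPU use instead of counting calls. A minimal sketch of the threshold arithmetic, using the new defaults above (illustrative only, not part of this commit):

/proc/demo_next_sleep_usage(current_usage)
	// With sleep_interval = 20 and defer_usage = 90: waking at 65% usage
	// gives min(65 + 20, 90) = 85, so the process yields again at 85% usage.
	return min(current_usage + 20, 90)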
@@ -48,7 +48,10 @@
 	// This controls how often the process will yield (call sleep(0)) while it is running.
 	// Every concurrent process should sleep periodically while running in order to allow other
 	// processes to execute concurrently.
-	var/tmp/sleep_interval
+	var/tmp/sleep_interval = PROCESS_DEFAULT_SLEEP_INTERVAL
+
+	// Defer usage; the tick usage at which this process will defer until the next tick
+	var/tmp/defer_usage = PROCESS_DEFAULT_DEFER_USAGE

 	// hang_warning_time - this is the time (in 1/10 seconds) after which the server will begin to show "maybe hung" in the context window
 	var/tmp/hang_warning_time = PROCESS_DEFAULT_HANG_WARNING_TIME
@@ -59,19 +62,13 @@
 	// hang_restart_time - After this much time (in 1/10 seconds), the server will automatically kill and restart the process.
 	var/tmp/hang_restart_time = PROCESS_DEFAULT_HANG_RESTART_TIME

-	// How many times in the current run has the process deferred work till the next tick?
-	var/tmp/cpu_defer_count = 0
-
-	// How many SCHECKs have been skipped (to limit btime calls)
-	var/tmp/calls_since_last_scheck = 0
+	// Number of deciseconds to delay before starting the process
+	var/start_delay = 0

 	/**
 	 * recordkeeping vars
 	 */

 	// Records the time (1/10s timeoftick) at which the process last finished sleeping
 	var/tmp/last_slept = 0

 	// Records the time (1/10s timeofgame) at which the process last began running
 	var/tmp/run_start = 0
@@ -85,11 +82,29 @@

 	var/tmp/last_object

+	// How many times in the current run has the process deferred work till the next tick?
+	var/tmp/cpu_defer_count = 0
+
 	// Counts the number of times an exception has occurred; gets reset after 10
 	var/tmp/list/exceptions = list()

-	// Number of deciseconds to delay before starting the process
-	var/start_delay = 0
+	// The next tick_usage the process will sleep at
+	var/tmp/next_sleep_usage
+
+	// Last run duration, in seconds
+	var/tmp/last_run_time = 0
+
+	// Last 20 run durations
+	var/tmp/list/last_twenty_run_times = list()
+
+	// Highest run duration, in seconds
+	var/tmp/highest_run_time = 0
+
+	// Tick usage at start of current run (updates upon deferring)
+	var/tmp/tick_usage_start
+
+	// Accumulated tick usage from before each deferral
+	var/tmp/tick_usage_accumulated = 0

 /datum/controller/process/New(var/datum/controller/processScheduler/scheduler)
 	..()
@@ -97,9 +112,6 @@
 	previousStatus = "idle"
 	idle()
 	name = "process"
 	schedule_interval = 50
-	sleep_interval = world.tick_lag / PROCESS_DEFAULT_SLEEP_INTERVAL
-	last_slept = 0
-	run_start = 0
 	ticks = 0
 	last_task = 0
@@ -112,6 +124,10 @@
 	// Initialize defer count
 	cpu_defer_count = 0

+	// Prepare usage tracking (defer() updates these)
+	tick_usage_start = world.tick_usage
+	tick_usage_accumulated = 0
+
 	running()
 	main.processStarted(src)

@@ -119,11 +135,23 @@

 /datum/controller/process/proc/finished()
 	ticks++
+	recordRunTime()
 	idle()
 	main.processFinished(src)

 	onFinish()

+/datum/controller/process/proc/recordRunTime()
+	// Convert from tick usage (100/tick) to seconds of CPU time used
+	var/total_usage = (tick_usage_accumulated + (world.tick_usage - tick_usage_start)) / 1000 * world.tick_lag
+
+	last_run_time = total_usage
+	if(total_usage > highest_run_time)
+		highest_run_time = total_usage
+	if(last_twenty_run_times.len == 20)
+		last_twenty_run_times.Cut(1, 2)
+	last_twenty_run_times += total_usage
+
 /datum/controller/process/proc/doWork()

 /datum/controller/process/proc/setup()
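The /1000 above is a unit conversion rather than a magic number: tick usage is a percentage of one tick (divide by 100), one tick lasts world.tick_lag deciseconds, and 10 deciseconds make a second (divide by 10). A hedged restatement for checking the arithmetic (helper name is illustrative):

/proc/usage_to_seconds(usage, tick_lag)
	// e.g. 250% accumulated usage at tick_lag = 0.5 ds:
	// 250 / 100 * 0.5 / 10 = 0.125 seconds of CPU time
	return usage / 100 * tick_lag / 10	// same as usage / 1000 * tick_lag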
@@ -183,10 +211,9 @@
 	// This should del
 	del(src)

-// Do not call this directly - use SCHECK or SCHECK_EVERY
-/datum/controller/process/proc/sleepCheck(var/tickId = 0)
-	calls_since_last_scheck = 0
-	if (killed)
+// Do not call this directly - use SCHECK
+/datum/controller/process/proc/defer()
+	if(killed)
 		// The kill proc is the only place where killed is set.
 		// The kill proc should have deleted this datum, and all sleeping procs that are
 		// owned by it.
@@ -196,15 +223,14 @@
 		handleHung()
 		CRASH("Process [name] hung and was restarted.")

-	if (main.getCurrentTickElapsedTime() > main.timeAllowance)
-		sleep(world.tick_lag)
-		cpu_defer_count++
-		last_slept = 0
-	else
-		if (TimeOfTick > last_slept + sleep_interval)
-			// If we haven't slept in sleep_interval deciseconds, sleep to allow other work to proceed.
-			sleep(0)
-			last_slept = TimeOfTick
+	tick_usage_accumulated += (world.tick_usage - tick_usage_start)
+	if(world.tick_usage < defer_usage)
+		sleep(0)
+	else
+		sleep(world.tick_lag)
+		cpu_defer_count++
+	tick_usage_start = world.tick_usage
+	next_sleep_usage = min(world.tick_usage + sleep_interval, defer_usage)

 /datum/controller/process/proc/update()
 	// Clear delta
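So the rewritten defer() first banks the usage burned since the last checkpoint, then chooses its sleep: below defer_usage it does a zero-delay sleep(0), which lets other queued procs run inside the same tick, while at or above defer_usage it parks the process for a whole tick and counts that as a deferral. A compact sketch of that decision (assumed inputs, for illustration only):

/proc/demo_defer_choice(tick_usage, defer_usage)
	// Returns the sleep argument defer() would use: 0 = stay in this tick,
	// world.tick_lag = push remaining work to the next tick.
	return (tick_usage < defer_usage) ? 0 : world.tick_lag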
@@ -231,14 +257,14 @@
 	return

 /datum/controller/process/proc/getContext()
-	return "<tr><td>[name]</td><td>[main.averageRunTime(src)]</td><td>[main.last_run_time[src]]</td><td>[main.highest_run_time[src]]</td><td>[ticks]</td></tr>\n"
+	return "<tr><td>[name]</td><td>[getAverageRunTime()]</td><td>[last_run_time]</td><td>[highest_run_time]</td><td>[ticks]</td></tr>\n"

 /datum/controller/process/proc/getContextData()
 	return list(
 		"name" = name,
-		"averageRunTime" = main.averageRunTime(src),
-		"lastRunTime" = main.last_run_time[src],
-		"highestRunTime" = main.highest_run_time[src],
+		"averageRunTime" = getAverageRunTime(),
+		"lastRunTime" = last_run_time,
+		"highestRunTime" = highest_run_time,
 		"ticks" = ticks,
 		"schedule" = schedule_interval,
 		"status" = getStatusText(),
@@ -286,7 +312,6 @@
 	name = target.name
 	schedule_interval = target.schedule_interval
 	sleep_interval = target.sleep_interval
-	last_slept = 0
 	run_start = 0
 	times_killed = target.times_killed
 	ticks = target.ticks
@@ -309,21 +334,31 @@
 	disabled = 0

 /datum/controller/process/proc/getAverageRunTime()
-	return main.averageRunTime(src)
+	var/t = 0
+	var/c = 0
+	for(var/time in last_twenty_run_times)
+		t += time
+		c++
+
+	if(c > 0)
+		return t / c
+	return c

 /datum/controller/process/proc/getLastRunTime()
-	return main.getProcessLastRunTime(src)
+	return last_run_time

 /datum/controller/process/proc/getHighestRunTime()
-	return main.getProcessHighestRunTime(src)
+	return highest_run_time

 /datum/controller/process/proc/getTicks()
 	return ticks

 /datum/controller/process/proc/statProcess()
-	var/averageRunTime = round(getAverageRunTime(), 0.1)/10
-	var/lastRunTime = round(getLastRunTime(), 0.1)/10
-	var/highestRunTime = round(getHighestRunTime(), 0.1)/10
-	stat("[name]", "T#[getTicks()] | AR [averageRunTime] | LR [lastRunTime] | HR [highestRunTime] | D [cpu_defer_count]")
+	var/averageRunTime = round(getAverageRunTime(), 0.001)
+	var/lastRunTime = round(last_run_time, 0.001)
+	var/highestRunTime = round(highest_run_time, 0.001)
+	var/deferTime = round(cpu_defer_count / 10 * world.tick_lag, 0.01)
+	stat("[name]", "T#[getTicks()] | AR [averageRunTime] | LR [lastRunTime] | HR [highestRunTime] | D [deferTime]")

 /datum/controller/process/proc/catchException(var/exception/e, var/thrower)
 	if(istype(e)) // Real runtimes go to the real error handler
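The new D column reports time rather than a raw counter: each deferral parks the process for one tick of world.tick_lag deciseconds, so cpu_defer_count / 10 * world.tick_lag is the total seconds spent deferred. A small check of that conversion (helper name is illustrative):

/proc/defer_count_to_seconds(count)
	// e.g. 30 deferrals at tick_lag = 0.5 ds: 30 * 0.5 / 10 = 1.5 seconds
	return count * world.tick_lag / 10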
@@ -20,18 +20,6 @@ var/global/datum/controller/processScheduler/processScheduler
 	// Process last queued times (world time)
 	var/tmp/datum/controller/process/list/last_queued = new

-	// Process last start times (real time)
-	var/tmp/datum/controller/process/list/last_start = new
-
-	// Process last run durations
-	var/tmp/datum/controller/process/list/last_run_time = new
-
-	// Per process list of the last 20 durations
-	var/tmp/datum/controller/process/list/last_twenty_run_times = new
-
-	// Process highest run time
-	var/tmp/datum/controller/process/list/highest_run_time = new
-
 	// How long to sleep between runs (set to tick_lag in New)
 	var/tmp/scheduler_sleep_interval
@@ -41,22 +29,12 @@ var/global/datum/controller/processScheduler/processScheduler
 	// Setup for these processes will be deferred until all the other processes are set up.
 	var/tmp/list/deferredSetupList = new

-	var/tmp/currentTick = 0
-
 	var/tmp/timeAllowance = 0

 	var/tmp/cpuAverage = 0

-	var/tmp/timeAllowanceMax = 0
-
 /datum/controller/processScheduler/New()
 	..()
-	// When the process scheduler is first new'd, tick_lag may be wrong, so these
-	// get re-initialized when the process scheduler is started.
-	// (These are kept here for any processes that decide to process before round start)
 	scheduler_sleep_interval = world.tick_lag
-	timeAllowance = world.tick_lag * 0.5
-	timeAllowanceMax = world.tick_lag

 /**
  * deferSetupFor
@@ -88,20 +66,12 @@ var/global/datum/controller/processScheduler/processScheduler
 	isRunning = 1
 	// tick_lag will have been set by now, so re-initialize these
 	scheduler_sleep_interval = world.tick_lag
-	timeAllowance = world.tick_lag * 0.5
-	timeAllowanceMax = world.tick_lag
 	updateStartDelays()
 	spawn(0)
 		process()

 /datum/controller/processScheduler/proc/process()
-	updateCurrentTickData()
-
-	for(var/i=world.tick_lag,i<world.tick_lag*50,i+=world.tick_lag)
-		spawn(i) updateCurrentTickData()
 	while(isRunning)
-		// Hopefully spawning this for 50 ticks in the future will make it the first thing in the queue.
-		spawn(world.tick_lag*50) updateCurrentTickData()
 		checkRunningProcesses()
 		queueProcesses()
 		runQueuedProcesses()
@@ -148,20 +118,6 @@ var/global/datum/controller/processScheduler/processScheduler
 		process.idle()
 		idle.Add(process)

-	// init recordkeeping vars
-	last_start.Add(process)
-	last_start[process] = 0
-	last_run_time.Add(process)
-	last_run_time[process] = 0
-	last_twenty_run_times.Add(process)
-	last_twenty_run_times[process] = list()
-	highest_run_time.Add(process)
-	highest_run_time[process] = 0
-
-	// init starts and stops record starts
-	recordStart(process, 0)
-	recordEnd(process, 0)
-
 	// Set up process
 	process.setup()

@@ -178,24 +134,9 @@ var/global/datum/controller/processScheduler/processScheduler
 	queued.Remove(oldProcess)
 	idle.Add(newProcess)

-	last_start.Remove(oldProcess)
-	last_start.Add(newProcess)
-	last_start[newProcess] = 0
-
-	last_run_time.Add(newProcess)
-	last_run_time[newProcess] = last_run_time[oldProcess]
-	last_run_time.Remove(oldProcess)
-
-	last_twenty_run_times.Add(newProcess)
-	last_twenty_run_times[newProcess] = last_twenty_run_times[oldProcess]
-	last_twenty_run_times.Remove(oldProcess)
-
-	highest_run_time.Add(newProcess)
-	highest_run_time[newProcess] = highest_run_time[oldProcess]
-	highest_run_time.Remove(oldProcess)
-
-	recordStart(newProcess, 0)
-	recordEnd(newProcess, 0)
+	newProcess.last_run_time = oldProcess.last_run_time
+	newProcess.last_twenty_run_times = oldProcess.last_twenty_run_times
+	newProcess.highest_run_time = oldProcess.highest_run_time

 	nameToProcessMap[newProcess.name] = newProcess

@@ -210,11 +151,10 @@ var/global/datum/controller/processScheduler/processScheduler

 /datum/controller/processScheduler/proc/processStarted(var/datum/controller/process/process)
 	setRunningProcessState(process)
-	recordStart(process)
+	last_queued[process] = world.time

 /datum/controller/processScheduler/proc/processFinished(var/datum/controller/process/process)
 	setIdleProcessState(process)
-	recordEnd(process)

 /datum/controller/processScheduler/proc/setIdleProcessState(var/datum/controller/process/process)
 	if (process in running)
@@ -243,63 +183,6 @@ var/global/datum/controller/processScheduler/processScheduler
 	if (!(process in running))
 		running += process

-/datum/controller/processScheduler/proc/recordStart(var/datum/controller/process/process, var/time = null)
-	if (isnull(time))
-		time = TimeOfGame
-		last_queued[process] = world.time
-		last_start[process] = time
-	else
-		last_queued[process] = (time == 0 ? 0 : world.time)
-		last_start[process] = time
-
-/datum/controller/processScheduler/proc/recordEnd(var/datum/controller/process/process, var/time = null)
-	if (isnull(time))
-		time = TimeOfGame
-
-	var/lastRunTime = time - last_start[process]
-
-	if(lastRunTime < 0)
-		lastRunTime = 0
-
-	recordRunTime(process, lastRunTime)
-
-/**
- * recordRunTime
- * Records a run time for a process
- */
-/datum/controller/processScheduler/proc/recordRunTime(var/datum/controller/process/process, time)
-	last_run_time[process] = time
-	if(time > highest_run_time[process])
-		highest_run_time[process] = time
-
-	var/list/lastTwenty = last_twenty_run_times[process]
-	if (lastTwenty.len == 20)
-		lastTwenty.Cut(1, 2)
-	lastTwenty.len++
-	lastTwenty[lastTwenty.len] = time
-
-/**
- * averageRunTime
- * returns the average run time (over the last 20) of the process
- */
-/datum/controller/processScheduler/proc/averageRunTime(var/datum/controller/process/process)
-	var/lastTwenty = last_twenty_run_times[process]
-
-	var/t = 0
-	var/c = 0
-	for(var/time in lastTwenty)
-		t += time
-		c++
-
-	if(c > 0)
-		return t / c
-	return c
-
-/datum/controller/processScheduler/proc/getProcessLastRunTime(var/datum/controller/process/process)
-	return last_run_time[process]
-
-/datum/controller/processScheduler/proc/getProcessHighestRunTime(var/datum/controller/process/process)
-	return highest_run_time[process]
-
 /datum/controller/processScheduler/proc/getStatusData()
 	var/list/data = new
@@ -338,34 +221,12 @@ var/global/datum/controller/processScheduler/processScheduler
 	var/datum/controller/process/process = nameToProcessMap[processName]
 	process.disable()

-/datum/controller/processScheduler/proc/getCurrentTickElapsedTime()
-	if (world.time > currentTick)
-		updateCurrentTickData()
-		return 0
-	else
-		return TimeOfTick
-
-/datum/controller/processScheduler/proc/updateCurrentTickData()
-	if (world.time > currentTick)
-		// New tick!
-		currentTick = world.time
-		updateTimeAllowance()
-		cpuAverage = (world.cpu + cpuAverage + cpuAverage) / 3
-
-/datum/controller/processScheduler/proc/updateTimeAllowance()
-	// Time allowance goes down linearly with world.cpu.
-	var/tmp/error = cpuAverage - 100
-	var/tmp/timeAllowanceDelta = SIMPLE_SIGN(error) * -0.5 * world.tick_lag * max(0, 0.001 * abs(error))
-
-	//timeAllowance = world.tick_lag * min(1, 0.5 * ((200/max(1,cpuAverage)) - 1))
-	timeAllowance = min(timeAllowanceMax, max(0, timeAllowance + timeAllowanceDelta))
-
 /datum/controller/processScheduler/proc/statProcesses()
 	if(!isRunning)
 		stat("Processes", "Scheduler not running")
 		return
 	stat("Processes", "[processes.len] (R [running.len] / Q [queued.len] / I [idle.len])")
 	stat(null, "[round(cpuAverage, 0.1)] CPU, [round(timeAllowance, 0.1)/10] TA")
 	for(var/datum/controller/process/p in processes)
 		p.statProcess()

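For reference, the removed cpuAverage update was an exponential moving average, new = (1/3) * sample + (2/3) * old, written additively; the per-process tick_usage bookkeeping in process.dm replaces this whole feedback loop. A sketch of the retired smoothing (for comparison only, not part of the new code):

/proc/demo_cpu_average(sample, old_avg)
	// Equivalent to (sample + 2 * old_avg) / 3
	return (sample + old_avg + old_avg) / 3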
@@ -25,6 +25,21 @@
 			catchException(e, last_object)
 			SCHECK

+// We've been restarted, probably due to having a massive list of tasks.
+// Lets copy over the task list as safely as we can and try to chug thru it...
+// Note: We won't be informed about tasks being destroyed, but this is the best we can do.
+/datum/controller/process/scheduler/copyStateFrom(var/datum/controller/process/scheduler/target)
+	scheduled_tasks = list()
+	for(var/st in target.scheduled_tasks)
+		if(!deleted(st) && istype(st, /datum/scheduled_task))
+			schedule(st)
+	scheduler = src
+
+// We are being killed. Least we can do is deregister all those events we registered
+/datum/controller/process/scheduler/onKill()
+	for(var/st in scheduled_tasks)
+		destroyed_event.unregister(st, src)
+
 /datum/controller/process/scheduler/statProcess()
 	..()
 	stat(null, "[scheduled_tasks.len] task\s")
@@ -130,4 +145,4 @@

 /proc/repeat_scheduled_task(var/trigger_delay, var/datum/scheduled_task/st)
 	st.trigger_time = world.time + trigger_delay
-	scheduler.schedule(st)
+	scheduler.schedule(st)
@@ -94,6 +94,7 @@ var/list/gamemode_cache = list()
 	var/allow_extra_antags = 0
 	var/guests_allowed = 1
 	var/debugparanoid = 0
+	var/panic_bunker = 0

 	var/serverurl
 	var/server
@@ -61,7 +61,7 @@
 			T:UpdateDamageIcon()

 	feedback_add_details("changeling_powers","A[stage]")
-	if(!do_mob(src, T, 150))
+	if(!do_mob(src, T, 150) || G.state != GRAB_KILL)
 		src << "<span class='warning'>Our absorption of [T] has been interrupted!</span>"
 		changeling.isabsorbing = 0
 		return
@@ -95,10 +95,8 @@

 	Trigger(var/atom/movable/A)
 		if(teleport_x && teleport_y && teleport_z)
-
-			A.x = teleport_x
-			A.y = teleport_y
-			A.z = teleport_z
+			var/turf/T = locate(teleport_x, teleport_y, teleport_z)
+			A.forceMove(T)

 /* Random teleporter, teleports atoms to locations ranging from teleport_x - teleport_x_offset, etc */

@@ -110,8 +108,5 @@
 	Trigger(var/atom/movable/A)
 		if(teleport_x && teleport_y && teleport_z)
 			if(teleport_x_offset && teleport_y_offset && teleport_z_offset)
-
-				A.x = rand(teleport_x, teleport_x_offset)
-				A.y = rand(teleport_y, teleport_y_offset)
-				A.z = rand(teleport_z, teleport_z_offset)
-
+				var/turf/T = locate(rand(teleport_x, teleport_x_offset), rand(teleport_y, teleport_y_offset), rand(teleport_z, teleport_z_offset))
+				A.forceMove(T)
@@ -42,7 +42,8 @@

 /obj/item/weapon/handcuffs/proc/can_place(var/mob/target, var/mob/user)
+	if(istype(user, /mob/living/silicon/robot))
+		return 1
 	if(user.Adjacent(target))
 		return 1
 	else
 		for(var/obj/item/weapon/grab/G in target.grabbed_by)
 			if(G.loc == user && G.state >= GRAB_AGGRESSIVE)
@@ -136,7 +136,7 @@
 /obj/effect/energy_net/user_unbuckle_mob(mob/user)
 	user.setClickCooldown(DEFAULT_ATTACK_COOLDOWN)
 	visible_message("<span class='danger'>[user] begins to tear at \the [src]!</span>")
-	if(do_after(usr, escape_time, incapacitation_flags = INCAPACITATION_DEFAULT & ~(INCAPACITATION_RESTRAINED | INCAPACITATION_BUCKLED_FULLY)))
+	if(do_after(usr, escape_time, src, incapacitation_flags = INCAPACITATION_DEFAULT & ~(INCAPACITATION_RESTRAINED | INCAPACITATION_BUCKLED_FULLY)))
 		if(!buckled_mob)
 			return
 		visible_message("<span class='danger'>[user] manages to tear \the [src] apart!</span>")
@@ -162,7 +162,8 @@ var/list/admin_verbs_server = list(
 	/client/proc/check_customitem_activity,
 	/client/proc/nanomapgen_DumpImage,
 	/client/proc/modify_server_news,
-	/client/proc/recipe_dump
+	/client/proc/recipe_dump,
+	/client/proc/panicbunker
 	)
 var/list/admin_verbs_debug = list(
 	/client/proc/getruntimelog, //allows us to access runtime logs to somebody,
code/modules/admin/verbs/panicbunker.dm (new file, 15 lines)
@@ -0,0 +1,15 @@
+/client/proc/panicbunker()
+	set category = "Server"
+	set name = "Toggle Panic Bunker"
+	if (!config.sql_enabled)
+		to_chat(usr, "<span class='adminnotice'>The Database is not enabled!</span>")
+		return
+
+	config.panic_bunker = (!config.panic_bunker)
+
+	log_admin("[key_name(usr)] has toggled the Panic Bunker, it is now [(config.panic_bunker?"on":"off")]")
+	message_admins("[key_name_admin(usr)] has toggled the Panic Bunker, it is now [(config.panic_bunker?"enabled":"disabled")].")
+	if (config.panic_bunker && (!dbcon || !dbcon.IsConnected()))
+		message_admins("The Database is not connected! Panic bunker will not work until the connection is reestablished.")
+	feedback_add_details("admin_verb","PANIC") //If you are copy-pasting this, ensure the 2nd parameter is unique to the new proc!
@@ -461,14 +461,17 @@ Traitors and the like can also be revived with the previous role mostly intact.

 	//Write the appearance and whatnot out to the character
 	picked_client.prefs.copy_to(new_character)
 	if(new_character.dna)
 		new_character.dna.ResetUIFrom(new_character)
 		new_character.sync_organ_dna()
 	if(inhabit)
 		new_character.key = player_key

-	//Were they any particular special role? If so, copy.
-	var/datum/antagonist/antag_data = get_antag_data(new_character.mind.special_role)
-	if(antag_data)
-		antag_data.add_antagonist(new_character.mind)
-		antag_data.place_mob(new_character)
+	//Were they any particular special role? If so, copy.
+	if(new_character.mind)
+		var/datum/antagonist/antag_data = get_antag_data(new_character.mind.special_role)
+		if(antag_data)
+			antag_data.add_antagonist(new_character.mind)
+			antag_data.place_mob(new_character)

 	//If desired, apply equipment.
 	if(equipment)
@@ -251,6 +251,14 @@
 	var/sql_computerid = sql_sanitize_text(src.computer_id)
 	var/sql_admin_rank = sql_sanitize_text(admin_rank)

+	//Panic bunker code
+	if (isnum(player_age) && player_age == 0) //first connection
+		if (config.panic_bunker && !holder && !deadmin_holder)
+			log_access("Failed Login: [key] - New account attempting to connect during panic bunker")
+			message_admins("<span class='adminnotice'>Failed Login: [key] - New account attempting to connect during panic bunker</span>")
+			to_chat(src, "Sorry but the server is currently not accepting connections from never before seen players.")
+			qdel(src)
+			return 0
+
 	if(sql_id)
 		//Player already identified previously, we need to just update the 'lastseen', 'ip' and 'computer_id' variables
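The gate keys off player_age: it is 0 only for a ckey with no prior connection record (the "first connection" case noted above), so returning players and staff (holder / deadmin_holder) pass through even with the bunker up. A condensed sketch of the check, with argument names assumed for illustration:

/proc/demo_bunker_blocks(player_age, bunker_on, is_staff)
	// TRUE when a never-before-seen account should be turned away
	return isnum(player_age) && player_age == 0 && bunker_on && !is_staff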
@@ -266,7 +274,6 @@
 	var/DBQuery/query_accesslog = dbcon.NewQuery("INSERT INTO `erro_connection_log`(`id`,`datetime`,`serverip`,`ckey`,`ip`,`computerid`) VALUES(null,Now(),'[serverip]','[sql_ckey]','[sql_ip]','[sql_computerid]');")
 	query_accesslog.Execute()

-
 #undef TOPIC_SPAM_DELAY
 #undef UPLOAD_LIMIT
 #undef MIN_CLIENT_VERSION
@@ -149,17 +149,19 @@
 /obj/item/rig_module/chem_dispenser/ninja
 	interface_desc = "Dispenses loaded chemicals directly into the wearer's bloodstream. This variant is made to be extremely light and flexible."

-	//just over a syringe worth of each. Want more? Go refill. Gives the ninja another reason to have to show their face.
+	//Want more? Go refill. Gives the ninja another reason to have to show their face.
 	charges = list(
-		list("tricordrazine", "tricordrazine", 0, 20),
-		list("tramadol", "tramadol", 0, 20),
-		list("dexalin plus", "dexalinp", 0, 20),
-		list("antibiotics", "spaceacillin", 0, 20),
-		list("antitoxins", "anti_toxin", 0, 20),
-		list("nutrients", "glucose", 0, 80),
-		list("clotting agent", "myelamine", 0, 80),
-		list("hyronalin", "hyronalin", 0, 20),
-		list("radium", "radium", 0, 20)
+		list("tricordrazine", "tricordrazine", 0, 30),
+		list("tramadol", "tramadol", 0, 30),
+		list("dexalin plus", "dexalinp", 0, 30),
+		list("antibiotics", "spaceacillin", 0, 30),
+		list("antitoxins", "anti_toxin", 0, 60),
+		list("nutrients", "glucose", 0, 80),
+		list("bicaridine", "bicaridine", 0, 30),
+		list("clotting agent", "myelamine", 0, 30),
+		list("peridaxon", "peridaxon", 0, 30),
+		list("hyronalin", "hyronalin", 0, 30),
+		list("radium", "radium", 0, 30)
 		)

 /obj/item/rig_module/chem_dispenser/accepts_item(var/obj/item/input_item, var/mob/living/user)
@@ -93,7 +93,7 @@
 		/obj/item/rig_module/vision,
 		/obj/item/rig_module/voice,
 		/obj/item/rig_module/fabricator/energy_net,
-		/obj/item/rig_module/chem_dispenser,
+		/obj/item/rig_module/chem_dispenser/ninja,
 		/obj/item/rig_module/grenade_launcher,
 		/obj/item/rig_module/ai_container,
 		/obj/item/rig_module/power_sink,
@@ -921,9 +921,9 @@

 	//Brain damage from Oxyloss
 	if(should_have_organ("brain"))
-		var/brainOxPercent = 0.02 //Default 2% of your current oxyloss is applied as brain damage, 50 oxyloss is 1 brain damage
+		var/brainOxPercent = 0.015 //Default 1.5% of your current oxyloss is applied as brain damage, 50 oxyloss is 0.75 brain damage
 		if(CE_STABLE in chem_effects)
-			brainOxPercent = 0.01 //Halved in effect
+			brainOxPercent = 0.008 //Halved in effect
 		if(oxyloss >= 20 && prob(5))
 			adjustBrainLoss(brainOxPercent * oxyloss)
@@ -75,9 +75,14 @@
 	if(affecting)
 		if(affecting.buckled)
 			return null
+		if(!affecting.Adjacent(affecting.grabbed_by))
+			qdel(src)
+			return null
 		if(state >= GRAB_AGGRESSIVE)
 			animate(affecting, pixel_x = 0, pixel_y = 0, 4, 1)
-		return affecting
+			return affecting
+		. = affecting
+		qdel(src)
+		return
 	return null
@@ -294,6 +299,9 @@
 		return
 	if(world.time < (last_action + 20))
 		return
+	if(!M.Adjacent(user))
+		qdel(src)
+		return

 	last_action = world.time
 	reset_kill_state() //using special grab moves will interrupt choking them
@@ -339,6 +347,9 @@
 	qdel(src)

 /obj/item/weapon/grab/proc/reset_kill_state()
+	if(!assailant)
+		qdel(src)
+		return
 	if(state == GRAB_KILL)
 		assailant.visible_message("<span class='warning'>[assailant] lost \his tight grip on [affecting]'s neck!</span>")
 		hud.icon_state = "kill"
@@ -365,12 +376,20 @@
 				break_strength++
 			break_chance_table = list(3, 18, 45, 100)

+		if(GRAB_KILL)
+			grab_name = "stranglehold"
+			break_chance_table = list(5, 20, 40, 80, 100)
+
 	//It's easier to break out of a grab by a smaller mob
 	break_strength += max(size_difference(affecting, assailant), 0)

 	var/break_chance = break_chance_table[Clamp(break_strength, 1, break_chance_table.len)]
 	if(prob(break_chance))
-		if(grab_name)
+		if(state == GRAB_KILL)
+			reset_kill_state()
+			return
+		else if(grab_name)
 			affecting.visible_message("<span class='warning'>[affecting] has broken free of [assailant]'s [grab_name]!</span>")
 		qdel(src)

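Break strength indexes straight into the table, clamped to its bounds, so with the new stranglehold table list(5, 20, 40, 80, 100) a break_strength of 3 gives a 40% escape chance and anything at 5 or above is guaranteed. A sketch of the lookup, using the values from the hunk above (helper name is illustrative):

/proc/demo_break_chance(break_strength)
	var/list/table = list(5, 20, 40, 80, 100)
	// break_strength 1 -> 5%, 3 -> 40%, 5+ -> 100%
	return table[Clamp(break_strength, 1, table.len)]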
@@ -94,7 +94,7 @@
 		M.adjustToxLoss(-4 * removed)

 /datum/reagent/carthatoline
-	name = "carthatoline"
+	name = "Carthatoline"
 	id = "carthatoline"
 	description = "Carthatoline is a strong evacuant used to treat severe poisoning."
 	reagent_state = LIQUID