* SSMetrics

* We were a bit too silly

* Forgot to commit this

* Logs CPU

* Removes global data from all ss

* And puts it on the metrics ss

* Update metrics.dm

* Logs profiler data

* Adds profile configs

* Update code/controllers/subsystem/metrics.dm

Co-authored-by: adamsong <adamsong@users.noreply.github.com>

* Log request errors

* Final fixes

* Rebuilds for 1.2.0-yogs1

* Apparently you can't split macro calls on multiple lines

* Org is called yogstation13 not yogstation

---------

Co-authored-by: alexkar598 <>
Co-authored-by: adamsong <adamsong@users.noreply.github.com>
This commit is contained in:
alexkar598
2023-07-28 01:16:53 -04:00
committed by GitHub
parent 1532e52f17
commit 0ff2f1b025
36 changed files with 393 additions and 82 deletions

View File

@@ -107,6 +107,23 @@
#define rustg_cnoise_generate(percentage, smoothing_iterations, birth_limit, death_limit, width, height) \
RUSTG_CALL(RUST_G, "cnoise_generate")(percentage, smoothing_iterations, birth_limit, death_limit, width, height)
/**
 * This proc generates a grid of perlin-like noise
 *
 * Returns a single string that goes row by row, with values of 1 representing a turned on cell, and a value of 0 representing a turned off cell.
 *
 * Arguments:
 * * seed: seed for the function
 * * accuracy: how close this is to the original perlin noise, as accuracy approaches infinity, the noise becomes more and more perlin-like
 * * stamp_size: Size of a singular stamp used by the algorithm, think of this as the same stuff as frequency in perlin noise
 * * world_size: size of the returned grid.
 * * lower_range: lower bound of values selected for. (inclusive)
 * * upper_range: upper bound of values selected for. (exclusive)
 */
#define rustg_dbp_generate(seed, accuracy, stamp_size, world_size, lower_range, upper_range) \
RUSTG_CALL(RUST_G, "dbp_generate")(seed, accuracy, stamp_size, world_size, lower_range, upper_range)
#define rustg_dmi_strip_metadata(fname) RUSTG_CALL(RUST_G, "dmi_strip_metadata")(fname)
#define rustg_dmi_create_png(path, width, height, data) RUSTG_CALL(RUST_G, "dmi_create_png")(path, width, height, data)
#define rustg_dmi_resize_png(path, width, height, resizetype) RUSTG_CALL(RUST_G, "dmi_resize_png")(path, width, height, resizetype)
@@ -126,6 +143,22 @@
#define rustg_git_revparse(rev) RUSTG_CALL(RUST_G, "rg_git_revparse")(rev)
#define rustg_git_commit_date(rev) RUSTG_CALL(RUST_G, "rg_git_commit_date")(rev)
#define rustg_hash_string(algorithm, text) RUSTG_CALL(RUST_G, "hash_string")(algorithm, text)
#define rustg_hash_file(algorithm, fname) RUSTG_CALL(RUST_G, "hash_file")(algorithm, fname)
#define rustg_hash_generate_totp(seed) RUSTG_CALL(RUST_G, "generate_totp")(seed)
#define rustg_hash_generate_totp_tolerance(seed, tolerance) RUSTG_CALL(RUST_G, "generate_totp_tolerance")(seed, tolerance)
#define RUSTG_HASH_MD5 "md5"
#define RUSTG_HASH_SHA1 "sha1"
#define RUSTG_HASH_SHA256 "sha256"
#define RUSTG_HASH_SHA512 "sha512"
#define RUSTG_HASH_XXH64 "xxh64"
#define RUSTG_HASH_BASE64 "base64"
#ifdef RUSTG_OVERRIDE_BUILTINS
#define md5(thing) (isfile(thing) ? rustg_hash_file(RUSTG_HASH_MD5, "[thing]") : rustg_hash_string(RUSTG_HASH_MD5, thing))
#endif
#define RUSTG_HTTP_METHOD_GET "get"
#define RUSTG_HTTP_METHOD_PUT "put"
#define RUSTG_HTTP_METHOD_DELETE "delete"
@@ -136,6 +169,9 @@
#define rustg_http_request_async(method, url, body, headers, options) RUSTG_CALL(RUST_G, "http_request_async")(method, url, body, headers, options)
#define rustg_http_check_request(req_id) RUSTG_CALL(RUST_G, "http_check_request")(req_id)
#define rustg_influxdb2_publish(data, endpoint, token) RUSTG_CALL(RUST_G, "influxdb2_publish")(data, endpoint, token)
#define rustg_influxdb2_publish_profile(data, endpoint, token, round_id) RUSTG_CALL(RUST_G, "influxdb2_publish_profile")(data, endpoint, token, round_id)
#define RUSTG_JOB_NO_RESULTS_YET "NO RESULTS YET"
#define RUSTG_JOB_NO_SUCH_JOB "NO SUCH JOB"
#define RUSTG_JOB_ERROR "JOB PANICKED"
@@ -147,6 +183,47 @@
#define rustg_noise_get_at_coordinates(seed, x, y) RUSTG_CALL(RUST_G, "noise_get_at_coordinates")(seed, x, y)
/**
* Register a list of nodes into a rust library. This list of nodes must have been serialized in a json.
* Node {// Index of this node in the list of nodes
* unique_id: usize,
* // Position of the node in byond
* x: usize,
* y: usize,
* z: usize,
* // Indexes of nodes connected to this one
* connected_nodes_id: Vec<usize>}
* It is important that the node with the unique_id 0 is the first in the json, unique_id 1 right after that, etc.
* It is also important that all unique ids follow. {0, 1, 2, 4} is not a correct list and the registering will fail
* Nodes should not link across z levels.
* A node cannot link twice to the same node and shouldn't link itself either
*/
#define rustg_register_nodes_astar(json) RUSTG_CALL(RUST_G, "register_nodes_astar")(json)
/**
 * Add a new node to the static list of nodes. Same rule as registering_nodes applies.
 * This node unique_id must be equal to the current length of the static list of nodes
 */
#define rustg_add_node_astar(json) RUSTG_CALL(RUST_G, "add_node_astar")(json)
/**
 * Remove every link to the node with unique_id. Replace that node by null
 * NOTE(review): macro name is misspelled ("astart"); kept as-is because renaming
 * it would break every existing caller — the underlying rust-g call is correct.
 */
#define rustg_remove_node_astart(unique_id) RUSTG_CALL(RUST_G, "remove_node_astar")(unique_id)
/**
 * Compute the shortest path between start_node and goal_node using A*. Heuristic used is simple geometric distance
 */
#define rustg_generate_path_astar(start_node_id, goal_node_id) RUSTG_CALL(RUST_G, "generate_path_astar")(start_node_id, goal_node_id)
#define RUSTG_REDIS_ERROR_CHANNEL "RUSTG_REDIS_ERROR_CHANNEL"
#define rustg_redis_connect(addr) RUSTG_CALL(RUST_G, "redis_connect")(addr)
/proc/rustg_redis_disconnect() return RUSTG_CALL(RUST_G, "redis_disconnect")()
#define rustg_redis_subscribe(channel) RUSTG_CALL(RUST_G, "redis_subscribe")(channel)
/proc/rustg_redis_get_messages() return RUSTG_CALL(RUST_G, "redis_get_messages")()
#define rustg_redis_publish(channel, message) RUSTG_CALL(RUST_G, "redis_publish")(channel, message)
#define rustg_sql_connect_pool(options) RUSTG_CALL(RUST_G, "sql_connect_pool")(options)
#define rustg_sql_query_async(handle, query, params) RUSTG_CALL(RUST_G, "sql_query_async")(handle, query, params)
#define rustg_sql_query_blocking(handle, query, params) RUSTG_CALL(RUST_G, "sql_query_blocking")(handle, query, params)
@@ -161,6 +238,9 @@
/proc/rustg_unix_timestamp()
return text2num(RUSTG_CALL(RUST_G, "unix_timestamp")())
/proc/rustg_unix_timestamp_int()
return RUSTG_CALL(RUST_G, "unix_timestamp_int")()
#define rustg_raw_read_toml_file(path) json_decode(RUSTG_CALL(RUST_G, "toml_file_to_json")(path) || "null")
/proc/rustg_read_toml_file(path)
@@ -179,6 +259,9 @@
else
CRASH(output["content"])
#define rustg_unzip_download_async(url, unzip_directory) RUSTG_CALL(RUST_G, "unzip_download_async")(url, unzip_directory)
#define rustg_unzip_check(job_id) RUSTG_CALL(RUST_G, "unzip_check")("[job_id]")
#define rustg_url_encode(text) RUSTG_CALL(RUST_G, "url_encode")("[text]")
#define rustg_url_decode(text) RUSTG_CALL(RUST_G, "url_decode")(text)
@@ -187,18 +270,20 @@
#define url_decode(text) rustg_url_decode(text)
#endif
#define rustg_hash_string(algorithm, text) RUSTG_CALL(RUST_G, "hash_string")(algorithm, text)
#define rustg_hash_file(algorithm, fname) RUSTG_CALL(RUST_G, "hash_file")(algorithm, fname)
#define rustg_hash_generate_totp(seed) RUSTG_CALL(RUST_G, "generate_totp")(seed)
#define rustg_hash_generate_totp_tolerance(seed, tolerance) RUSTG_CALL(RUST_G, "generate_totp_tolerance")(seed, tolerance)
/**
 * This proc generates a noise grid using the worley noise algorithm
 *
 * Returns a single string that goes row by row, with values of 1 representing an alive cell, and a value of 0 representing a dead cell.
 *
 * Arguments:
 * * region_size: The size of regions
 * * threshold: the value that determines whether a cell is dead or alive
 * * node_per_region_chance: chance of a node existing in a region
 * * size: size of the returned grid
 * * node_min: minimum amount of nodes in a region (after the node_per_region_chance is applied)
 * * node_max: maximum amount of nodes in a region
 */
#define rustg_worley_generate(region_size, threshold, node_per_region_chance, size, node_min, node_max) \
RUSTG_CALL(RUST_G, "worley_generate")(region_size, threshold, node_per_region_chance, size, node_min, node_max)
#define RUSTG_HASH_MD5 "md5"
#define RUSTG_HASH_SHA1 "sha1"
#define RUSTG_HASH_SHA256 "sha256"
#define RUSTG_HASH_SHA512 "sha512"
#define RUSTG_HASH_XXH64 "xxh64"
#define RUSTG_HASH_BASE64 "base64"
#ifdef RUSTG_OVERRIDE_BUILTINS
#define md5(thing) (isfile(thing) ? rustg_hash_file(RUSTG_HASH_MD5, "[thing]") : rustg_hash_string(RUSTG_HASH_MD5, thing))
#endif

View File

@@ -517,7 +517,14 @@
/datum/config_entry/string/vpn_lookup_key // Key for VPN lookup API
protection = CONFIG_ENTRY_LOCKED | CONFIG_ENTRY_HIDDEN
/datum/config_entry/flag/auto_profile
/datum/config_entry/string/metrics_api
protection = CONFIG_ENTRY_LOCKED | CONFIG_ENTRY_HIDDEN
/datum/config_entry/string/metrics_token
protection = CONFIG_ENTRY_LOCKED | CONFIG_ENTRY_HIDDEN
/datum/config_entry/string/metrics_api_profile
protection = CONFIG_ENTRY_LOCKED | CONFIG_ENTRY_HIDDEN
/datum/config_entry/string/metrics_token_profile
protection = CONFIG_ENTRY_LOCKED | CONFIG_ENTRY_HIDDEN
/datum/config_entry/flag/disable_gc_failure_hard_deletes

View File

@@ -303,3 +303,17 @@
if (NAMEOF(src, queued_priority)) //editing this breaks things.
return FALSE
. = ..()
// Builds this subsystem's metrics payload for SSmetrics.
// "@measurement" and "@tags" select the time series; "$"-prefixed keys are the
// generic per-subsystem field values taken from the MC bookkeeping vars.
// Overrides must call the parent (SHOULD_CALL_PARENT) and append their own fields.
/datum/controller/subsystem/proc/get_metrics()
SHOULD_CALL_PARENT(TRUE)
. = list()
.["@measurement"] = "subsystem"
.["@tags"] = list("subsystem" = type)
.["$cost"] = cost
.["$tick_usage"] = tick_usage
.["$tick_overrun"] = tick_overrun
.["$last_fire"] = last_fire
.["$next_fire"] = next_fire
.["$tick_allocation_avg"] = tick_allocation_avg
.["$times_fired"] = times_fired
.["$postponed_fires"] = postponed_fires

View File

@@ -11,6 +11,9 @@ SUBSYSTEM_DEF(acid)
msg = "P:[length(processing)]"
return ..()
// Metrics: number of acid-affected atoms currently queued for processing.
/datum/controller/subsystem/acid/get_metrics()
. = ..()
.["processing"] = length(processing)
/datum/controller/subsystem/acid/fire(resumed = 0)
if (!resumed)

View File

@@ -17,6 +17,10 @@ SUBSYSTEM_DEF(adjacent_air)
#endif
return ..()
// Metrics: number of turfs awaiting an adjacent-atmos recalculation.
/datum/controller/subsystem/adjacent_air/get_metrics()
. = ..()
.["queued"] = length(queue)
/datum/controller/subsystem/adjacent_air/Initialize()
while(length(queue))
fire(mc_check = FALSE)

View File

@@ -65,6 +65,23 @@ SUBSYSTEM_DEF(air)
msg += "AT/MS:[round((cost ? active_turfs.len/cost : 0),0.1)]"
return ..()
// Metrics: per-phase cost averages and current workload sizes for SSair.
/datum/controller/subsystem/air/get_metrics()
	. = ..()
	.["cost_equalize"] = cost_equalize
	.["cost_turfs"] = cost_turfs
	.["cost_groups"] = cost_groups
	.["cost_highpressure"] = cost_highpressure
	.["cost_hotspots"] = cost_hotspots
	.["cost_superconductivity"] = cost_superconductivity
	.["cost_pipenets"] = cost_pipenets
	.["cost_rebuilds"] = cost_rebuilds
	.["cost_atmos_machinery"] = cost_atmos_machinery
	.["active_turfs"] = active_turfs.len
	// Corrected misspelled field keys ("excited_gruops", "hotspts"): these keys
	// are introduced by this change, so no dashboard can depend on the typos yet.
	.["excited_groups"] = get_amt_excited_groups()
	.["hotspots"] = hotspots.len
	.["networks"] = networks.len
	.["high_pressure_delta"] = high_pressure_delta.len
	.["active_super_conductivity"] = active_super_conductivity.len
/datum/controller/subsystem/air/Initialize(timeofday)
extools_update_ssair()

View File

@@ -22,6 +22,17 @@ SUBSYSTEM_DEF(area_contents)
msg = "A:[length(currentrun)] MR:[length(marked_for_clearing)] TC:[total_to_clear] CF:[total_clearing_from]"
return ..()
// Metrics: area-contents maintenance backlog, including a walk over the areas
// marked for clearing to total up how many turfs still need to be released.
/datum/controller/subsystem/area_contents/get_metrics()
. = ..()
var/total_clearing_from = 0
var/total_to_clear = 0
for(var/area/to_clear as anything in marked_for_clearing)
total_to_clear += length(to_clear.turfs_to_uncontain)
total_clearing_from += length(to_clear.contained_turfs)
.["areas"] = length(currentrun)
.["marked_for_clearing"] = length(marked_for_clearing)
.["total_to_clear"] = total_to_clear
.["total_clearing_from"] = total_clearing_from
/datum/controller/subsystem/area_contents/fire(resumed)
if(!resumed)

View File

@@ -14,6 +14,10 @@ SUBSYSTEM_DEF(asset_loading)
return ..()
// Metrics: number of asset datums still waiting to be generated.
/datum/controller/subsystem/asset_loading/get_metrics()
. = ..()
.["queued"] = length(generate_queue)
/datum/controller/subsystem/asset_loading/fire(resumed)
while(length(generate_queue))
var/datum/asset/to_load = generate_queue[generate_queue.len]

View File

@@ -12,6 +12,11 @@ SUBSYSTEM_DEF(augury)
msg = "W:[watchers.len]|D:[length(doombringers)]"
return ..()
// Metrics: counts of augury watchers and registered doombringers.
/datum/controller/subsystem/augury/get_metrics()
. = ..()
.["watchers"] = watchers.len
.["doombringers"] = length(doombringers)
/datum/controller/subsystem/augury/proc/register_doom(atom/A, severity)
doombringers[A] = severity
RegisterSignal(A, COMSIG_PARENT_QDELETING, PROC_REF(unregister_doom))

View File

@@ -413,6 +413,13 @@ SUBSYSTEM_DEF(demo)
msg += "}"
return ..(msg)
// Metrics: backlog of demo-recorder writes still pending per category.
/datum/controller/subsystem/demo/get_metrics()
. = ..()
.["remaining_turfs"] = marked_turfs.len
.["remaining_new"] = marked_new.len
.["remaining_updated"] = marked_dirty.len
.["remaining_deleted"] = del_list.len
/datum/controller/subsystem/demo/proc/mark_turf(turf/T)
if(!can_fire)
return

View File

@@ -23,6 +23,10 @@ SUBSYSTEM_DEF(disease)
msg = "P:[length(active_diseases)]"
return ..()
// Metrics: number of active disease instances being processed.
/datum/controller/subsystem/disease/get_metrics()
. = ..()
.["active_diseases"] = length(active_diseases)
/datum/controller/subsystem/disease/proc/get_disease_name(id)
var/datum/disease/advance/A = archive_diseases[id]
if(A.name)

View File

@@ -66,6 +66,24 @@ SUBSYSTEM_DEF(explosions)
msg += "} "
return ..()
// Metrics: per-severity explosion processing costs and current queue sizes.
/datum/controller/subsystem/explosions/get_metrics()
. = ..()
.["cost_lowturf"] = cost_lowturf
.["cost_medturf"] = cost_medturf
.["cost_highturf"] = cost_highturf
.["cost_flameturf"] = cost_flameturf
.["cost_low_mov_atom"] = cost_low_mov_atom
.["cost_med_mov_atom"] = cost_med_mov_atom
.["cost_high_mov_atom"] = cost_high_mov_atom
.["cost_throwturf"] = cost_throwturf
.["lowturfs"] = lowturf.len
.["medturfs"] = medturf.len
.["highturfs"] = highturf.len
.["flameturfs"] = flameturf.len
.["low_mov_atom"] = low_mov_atom.len
.["med_mov_atom"] = med_mov_atom.len
.["high_mov_atom"] = high_mov_atom.len
.["throwturf"] = throwturf.len
/datum/controller/subsystem/explosions/proc/is_exploding()
return (lowturf.len || medturf.len || highturf.len || flameturf.len || throwturf.len || low_mov_atom.len || med_mov_atom.len || high_mov_atom.len)

View File

@@ -12,6 +12,9 @@ SUBSYSTEM_DEF(fire_burning)
msg = "P:[length(processing)]"
return ..()
// Metrics: number of burning objects currently queued for processing.
/datum/controller/subsystem/fire_burning/get_metrics()
. = ..()
.["queued"] = length(processing)
/datum/controller/subsystem/fire_burning/fire(resumed = 0)
if (!resumed)

View File

@@ -80,6 +80,17 @@ SUBSYSTEM_DEF(garbage)
msg += "|F:[fail_counts.Join(",")]"
return ..()
// Metrics: per-GC-queue sizes and pass/fail counters, plus per-tick and
// cumulative hard-delete / garbage-collect totals.
/datum/controller/subsystem/garbage/get_metrics()
. = ..()
for(var/i in 1 to GC_QUEUE_COUNT)
.["queue_items_[i]"] = length(queues[i])
.["queue_passed_[i]"] = pass_counts[i]
.["queue_failed_[i]"] = fail_counts[i]
.["delslasttick"] = delslasttick
.["gcedlasttick"] = gcedlasttick
.["totaldels"] = totaldels
.["totalgcs"] = totalgcs
/datum/controller/subsystem/garbage/Shutdown()
//Adds the del() log to the qdel log file
var/list/dellog = list()

View File

@@ -14,6 +14,11 @@ SUBSYSTEM_DEF(idlenpcpool)
msg = "IdleNPCS:[length(idlelist)]|Z:[length(zlist)]"
return ..()
// Metrics: counts of simple animals with idled AI and z-level-disabled AI.
// Note: reads the GLOB.simple_animals buckets rather than the idlelist/zlist
// vars that stat_entry reports.
/datum/controller/subsystem/idlenpcpool/get_metrics()
. = ..()
.["idle_npcs"] = length(GLOB.simple_animals[AI_IDLE])
.["z_zombies"] = length(GLOB.simple_animals[AI_Z_OFF])
/datum/controller/subsystem/idlenpcpool/proc/MaxZChanged()
if (!islist(idle_mobs_by_zlevel))
idle_mobs_by_zlevel = new /list(world.maxz,0)

View File

@@ -13,6 +13,11 @@ SUBSYSTEM_DEF(lighting)
msg = "L:[length(sources_queue)]|C:[length(corners_queue)]|O:[length(objects_queue)]"
return ..()
// Metrics: pending lighting work split by queue (sources, corners, objects).
/datum/controller/subsystem/lighting/get_metrics()
. = ..()
.["queue_sources"] = length(sources_queue)
.["queue_corners"] = length(corners_queue)
.["queue_objects"] = length(objects_queue)
/datum/controller/subsystem/lighting/Initialize(timeofday)
if(!initialized)

View File

@@ -27,6 +27,10 @@ SUBSYSTEM_DEF(machines)
msg = "M:[length(processing)]|PN:[length(powernets)]"
return ..()
// Metrics: processing machine count and number of powernets.
/datum/controller/subsystem/machines/get_metrics()
. = ..()
.["machines"] = length(processing)
.["powernets"] = length(powernets)
/datum/controller/subsystem/machines/fire(resumed = 0)
if (!resumed)

View File

@@ -0,0 +1,107 @@
// Buffer sizing: collector drops data past this many queued lines; the
// publisher flushes once 1/10th of that is queued.
#define METRICS_BUFFER_MAX_DEFAULT 15000
#define METRICS_BUFFER_PUBLISH_DEFAULT (METRICS_BUFFER_MAX_DEFAULT / 10)
// Collector subsystem: each fire, snapshots every subsystem's get_metrics()
// into an in-memory queue that SSmetrics_publish later ships to InfluxDB.
SUBSYSTEM_DEF(metrics)
name = "Metrics"
wait = 25 //measured in ticks
runlevels = RUNLEVEL_LOBBY | RUNLEVELS_DEFAULT
flags = SS_TICKER
var/list/queue = list() // pending metric lines awaiting publish
var/world_init_time = 0 //set in world/New()
var/last_warning = 0 // world.time of the last buffer-overflow admin warning
var/threshold = METRICS_BUFFER_MAX_DEFAULT // max queued lines before ingest() drops data
// MC stat panel: queue fill level as count and percentage of the drop threshold.
/datum/controller/subsystem/metrics/stat_entry(msg)
msg = "Q:[length(queue)]/[threshold]([round(length(queue)/threshold*100, 0.1)]%)"
return ..()
// Metrics: world-level health fields (CPU, uptime, population, runlevel)
// published alongside the generic per-subsystem fields from the parent.
/datum/controller/subsystem/metrics/get_metrics()
	. = ..()
	.["cpu"] = world.cpu
	.["map_cpu"] = world.map_cpu
	// Read our own var directly instead of going through the SSmetrics global
	// (src IS SSmetrics here) — identical behavior, but self-contained.
	.["elapsed_real"] = (REALTIMEOFDAY - world_init_time)
	.["elapsed_processed"] = world.time
	if(!isnull(GLOB.round_id))
		.["round_id"] = GLOB.round_id
	.["clients"] = length(GLOB.clients)
	.["runlevel"] = Master.current_runlevel
// Disables the collector entirely when no metrics endpoint is configured.
/datum/controller/subsystem/metrics/Initialize(start_timeofday)
if(!CONFIG_GET(string/metrics_api))
flags |= SS_NO_FIRE // Disable firing to save CPU
return SS_INIT_SUCCESS
// Snapshot every subsystem's metrics and stamp each with the current unix time.
// (Dropped the stray trailing semicolon — DM statements are newline-terminated
// and the rest of this file uses none.)
/datum/controller/subsystem/metrics/fire(resumed)
	var/timestamp = rustg_unix_timestamp_int()
	for(var/datum/controller/subsystem/SS in Master.subsystems)
		var/metrics = SS.get_metrics()
		metrics["@timestamp"] = timestamp
		ingest(metrics)
// Appends one metric line to the publish queue. When the buffer is over the
// threshold the line is dropped, and admins are warned at most once per 5 minutes.
/datum/controller/subsystem/metrics/proc/ingest(line)
if (flags & SS_NO_FIRE)
return
if (queue.len > threshold)
if((last_warning + (5 MINUTES)) < world.time)
message_admins("Metrics buffer exceeded max size, dropping data. Please report this")
log_game("Metrics buffer exceeded max size, dropping data.")
last_warning = world.time
return
queue[++queue.len] = line // append by growing the list and assigning in place
/////////////
//Publisher//
/////////////
// Ships SSmetrics' queued lines (and the world profiler output) to InfluxDB
// via the rust-g publish calls.
SUBSYSTEM_DEF(metrics_publish)
name = "Metrics (Publish)"
wait = 2.5 SECONDS
runlevels = RUNLEVEL_LOBBY | RUNLEVELS_DEFAULT
flags = SS_BACKGROUND
var/threshold = METRICS_BUFFER_PUBLISH_DEFAULT // publish once this many lines are queued
var/last_profile_publish = 0 // REALTIMEOFDAY of the last profiler upload
var/profile_publish_interval = 5 MINUTES
// MC stat panel: fill level of the shared SSmetrics queue vs the publish threshold.
/datum/controller/subsystem/metrics_publish/stat_entry(msg)
msg = "Q:[length(SSmetrics.queue)]/[threshold]([round(length(SSmetrics.queue)/threshold*100, 0.1)]%)"
return ..()
// Disables the publisher when no metrics endpoint is configured.
// NOTE(review): gated on metrics_api only — if only the profiler endpoint
// (metrics_api_profile) is configured, profile publishing is disabled too;
// confirm that is intended.
/datum/controller/subsystem/metrics_publish/Initialize(start_timeofday)
if(!CONFIG_GET(string/metrics_api))
flags |= SS_NO_FIRE // Disable firing to save CPU
return SS_INIT_SUCCESS
// Each fire: flush the metrics queue if full, then publish profiler data if due.
/datum/controller/subsystem/metrics_publish/fire(resumed)
queue_publish()
profile_publish()
// Publishes the queued subsystem metrics once the queue passes the threshold,
// then resets the queue before the fire-and-forget response handler runs.
// NOTE(review): lines below the threshold are never flushed (e.g. at shutdown);
// confirm losing the tail of a round's data is acceptable.
/datum/controller/subsystem/metrics_publish/proc/queue_publish()
if (length(SSmetrics.queue) > threshold)
var/id = rustg_influxdb2_publish(json_encode(SSmetrics.queue), CONFIG_GET(string/metrics_api), CONFIG_GET(string/metrics_token))
SSmetrics.queue = list()
handle_response(id, "subsystem")
// Every profile_publish_interval, grabs (and clears) the world profiler JSON
// and ships it to the profiler-specific InfluxDB endpoint.
// The initial last_profile_publish of 0 means the first fire publishes immediately.
/datum/controller/subsystem/metrics_publish/proc/profile_publish()
if ((last_profile_publish + profile_publish_interval) < REALTIMEOFDAY)
last_profile_publish = REALTIMEOFDAY
var/data = world.Profile(PROFILE_REFRESH, "json")
world.Profile(PROFILE_CLEAR)
var/id = rustg_influxdb2_publish_profile(data, CONFIG_GET(string/metrics_api_profile), CONFIG_GET(string/metrics_token_profile), GLOB.round_id)
handle_response(id, "profiler")
// Waits (non-blocking for the caller; waitfor = FALSE) for a rust-g HTTP job
// to complete and logs any failure. metric_type only labels the log lines
// ("subsystem" or "profiler").
/datum/controller/subsystem/metrics_publish/proc/handle_response(id, metric_type)
	set waitfor = FALSE
	var/datum/http_request/request = new
	request.from_id(id)
	UNTIL(request.is_complete())
	var/datum/http_response/response = request.into_response()
	if(response.errored)
		log_world("Failed to publish [metric_type] metrics (send error)")
		log_world(response.error)
		return // errored responses carry no meaningful status code; don't fall through
	if(response.status_code >= 400)
		log_world("Failed to publish [metric_type] metrics (status [response.status_code])")
		log_world(response.body)
#undef METRICS_BUFFER_MAX_DEFAULT
#undef METRICS_BUFFER_PUBLISH_DEFAULT

View File

@@ -16,6 +16,10 @@ SUBSYSTEM_DEF(mobs)
msg = "P:[length(GLOB.mob_living_list)]"
return ..()
// Metrics: count of living mobs in the world.
/datum/controller/subsystem/mobs/get_metrics()
. = ..()
.["mobs"] = length(GLOB.mob_living_list)
/datum/controller/subsystem/mobs/proc/MaxZChanged()
if (!islist(clients_by_zlevel))
clients_by_zlevel = new /list(world.maxz,0)

View File

@@ -11,6 +11,10 @@ SUBSYSTEM_DEF(npcpool)
msg = "NPCS:[length(activelist)]"
return ..()
// Metrics: count of simple animals with active AI.
/datum/controller/subsystem/npcpool/get_metrics()
. = ..()
.["npcs"] = length(GLOB.simple_animals[AI_ON])
/datum/controller/subsystem/npcpool/fire(resumed = FALSE)
if (!resumed)

View File

@@ -14,6 +14,10 @@ SUBSYSTEM_DEF(processing)
msg = "[stat_tag]:[length(processing)]"
return ..()
// Metrics (base for all processing subsystems): size of the processing list.
/datum/controller/subsystem/processing/get_metrics()
. = ..()
.["processing_queue"] = length(processing)
/datum/controller/subsystem/processing/fire(resumed = 0)
if (!resumed)
currentrun = processing.Copy()

View File

@@ -1,63 +0,0 @@
#define PROFILER_FILENAME "profiler.json"
SUBSYSTEM_DEF(profiler)
name = "Profiler"
init_order = INIT_ORDER_PROFILER
runlevels = RUNLEVELS_DEFAULT | RUNLEVEL_LOBBY
wait = 5 MINUTES
var/fetch_cost = 0
var/write_cost = 0
/datum/controller/subsystem/profiler/stat_entry(msg)
msg += "F:[round(fetch_cost,1)]ms"
msg += "|W:[round(write_cost,1)]ms"
return msg
/datum/controller/subsystem/profiler/Initialize()
if(CONFIG_GET(flag/auto_profile))
StartProfiling()
else
StopProfiling() //Stop the early start from world/New
return SS_INIT_SUCCESS
/datum/controller/subsystem/profiler/fire()
if(CONFIG_GET(flag/auto_profile))
DumpFile()
/datum/controller/subsystem/profiler/Shutdown()
if(CONFIG_GET(flag/auto_profile))
DumpFile()
return ..()
/datum/controller/subsystem/profiler/proc/StartProfiling()
#if DM_BUILD < 1506 || DM_VERSION < 513
stack_trace("Auto profiling unsupported on this byond version")
CONFIG_SET(flag/auto_profile, FALSE)
#else
world.Profile(PROFILE_START)
#endif
/datum/controller/subsystem/profiler/proc/StopProfiling()
#if DM_BUILD >= 1506 && DM_VERSION >= 513
world.Profile(PROFILE_STOP)
#endif
/datum/controller/subsystem/profiler/proc/DumpFile()
#if DM_BUILD < 1506 || DM_VERSION < 513
stack_trace("Auto profiling unsupported on this byond version")
CONFIG_SET(flag/auto_profile, FALSE)
#else
var/timer = TICK_USAGE_REAL
var/current_profile_data = world.Profile(PROFILE_REFRESH,format="json")
fetch_cost = MC_AVERAGE(fetch_cost, TICK_DELTA_TO_MS(TICK_USAGE_REAL - timer))
CHECK_TICK
if(!length(current_profile_data)) //Would be nice to have explicit proc to check this
stack_trace("Warning, profiling stopped manually before dump.")
var/json_file = file("[GLOB.log_directory]/[PROFILER_FILENAME]")
if(fexists(json_file))
fdel(json_file)
timer = TICK_USAGE_REAL
WRITE_FILE(json_file, current_profile_data)
write_cost = MC_AVERAGE(write_cost, TICK_DELTA_TO_MS(TICK_USAGE_REAL - timer))
WRITE_FILE(json_file, current_profile_data)
#endif

View File

@@ -43,6 +43,11 @@ SUBSYSTEM_DEF(runechat)
msg += "ActMsgs:[bucket_count] SecQueue:[length(second_queue)]"
return msg
// Metrics: active runechat message buckets and the secondary (long-delay) queue.
/datum/controller/subsystem/runechat/get_metrics()
. = ..()
.["buckets"] = bucket_count
.["second_queue"] = length(second_queue)
/datum/controller/subsystem/runechat/fire(resumed = FALSE)
// Store local references to datum vars as it is faster to access them this way
var/list/bucket_list = src.bucket_list

View File

@@ -12,6 +12,9 @@ SUBSYSTEM_DEF(spacedrift)
msg = "P:[length(processing)]"
return ..()
// Metrics: number of drifting movables currently queued for processing.
/datum/controller/subsystem/spacedrift/get_metrics()
. = ..()
.["queued"] = length(processing)
/datum/controller/subsystem/spacedrift/fire(resumed = 0)
if (!resumed)

View File

@@ -40,6 +40,10 @@ SUBSYSTEM_DEF(tgui)
msg = "P:[length(open_uis)]"
return ..()
// Metrics: count of currently open tgui windows.
/datum/controller/subsystem/tgui/get_metrics()
. = ..()
.["open_uis"] = length(open_uis)
/datum/controller/subsystem/tgui/fire(resumed = FALSE)
if(!resumed)
src.current_run = open_uis.Copy()

View File

@@ -16,6 +16,9 @@ SUBSYSTEM_DEF(throwing)
msg = "P:[length(processing)]"
return ..()
// Metrics: number of thrown movables currently queued for processing.
/datum/controller/subsystem/throwing/get_metrics()
. = ..()
.["queued"] = length(processing)
/datum/controller/subsystem/throwing/fire(resumed = 0)
if (!resumed)

View File

@@ -61,6 +61,15 @@ SUBSYSTEM_DEF(timer)
msg = "B:[bucket_count] P:[length(second_queue)] H:[length(hashes)] C:[length(clienttime_timers)] S:[length(timer_id_dict)] RST:[bucket_reset_count]"
return ..()
// Metrics: timer wheel state — bucket count, long-delay queue, hash dedup table,
// client-time timers, id lookup table, and how often the buckets were reset.
/datum/controller/subsystem/timer/get_metrics()
. = ..()
.["buckets"] = bucket_count
.["second_queue"] = length(second_queue)
.["hashes"] = length(hashes)
.["clienttime_timers"] = length(clienttime_timers)
.["timer_id_dict"] = length(timer_id_dict)
.["bucket_reset_count"] = bucket_reset_count
/datum/controller/subsystem/timer/proc/dump_timer_buckets(full = TRUE)
var/list/to_log = list("Timer bucket reset. world.time: [world.time], head_offset: [head_offset], practical_offset: [practical_offset]")
if (full)

View File

@@ -70,6 +70,18 @@ SUBSYSTEM_DEF(wardrobe)
msg += " ID:[inspect_delay] NI:[last_inspect_time + inspect_delay]"
return ..()
// Metrics: wardrobe cache stock levels and tuning values.
/datum/controller/subsystem/wardrobe/get_metrics()
	. = ..()
	.["canon_minimum"] = length(canon_minimum)
	.["order_list"] = length(order_list)
	.["preloaded_stock"] = length(preloaded_stock)
	// These vars are numbers, not lists — inspect_delay is used in arithmetic
	// against world.time and interpolated directly in stat_entry — so wrapping
	// them in length() always reported 0. Report the raw values instead.
	// NOTE(review): assumes cache_intensity/overflow_lienency/stock_hit/stock_miss
	// are numeric like inspect_delay; confirm against their declarations.
	.["cache_intensity"] = cache_intensity
	.["overflow_lienency"] = overflow_lienency
	.["stock_hit"] = stock_hit
	.["stock_miss"] = stock_miss
	.["inspect_delay"] = inspect_delay
	.["one_go_master"] = one_go_master
/datum/controller/subsystem/wardrobe/fire(resumed=FALSE)
if(current_task != SSWARDROBE_INSPECT && world.time - last_inspect_time >= inspect_delay)
current_task = SSWARDROBE_INSPECT

View File

@@ -26,6 +26,13 @@
// Runs the HTTP request synchronously via rust-g, blocking until it completes.
/datum/http_request/proc/execute_blocking()
_raw_response = rustg_http_request_blocking(method, url, body, headers, build_options())
// Binds this request object to an already-started rust-g async job by its id,
// so the usual is_complete()/into_response() flow can be used on it.
// CRASHes if the object was already used, same as begin_async().
/datum/http_request/proc/from_id(id)
if (in_progress)
CRASH("Attempted to re-use a request object.")
src.id = id
in_progress = TRUE
/datum/http_request/proc/begin_async()
if (in_progress)
CRASH("Attempted to re-use a request object.")

View File

@@ -24,6 +24,8 @@ GLOBAL_VAR(restart_counter)
world.Profile(PROFILE_START)
#endif
SSmetrics.world_init_time = REALTIMEOFDAY
log_world("World loaded at [time_stamp()]!")
GLOB.config_error_log = GLOB.world_manifest_log = GLOB.world_pda_log = GLOB.world_job_debug_log = GLOB.sql_error_log = GLOB.world_href_log = GLOB.world_runtime_log = GLOB.world_attack_log = GLOB.world_game_log = "data/logs/config_error.[GUID()].log" //temporary file used to record errors with loading config, moved to log directory once logging is set bl

View File

@@ -437,8 +437,6 @@ DEFAULT_VIEW_SQUARE 15x15
## More API details can be found here: https://centcom.melonmesa.com
CENTCOM_BAN_DB https://centcom.melonmesa.com/ban/search
#AUTO_PROFILE
## Uncomment to disable hard deleting garbage collection failures. (Hard deleting GC failures causes lag in order to bring memory usage down and keep bugged objects from hanging around and causing bugs in other things.) You can safely enable this for performance on production if the lag spikes are too disruptive)
#DISABLE_GC_FAILURE_HARD_DELETES

View File

@@ -54,3 +54,8 @@ FEEDBACK_LOGIN username
## Password used to access the database.
FEEDBACK_PASSWORD password
#METRICS_API
#METRICS_TOKEN
#METRICS_API_PROFILE
#METRICS_TOKEN_PROFILE

View File

@@ -8,7 +8,7 @@ export BYOND_MAJOR=514
export BYOND_MINOR=1589
#rust_g git tag
export RUST_G_VERSION=1.2.0
export RUST_G_VERSION=1.2.0-yogs1
#node version
export NODE_VERSION=14

Binary file not shown.

View File

@@ -4,6 +4,6 @@ set -euo pipefail
source dependencies.sh
mkdir -p ~/.byond/bin
wget -O ~/.byond/bin/librust_g.so "https://github.com/tgstation/rust-g/releases/download/$RUST_G_VERSION/librust_g.so"
wget -O ~/.byond/bin/librust_g.so "https://github.com/yogstation13/rust-g/releases/download/$RUST_G_VERSION/librust_g.so"
chmod +x ~/.byond/bin/librust_g.so
ldd ~/.byond/bin/librust_g.so

View File

@@ -358,6 +358,7 @@
#include "code\controllers\subsystem\machines.dm"
#include "code\controllers\subsystem\mapping.dm"
#include "code\controllers\subsystem\materials.dm"
#include "code\controllers\subsystem\metrics.dm"
#include "code\controllers\subsystem\minor_mapping.dm"
#include "code\controllers\subsystem\mobs.dm"
#include "code\controllers\subsystem\moods.dm"
@@ -369,7 +370,6 @@
#include "code\controllers\subsystem\pathfinder.dm"
#include "code\controllers\subsystem\persistence.dm"
#include "code\controllers\subsystem\persistent_paintings.dm"
#include "code\controllers\subsystem\profiler.dm"
#include "code\controllers\subsystem\radiation.dm"
#include "code\controllers\subsystem\radio.dm"
#include "code\controllers\subsystem\research.dm"