Add Picture-in-Picture Mode for AIs, Working on Resolving Runtime

WIP DESC
Rykka
2020-05-10 12:31:49 -04:00
parent 6021131272
commit 3b3a7b960c
22 changed files with 1485 additions and 24749 deletions

View File

@@ -10,17 +10,36 @@
/mob/observer/eye/aiEye/New()
..()
visualnet = cameranet
/mob/observer/eye/aiEye/Destroy()
if(owner)
var/mob/living/silicon/ai/ai = owner
ai.all_eyes -= src
owner = null
. = ..()
/mob/observer/eye/aiEye/setLoc(var/T, var/cancel_tracking = 1)
if(..())
if(owner)
T = get_turf(T)
loc = T
var/mob/living/silicon/ai/ai = owner
if(cancel_tracking)
ai.ai_cancel_tracking()
//Holopad
if(use_static)
ai.camera_visibility(src)
if(ai.client && !ai.multicam_on)
ai.client.eye = src
if(ai.master_multicam)
ai.master_multicam.refresh_view()
if(ai.holo)
if(ai.hologram_follow)
ai.holo.move_hologram(ai)
return 1
// AI MOVEMENT
@@ -46,6 +65,7 @@
if(!newloc)
newloc = src.loc
eyeobj = new /mob/observer/eye/aiEye(newloc)
all_eyes += eyeobj
eyeobj.owner = src
eyeobj.name = "[src.name] (AI Eye)" // Give it a name
if(client)
@@ -66,7 +86,7 @@
/atom/proc/move_camera_by_click()
if(istype(usr, /mob/living/silicon/ai))
var/mob/living/silicon/ai/AI = usr
if(AI.eyeobj && AI.client.eye == AI.eyeobj)
if(AI.eyeobj && (AI.multicam_on || (AI.client.eye == AI.eyeobj)))
var/turf/T = get_turf(src)
if(T)
AI.eyeobj.setLoc(T)

View File

@@ -25,12 +25,12 @@
// Add an eye to the chunk, then update if changed.
/datum/chunk/proc/add(mob/observer/eye/eye)
if(!eye.owner)
return
/datum/chunk/proc/add(mob/observer/eye/eye, add_images = TRUE)
if(add_images)
var/client/client = eye.GetViewerClient()
if(client)
client.images += obscured
eye.visibleChunks += src
if(eye.owner.client)
eye.owner.client.images += obscured
visible++
seenby += eye
if(changed && !updating)
@@ -38,12 +38,12 @@
// Remove an eye from the chunk, then update if changed.
/datum/chunk/proc/remove(mob/observer/eye/eye)
if(!eye.owner)
return
/datum/chunk/proc/remove(mob/observer/eye/eye, remove_images = TRUE)
if(remove_images)
var/client/client = eye.GetViewerClient()
if(client)
client.images -= obscured
eye.visibleChunks -= src
if(eye.owner.client)
eye.owner.client.images -= obscured
seenby -= eye
if(visible > 0)
visible--
@@ -92,10 +92,11 @@
obscured -= t.obfuscations[obfuscation.type]
for(var/eye in seenby)
var/mob/observer/eye/m = eye
if(!m || !m.owner)
if(!m)
continue
if(m.owner.client)
m.owner.client.images -= t.obfuscations[obfuscation.type]
var/client/client = m.GetViewerClient()
if(client)
client.images -= t.obfuscations[obfuscation.type]
for(var/turf in visRemoved)
var/turf/t = turf
@@ -109,11 +110,12 @@
obscured += t.obfuscations[obfuscation.type]
for(var/eye in seenby)
var/mob/observer/eye/m = eye
if(!m || !m.owner)
if(!m)
seenby -= m
continue
if(m.owner.client)
m.owner.client.images += t.obfuscations[obfuscation.type]
var/client/client = m.GetViewerClient()
if(client)
client.images += t.obfuscations[obfuscation.type]
/datum/chunk/proc/acquireVisibleTurfs(var/list/visible)
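The add_images/remove_images flags above let a caller that is about to recompute a whole set of chunks skip the per-chunk client.images churn and apply the obscured images once at the end, with the client resolved through eye.GetViewerClient() instead of eye.owner.client, so eyes without an owner mob of their own (such as the new picture-in-picture eyes) still work. A minimal sketch of that pattern, assuming only the procs shown in this hunk; the helper name is illustrative:

/datum/visualnet/proc/example_move_eye(mob/observer/eye/eye, datum/chunk/old_chunk, datum/chunk/new_chunk)
	// Bookkeeping only: seenby and the visible counter update, client images do not.
	old_chunk.remove(eye, FALSE)
	new_chunk.add(eye, FALSE)
	// Apply the image change once, via whichever client is watching this eye.
	var/client/client = eye.GetViewerClient()
	if(client)
		client.images -= old_chunk.obscured
		client.images += new_chunk.obscured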

View File

@@ -23,6 +23,8 @@
var/ghostimage = null
var/datum/visualnet/visualnet
var/use_static = TRUE
var/static_visibility_range = 16
/mob/observer/eye/Destroy()
if(owner)
@@ -67,8 +69,8 @@
visualnet.updateVisibility(owner, 0)
owner.loc = loc
visualnet.updateVisibility(owner, 0)
visualnet.visibility(src)
if(use_static)
visualnet.visibility(src, owner.client)
return 1
return 0
@@ -85,6 +87,11 @@
return
return eyeobj.EyeMove(n, direct)
/mob/observer/eye/proc/GetViewerClient()
if(owner)
return owner.client
return null
/mob/observer/eye/EyeMove(n, direct)
var/initial = initial(sprint)

View File

@@ -2,6 +2,8 @@
//
// The datum containing all the chunks.
#define CHUNK_SIZE 16
/datum/visualnet
// The chunks of the map, mapping the areas that an object can see.
var/list/chunks = list()
@@ -36,29 +38,63 @@
// Updates what the aiEye can see. It is recommended you use this when the aiEye moves or its location is set.
/datum/visualnet/proc/visibility(mob/observer/eye/eye)
// 0xf = 15
var/x1 = max(0, eye.x - 16) & ~0xf
var/y1 = max(0, eye.y - 16) & ~0xf
var/x2 = min(world.maxx, eye.x + 16) & ~0xf
var/y2 = min(world.maxy, eye.y + 16) & ~0xf
/datum/visualnet/proc/visibility(list/moved_eyes, client/C, list/other_eyes)
if(!islist(moved_eyes))
moved_eyes = moved_eyes ? list(moved_eyes) : list()
if(islist(other_eyes))
other_eyes = (other_eyes - moved_eyes)
else
other_eyes = list()
var/list/visibleChunks = list()
var/list/chunks_pre_seen = list()
var/list/chunks_post_seen = list()
for(var/x = x1; x <= x2; x += 16)
for(var/y = y1; y <= y2; y += 16)
visibleChunks += getChunk(x, y, eye.z)
for(var/V in moved_eyes)
var/mob/observer/eye/eye = V
if(C)
chunks_pre_seen |= eye.visibleChunks
// 0xf = 15
var/static_range = eye.static_visibility_range
var/x1 = max(0, eye.x - static_range) & ~(CHUNK_SIZE - 1)
var/y1 = max(0, eye.y - static_range) & ~(CHUNK_SIZE - 1)
var/x2 = min(world.maxx, eye.x + static_range) & ~(CHUNK_SIZE - 1)
var/y2 = min(world.maxy, eye.y + static_range) & ~(CHUNK_SIZE - 1)
var/list/remove = eye.visibleChunks - visibleChunks
var/list/add = visibleChunks - eye.visibleChunks
var/list/visibleChunks = list()
for(var/chunk in remove)
var/datum/chunk/c = chunk
c.remove(eye)
for(var/x = x1; x <= x2; x += CHUNK_SIZE)
for(var/y = y1; y <= y2; y += CHUNK_SIZE)
visibleChunks |= getChunk(x, y, eye.z)
for(var/chunk in add)
var/datum/chunk/c = chunk
c.add(eye)
var/list/remove = eye.visibleChunks - visibleChunks
var/list/add = visibleChunks - eye.visibleChunks
for(var/chunk in remove)
var/datum/chunk/c = chunk
c.remove(eye, FALSE)
for(var/chunk in add)
var/datum/chunk/c = chunk
c.add(eye, FALSE)
if(C)
chunks_post_seen |= eye.visibleChunks
if(C)
for(var/V in other_eyes)
var/mob/observer/eye/eye = V
chunks_post_seen |= eye.visibleChunks
var/list/remove = chunks_pre_seen - chunks_post_seen
var/list/add = chunks_post_seen - chunks_pre_seen
for(var/chunk in remove)
var/datum/chunk/c = chunk
C.images -= c.obscured
for(var/chunk in add)
var/datum/chunk/c = chunk
C.images += c.obscured
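For reference, the chunk-origin rounding above works because CHUNK_SIZE is 16, so & ~(CHUNK_SIZE - 1) (i.e. & ~0xf) clears the low four bits and snaps a coordinate down to the nearest multiple of 16. A worked example under the defaults in this hunk:

// Worked example (static_visibility_range = 16, CHUNK_SIZE = 16, world.maxx >= 53):
//   eye at (37, 70)
//   x1 = max(0, 37 - 16) & ~0xf = 21 & ~0xf = 16
//   x2 = min(world.maxx, 37 + 16) & ~0xf = 53 & ~0xf = 48
//   y1 = (70 - 16) & ~0xf = 54 & ~0xf = 48
//   y2 = (70 + 16) & ~0xf = 86 & ~0xf = 80
// so the loops visit chunk origins (16..48, 48..80) in steps of CHUNK_SIZE.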
// Updates the chunks that the turf is located in. Use this when obstacles are destroyed or when doors open.

View File

@@ -85,6 +85,14 @@ var/list/ai_verbs_default = list(
var/datum/ai_icon/selected_sprite // The selected icon set
var/custom_sprite = 0 // Whether the selected icon is custom
var/carded
// Multicam Vars
var/multicam_allowed = TRUE
var/multicam_on = FALSE
var/obj/screen/movable/pic_in_pic/ai/master_multicam
var/list/multicam_screens = list()
var/list/all_eyes = list()
var/max_multicams = 6
can_be_antagged = TRUE
@@ -478,12 +486,27 @@ var/list/ai_verbs_default = list(
return
/mob/living/silicon/ai/proc/camera_visibility(mob/observer/eye/aiEye/moved_eye)
cameranet.visibility(moved_eye, client, all_eyes)
/mob/living/silicon/ai/forceMove(atom/destination)
. = ..()
if(.)
end_multicam()
/mob/living/silicon/ai/reset_view(atom/A)
if(camera)
camera.set_light(0)
if(istype(A,/obj/machinery/camera))
camera = A
..()
if(A != GLOB.ai_camera_room_landmark)
end_multicam()
. = ..()
if(.)
if(!A && isturf(loc) && eyeobj)
end_multicam()
client.eye = eyeobj
client.perspective = MOB_PERSPECTIVE
if(istype(A,/obj/machinery/camera))
if(camera_light_on) A.set_light(AI_CAMERA_LUMINOSITY)
else A.set_light(0)
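camera_visibility() above hands the AI's client and its full all_eyes list to the visualnet, so chunks still watched by another of the AI's eyes (for example a picture-in-picture window) keep their obscured images on the client. A minimal sketch of that call pattern, assuming the new visibility() signature from this commit; the helper name is illustrative and not part of the diff:

/mob/living/silicon/ai/proc/refresh_all_camera_visibility()
	// Recompute chunk visibility for each eye, but always pass the complete
	// eye list so chunk images shared with other windows are not stripped early.
	for(var/mob/observer/eye/E in all_eyes)
		cameranet.visibility(E, client, all_eyes)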

View File

@@ -94,6 +94,7 @@
spawn(20)
to_chat(src, "Backup battery online. Scanners, camera, and radio interface offline. Beginning fault-detection.")
end_multicam()
sleep(50)
if (loc.power_equip)
if (!istype(T, /turf/space))

View File

@@ -1,4 +1,4 @@
/mob/living/silicon/ai/Login() //ThisIsDumb(TM) TODO: tidy this up ~Carn
..()
for(var/obj/effect/rune/rune in rune_list)
client.images += rune.blood_image
@@ -6,5 +6,7 @@
for(var/obj/machinery/ai_status_display/O in machines) //change status
O.mode = 1
O.emotion = "Neutral"
if(multicam_on)
end_multicam()
src.view_core()
return

View File

@@ -0,0 +1,260 @@
//Picture in picture
/obj/screen/movable/pic_in_pic/ai
var/mob/living/silicon/ai/ai
var/mutable_appearance/highlighted_background
var/highlighted = FALSE
var/mob/observer/eye/aiEye/pic_in_pic/aiEye
/obj/screen/movable/pic_in_pic/ai/Initialize()
. = ..()
aiEye = new /mob/observer/eye/aiEye/pic_in_pic()
aiEye.screen = src
/obj/screen/movable/pic_in_pic/ai/Destroy()
set_ai(null)
QDEL_NULL(aiEye)
return ..()
/obj/screen/movable/pic_in_pic/ai/Click()
..()
if(ai)
ai.select_main_multicam_window(src)
/obj/screen/movable/pic_in_pic/ai/make_backgrounds()
..()
highlighted_background = new /mutable_appearance()
highlighted_background.icon = 'icons/misc/pic_in_pic.dmi'
highlighted_background.icon_state = "background_highlight"
highlighted_background.layer = DISPOSAL_LAYER
highlighted_background.plane = PLATING_PLANE
/obj/screen/movable/pic_in_pic/ai/add_background()
if((width > 0) && (height > 0))
var/matrix/M = matrix()
M.Scale(width + 0.5, height + 0.5)
M.Translate((width-1)/2 * world.icon_size, (height-1)/2 * world.icon_size)
highlighted_background.transform = M
standard_background.transform = M
overlays += highlighted ? highlighted_background : standard_background
/obj/screen/movable/pic_in_pic/ai/set_view_size(width, height, do_refresh = TRUE)
aiEye.static_visibility_range = (round(max(width, height) / 2) + 1)
if(ai)
ai.camera_visibility(aiEye)
..()
/obj/screen/movable/pic_in_pic/ai/set_view_center(atom/target, do_refresh = TRUE)
..()
aiEye.setLoc(get_turf(target))
/obj/screen/movable/pic_in_pic/ai/refresh_view()
..()
aiEye.setLoc(get_turf(center))
/obj/screen/movable/pic_in_pic/ai/proc/highlight()
if(highlighted)
return
highlighted = TRUE
overlays -= standard_background
overlays += highlighted_background
/obj/screen/movable/pic_in_pic/ai/proc/unhighlight()
if(!highlighted)
return
highlighted = FALSE
overlays -= highlighted_background
overlays += standard_background
/obj/screen/movable/pic_in_pic/ai/proc/set_ai(mob/living/silicon/ai/new_ai)
if(ai)
ai.multicam_screens -= src
ai.all_eyes -= aiEye
if(ai.master_multicam == src)
ai.master_multicam = null
if(ai.multicam_on)
unshow_to(ai.client)
ai = new_ai
if(new_ai)
new_ai.multicam_screens += src
ai.all_eyes += aiEye
if(new_ai.multicam_on)
show_to(new_ai.client)
//Turf, area, and landmark for the viewing room
/turf/unsimulated/ai_visible
name = ""
icon = 'icons/misc/pic_in_pic.dmi'
icon_state = "room_background"
flags = NOJAUNT
/turf/unsimulated/ai_visible/Initialize()
. = ..()
/area/ai_multicam_room
name = "ai_multicam_room"
icon_state = "ai_camera_room"
dynamic_lighting = FALSE
ambience = list()
GLOBAL_DATUM(ai_camera_room_landmark, /obj/effect/landmark/ai_multicam_room)
/obj/effect/landmark/ai_multicam_room
name = "ai camera room"
icon_state = "x"
/obj/effect/landmark/ai_multicam_room/Initialize()
. = ..()
qdel(GLOB.ai_camera_room_landmark)
GLOB.ai_camera_room_landmark = src
/obj/effect/landmark/ai_multicam_room/Destroy()
if(GLOB.ai_camera_room_landmark == src)
GLOB.ai_camera_room_landmark = null
return ..()
//Dummy camera eyes
/mob/observer/eye/aiEye/pic_in_pic
name = "Secondary AI Eye"
var/obj/screen/movable/pic_in_pic/ai/screen
var/list/cameras_telegraphed = list()
var/telegraph_cameras = TRUE
var/telegraph_range = 7
/mob/observer/eye/aiEye/pic_in_pic/GetViewerClient()
if(screen && screen.ai)
return screen.ai.client
/mob/observer/eye/aiEye/pic_in_pic/setLoc(turf/T)
T = get_turf(T)
forceMove(T)
if(screen && screen.ai)
screen.ai.camera_visibility(src)
else
cameranet.visibility(src)
update_camera_telegraphing()
/mob/observer/eye/aiEye/pic_in_pic/proc/update_camera_telegraphing()
if(!telegraph_cameras)
return
var/list/obj/machinery/camera/add = list()
var/list/obj/machinery/camera/remove = list()
var/list/obj/machinery/camera/visible = list()
for(var/VV in visibleChunks)
var/datum/chunk/camera/CC = VV
for(var/V in CC.cameras)
var/obj/machinery/camera/C = V
if (!C.can_use() || (get_dist(C, src) > telegraph_range))
continue
visible |= C
add = visible - cameras_telegraphed
remove = cameras_telegraphed - visible
for(var/V in remove)
var/obj/machinery/camera/C = V
if(QDELETED(C))
continue
cameras_telegraphed -= C
C.in_use_lights--
C.update_icon()
for(var/V in add)
var/obj/machinery/camera/C = V
if(QDELETED(C))
continue
cameras_telegraphed |= C
C.in_use_lights++
C.update_icon()
/mob/observer/eye/aiEye/pic_in_pic/proc/disable_camera_telegraphing()
telegraph_cameras = FALSE
for(var/V in cameras_telegraphed)
var/obj/machinery/camera/C = V
if(QDELETED(C))
continue
C.in_use_lights--
C.update_icon()
cameras_telegraphed.Cut()
/mob/observer/eye/aiEye/pic_in_pic/Destroy()
disable_camera_telegraphing()
if(screen && screen.ai)
screen.ai.all_eyes -= src
return ..()
//AI procs
/mob/living/silicon/ai/proc/drop_new_multicam(silent = FALSE)
if(!multicam_allowed)
if(!silent)
to_chat(src, "<span class='warning'>This action is currently disabled. Contact an administrator to enable this feature.</span>")
return
if(!eyeobj)
return
if(multicam_screens.len >= max_multicams)
if(!silent)
to_chat(src, "<span class='warning'>Cannot place more than [max_multicams] multicamera windows.</span>")
return
var/obj/screen/movable/pic_in_pic/ai/C = new /obj/screen/movable/pic_in_pic/ai()
C.set_view_size(3, 3, FALSE)
C.set_view_center(get_turf(eyeobj))
C.set_ai(src)
if(!silent)
to_chat(src, "<span class='notice'>Added new multicamera window.</span>")
return C
/mob/living/silicon/ai/proc/toggle_multicam()
if(!multicam_allowed)
to_chat(src, "<span class='warning'>This action is currently disabled. Contact an administrator to enable this feature.</span>")
return
if(multicam_on)
end_multicam()
else
start_multicam()
/mob/living/silicon/ai/proc/start_multicam()
if(multicam_on || aiRestorePowerRoutine || !isturf(loc))
return
if(!GLOB.ai_camera_room_landmark)
to_chat(src, "<span class='warning'>This function is not available at this time.</span>")
return
multicam_on = TRUE
refresh_multicam()
to_chat(src, "<span class='notice'>Multiple-camera viewing mode activated.</span>")
/mob/living/silicon/ai/proc/refresh_multicam()
reset_view(GLOB.ai_camera_room_landmark)
if(client)
for(var/V in multicam_screens)
var/obj/screen/movable/pic_in_pic/P = V
P.show_to(client)
/mob/living/silicon/ai/proc/end_multicam()
if(!multicam_on)
return
multicam_on = FALSE
select_main_multicam_window(null)
if(client)
for(var/V in multicam_screens)
var/obj/screen/movable/pic_in_pic/P = V
P.unshow_to(client)
reset_view()
to_chat(src, "<span class='notice'>Multiple-camera viewing mode deactivated.</span>")
/mob/living/silicon/ai/proc/select_main_multicam_window(obj/screen/movable/pic_in_pic/ai/P)
if(master_multicam == P)
return
if(master_multicam)
master_multicam.set_view_center(get_turf(eyeobj), FALSE)
master_multicam.unhighlight()
master_multicam = null
if(P)
P.highlight()
eyeobj.setLoc(get_turf(P.center))
P.set_view_center(eyeobj)
master_multicam = P
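The player-facing hooks for these procs are not shown in this excerpt. As a rough illustration only, they could be exposed as AI verbs along these lines; the verb names and category below are assumptions, not code from this commit:

// Hypothetical verbs (not in this diff) wiring the new procs into the AI's command panel.
/mob/living/silicon/ai/verb/toggle_multicam_verb()
	set name = "Toggle Multicamera Mode"
	set category = "AI Commands"
	toggle_multicam()

/mob/living/silicon/ai/verb/add_multicam_window_verb()
	set name = "Add Multicamera Window"
	set category = "AI Commands"
	drop_new_multicam()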

View File

@@ -410,7 +410,7 @@
return
/mob/living/silicon/reset_view()
..()
. = ..()
if(cameraFollow)
cameraFollow = null

View File

@@ -229,7 +229,7 @@
else
client.perspective = EYE_PERSPECTIVE
client.eye = loc
return
return 1
/mob/proc/show_inv(mob/user as mob)