// Base speech entry point. Stub; actual speech handling is implemented by
// mob subtypes (overrides accept the message and optional named args such
// as whispering).
/mob/proc/say()
	return

// IC verb: whisper a message (routed through say() with whispering=1).
/mob/verb/whisper(message as text)
	set name = "Whisper"
	set category = "IC"
	usr.say(message,whispering=1)

// IC verb: speak a message aloud.
/mob/verb/say_verb(message as text)
	set name = "Say"
	set category = "IC"
	set_typing_indicator(FALSE)
	usr.say(message)

// IC verb: perform a custom emote ("/me" style action text).
/mob/verb/me_verb(message as text)
	set name = "Me"
	set category = "IC"
	if(say_disabled)	//This is here to try to identify lag problems
		to_chat(usr, "Speech is currently admin-disabled.")
		return
	//VOREStation Edit Start
	if(muffled)
		return me_verb_subtle(message)
	message = sanitize_or_reflect(message,src)	//VOREStation Edit - Reflect too-long messages (within reason)
	//VOREStation Edit End
	set_typing_indicator(FALSE)
	if(use_me)
		usr.emote("me",usr.emote_type,message)
	else
		usr.emote(message)

// Relay a message to deadchat on behalf of this mob.
// Silently ignores clientless mobs; non-admins are blocked by the global
// dsay mute, and players with deadchat muted are told so.
// NOTE: feedback goes to src (this is a proc, not a verb — usr may be a
// different mob partway through a call chain, so usr was wrong here).
/mob/proc/say_dead(var/message)
	if(say_disabled)	//This is here to try to identify lag problems
		to_chat(src, "Speech is currently admin-disabled.")	// was to_chat(usr, ...): message the speaking mob, not the event initiator
		return
	if(!client)
		return // Clientless mobs shouldn't be trying to talk in deadchat.
	if(!src.client.holder)
		if(!config.dsay_allowed)
			to_chat(src, "Deadchat is globally muted.")
			return
	if(!is_preference_enabled(/datum/client_preference/show_dsay))
		to_chat(src, "You have deadchat muted.")	// was to_chat(usr, ...): same usr/src fix as above
		return
	message = encode_html_emphasis(message)
	say_dead_direct("[pick("complains","moans","whines","laments","blubbers")], \"[message]\"", src)

// Returns 1 if src can understand `other` speaking language `speaking`,
// 0 otherwise. The dead understand everything, as do universal
// speakers/understanders; with no language given, understanding falls back
// to mob-type compatibility checks.
/mob/proc/say_understands(var/mob/other,var/datum/language/speaking = null)
	if (src.stat == DEAD)
		return 1

	//Universal speak makes everything understandable, for obvious reasons.
	else if(src.universal_speak || src.universal_understand)
		return 1

	//Languages are handled after.
	if (!speaking)
		if(!other)
			return 1
		if(other.universal_speak)
			return 1
		if(isAI(src) && ispAI(other))
			return 1
		if (istype(other, src.type) || istype(src, other.type))
			return 1
		return 0

	if(speaking.flags & INNATE)
		return 1

	//Language check.
	for(var/datum/language/L in src.languages)
		if(speaking.name == L.name)
			return 1

	return 0

/*
	***Deprecated***
	let this be handled at the hear_say or hear_radio proc
	This is left in for robot speaking when humans gain binary channel access until I get around to rewriting robot_talk() proc.
	There is no language handling build into it however there is at the /mob level so we accept the call for it but just ignore it.
*/
// Picks the verb used to quote a spoken message, based on its final character.
/mob/proc/say_quote(var/message, var/datum/language/speaking = null)
	var/verb = "says"
	var/ending = copytext(message, length(message))
	if(ending=="!")
		verb=pick("exclaims","shouts","yells")
	else if(ending=="?")
		verb="asks"
	return verb

// Emote dispatcher: "me" routes to custom_emote() with the given message.
// All other acts are ignored at the /mob level.
/mob/proc/emote(var/act, var/type, var/message)
	if(act == "me")
		return custom_emote(type, message)

/mob/proc/get_ear()
	// returns an atom representing a location on the map from which this
	// mob can hear things
	// should be overloaded for all mobs whose "ear" is separate from their "mob"
	return get_turf(src)

// Classifies a message by its final character:
// "1" = question, "2" = exclamation, "0" = plain statement.
/mob/proc/say_test(var/text)
	var/ending = copytext(text, length(text))
	if (ending == "?")
		return "1"
	else if (ending == "!")
		return "2"
	return "0"

//parses the message mode code (e.g. :h, :w) from text, such as that supplied to say.
//returns the message mode string or null for no message mode.
//standard mode is the mode returned for the special ';' radio code.
/mob/proc/parse_message_mode(var/message, var/standard_mode="headset")
	if(length(message) >= 1 && copytext(message,1,2) == ";")
		return standard_mode
	if(length(message) >= 2)
		var/channel_prefix = copytext(message, 1 ,3)
		return department_radio_keys[channel_prefix]
	return null

//parses the language code (e.g. :j) from text, such as that supplied to say.
//returns the language object only if the code corresponds to a language that src can speak, otherwise null.
// Parse a leading language key (e.g. ":j") out of a say message.
// "!" denotes an audible emote and maps to the "Noise" pseudo-language.
// A recognised language prefix yields that language if src can speak it,
// gibberish otherwise; anything else yields null.
/mob/proc/parse_language(var/message)
	var/first_char = copytext(message, 1, 2)
	// Audible emote marker takes priority over language keys.
	if(length(message) >= 1 && first_char == "!")
		return GLOB.all_languages["Noise"]
	// Need at least a prefix character plus a key character.
	if(length(message) < 2 || !is_language_prefix(first_char))
		return null
	var/key_char = lowertext(copytext(message, 2, 3))
	var/datum/language/found = GLOB.language_keys[key_char]
	if(!can_speak(found))
		// Valid key, but src can't speak it: degrade to gibberish.
		return GLOB.all_languages[LANGUAGE_GIBBERISH]
	return found