-
- {shouldShowMenu ? (
-
-
- {/* Once we moved the sidebar to be position:fixed, we still
+
+
+
+ {shouldShowMenu ? (
+
+
+ {/* Once we moved the sidebar to be position:fixed, we still
needed something to take up its space in the CSS grid.
This should be fixable via CSS, but sigh, it's 3 days before the event */}
-
-
- ) : (
-
+
+
+
+
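The comment in the hunk above describes needing an element to hold the sidebar's place in the CSS grid once the sidebar became position:fixed. A hedged sketch of that kind of placeholder (the component and class names are hypothetical, not from this diff):

```tsx
import React from 'react'

// Sketch only: a position:fixed sidebar leaves normal flow, so an empty
// element keeps its grid column from collapsing. The 'sidebar-spacer'
// class (hypothetical) would be sized to match the fixed sidebar.
export const SidebarSpacer = () => (
  <div className='sidebar-spacer' aria-hidden='true' />
)
```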
diff --git a/src/audioAnalysis.ts b/src/audioAnalysis.ts
deleted file mode 100644
index 6a4eb567..00000000
--- a/src/audioAnalysis.ts
+++ /dev/null
@@ -1,84 +0,0 @@
-import { Dispatch } from 'react'
-import {
- Action,
- MediaReceivedSpeakingDataAction
-} from './Actions'
-
-/* "Who's speaking" implementation notes
- * In another iteration of videochat, all audio streams got run through this audio analyzer code
- * Such that, when someone made a bunch of noise, their WebRTC PeerID got dispatched and set on an array of
- * IDs of "people who are speaking", which gets passed into MediaChatView.
- *
- * What currently exists:
- * - this analysis code is currently not referenced, but probably needs tweaking to handle Twilio IDs
- * - MediaChatView still gets a list of 'speaking' peers, but doesn't do anything with that data.
- *
- * I suspect we want to throw this out, and each individual speaker view should be responsible for detecting
- * whether it's speaking and updating the UI appropriately.
- * I also believe Twilio might give us some of this data automatically if we ask for it.
- *
- * I'm temporarily leaving all this code in place in case we want it.
- * But if you implement a different system and don't touch any of this, feel free to completely remove
- * this file, MediaReceivedSpeakingDataAction, state.speakingPeerIds, and everything that falls out from that.
-*/
-
-/*
-const peerAnalysers: [string, AnalyserNode][] = []
-
-function setUpAnalyser (stream: MediaStream): AnalyserNode {
- const audioCtx = new (window.AudioContext ||
- (window as any).webkitAudioContext)()
- const source = audioCtx.createMediaStreamSource(stream)
- const analyser = audioCtx.createAnalyser()
- analyser.minDecibels = -90
- analyser.maxDecibels = -10
- analyser.smoothingTimeConstant = 0.85
-
- source.connect(analyser)
-
- return analyser
-}
-
-let shouldStopAnalysing = false
-function startAnalyserLoop (dispatch: Dispatch<Action>) {
- console.log('Starting analyser loop')
-
- const average = (ns: Uint8Array) => {
- let sum = 0
- for (let i = 0; i < ns.length; i++) {
- sum += ns[i]
- }
- return sum / ns.length
- }
-
- const analyse = () => {
- const list: string[] = []
-
- if (shouldStopAnalysing) {
- shouldStopAnalysing = false
- return
- }
-
- peerAnalysers.forEach(([id, a]) => {
- a.fftSize = 2048
- const bufferLength = a.fftSize
- const byteFrequencyDataArray = new Uint8Array(bufferLength)
-
- a.getByteFrequencyData(byteFrequencyDataArray)
-
- if (average(byteFrequencyDataArray) > 1) {
- list.push(id)
- }
- })
-
- dispatch(MediaReceivedSpeakingDataAction(list))
-
- window.requestAnimationFrame(analyse)
- }
- window.requestAnimationFrame(analyse)
-}
-
-export function stopAudioAnalyserLoop () {
- shouldStopAnalysing = true
-}
-*/
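The notes above suspect Twilio can supply speaking data directly; that is the dominant-speaker API in twilio-video 2.x. A hedged sketch of what replacing the analyser loop with it might look like (room/token plumbing omitted):

```ts
import { connect, RemoteParticipant } from 'twilio-video'

// Sketch: ask Twilio to compute the dominant speaker instead of running
// our own AnalyserNode loop over every stream.
async function joinWithSpeakerDetection (token: string, roomName: string) {
  const room = await connect(token, { name: roomName, dominantSpeaker: true })
  room.on('dominantSpeakerChanged', (participant: RemoteParticipant | null) => {
    // participant is null when no one is dominant; otherwise its identity
    // could be dispatched much like MediaReceivedSpeakingDataAction was.
    console.log('Now speaking:', participant?.identity ?? 'nobody')
  })
  return room
}
```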
diff --git a/src/components/LocalMediaView.tsx b/src/components/LocalMediaView.tsx
deleted file mode 100644
index fbc915c1..00000000
--- a/src/components/LocalMediaView.tsx
+++ /dev/null
@@ -1,82 +0,0 @@
-import React, { useState, useContext } from 'react'
-import { FaCog, FaVolumeUp, FaVolumeMute, FaVideo, FaVideoSlash, FaUser } from 'react-icons/fa'
-
-import { DispatchContext } from '../App'
-import { ShowModalAction } from '../Actions'
-import { Modal } from '../modals'
-import { useMediaChatContext } from '../videochat/mediaChatContext'
-
-interface Props {
- speaking: boolean
- hideUI?: boolean
-}
-
-export default function LocalMediaView (props: Props) {
- const dispatch = useContext(DispatchContext)
- const { localStreamView, setMicEnabled, setCameraEnabled, cameraEnabled, micEnabled } = useMediaChatContext()
-
- const onChangeVideo = (e) => {
- setCameraEnabled(!cameraEnabled)
- }
-
- const onChangeAudio = (e) => {
- setMicEnabled(!micEnabled)
- }
-
- const showMediaSelector = () => {
- dispatch(ShowModalAction(Modal.MediaSelector))
- }
-
- if (!localStreamView) {
- return null
- }
-
- return (
-
- )
-}
diff --git a/src/components/MediaChatButtonView.tsx b/src/components/MediaChatButtonView.tsx
deleted file mode 100644
index 913b87b7..00000000
--- a/src/components/MediaChatButtonView.tsx
+++ /dev/null
@@ -1,185 +0,0 @@
-import React from 'react'
-import { FaCog } from 'react-icons/fa'
-import { SetAudioOnlyModeAction, SetTextOnlyModeAction, ShowModalAction, ShowModalWithOptionsAction, StopVideoChatAction } from '../Actions'
-import { DispatchContext } from '../App'
-import { Modal } from '../modals'
-import { useMediaChatContext } from '../videochat/mediaChatContext'
-
-interface Props {
- inMediaChat: boolean
- textOnlyMode: boolean
- offscreenCount: number
- totalCount: number
- audioOnlyMode: boolean
- canJoinVideoChat: boolean
-}
-const MediaChatButtonView = (props: Props) => {
- const {
- currentMic,
- currentCamera,
- publishingCamera,
- publishingMic,
- inCall,
- publishMedia,
- publishAudio,
- unpublishMedia
- } = useMediaChatContext()
-
- const dispatch = React.useContext(DispatchContext)
-
- const leaveVideoChat = () => {
- dispatch(StopVideoChatAction())
- unpublishMedia()
- }
-
- const joinVideoChat = async () => {
- if (!props.canJoinVideoChat) return
- if (currentMic || currentCamera) {
- publishMedia()
- } else {
- dispatch(ShowModalAction(Modal.MediaSelector))
- }
- }
-
- const showMediaSelector = () => {
- dispatch(ShowModalAction(Modal.MediaSelector))
- }
-
- const joinAudioChat = async () => {
- if (!props.canJoinVideoChat) return
- if (currentMic) {
- publishAudio()
- } else {
- dispatch(
- ShowModalWithOptionsAction(Modal.MediaSelector, { hideVideo: true })
- )
- }
- }
-
- const enableTextOnlyMode = () => {
- const prompt = confirm('This will disable all audio/video aspects of this space other than the ' +
- 'stream in the theater. You will no longer be able to see or hear other participants, but you can still ' +
- 'interact via text chat.\n\nSwitching modes will refresh your page - please be patient while it reloads.'
- )
- if (prompt) {
- dispatch(SetTextOnlyModeAction(true, true))
- }
- }
-
- const disableTextOnlyMode = () => {
- const prompt = confirm('Re-enabling video and audio mode means that you will be able to see and hear video and audio from ' +
- 'other participants. Your camera and microphone will default to off when you switch modes.\n\nSwitching modes will ' +
- 'refresh your page - please be patient while it reloads.'
- )
- if (prompt) {
- dispatch(SetTextOnlyModeAction(false, true))
- }
- }
-
- const toggleAudioOnlyMode = () => {
- let text
- if (props.audioOnlyMode) {
- text =
- "This will show you others' webcam feeds. " +
- 'This will not affect whether or not you are sending your own webcam feed to others.'
- } else {
- text = 'This will hide all video feeds from other attendees. ' +
- "You will still be able to hear them, but you won't see them. This may improve performance if things are slow.\n\n" +
- 'Note that you will still broadcast your webcam feed to others if you enable it, and you will ' +
- 'still be able to see the talks broadcast in the Theater.'
- }
- const prompt = confirm(text)
- if (prompt) {
- dispatch(SetAudioOnlyModeAction(!props.audioOnlyMode))
- }
- }
-
- let chatButtons
- if (props.inMediaChat) {
- let leaveButtonLabel = ''
- if (publishingCamera && publishingMic) {
- leaveButtonLabel = 'Turn off Webcam and Mic'
- } else if (publishingCamera) {
- // This case shouldn't ever exist with the current UI
- leaveButtonLabel = 'Turn off Webcam'
- } else if (publishingMic) {
- leaveButtonLabel = 'Turn off Mic'
- }
- chatButtons = (
- <>
-
- {leaveButtonLabel}
-
-
-
-
- </>
- )
- } else if (props.textOnlyMode) {
- chatButtons = [
-
- Re-Enable Audio/Video
-
- ]
- } else if (props.canJoinVideoChat) {
- chatButtons = [
-
- {inCall ? 'Turn on Webcam + Mic' : 'Turn on Webcam + Mic'}
- ,
-
- {inCall ? 'Turn on Mic' : 'Turn on Mic'}
- ,
-
-
-
- ]
- }
-
- let offscreenLabel
- if (props.totalCount > 0) {
- offscreenLabel = (
-
- {props.totalCount} {props.totalCount === 1 ? 'person is' : 'people are'} in the call{publishingCamera || publishingMic ? ', not including you' : ''}.
-
- )
- }
-
- return (
-
- {offscreenLabel}
- {chatButtons}
-
-
- {props.audioOnlyMode ? 'Show' : 'Hide'} all video feeds
-
-
- Mute all audio and video
-
-
-
- )
-}
-
-export default MediaChatButtonView
diff --git a/src/components/MediaChatView.tsx b/src/components/MediaChatView.tsx
deleted file mode 100644
index 5ddaa18d..00000000
--- a/src/components/MediaChatView.tsx
+++ /dev/null
@@ -1,161 +0,0 @@
-import React, { useEffect, VideoHTMLAttributes, useRef, useState } from 'react'
-import LocalMediaView from './LocalMediaView'
-
-import '../../style/videoChat.css'
-import { useMediaChatContext } from '../videochat/mediaChatContext'
-import ParticipantChatView from './ParticipantChatView'
-import MediaChatButtonView from './MediaChatButtonView'
-import { SetTextOnlyModeAction } from '../Actions'
-import { DispatchContext } from '../App'
-import { MinimalUser } from '../../server/src/user'
-
-interface MediaProps {
- visibleSpeakers: [string, Date][]
- currentSpeaker: string
- numberOfFaces: number
- inMediaChat: boolean
- textOnlyMode: boolean
- audioOnlyMode: boolean
- currentUser: MinimalUser
-}
-
-export default function MediaChatView (props: MediaProps) {
- const { publishingCamera, callParticipants, inCall, joinCallFailed } = useMediaChatContext()
- const dispatch = React.useContext(DispatchContext)
-
- // TODO: props.visibleSpeakers should never be undefined, but it is?!
- const visibleSpeakers = (props.visibleSpeakers || []).map(x => x[0])
-
- console.log('Re-rendering media chat view?')
-
- if (!inCall) {
- if (joinCallFailed) {
- return (
-
-
- Could not connect to audio/video! Rooms are max 50 chatters - if you
- want to use audio/video, try moving to another room. Otherwise, it
- may be a network issue.
-
-
- )
- } else if (props.textOnlyMode) {
- const disableTextMode = () => {
- const prompt = confirm('Entering video/audio mode means that you will be able to see and hear video and audio from ' +
- 'other participants. Your camera and microphone will default to off when you switch modes. Switching modes will ' +
- 'refresh your page - please be patient while it reloads.'
- )
- if (prompt) {
- dispatch(SetTextOnlyModeAction(false, true))
- }
- }
-
- return (
-
- There may be a voice/video call happening here that you can't see.
- Disable Text-Only Mode.
-
- )
- } else {
- return (
-
- Attempting to connect to room.
-
- )
- }
- }
-
- // TODO: Is this a meaningful fail state? What causes this?
- if (!callParticipants) {
- return null
- }
-
- let playerVideo
- if (publishingCamera) {
- playerVideo =
- }
-
- const participants = Array.from(callParticipants.values())
- .filter(p => p.audioTracks.size + p.videoTracks.size > 0)
-
- const videoParticipantIds = participants
- .filter((p) => visibleSpeakers.includes(p.identity))
-
- const audioParticipantIds = participants
- .filter((p) => !visibleSpeakers.includes(p.identity))
-
- console.log('[NUM_FACES]: ', props.numberOfFaces)
- // We might have folks on video, but no "dominantSpeaker" notifications generated yet.
- // We should still flesh out the wall o' faces in that situation.
- // NOTE: This will result in video folks being added in whatever order they appear in the Array-ified callParticipants.
- // I don't know what that ordering is, or if it's deterministic.
- while (videoParticipantIds.length < props.numberOfFaces && audioParticipantIds.length > 0) {
- videoParticipantIds.push(audioParticipantIds.shift())
- }
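The NOTE above is right that `Array.from(callParticipants.values())` follows Map insertion order, which here depends on join order. A hedged sketch (not part of this diff) of one way to make the fill order deterministic:

```ts
import { Participant } from 'twilio-video'

// Sketch: sorting by identity makes the "wall o' faces" fill in a stable
// order across re-renders and across clients, regardless of join order.
function orderedParticipants (callParticipants: Map<string, Participant>): Participant[] {
  return Array.from(callParticipants.values())
    .sort((a, b) => a.identity.localeCompare(b.identity))
}
```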
-
- const videoParticipants = videoParticipantIds.map((p) => {
- return (
-
- )
- })
-
- const audioParticipants = audioParticipantIds
- .map((p) => {
-
- })
-
- // TODO: This will eventually need to check for speakers as well
- // It's unclear to me if this logic should live here
- // (vs inside MediaChatButtonView, or in the data model proper)
- const canJoinVideoChat = props.currentUser.isMod
-
- // If we're showing the bar, we don't override the height; if we're hiding we force it to 0.
- // We still want it to render the audioParticipants, so that's why we still paint it.
- // TODO: this is jank
- const customStyle = { height: playerVideo || (!props.audioOnlyMode && videoParticipants.length > 0) ? undefined : '0px' }
- return (
-
diff --git a/src/components/SettingsView.tsx b/src/components/SettingsView.tsx
--- a/src/components/SettingsView.tsx
+++ b/src/components/SettingsView.tsx
@@ -186,30 +175,8 @@ export default function SettingsView (props: Props) {
This will automatically transcribe spoken audio as messages in the
text chat.
-
- )
-}
diff --git a/src/components/WelcomeModalView.tsx b/src/components/WelcomeModalView.tsx
index 062cc7cc..294f8a73 100644
--- a/src/components/WelcomeModalView.tsx
+++ b/src/components/WelcomeModalView.tsx
@@ -12,10 +12,6 @@ export default function WelcomeModalView (props: {}) {
You can move from virtual room to virtual room to talk to different
people.
-
- You may see or hear others on videochat, but they can't see or hear you
- unless you explicitly turn on your audio or video.
-
Our hope is to facilitate smaller group conversations, to capture the feel of an in-person conference, and to give
you a fun and playful space to explore with your fellow attendees.
diff --git a/src/message/enums.ts b/src/message/enums.ts
index 11890505..0cfc02dc 100644
--- a/src/message/enums.ts
+++ b/src/message/enums.ts
@@ -13,5 +13,4 @@ export enum MessageType {
Mod = 'MOD',
Error = 'ERROR',
Command = 'COMMAND',
- Caption = 'CAPTION'
}
diff --git a/src/message/index.ts b/src/message/index.ts
index 6a2d52a7..45b1ee74 100644
--- a/src/message/index.ts
+++ b/src/message/index.ts
@@ -1,6 +1,5 @@
export { MessageType } from './enums'
export {
- CaptionMessage,
ChatMessage,
CommandMessage,
ConnectedMessage,
@@ -18,7 +17,6 @@ export {
WhisperMessage
} from './types'
export {
- createCaptionMessage,
createChatMessage,
createCommandMessage,
createConnectedMessage,
@@ -33,7 +31,6 @@ export {
createSameRoomMessage,
createShoutMessage,
createWhisperMessage,
- isCaptionMessage,
isDeletableMessage,
isMovementMessage
} from './utils'
diff --git a/src/message/types.ts b/src/message/types.ts
index bd38185a..a073e942 100644
--- a/src/message/types.ts
+++ b/src/message/types.ts
@@ -15,7 +15,6 @@ export type Message =
| ModMessage
| ErrorMessage
| CommandMessage
- | CaptionMessage;
export interface BaseMessage<T extends MessageType> {
type: T;
@@ -71,11 +70,6 @@ export interface ChatMessage extends BaseMessage {
message: string;
}
-export interface CaptionMessage extends BaseMessage<MessageType.Caption> {
- userId: string;
- message: string;
-}
-
export interface WhisperMessage extends BaseMessage<MessageType.Whisper> {
userId: string;
message: string;
diff --git a/src/message/utils.ts b/src/message/utils.ts
index 4c2ec62f..721c3406 100644
--- a/src/message/utils.ts
+++ b/src/message/utils.ts
@@ -3,7 +3,6 @@ import { v4 as uuid } from 'uuid'
import { MessageType } from './enums'
import {
BaseMessage,
- CaptionMessage,
ChatMessage,
CommandMessage,
ConnectedMessage,
@@ -35,8 +34,7 @@ const deletableMessageTypes = [
MessageType.Chat,
MessageType.Emote,
MessageType.Shout,
- MessageType.Dance,
- MessageType.Caption
+ MessageType.Dance
]
export const isDeletableMessage = (
@@ -61,9 +59,6 @@ export const isMovementMessage = (
message: Message
): message is AnyMovementMessage => movementMessageTypes.includes(message.type)
-export const isCaptionMessage = (message: Message): message is CaptionMessage =>
- message.type === MessageType.Caption
-
/**
* message creators
*/
@@ -150,16 +145,6 @@ export const createChatMessage = (
message
})
-export const createCaptionMessage = (
- id: string,
- userId: string,
- message: string
-): CaptionMessage => ({
- ...createBaseMessage(MessageType.Caption, id),
- userId,
- message
-})
-
export const createWhisperMessage = (
userId: string,
message: string,
diff --git a/src/modals.ts b/src/modals.ts
index f7ded995..703c1b9e 100644
--- a/src/modals.ts
+++ b/src/modals.ts
@@ -4,7 +4,6 @@ export enum Modal {
ProfileEdit,
NoteWall,
Settings,
- MediaSelector,
CodeOfConduct,
Schedule,
Help,
diff --git a/src/networking.ts b/src/networking.ts
index 50a4bf4d..ed3e4996 100644
--- a/src/networking.ts
+++ b/src/networking.ts
@@ -14,7 +14,6 @@ import {
import { User } from '../server/src/user'
import {
Action,
- CaptionMessageAction,
ChatMessageAction,
CommandMessageAction,
ConnectAction,
@@ -279,30 +278,6 @@ export async function sendChatMessage (id: string, text: string) {
}
}
-export async function sendCaption (id: string, text: string) {
- // TODO: This may or may not be problematic
- if (text.length > MESSAGE_MAX_LENGTH) {
- console.log(
- `Sorry, can't send messages over ${MESSAGE_MAX_LENGTH} characters!`
- )
- return
- }
-
- const result: RoomResponse | Error | any = await callAzureFunction(
- 'sendCaption',
- {
- id: id,
- text: text
- }
- )
-
- console.log(result)
-
- if (result && result.error) {
- myDispatch(ErrorAction(result.error))
- }
-}
-
export async function fetchProfile (userId: string) {
const result = await callAzureFunction('fetchProfile', { userId })
if (result.error) {
@@ -420,10 +395,6 @@ export async function getAllRooms (): Promise<{ [roomId: string]: Room }> {
// "Real" HTTP Functions
// used just as an HTTP request, do not refactor to WS
// ---------------------------------------------------------------
-export async function fetchTwilioToken () {
- return await callAzureFunction('twilioToken')
-}
-
export async function fetchCognitiveServicesKey () {
return await callAzureFunction('cognitiveServicesKey')
}
@@ -486,13 +457,6 @@ function generateEventMapping (userId: string, dispatch: Dispatch) {
dispatch(ChatMessageAction(messageId, otherId, message))
},
- caption: (messageId, otherId, message) => {
- console.log('Received caption', otherId, message)
- console.log(otherId, message, userId)
- if (otherId === userId) return
-
- dispatch(CaptionMessageAction(messageId, otherId, message))
- },
mods: (otherId, message) => {
dispatch(ModMessageAction(otherId, message))
},
diff --git a/src/reducer.ts b/src/reducer.ts
index 4e158758..6db36091 100644
--- a/src/reducer.ts
+++ b/src/reducer.ts
@@ -9,7 +9,6 @@ import { Action, ActionType } from './Actions'
import Config from './config'
import { Deferred } from './Deferred'
import {
- createCaptionMessage,
createChatMessage,
createCommandMessage,
createConnectedMessage,
@@ -24,7 +23,6 @@ import {
createSameRoomMessage,
createShoutMessage,
createWhisperMessage,
- isCaptionMessage,
isDeletableMessage,
isMovementMessage,
Message,
@@ -34,7 +32,6 @@ import { Modal } from './modals'
import {
connect,
fetchProfile,
- sendCaption,
sendChatMessage,
toggleUserBan,
toggleUserMod,
@@ -83,22 +80,6 @@ export interface State {
// Settings data
useSimpleNames?: boolean;
- /** This is poorly named, but being "in media chat" means "is publishing audio and/or video" */
- inMediaChat: boolean;
- keepCameraWhenMoving?: boolean;
- captionsEnabled: boolean;
-
- /** text-only mode functionally overrides audio-only mode, since we don't even connect to Twilio */
- textOnlyMode?: boolean;
- audioOnlyMode?: boolean;
-
- /** Tuples of userId and when they were last the visible speaker */
- visibleSpeakers: [string, Date][];
- currentSpeaker?: string;
-
- // How many people (other than you) to show in media chat
- numberOfFaces: number;
-
// If this is set to something other than Modal.None, that will indicate
// which modal view should be rendered on top of the chat view
activeModal: Modal;
@@ -139,18 +120,14 @@ export const defaultState: State = {
ids: []
},
whispers: [],
- visibleSpeakers: [],
autoscrollChat: true,
userMap: {},
roomData: {},
presenceData: {},
- inMediaChat: false,
activeModal: Modal.None,
activeModalOptions: {},
isBanned: false,
serverSettings: DEFAULT_SERVER_SETTINGS,
- numberOfFaces: 5,
- captionsEnabled: false,
hasDismissedAModal: false,
unlockableBadges: [],
obeliskNotes: []
@@ -313,17 +290,6 @@ export default produce((draft: State, action: Action) => {
)
}
- if (action.type === ActionType.CaptionMessage) {
- addMessage(
- draft,
- createCaptionMessage(
- action.value.messageId,
- action.value.name,
- action.value.message
- )
- )
- }
-
if (action.type === ActionType.Whisper) {
const whisperMessage = createWhisperMessage(
action.value.name,
@@ -460,43 +426,6 @@ export default produce((draft: State, action: Action) => {
addMessage(draft, createErrorMessage(action.value))
}
- if (action.type === ActionType.MediaReceivedSpeakingData) {
- draft.currentSpeaker = action.value
- if (action.value !== null && action.value !== draft.userId) {
- if (
- !draft.visibleSpeakers.find(([userId, _]) => userId === action.value)
- ) {
- if (draft.visibleSpeakers.length < draft.numberOfFaces) {
- draft.visibleSpeakers.push([action.value, new Date()])
- } else {
- // Find the oldest speaker and replace them
- let oldestIndex = -1
- let oldestTime = new Date()
- for (let i = 0; i < draft.visibleSpeakers.length; i++) {
- if (draft.visibleSpeakers[i][1] < oldestTime) {
- oldestTime = draft.visibleSpeakers[i][1]
- oldestIndex = i
- }
- }
- draft.visibleSpeakers[oldestIndex] = [action.value, new Date()]
- }
- }
- }
- }
-
- if (action.type === ActionType.SetNumberOfFaces) {
- draft.numberOfFaces = action.value
- }
-
- if (action.type === ActionType.StartVideoChat) {
- draft.inMediaChat = true
- }
-
- if (action.type === ActionType.StopVideoChat) {
- // stopAudioAnalyserLoop()
- draft.inMediaChat = false
- }
-
// UI Actions
if (action.type === ActionType.SendMessage) {
const messageId: string = uuidv4()
@@ -574,15 +503,6 @@ export default produce((draft: State, action: Action) => {
}
}
- if (action.type === ActionType.SendCaption) {
- const messageId: string = uuidv4()
- sendCaption(messageId, action.value)
- addMessage(
- draft,
- createCaptionMessage(messageId, draft.userId, action.value)
- )
- }
-
if (action.type === ActionType.StartWhisper) {
draft.prepopulatedInput = `/whisper ${action.value} `
}
@@ -632,36 +552,6 @@ export default produce((draft: State, action: Action) => {
Storage.setUseSimpleNames(action.value)
}
- if (action.type === ActionType.SetKeepCameraWhenMoving) {
- draft.keepCameraWhenMoving = action.value
- Storage.setKeepCameraWhenMoving(action.value)
- }
-
- if (action.type === ActionType.SetCaptionsEnabled) {
- draft.captionsEnabled = action.value
-
- if (original(draft).captionsEnabled !== current(draft).captionsEnabled) {
- draft.messages.ids = filteredMessageIds(draft)
- }
-
- Storage.setCaptionsEnabled(action.value)
- }
-
- if (action.type === ActionType.SetTextOnlyMode) {
- draft.textOnlyMode = action.textOnlyMode
- if (!action.refresh) {
- Storage.setTextOnlyMode(action.textOnlyMode)
- } else {
- Storage.setTextOnlyMode(action.textOnlyMode).then(() =>
- window.location.reload()
- )
- }
- }
-
- if (action.type === ActionType.SetAudioOnlyMode) {
- draft.audioOnlyMode = action.value
- }
-
if (action.type === ActionType.Authenticate) {
draft.checkedAuthentication = true
@@ -878,8 +768,7 @@ const shouldShowMessage = (
serverSettings: {
movementMessagesHideRoomIds,
movementMessagesHideThreshold
- },
- captionsEnabled
+ }
}: State,
message: Message
): boolean =>
@@ -891,8 +780,7 @@ const shouldShowMessage = (
(isHiddenRoom(movementMessagesHideRoomIds, message.roomId) ||
isBusyRoom(movementMessagesHideThreshold, message.numUsersInRoom))
)
- ) || // or it's a caption message and captions are not enabled
- (isCaptionMessage(message) && !captionsEnabled)
+ )
const filteredMessageIds = (state: State) =>
Object.entries(state.messages.entities).reduce((acc, [id, message]) => {
diff --git a/src/room.ts b/src/room.ts
index 4497b18a..cea1f198 100644
--- a/src/room.ts
+++ b/src/room.ts
@@ -12,7 +12,6 @@ export interface Room {
id: string;
description: string;
users?: string[];
- videoUsers?: string[];
mediaChat?: boolean;
hidden?: boolean;
hasNoteWall?: boolean;
diff --git a/src/speechRecognizer.ts b/src/speechRecognizer.ts
deleted file mode 100644
index 06a28daa..00000000
--- a/src/speechRecognizer.ts
+++ /dev/null
@@ -1,70 +0,0 @@
-import * as sdk from 'microsoft-cognitiveservices-speech-sdk'
-
-import {
- ResultReason,
- CancellationReason
-} from 'microsoft-cognitiveservices-speech-sdk'
-import { Dispatch } from 'react'
-import { Action, SendCaptionAction } from './Actions'
-import { fetchCognitiveServicesKey } from './networking'
-
-let recognizer: sdk.SpeechRecognizer | undefined
-let speechConfig: sdk.SpeechConfig | undefined
-
-export async function setUpSpeechRecognizer (
- deviceId: string,
- dispatch: Dispatch<Action>
-): Promise<void> {
- console.log('Starting recognition')
- if (recognizer) {
- await recognizer.stopContinuousRecognitionAsync()
- }
-
- if (!speechConfig) {
- const { cognitiveServicesKey, cognitiveServicesRegion } = await fetchCognitiveServicesKey()
- speechConfig = sdk.SpeechConfig.fromSubscription(
- cognitiveServicesKey,
- cognitiveServicesRegion
- )
- }
-
- const audioConfig = sdk.AudioConfig.fromMicrophoneInput(deviceId)
- recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig)
-
- recognizer.recognizing = (s, e) => {
- console.log('RECOGNIZING: ', e.result.text)
- }
- recognizer.recognized = (s, e) => {
- if (e.result.reason === ResultReason.RecognizedSpeech) {
- console.log(`RECOGNIZED: Text=${e.result.text}`)
- if (e.result.text !== '') {
- dispatch(SendCaptionAction(e.result.text))
- }
- } else if (e.result.reason === ResultReason.NoMatch) {
- console.log('NOMATCH: Speech could not be recognized.')
- }
- }
-
- recognizer.canceled = (s, e) => {
- console.log(`CANCELED: Reason=${e.reason}`)
-
- if (e.reason === CancellationReason.Error) {
- console.log(`CANCELED: ErrorCode=${e.errorCode}`)
- console.log(`CANCELED: ErrorDetails=${e.errorDetails}`)
- console.log('CANCELED: Did you update the subscription info?')
- }
-
- recognizer.stopContinuousRecognitionAsync()
- }
-
- recognizer.sessionStopped = (s, e) => {
- console.log('\n Session stopped event.')
- recognizer.stopContinuousRecognitionAsync()
- }
-
- recognizer.startContinuousRecognitionAsync()
-}
-
-export async function stopSpeechRecognizer () {
- await recognizer.stopContinuousRecognitionAsync()
-}
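For reference, a hedged sketch of how the recognizer above was wired up; `enableCaptions` and `disableCaptions` are illustrative names, not functions from this codebase:

```ts
import { Dispatch } from 'react'
import { Action } from './Actions'
import { setUpSpeechRecognizer, stopSpeechRecognizer } from './speechRecognizer'

// Sketch: start transcribing the selected mic; recognized text flows back
// through dispatch as SendCaptionAction (see recognizer.recognized above).
async function enableCaptions (micDeviceId: string, dispatch: Dispatch<Action>) {
  await setUpSpeechRecognizer(micDeviceId, dispatch)
}

// ...and tear it down when captions are toggled off:
async function disableCaptions () {
  await stopSpeechRecognizer()
}
```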
diff --git a/src/storage.ts b/src/storage.ts
index ea6f67b2..3ee5c99f 100644
--- a/src/storage.ts
+++ b/src/storage.ts
@@ -69,34 +69,6 @@ export async function getUseSimpleNames (): Promise<boolean> {
return await localforage.getItem(useSimpleNamesKey) || false
}
-// Video chat settings
-export async function setKeepCameraWhenMoving (keepCameraWhenMoving: boolean) {
- await localforage.setItem(keepCameraWhenMovingKey, keepCameraWhenMoving)
-}
-
-export async function getKeepCameraWhenMoving (): Promise<boolean> {
- const keepCameraWhenMoving: boolean = await localforage.getItem(keepCameraWhenMovingKey)
- return keepCameraWhenMoving === null ? false : keepCameraWhenMoving
-}
-
-export async function setTextOnlyMode (textOnlyMode: boolean) {
- await localforage.setItem(textOnlyModeKey, textOnlyMode)
-}
-
-export async function getTextOnlyMode (): Promise<boolean> {
- const textOnlyMode: boolean = await localforage.getItem(textOnlyModeKey)
- return textOnlyMode == null ? false : textOnlyMode
-}
-
-export async function setCaptionsEnabled (enabled: boolean) {
- await localforage.setItem(captionsEnabledKey, enabled)
-}
-
-export async function getCaptionsEnabled (): Promise<boolean> {
- const captionsEnabled: boolean = await localforage.getItem(captionsEnabledKey)
- return captionsEnabled == null ? false : captionsEnabled
-}
-
// Keys
const messagesKey = 'messages'
@@ -106,6 +78,3 @@ const rainbowGateKey = 'FeatureRainbowGateVisited'
const wasColoredEnteringKey = 'WasColoredEntering'
const themeKey = 'UserSelectedTheme'
const useSimpleNamesKey = 'UseSimpleNames'
-const keepCameraWhenMovingKey = 'KeepCameraWhenMoving'
-const textOnlyModeKey = 'TextOnlyMode'
-const captionsEnabledKey = 'CaptionsEnabled'
diff --git a/src/videochat/mediaChatContext.tsx b/src/videochat/mediaChatContext.tsx
deleted file mode 100644
index eb6f43a0..00000000
--- a/src/videochat/mediaChatContext.tsx
+++ /dev/null
@@ -1,94 +0,0 @@
-import React, { useContext } from 'react'
-import { nie } from '../utils'
-import * as Twilio from 'twilio-video'
-
-export const MediaChatContext = React.createContext<MediaChatContextProps>({
- prepareForMediaChat: async () => console.log('Not implemented'),
- prepareMediaDevices: async () => console.log('Not implemented'),
-
- publishMedia: () => console.log('Not implemented'),
- publishAudio: () => console.log('Not implemented'),
- unpublishMedia: () => console.log('Not implemented'),
-
- publishingCamera: false,
- publishingMic: false,
-
- cameras: [],
- mics: [],
-
- setCurrentCamera: nie,
- setCurrentMic: nie,
-
- currentMic: undefined,
- currentCamera: undefined,
-
- localStreamView: React.createElement('div'),
-
- inCall: false,
- joinCallFailed: false,
- joinCall: nie,
- leaveCall: () => console.log('Not implemented'),
-
- callParticipants: undefined,
-
- cameraEnabled: false,
- micEnabled: false,
-
- setCameraEnabled: (enabled: boolean) => console.log('Not implemented'),
- setMicEnabled: (enabled: boolean) => console.log('Not implemented')
-})
-
-type MediaChatContextProps = {
- // Request media permissions, also do any token handshaking needed
- prepareForMediaChat: () => Promise<void>
- prepareMediaDevices: () => Promise<void>
-
- publishingCamera: boolean
- publishingMic: boolean
-
- cameras: DeviceInfo[]
- mics: DeviceInfo[]
-
- currentCamera?: DeviceInfo
- currentMic?: DeviceInfo
-
- setCurrentCamera: (deviceId: string) => void
- setCurrentMic: (deviceId: string) => void
-
- localStreamView: React.ReactNode
-
- inCall: boolean
- joinCallFailed: boolean // true if joinCall has thrown an exception
- joinCall: (room: string, keepCameraWhenMoving: boolean) => void
- leaveCall: () => void
- // The Twilio implementation has an object referencing the active call.
- // Does it conceptually make sense to add one to the public interface?
-
- // Hide or show your camera/mic across the network
- // Note that the Twilio implementation has publishVideo() as well,
- // but we don't currently reference it externally.
- publishMedia: () => void
- publishAudio: () => void
- unpublishMedia: () => void
-
- callParticipants?: Map<string, Twilio.Participant>
- cameraEnabled: boolean
- micEnabled: boolean
-
- setCameraEnabled: (enabled: boolean) => void
- setMicEnabled: (enabled: boolean) => void
-}
-
-export type Participant = {
- userId: string
- muted: boolean
- streamView: React.ReactNode
- shouldShow: boolean
-}
-
-export type DeviceInfo = {
- id: string
- name: string
-}
-
-export const useMediaChatContext = () => useContext(MediaChatContext)
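A hedged sketch of a typical consumer of this context; `MuteButton` is a hypothetical component, not one removed by this diff:

```tsx
import React from 'react'
import { useMediaChatContext } from './mediaChatContext'

// Sketch: toggle the published mic state via the context's public interface.
const MuteButton = () => {
  const { micEnabled, setMicEnabled } = useMediaChatContext()
  return (
    <button onClick={() => setMicEnabled(!micEnabled)}>
      {micEnabled ? 'Mute' : 'Unmute'}
    </button>
  )
}

export default MuteButton
```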
diff --git a/src/videochat/twilio/AudioTrack.tsx b/src/videochat/twilio/AudioTrack.tsx
deleted file mode 100644
index cb80db59..00000000
--- a/src/videochat/twilio/AudioTrack.tsx
+++ /dev/null
@@ -1,26 +0,0 @@
-import { useEffect, useRef } from 'react'
-import { AudioTrack as IAudioTrack } from 'twilio-video'
-
-interface AudioTrackProps {
- track: IAudioTrack;
-}
-
-// TODO: Figure out what's up with activeSinkId
-
-export default function AudioTrack ({ track }: AudioTrackProps) {
-// const { activeSinkId } = useAppState()
- const audioEl = useRef<HTMLAudioElement>()
-
- useEffect(() => {
- audioEl.current = track.attach()
- audioEl.current.setAttribute('data-cy-audio-track-name', track.name)
- document.body.appendChild(audioEl.current)
- return () => track.detach().forEach(el => el.remove())
- }, [track])
-
- // useEffect(() => {
- // audioEl.current?.setSinkId?.(activeSinkId)
- // }, [activeSinkId])
-
- return null
-}
diff --git a/src/videochat/twilio/ParticipantTracks.tsx b/src/videochat/twilio/ParticipantTracks.tsx
deleted file mode 100644
index e7d73add..00000000
--- a/src/videochat/twilio/ParticipantTracks.tsx
+++ /dev/null
@@ -1,65 +0,0 @@
-
-import React from 'react'
-import { Participant, Track } from 'twilio-video'
-import Publication from './Publication'
-import usePublications, { TrackPublication } from './usePublications'
-
-interface ParticipantTracksProps {
- participant: Participant;
- // These handle whether the video/audio is run on the client-side only - they don't change the underlying tracks
- displayVideo: boolean;
- displayAudio: boolean;
- videoOnly?: boolean;
- enableScreenShare?: boolean;
- videoPriority?: Track.Priority | null;
- isLocalParticipant?: boolean;
-}
-
-/*
- * The object model for the Room object (found here: https://www.twilio.com/docs/video/migrating-1x-2x#object-model) shows
- * that Participant objects have TrackPublications, and TrackPublication objects have Tracks.
- *
- * The React components in this application follow the same pattern. This ParticipantTracks component renders Publications,
- * and the Publication component renders Tracks.
- */
-
-// TODO: This should handle mute
-
-export default function ParticipantTracks ({
- participant,
- displayVideo,
- displayAudio,
- videoOnly,
- videoPriority,
- isLocalParticipant
-}: ParticipantTracksProps) {
- const publications = usePublications(participant)
- const videoPublications = publications.filter(p => p.kind === 'video')
- const audioPublications = publications.filter(p => p.kind === 'audio')
-
- let finalPublications: TrackPublication[] = []
-
- if (displayVideo && videoPublications.length > 0) {
- finalPublications.push(videoPublications[0])
- }
-
- if (displayAudio) {
- finalPublications = finalPublications.concat(audioPublications)
- }
-
- return (
- <>
- {
- finalPublications.map(publication => {
- return <Publication key={publication.trackSid} publication={publication} isLocalParticipant={isLocalParticipant} videoOnly={videoOnly} videoPriority={videoPriority} />
- })
- }
- </>
- )
-}
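A hedged usage sketch; in this codebase MediaChatView rendered these per call participant, but the exact props here are illustrative:

```tsx
import React from 'react'
import { Participant } from 'twilio-video'
import ParticipantTracks from './ParticipantTracks'

// Sketch: render one participant's first video publication plus all audio,
// with video suppressed client-side in audio-only mode.
function ParticipantCell ({ participant, audioOnlyMode }: { participant: Participant, audioOnlyMode: boolean }) {
  return (
    <ParticipantTracks
      participant={participant}
      displayVideo={!audioOnlyMode}
      displayAudio
    />
  )
}
```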
diff --git a/src/videochat/twilio/Publication.tsx b/src/videochat/twilio/Publication.tsx
deleted file mode 100644
index ce27d76e..00000000
--- a/src/videochat/twilio/Publication.tsx
+++ /dev/null
@@ -1,41 +0,0 @@
-import React from 'react'
-import useTrack from './useTrack'
-import AudioTrack from './AudioTrack'
-import VideoTrack from './VideoTrack'
-
-import {
- AudioTrack as IAudioTrack,
- LocalTrackPublication,
- LocalVideoTrack,
- RemoteTrackPublication,
- RemoteVideoTrack,
- Track
-} from 'twilio-video'
-
-interface PublicationProps {
- publication: LocalTrackPublication | RemoteTrackPublication;
- isLocalParticipant?: boolean;
- videoOnly?: boolean;
- videoPriority?: Track.Priority | null;
-}
-
-export default function Publication ({ publication, isLocalParticipant, videoOnly, videoPriority }: PublicationProps) {
- const track = useTrack(publication)
-
- if (!track) return null
-
- switch (track.kind) {
- case 'video':
- return (
- <VideoTrack track={track as LocalVideoTrack | RemoteVideoTrack} priority={videoPriority} isLocal={isLocalParticipant} />
- )
- case 'audio':
- return videoOnly ? null : <AudioTrack track={track as IAudioTrack} />
- default:
- return null
- }
-}
diff --git a/src/videochat/twilio/VideoTrack.tsx b/src/videochat/twilio/VideoTrack.tsx
deleted file mode 100644
index 04010596..00000000
--- a/src/videochat/twilio/VideoTrack.tsx
+++ /dev/null
@@ -1,44 +0,0 @@
-
-import React, { useRef, useEffect } from 'react'
-import { LocalVideoTrack, RemoteVideoTrack, Track } from 'twilio-video'
-import useMediaStreamTrack from './useMediaStreamTrack'
-import useVideoTrackDimensions from './useVideoTrackDimensions'
-
-interface VideoTrackProps {
- track: LocalVideoTrack | RemoteVideoTrack;
- isLocal?: boolean;
- priority?: Track.Priority | null;
-}
-
-export default function VideoTrack ({ track, isLocal, priority }: VideoTrackProps) {
- const ref = useRef<HTMLVideoElement>(null!)
- const mediaStreamTrack = useMediaStreamTrack(track)
- const dimensions = useVideoTrackDimensions(track)
- const isPortrait = (dimensions?.height ?? 0) > (dimensions?.width ?? 0)
- console.log('Rendering videotrack', track)
- useEffect(() => {
- const el = ref.current
- el.muted = true
- if ((track as RemoteVideoTrack).setPriority && priority) {
- (track as RemoteVideoTrack).setPriority(priority)
- }
- track.attach(el)
- return () => {
- track.detach(el)
- if ((track as RemoteVideoTrack).setPriority && priority) {
- // Passing `null` to setPriority will set the track's priority to that which it was published with.
- (track as RemoteVideoTrack).setPriority(null)
- }
- }
- }, [track, priority, mediaStreamTrack])
-
- // The local video track is mirrored if it is not facing the environment.
- const isFrontFacing = mediaStreamTrack?.getSettings().facingMode !== 'environment'
- const style = {
- transform: isLocal && isFrontFacing ? 'rotateY(180deg)' : '',
- objectFit: isPortrait || track.name.includes('screen') ? ('contain' as const) : ('cover' as const)
- }
-
- // eslint-disable-next-line jsx-a11y/media-has-caption
- return <video ref={ref} style={style} />
-}
diff --git a/src/videochat/twilio/useMediaStreamTrack.ts b/src/videochat/twilio/useMediaStreamTrack.ts
deleted file mode 100644
index f3416c28..00000000
--- a/src/videochat/twilio/useMediaStreamTrack.ts
+++ /dev/null
@@ -1,28 +0,0 @@
-
-import { useState, useEffect } from 'react'
-import { AudioTrack, VideoTrack } from 'twilio-video'
-
-/*
- * This hook allows components to reliably use the 'mediaStreamTrack' property of
- * an AudioTrack or a VideoTrack. Whenever 'localTrack.restart(...)' is called, it
- * will replace the mediaStreamTrack property of the localTrack, but the localTrack
- * object will stay the same. Therefore this hook is needed in order for components
- * to rerender in response to the mediaStreamTrack changing.
- */
-export default function useMediaStreamTrack (track?: AudioTrack | VideoTrack) {
- const [mediaStreamTrack, setMediaStreamTrack] = useState(track?.mediaStreamTrack)
-
- useEffect(() => {
- setMediaStreamTrack(track?.mediaStreamTrack)
-
- if (track) {
- const handleStarted = () => { console.log('Set media stream track!'); setMediaStreamTrack(track.mediaStreamTrack) }
- track.on('started', handleStarted)
- return () => {
- track.off('started', handleStarted)
- }
- }
- }, [track])
-
- return mediaStreamTrack
-}
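A hedged usage sketch for the hook above; `FacingModeLabel` is hypothetical:

```tsx
import React from 'react'
import { LocalVideoTrack } from 'twilio-video'
import useMediaStreamTrack from './useMediaStreamTrack'

// Sketch: because the hook listens for 'started', this re-renders with the
// new MediaStreamTrack after localTrack.restart() swaps it out.
function FacingModeLabel ({ track }: { track: LocalVideoTrack }) {
  const mediaStreamTrack = useMediaStreamTrack(track)
  return <span>{mediaStreamTrack?.getSettings().facingMode ?? 'unknown'}</span>
}
```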
diff --git a/src/videochat/twilio/usePublications.ts b/src/videochat/twilio/usePublications.ts
deleted file mode 100644
index e1df923e..00000000
--- a/src/videochat/twilio/usePublications.ts
+++ /dev/null
@@ -1,27 +0,0 @@
-import { useEffect, useState } from 'react'
-import { LocalTrackPublication, Participant, RemoteTrackPublication } from 'twilio-video'
-
-export type TrackPublication = LocalTrackPublication | RemoteTrackPublication;
-
-export default function usePublications (participant: Participant) {
- const [publications, setPublications] = useState<TrackPublication[]>([])
-
- useEffect(() => {
- // Reset the publications when the 'participant' variable changes.
- setPublications(Array.from(participant.tracks.values()) as TrackPublication[])
-
- const publicationAdded = (publication: TrackPublication) =>
- setPublications(prevPublications => [...prevPublications, publication])
- const publicationRemoved = (publication: TrackPublication) =>
- setPublications(prevPublications => prevPublications.filter(p => p !== publication))
-
- participant.on('trackPublished', publicationAdded)
- participant.on('trackUnpublished', publicationRemoved)
- return () => {
- participant.off('trackPublished', publicationAdded)
- participant.off('trackUnpublished', publicationRemoved)
- }
- }, [participant])
-
- return publications
-}
diff --git a/src/videochat/twilio/useTrack.tsx b/src/videochat/twilio/useTrack.tsx
deleted file mode 100644
index fd69bbff..00000000
--- a/src/videochat/twilio/useTrack.tsx
+++ /dev/null
@@ -1,25 +0,0 @@
-
-import { useEffect, useState } from 'react'
-import { LocalTrackPublication, RemoteTrackPublication } from 'twilio-video'
-
-export default function useTrack (publication: LocalTrackPublication | RemoteTrackPublication | undefined) {
- const [track, setTrack] = useState(publication && publication.track)
-
- useEffect(() => {
- // Reset the track when the 'publication' variable changes.
- setTrack(publication && publication.track)
-
- if (publication) {
- const removeTrack = () => setTrack(null)
-
- publication.on('subscribed', setTrack)
- publication.on('unsubscribed', removeTrack)
- return () => {
- publication.off('subscribed', setTrack)
- publication.off('unsubscribed', removeTrack)
- }
- }
- }, [publication])
-
- return track
-}
diff --git a/src/videochat/twilio/useVideoTrackDimensions.ts b/src/videochat/twilio/useVideoTrackDimensions.ts
deleted file mode 100644
index 23ba4e29..00000000
--- a/src/videochat/twilio/useVideoTrackDimensions.ts
+++ /dev/null
@@ -1,26 +0,0 @@
-
-import { useState, useEffect } from 'react'
-import { LocalVideoTrack, RemoteVideoTrack } from 'twilio-video'
-
-type TrackType = LocalVideoTrack | RemoteVideoTrack;
-
-export default function useVideoTrackDimensions (track?: TrackType) {
- const [dimensions, setDimensions] = useState(track?.dimensions)
-
- useEffect(() => {
- setDimensions(track?.dimensions)
-
- if (track) {
- const handleDimensionsChanged = (track: TrackType) => setDimensions({
- width: track.dimensions.width,
- height: track.dimensions.height
- })
- track.on('dimensionsChanged', handleDimensionsChanged)
- return () => {
- track.off('dimensionsChanged', handleDimensionsChanged)
- }
- }
- }, [track])
-
- return dimensions
-}
diff --git a/src/videochat/twilioChatContext.tsx b/src/videochat/twilioChatContext.tsx
deleted file mode 100644
index d375d418..00000000
--- a/src/videochat/twilioChatContext.tsx
+++ /dev/null
@@ -1,491 +0,0 @@
-import * as React from 'react'
-import { useState, useEffect, useContext } from 'react'
-import * as Twilio from 'twilio-video'
-import { HideModalAction, MediaReceivedSpeakingDataAction, RefreshReactAction, StartVideoChatAction, StopVideoChatAction } from '../Actions'
-
-import { DispatchContext } from '../App'
-
-import { fetchTwilioToken } from '../networking'
-import { setUpSpeechRecognizer, stopSpeechRecognizer } from '../speechRecognizer'
-import { DeviceInfo, MediaChatContext } from './mediaChatContext'
-import ParticipantTracks from './twilio/ParticipantTracks'
-import VideoTrack from './twilio/VideoTrack'
-
-export const TwilioChatContextProvider = (props: {
- active: boolean;
- children: React.ReactNode;
-}) => {
- const dispatch = useContext(DispatchContext)
-
- const [token, setToken] = useState<string>()
- const [roomId, setRoomId] = useState<string>()
- const [room, setRoom] = useState<Twilio.Room>()
- const [joinCallFailed, setJoinCallFailed] = useState(false)
-
- const [micEnabled, setMicEnabled] = useState(true)
- const [cameraEnabled, setCameraEnabled] = useState(true)
-
- const [cameras, setCameras] = useState<DeviceInfo[]>([])
- const [mics, setMics] = useState<DeviceInfo[]>([])
-
- const [currentMic, setCurrentMicInternal] = useState<DeviceInfo>()
- const [currentCamera, setCurrentCameraInternal] = useState<DeviceInfo>()
-
- // These are separate from current to handle the case of the media selector
- // where we need both mic and camera enabled, but may not want to show
- // the camera in the background
- const [publishingCamera, setPublishingCamera] = useState<boolean>()
- const [publishingMic, setPublishingMic] = useState<boolean>()
-
- const [remoteParticipants, setRemoteParticipants] = useState