// src/components/ChatWindow.js

import React, {
  useState,
  useRef,
  useEffect,
  useLayoutEffect,
  useCallback,
} from 'react';
import '../styles/ChatWindow.css';
import { FaPaperPlane, FaMicrophone } from 'react-icons/fa';
import Lipsync from './Lipsync';
import useAudio from '../hooks/useAudio';
import useChat from '../hooks/useChat';
import useStore from '../store';
import VoiceRecordingIndicator from './VoiceRecordingIndicator';
import EmotionBubble from './EmotionBubble';

/**
 * Reads ?name=John from the current URL.
 * Returns that string, or null if the parameter is absent.
 */
function getUrlNameParam() {
  const params = new URLSearchParams(window.location.search);
  return params.get('name'); // e.g. "Bob" or null
}

function ChatWindow({
  lipsyncRef,
  // The default welcome prompt, used when no prop is passed
  initialMessage = "Please introduce yourself and end your greeting with a suitable, open question for us",
  // Callbacks controlling the idle timeouts
  startIdleTimers,
  cancelIdleTimers,
}) {
  const messagesEndRef = useRef(null);
  // True when the user explicitly toggled the mic off; prevents auto-restart.
  const userManuallyOffRef = useRef(false);

  // from store
  const {
    isAssistantSpeaking,
    setIsAssistantSpeaking,
    setIsUserSpeaking,
    // We'll read assistantEmotion so we can spawn emoticon bubbles
    assistantEmotion,
    // [NEW] We'll also get/set the shouldWave state
    setShouldWave,
  } = useStore();

  // ========== TEXT CHAT DISABLE LOGIC (while user is speaking) ==========
  const [disableTextChat, setDisableTextChat] = useState(false);
  const [hasWavedWelcome, setHasWavedWelcome] = useState(false);

  /**
   * (NEW) Build a custom initial message if the URL has ?name=...
   * e.g. name=Alice =>
   * "Please introduce yourself... You will now be talking to Alice, so address them using their name."
   */
  const nameParam = getUrlNameParam();
  const customInitialMsg = nameParam
    ? `${initialMessage} You will now be talking to ${nameParam}, so please address them directly by name when communicating.`
    : initialMessage;
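
  // Example: loading the app as /chat?name=Alice appends
  // "You will now be talking to Alice, so please address them directly by
  // name when communicating." to the welcome prompt above.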

  // from useChat
  const {
    messages,
    setMessages,
    sendUserMessage,
    sendAudioMessage,
    sendIdleSystemMessage,
  } = useChat({
    initialMessage: customInitialMsg, // pass the possibly extended message
    // handleMp3Chunk is defined below; the arrow defers the lookup to call time.
    onMp3Chunk: (blob) => handleMp3Chunk(blob),
  });

  // Handle idle timers with a stable reference
  const onUserActivity = useCallback(() => {
    cancelIdleTimers?.();
    startIdleTimers?.(sendIdleSystemMessage);
  }, [cancelIdleTimers, startIdleTimers, sendIdleSystemMessage]);
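
  // Assumed contract for the idle-timer props (they come from the parent,
  // so this is an assumption rather than something enforced here):
  //   startIdleTimers(cb)  - schedules cb to fire after a period of inactivity
  //   cancelIdleTimers()   - clears any pending idle timer
  // Every user action calls onUserActivity() to reset the countdown.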

  // ========== A) USER VOICE RECORDING ==========
  const {
    isRecording,
    isVoiceActive,
    handleSpeakButton,
    startListening,
    stopListening,
    elapsedTime,
    currentRms,
  } = useAudio({
    onAudioCaptured: (audioBlob) => {
      setMessages((prev) => [
        ...prev,
        { text: '🎤 (You sent audio)', sender: 'user', isAudio: true },
      ]);

      // Convert the captured blob to base64 and hand it to the chat hook.
      const reader = new FileReader();
      reader.readAsDataURL(audioBlob);
      reader.onloadend = () => {
        // reader.result is a data URL; keep only the base64 payload.
        const base64data = reader.result.split(',')[1];
        sendAudioMessage(base64data);
      };

      onUserActivity();
    },
    onError: (err) => {
      console.error(err);
    },
    silentStopTime: 3000,
    onAutoStop: () => {
      // Intentionally empty: the auto-restart after the avatar finishes
      // speaking is handled in playNextInQueue below.
    },
    onManualStop: () => {
      setDisableTextChat(false);
    },
  });
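
  // A minimal promise-based variant of the same blob-to-base64 conversion,
  // for reference (a sketch, not wired into the component):
  //
  //   function blobToBase64(blob) {
  //     return new Promise((resolve, reject) => {
  //       const reader = new FileReader();
  //       reader.onloadend = () => resolve(reader.result.split(',')[1]);
  //       reader.onerror = reject;
  //       reader.readAsDataURL(blob);
  //     });
  //   }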

  // Whenever isVoiceActive changes or assistant starts speaking => disable chat
  useEffect(() => {
    if (isVoiceActive || isAssistantSpeaking) {
      setDisableTextChat(true);
      console.log('Started recording user voice or avatar is speaking.');
      setIsUserSpeaking(true);
    } else if (!isRecording) {
      setDisableTextChat(false);
      console.log('Stopped recording user voice & avatar not speaking.');
      setIsUserSpeaking(false);
    }
  }, [isVoiceActive, isAssistantSpeaking, isRecording, setIsUserSpeaking]);

  // ========== B) AVATAR MP3 CHUNK QUEUE LOGIC ==========
  const audioQueueRef = useRef([]);
  const audioElementRef = useRef(null);
  const isPlayingRef = useRef(false);
  const [isMessageFinalized, setIsMessageFinalized] = useState(false);
  const messageTimeoutRef = useRef(null);

  const finalizeCurrentMessage = useCallback(() => {
    console.log('No chunks arrived recently => finalizing message.');
    setIsMessageFinalized(true);
  }, []);
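
  // Queue invariants (implied by the code below): the chunk currently
  // playing stays at audioQueueRef.current[0] and is only shifted off in
  // the 'ended' handler; a message counts as finalized once no new chunk
  // has arrived for 2 seconds (see handleMp3Chunk).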

  const playNextInQueue = useCallback(() => {
    const audioEl = audioElementRef.current;
    if (!audioEl) return;

    if (audioQueueRef.current.length === 0) {
      isPlayingRef.current = false;
      setIsAssistantSpeaking(false);

      // Auto-restart listening after the avatar finishes, unless the user
      // forcibly toggled the mic off.
      if (!userManuallyOffRef.current && isVoiceActive && !isRecording) {
        startListening();
      }

      if (isMessageFinalized) {
        console.log('Done playing all chunks => clearing entire queue now.');
        audioQueueRef.current = [];
        setIsMessageFinalized(false);
      }
      return;
    }

    isPlayingRef.current = true;
    setIsAssistantSpeaking(true);

    // Play the head of the queue; it is shifted off in the 'ended' handler.
    const blob = audioQueueRef.current[0];
    const url = URL.createObjectURL(blob);
    audioEl.src = url;

    audioEl
      .play()
      .then(() => {
        // once it starts playing, nothing more to do
      })
      .catch((err) => {
        console.warn('Audio play() blocked or errored:', err);
      });
  }, [
    setIsAssistantSpeaking,
    isMessageFinalized,
    isRecording,
    isVoiceActive,
    startListening,
  ]);
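
  // Caveat: if play() is rejected (e.g. by the browser's autoplay policy,
  // which blocks audio before the first user gesture), no 'ended' event
  // ever fires for this chunk, so the queue stalls until the next call to
  // playNextInQueue. Triggering playback from a click handler avoids this.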

  const handleMp3Chunk = useCallback(
    (blob) => {
      if (isMessageFinalized) {
        console.log('Ignoring new chunk; message is finalized.');
        return;
      }

      // A new chunk arrived, so push the end-of-message deadline back.
      if (messageTimeoutRef.current) {
        clearTimeout(messageTimeoutRef.current);
      }

      audioQueueRef.current.push(blob);
      console.log(
        'Pushed new chunk into queue. Queue length:',
        audioQueueRef.current.length
      );

      if (!isPlayingRef.current) {
        playNextInQueue();
      }

      // No chunk for 2 seconds => assume the message is complete.
      messageTimeoutRef.current = setTimeout(() => {
        finalizeCurrentMessage();
      }, 2000);
    },
    [isMessageFinalized, finalizeCurrentMessage, playNextInQueue]
  );
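
  // The 2000 ms silence window is a heuristic: it assumes the backend never
  // pauses longer than that mid-message. If the streaming protocol can send
  // an explicit end-of-message marker, keying finalization off that marker
  // would be more robust than this timeout.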

  useEffect(() => {
    const audioEl = audioElementRef.current;
    if (!audioEl) return;

    const handlePlaying = () => {
      // debug
    };

    const handleEnded = () => {
      const old = audioQueueRef.current.shift();
      console.log('Finished chunk => removing from queue.', old);
      URL.revokeObjectURL(audioEl.src);
      audioEl.src = '';

      playNextInQueue();
    };

    const handlePause = () => {
      // A natural end of playback also fires 'pause' just before 'ended';
      // only treat an explicit mid-playback pause as "ended", otherwise the
      // queue would be shifted twice per chunk.
      if (audioEl.ended) return;
      handleEnded();
    };

    audioEl.addEventListener('playing', handlePlaying);
    audioEl.addEventListener('ended', handleEnded);
    audioEl.addEventListener('pause', handlePause);

    return () => {
      audioEl.removeEventListener('playing', handlePlaying);
      audioEl.removeEventListener('ended', handleEnded);
      audioEl.removeEventListener('pause', handlePause);
    };
  }, [playNextInQueue]);

  // ========== C) AUDIOCONTEXT + LIPSYNC HOOKUP ==========
  const audioContextRef = useRef(null);

  useEffect(() => {
    if (!audioElementRef.current) return;

    if (!audioContextRef.current) {
      audioContextRef.current =
        new (window.AudioContext || window.webkitAudioContext)();

      // createMediaElementSource may only be called once per element, which
      // is why this whole block is guarded by the audioContextRef check.
      const source = audioContextRef.current.createMediaElementSource(
        audioElementRef.current
      );
      source.connect(audioContextRef.current.destination);

      if (lipsyncRef && !lipsyncRef.current) {
        lipsyncRef.current = new Lipsync(0.5, 0.6, 1.0, audioContextRef.current);
      }
      if (lipsyncRef?.current?.connectAudioNode) {
        lipsyncRef.current.connectAudioNode(source);
      }
    }
  }, [lipsyncRef]);
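
  // An AudioContext created before any user gesture usually starts in the
  // "suspended" state. A sketch of one way to handle that (hypothetical
  // placement, not wired in here):
  //
  //   if (audioContextRef.current?.state === 'suspended') {
  //     audioContextRef.current.resume(); // returns a Promise
  //   }
  //
  // e.g. called from the Speak/Send click handlers, which are guaranteed
  // user gestures.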

  // ========== D) EMOTION BUBBLE LOGIC ==========
  const [emotionBubbles, setEmotionBubbles] = useState([]);

  useEffect(() => {
    if (!assistantEmotion) return;
    if (assistantEmotion === 'Unknown') return;
    setEmotionBubbles((prev) => [
      ...prev,
      { id: Date.now(), emotion: assistantEmotion },
    ]);
  }, [assistantEmotion]);

  // ========== E) TRIGGER WAVE: FIRST & GOODBYE ==========
  useEffect(() => {
    if (messages.length === 0) return;

    const assistantMessages = messages.filter((m) => m.sender === 'assistant');
    if (assistantMessages.length === 0) return;

    // 1) Exactly one assistant message => we just got the first => wave
    if (assistantMessages.length === 1 && !hasWavedWelcome) {
      setShouldWave(true);
      setTimeout(() => setShouldWave(false), 1200);
      setHasWavedWelcome(true); // mark that we've waved
    }

    // 2) Check if the latest assistant message includes "goodbye"
    const lastMsg = assistantMessages[assistantMessages.length - 1];
    if (lastMsg && /goodbye/i.test(lastMsg.text)) {
      setShouldWave(true);
      setTimeout(() => setShouldWave(false), 1200);
      setHasWavedWelcome(false); // reset so a restarted session is greeted again
    }
  }, [messages, hasWavedWelcome, setShouldWave]);
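
  // Note: /goodbye/i matches "goodbye" anywhere in the message, so a reply
  // like "it's not goodbye yet" would also trigger the farewell wave. A
  // stricter signal (e.g. an explicit end-of-conversation flag from the
  // backend, if one exists) would avoid such false positives.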

  // Show the input area after the first assistant message
  const [showInput, setShowInput] = useState(false);
  useEffect(() => {
    if (!showInput && messages.some((m) => m.sender === 'assistant')) {
      setShowInput(true);
    }
  }, [messages, showInput]);

  const [input, setInput] = useState('');
  const handleSendText = useCallback(() => {
    if (disableTextChat) return;
    if (!input.trim()) return;
    sendUserMessage(input);
    setInput('');
    onUserActivity();
  }, [disableTextChat, input, sendUserMessage, onUserActivity]);

  // Keep the newest message scrolled into view
  useLayoutEffect(() => {
    if (messagesEndRef.current) {
      messagesEndRef.current.scrollIntoView({ behavior: 'smooth' });
    }
  }, [messages]);

  const handleToggleSpeak = () => {
    // Remember whether this toggle is the user explicitly turning the mic
    // off, so playNextInQueue won't auto-restart listening afterwards.
    userManuallyOffRef.current = isVoiceActive;
    handleSpeakButton();
    onUserActivity();
  };

  return (
    <>
      <div className="chat-window">
        <div className="messages">
          {/* Index keys are acceptable here: the message list is append-only. */}
          {messages.map((msg, idx) => (
            <div
              key={idx}
              className={`message ${
                msg.sender === 'user' ? 'user' : 'backend'
              }`}
            >
              {msg.text}
            </div>
          ))}
          <div ref={messagesEndRef} />
        </div>

        {showInput && (
          <div className="input-area fade-in">
            <input
              type="text"
              placeholder="Write here"
              value={input}
              onChange={(e) => setInput(e.target.value)}
              onKeyDown={(e) => {
                if (e.key === 'Enter') {
                  handleSendText();
                }
              }}
              disabled={disableTextChat}
            />
            <button
              className="send-button"
              onClick={handleSendText}
              disabled={disableTextChat}
            >
              <FaPaperPlane />
              <span>Send</span>
            </button>

            <button
              className={`speak-button ${isRecording ? 'recording' : ''}`}
              onClick={handleToggleSpeak}
              disabled={isAssistantSpeaking}
            >
              <FaMicrophone />
              <span>{isVoiceActive ? 'Stop' : 'Speak'}</span>
            </button>
          </div>
        )}
      </div>

      {/* Single <audio> element for the queue-based playback approach */}
      <audio
        ref={audioElementRef}
        style={{ display: 'none' }}
        controls={false}
        preload="auto"
      />

      {isVoiceActive && (
        <div className="voice-indicator-floating">
          <VoiceRecordingIndicator
            isRecording={isRecording}
            elapsedTime={elapsedTime}
            maxTime={Number(process.env.REACT_APP_MAX_VOICE_TIME || 30)}
            rmsValue={currentRms || 0}
          />
        </div>
      )}

      {/* (D) RENDER EMOTION BUBBLES */}
      {emotionBubbles.map((b) => (
        <EmotionBubble
          key={b.id}
          emotion={b.emotion}
          onAnimationEnd={() => {
            setEmotionBubbles((prev) => prev.filter((x) => x.id !== b.id));
          }}
        />
      ))}
    </>
  );
}

export default ChatWindow;