commit
cd39943b8b
65 changed files with 1496198 additions and 0 deletions
@@ -0,0 +1,5 @@ |
|||
# .env |
|||
REACT_APP_WS_TALK_ENDPOINT=ws://localhost:3000/talkToAvatarElevenlabs |
|||
REACT_APP_IDLE_TIMEOUT_ENABLED=true |
|||
REACT_APP_PERSIST_USER_ID=true |
|||
REACT_APP_MAX_VOICE_TIME=30 |
|||
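These flags are read at runtime through `process.env` (Create React App inlines `REACT_APP_*` variables at build time). A minimal sketch of how the code in this commit consumes them — the WebSocket line is an assumption, since the `useChat` hook that would use `REACT_APP_WS_TALK_ENDPOINT` is not shown in this excerpt:

```js
// Sketch only: mirrors the reads in App.js and ChatWindow.js.
const idleTimeoutEnabled =
  process.env.REACT_APP_IDLE_TIMEOUT_ENABLED === "true";                  // feature flag
const maxVoiceTime = Number(process.env.REACT_APP_MAX_VOICE_TIME || 30);  // seconds

// Assumption: the chat hook opens its socket against this endpoint.
const socket = new WebSocket(process.env.REACT_APP_WS_TALK_ENDPOINT);
```

`REACT_APP_PERSIST_USER_ID` does not appear in the files shown here; it is presumably consumed by one of the suppressed hooks (the project depends on `js-cookie` and `uuid`).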
@@ -0,0 +1,23 @@ |
|||
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. |
|||
|
|||
# dependencies |
|||
/node_modules |
|||
/.pnp |
|||
.pnp.js |
|||
|
|||
# testing |
|||
/coverage |
|||
|
|||
# production |
|||
/build |
|||
|
|||
# misc |
|||
.DS_Store |
|||
.env.local |
|||
.env.development.local |
|||
.env.test.local |
|||
.env.production.local |
|||
|
|||
npm-debug.log* |
|||
yarn-debug.log* |
|||
yarn-error.log* |
|||
@@ -0,0 +1,70 @@ |
|||
# Getting Started with Create React App |
|||
|
|||
This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app). |
|||
|
|||
## Available Scripts |
|||
|
|||
In the project directory, you can run: |
|||
|
|||
### `npm start` |
|||
|
|||
Runs the app in the development mode.\ |
|||
Open [http://localhost:3000](http://localhost:3000) to view it in your browser. |
|||
|
|||
The page will reload when you make changes.\ |
|||
You may also see any lint errors in the console. |
|||
|
|||
### `npm test` |
|||
|
|||
Launches the test runner in the interactive watch mode.\ |
|||
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information. |
|||
|
|||
### `npm run build` |
|||
|
|||
Builds the app for production to the `build` folder.\ |
|||
It correctly bundles React in production mode and optimizes the build for the best performance. |
|||
|
|||
The build is minified and the filenames include the hashes.\ |
|||
Your app is ready to be deployed! |
|||
|
|||
See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information. |
|||
|
|||
### `npm run eject` |
|||
|
|||
**Note: this is a one-way operation. Once you `eject`, you can't go back!** |
|||
|
|||
If you aren't satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project. |
|||
|
|||
Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you're on your own. |
|||
|
|||
You don't have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn't feel obligated to use this feature. However we understand that this tool wouldn't be useful if you couldn't customize it when you are ready for it. |
|||
|
|||
## Learn More |
|||
|
|||
You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started). |
|||
|
|||
To learn React, check out the [React documentation](https://reactjs.org/). |
|||
|
|||
### Code Splitting |
|||
|
|||
This section has moved here: [https://facebook.github.io/create-react-app/docs/code-splitting](https://facebook.github.io/create-react-app/docs/code-splitting) |
|||
|
|||
### Analyzing the Bundle Size |
|||
|
|||
This section has moved here: [https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size](https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size) |
|||
|
|||
### Making a Progressive Web App |
|||
|
|||
This section has moved here: [https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app](https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app) |
|||
|
|||
### Advanced Configuration |
|||
|
|||
This section has moved here: [https://facebook.github.io/create-react-app/docs/advanced-configuration](https://facebook.github.io/create-react-app/docs/advanced-configuration) |
|||
|
|||
### Deployment |
|||
|
|||
This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment) |
|||
|
|||
### `npm run build` fails to minify |
|||
|
|||
This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify) |
|||
File diff suppressed because it is too large
@@ -0,0 +1,54 @@ |
|||
{ |
|||
"name": "brex-avatars", |
|||
"version": "0.1.0", |
|||
"private": true, |
|||
"dependencies": { |
|||
"@react-three/drei": "^9.117.3", |
|||
"@react-three/fiber": "^8.17.10", |
|||
"@react-three/postprocessing": "^2.16.3", |
|||
"@testing-library/jest-dom": "^5.17.0", |
|||
"@testing-library/react": "^13.4.0", |
|||
"@testing-library/user-event": "^13.5.0", |
|||
"js-cookie": "^3.0.5", |
|||
"react": "^18.3.1", |
|||
"react-circular-progressbar": "^2.1.0", |
|||
"react-dom": "^18.3.1", |
|||
"react-icons": "^5.3.0", |
|||
"react-scripts": "^5.0.1", |
|||
"react-toastify": "^11.0.2", |
|||
"three": "^0.170.0", |
|||
"uuid": "^11.0.3", |
|||
"web-vitals": "^2.1.4", |
|||
"zustand": "^5.0.2" |
|||
}, |
|||
"scripts": { |
|||
"start": "react-scripts start", |
|||
"build": "react-scripts build", |
|||
"test": "react-scripts test", |
|||
"eject": "react-scripts eject" |
|||
}, |
|||
"eslintConfig": { |
|||
"extends": [ |
|||
"react-app", |
|||
"react-app/jest" |
|||
] |
|||
}, |
|||
"browserslist": { |
|||
"production": [ |
|||
">0.2%", |
|||
"not dead", |
|||
"not op_mini all" |
|||
], |
|||
"development": [ |
|||
"last 1 chrome version", |
|||
"last 1 firefox version", |
|||
"last 1 safari version" |
|||
] |
|||
}, |
|||
"devDependencies": { |
|||
"@babel/plugin-proposal-private-property-in-object": "^7.21.11", |
|||
"@pmmmwh/react-refresh-webpack-plugin": "^0.5.15", |
|||
"react-refresh": "^0.16.0", |
|||
"webpack-hot-middleware": "^2.26.1" |
|||
} |
|||
} |
|||
|
After Width: | Height: | Size: 3.8 KiB |
|
After Width: | Height: | Size: 506 KiB |
@@ -0,0 +1,21 @@ |
|||
<!DOCTYPE html> |
|||
<html lang="en"> |
|||
<head> |
|||
<meta charset="utf-8" /> |
|||
<link rel="icon" href="%PUBLIC_URL%/favicon.ico" /> |
|||
<meta name="viewport" content="width=device-width, initial-scale=1" /> |
|||
<meta name="theme-color" content="#000000" /> |
|||
<meta |
|||
name="B.REX Avatars" |
|||
content="Avatar communication application created by B.REX" |
|||
title="B.REX Avatars" |
|||
/> |
|||
<link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" /> |
|||
<link rel="manifest" href="%PUBLIC_URL%/manifest.json" /> |
|||
<title>B.REX Avatars</title> |
|||
</head> |
|||
<body> |
|||
<noscript>You need to enable JavaScript to run this app.</noscript> |
|||
<div id="root"></div> |
|||
</body> |
|||
</html> |
|||
Binary file not shown.
|
After Width: | Height: | Size: 5.2 KiB |
|
After Width: | Height: | Size: 9.4 KiB |
@@ -0,0 +1,25 @@ |
|||
{ |
|||
"short_name": "B.REX Avatars", |
|||
"name": "B.REX Avatar Engine V0.2", |
|||
"icons": [ |
|||
{ |
|||
"src": "favicon.ico", |
|||
"sizes": "64x64 32x32 24x24 16x16", |
|||
"type": "image/x-icon" |
|||
}, |
|||
{ |
|||
"src": "logo192.png", |
|||
"type": "image/png", |
|||
"sizes": "192x192" |
|||
}, |
|||
{ |
|||
"src": "logo512.png", |
|||
"type": "image/png", |
|||
"sizes": "512x512" |
|||
} |
|||
], |
|||
"start_url": ".", |
|||
"display": "standalone", |
|||
"theme_color": "#000000", |
|||
"background_color": "#ffffff" |
|||
} |
|||
@@ -0,0 +1,3 @@ |
|||
# https://www.robotstxt.org/robotstxt.html |
|||
User-agent: * |
|||
Disallow: |
|||
@@ -0,0 +1,195 @@ |
|||
// src/App.js
|
|||
|
|||
import React, { useState, useRef, Suspense, useCallback } from 'react'; |
|||
import Scene from './components/Scene'; |
|||
import ChatWindow from './components/ChatWindow'; |
|||
import Loader from './components/Loader'; |
|||
import './styles/App.css'; |
|||
//import useStore from './store';
|
|||
import useLangStore from './langstore'; |
|||
import { FaComments } from 'react-icons/fa'; |
|||
import { ToastContainer, toast } from 'react-toastify'; |
|||
import 'react-toastify/dist/ReactToastify.css'; |
|||
|
|||
const LANG_OPTIONS = [ |
|||
{ code: 'en', label: 'English 🇬🇧' }, |
|||
{ code: 'es', label: 'Spanish 🇪🇸' }, |
|||
{ code: 'zh', label: 'Chinese 🇨🇳' }, |
|||
{ code: 'hi', label: 'Hindi 🇮🇳' }, |
|||
{ code: 'ar', label: 'Arabic 🇸🇦' }, |
|||
{ code: 'pt', label: 'Portuguese 🇧🇷' }, |
|||
{ code: 'ru', label: 'Russian 🇷🇺' }, |
|||
{ code: 'ja', label: 'Japanese 🇯🇵' }, |
|||
{ code: 'de', label: 'German 🇩🇪' }, |
|||
{ code: 'fr', label: 'French 🇫🇷' }, |
|||
]; |
|||
|
|||
function LanguageSelector() { |
|||
const { currentLanguage, setCurrentLanguage } = useLangStore(); |
|||
const [isOpen, setIsOpen] = useState(false); |
|||
|
|||
const handleToggle = () => setIsOpen(!isOpen); |
|||
const handleSelect = (langCode) => { |
|||
setCurrentLanguage(langCode); |
|||
setIsOpen(false); |
|||
}; |
|||
|
|||
const curr = LANG_OPTIONS.find(opt => opt.code === currentLanguage) || LANG_OPTIONS[0]; |
|||
|
|||
return ( |
|||
<div className="language-selector-container"> |
|||
<button onClick={handleToggle} className="language-selector-button"> |
|||
{curr.label} |
|||
</button> |
|||
{isOpen && ( |
|||
<div className="language-dropup"> |
|||
{LANG_OPTIONS.map(opt => ( |
|||
<button |
|||
key={opt.code} |
|||
onClick={() => handleSelect(opt.code)} |
|||
className="language-option" |
|||
> |
|||
{opt.label} |
|||
</button> |
|||
))} |
|||
</div> |
|||
)} |
|||
</div> |
|||
); |
|||
} |
|||
|
|||
function extractLanguageName(label) { |
|||
return label.split(' ')[0]; |
|||
} |
|||
|
|||
function App() { |
|||
const [conversationStarted, setConversationStarted] = useState(false); |
|||
const [initialMessage, setInitialMessage] = useState( |
|||
"Please introduce yourself and end greeting us with a suitable, open question" |
|||
); |
|||
|
|||
const { currentLanguage } = useLangStore(); |
|||
const lipsyncRef = useRef(null); |
|||
|
|||
// =========================
|
|||
// IDLE TIMEOUT LOGIC
|
|||
// =========================
|
|||
const idleTimeoutEnabled = process.env.REACT_APP_IDLE_TIMEOUT_ENABLED === "true"; |
|||
|
|||
// We store references to the 3 timers
|
|||
const stage1TimerRef = useRef(null); |
|||
const stage2TimerRef = useRef(null); |
|||
const stage3TimerRef = useRef(null); |
|||
|
|||
// Cancels all existing timers
|
|||
const cancelIdleTimers = useCallback(() => { |
|||
if (stage1TimerRef.current) clearTimeout(stage1TimerRef.current); |
|||
if (stage2TimerRef.current) clearTimeout(stage2TimerRef.current); |
|||
if (stage3TimerRef.current) clearTimeout(stage3TimerRef.current); |
|||
}, []); |
|||
|
|||
// Starts the 3-stage idle timers
|
|||
// 1) 1 minute => "The user has not spoken in a while..."
|
|||
// 2) 2 more minutes => "long idle"
|
|||
// 3) 30 sec after that => reset to start screen
|
|||
const startIdleTimers = useCallback((sendIdleSystemMessage) => { |
|||
if (!idleTimeoutEnabled) return; // skip if not enabled
|
|||
|
|||
cancelIdleTimers(); |
|||
|
|||
// Stage 1 => 1 min
|
|||
stage1TimerRef.current = setTimeout(() => { |
|||
sendIdleSystemMessage?.( |
|||
"The user talking to you has not spoken to you in a while. Using the language you are speaking in right now, pose an engaging question to keep the conversation flowing." |
|||
); |
|||
}, 60000); |
|||
|
|||
// Stage 2 => after 3 min total
|
|||
stage2TimerRef.current = setTimeout(() => { |
|||
sendIdleSystemMessage?.( |
|||
"The user talking to you is now in idle for a long time. Using the language you are speaking in right now, tell the user goodbye and say that he should just come back if he needs you again." |
|||
); |
|||
}, 60000 + 120000); |
|||
|
|||
// Stage 3 => 3.5 min => reset app
|
|||
stage3TimerRef.current = setTimeout(() => { |
|||
|
|||
// 1) Stop and clear Lipsync so the old AudioContext won't cause errors
|
|||
if (lipsyncRef?.current) { |
|||
lipsyncRef.current.stop(); |
|||
lipsyncRef.current = null; |
|||
} |
|||
|
|||
setConversationStarted(false); |
|||
setInitialMessage("Please introduce yourself and end greeting us with a suitable, open question"); |
|||
cancelIdleTimers(); |
|||
}, 60000 + 120000 + 30000); |
|||
}, [idleTimeoutEnabled, cancelIdleTimers]); |
|||
|
|||
// =========================
|
|||
// "Start Conversation" logic
|
|||
// =========================
|
|||
const handleStartConversation = () => { |
|||
const chosenLangObj = LANG_OPTIONS.find(opt => opt.code === currentLanguage) |
|||
|| LANG_OPTIONS[0]; |
|||
const chosenLangName = extractLanguageName(chosenLangObj.label); |
|||
|
|||
const msg = `Please introduce yourself and end your greeting with a suitable, open question.
|
|||
Ensure that from now on the only language you speak in this chat is no longer English, but: ${chosenLangName}`;
|
|||
|
|||
setInitialMessage(msg); |
|||
setConversationStarted(true); |
|||
startIdleTimers(); |
|||
|
|||
toast.info( |
|||
"Welcome! You can use the text chat or speak to the avatar with your microphone.", |
|||
{ |
|||
autoClose: 8000, |
|||
closeOnClick: true, |
|||
hideProgressBar: false, |
|||
progressStyle: { |
|||
background: "linear-gradient(to right, red, orange, yellow, green, blue, indigo, violet)", |
|||
}, |
|||
} |
|||
); |
|||
}; |
|||
|
|||
return ( |
|||
<div className="App"> |
|||
<Suspense fallback={<Loader fadeOut={false} />}> |
|||
<Scene lipsyncRef={lipsyncRef} conversationStarted={conversationStarted} /> |
|||
</Suspense> |
|||
|
|||
{conversationStarted ? ( |
|||
<ChatWindow |
|||
lipsyncRef={lipsyncRef} |
|||
initialMessage={initialMessage} |
|||
// pass the idle timers control down
|
|||
startIdleTimers={startIdleTimers} |
|||
cancelIdleTimers={cancelIdleTimers} |
|||
/> |
|||
) : ( |
|||
<div className="start-conversation-button-wrapper"> |
|||
<LanguageSelector /> |
|||
<button |
|||
className="start-conversation-button" |
|||
onClick={handleStartConversation} |
|||
> |
|||
<FaComments /> |
|||
<span>Start Conversation</span> |
|||
</button> |
|||
</div> |
|||
)} |
|||
|
|||
<ToastContainer |
|||
position="top-right" |
|||
newestOnTop |
|||
closeOnClick |
|||
draggable |
|||
pauseOnHover |
|||
/> |
|||
</div> |
|||
); |
|||
} |
|||
|
|||
export default App; |
|||
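`useLangStore` is imported from `./langstore`, whose diff is not part of this excerpt (it is likely one of the suppressed files). From the way `App.js` destructures it, a minimal compatible Zustand store would look roughly like this — the default language code is an assumption:

```js
// src/langstore.js — sketch inferred from usage, not the actual file in this commit
import { create } from 'zustand';

const useLangStore = create((set) => ({
  currentLanguage: 'en',                                        // assumed default
  setCurrentLanguage: (code) => set({ currentLanguage: code }),
}));

export default useLangStore;
```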
@@ -0,0 +1,11 @@ |
|||
// src/App.test.js
|
|||
import { render, screen } from '@testing-library/react'; |
|||
import App from './App'; |
|||
|
|||
test('renders learn react link', () => { |
|||
render(<App />); |
|||
// adapt or remove this test
|
|||
// const linkElement = screen.getByText(/learn react/i);
|
|||
// expect(linkElement).toBeInTheDocument();
|
|||
expect(true).toBe(true); |
|||
}); |
|||
17 file diffs suppressed because they are too large
@@ -0,0 +1,24 @@ |
|||
# Takes A2F blendshape export as argument and sets all mouthPucker, mouthClose and jawOpen weights to 0 |
|||
# Usage: 'python3 script.py input.json' |
|||
|
|||
import sys |
|||
import json |
|||
|
|||
if len(sys.argv) != 2: |
|||
print("Please specify exactly one json file.") |
|||
sys.exit() |
|||
|
|||
with open(sys.argv[1]) as f: |
|||
d = json.load(f) |
|||
puckeridx = d["facsNames"].index("mouthPucker") |
|||
closeidx = d["facsNames"].index("mouthClose") |
|||
jawidx = d["facsNames"].index("jawOpen") |
|||
|
|||
for frame in d["weightMat"]: |
|||
frame[puckeridx] = 0.0 |
|||
frame[closeidx] = 0.0 |
|||
frame[jawidx] = 0.0 |
|||
|
|||
|
|||
with open(sys.argv[1].split('.')[0]+"_new.json", 'w') as of: |
|||
json.dump(d, of, ensure_ascii=False, indent=4) |
|||
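The script (and the emotion JSONs imported by `Avatar.js` further down) assumes an Audio2Face export with a `facsNames` array and a per-frame `weightMat` matrix; `numFrames` additionally appears in the files `Avatar.js` cycles through. A sketch of that shape, with made-up values:

```js
// Assumed A2F export shape (values invented for illustration);
// each weightMat row holds one weight per entry of facsNames.
const a2fExport = {
  numFrames: 2,
  facsNames: ["eyeBlinkLeft", "eyeLookDownLeft", /* ... */ "tongueOut"],
  weightMat: [
    [0.02, 0.0, /* ... */ 0.11],
    [0.03, 0.0, /* ... */ 0.09],
  ],
};
```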
9 file diffs suppressed because they are too large
@@ -0,0 +1,553 @@ |
|||
// src/components/Avatar.js
|
|||
|
|||
import React, { useRef, useEffect, useCallback, useMemo } from 'react'; |
|||
import { useFrame } from '@react-three/fiber'; |
|||
import * as THREE from 'three'; |
|||
import { useGLTF } from '@react-three/drei'; |
|||
import useStore from '../store'; |
|||
|
|||
// Import all emotion JSONs
|
|||
import emotionNeutral from '../blendshapeData/neutral.json'; |
|||
import emotionAnger from '../blendshapeData/anger.json'; |
|||
import emotionSad from '../blendshapeData/sad.json'; |
|||
import emotionHappy from '../blendshapeData/happy.json'; |
|||
import emotionSurprise from '../blendshapeData/surprise.json'; |
|||
import emotionDisgust from '../blendshapeData/disgust.json'; |
|||
import emotionFear from '../blendshapeData/fear.json'; |
|||
|
|||
/** |
|||
* Avatar |
|||
* |
|||
* - Loads kira.glb (body + face). |
|||
* - Plays idle/talk animations (AnimationMixer). |
|||
* - While speaking, applies local JSON-based facial animations |
|||
* for eyes, brows, cheeks, etc. (lipsync overrides the mouth). |
|||
* - Also uses assistantEmotion from Zustand to pick |
|||
* an emotion file among [Anger, Sad, Happy, Surprise, Disgust, Fear, Neutral]. |
|||
* - Now adds "listening" and "waving" animations (if found) for special states. |
|||
* |
|||
* PROPS: |
|||
* - lipsyncRef: a ref to Lipsync instance |
|||
* - isVoiceActive: if user’s mic is on (we fade to "listening" if so) |
|||
* - shouldWave: if you want to force a wave animation (e.g. first greeting or final goodbye) |
|||
*/ |
|||
function Avatar({ lipsyncRef, isVoiceActive = false }) { |
|||
// From Zustand store
|
|||
const isAssistantSpeaking = useStore((state) => state.isAssistantSpeaking); |
|||
const assistantEmotion = useStore((state) => state.assistantEmotion); |
|||
const isUserSpeaking = useStore((state) => state.isUserSpeaking); |
|||
|
|||
// We'll read shouldWave from the store
|
|||
const shouldWave = useStore((state) => state.shouldWave); |
|||
|
|||
// We choose which JSON file to use based on emotion & speaking status
|
|||
const emotionFile = useMemo(() => { |
|||
if (!isAssistantSpeaking) { |
|||
return emotionNeutral; // If not speaking => neutral
|
|||
} |
|||
// If speaking => choose file by emotion
|
|||
switch (assistantEmotion) { |
|||
case 'Happiness': |
|||
return emotionHappy; |
|||
case 'Sadness': |
|||
return emotionSad; |
|||
case 'Anger': |
|||
return emotionAnger; |
|||
case 'Fear': |
|||
return emotionFear; |
|||
case 'Disgust': |
|||
return emotionDisgust; |
|||
case 'Surprise': |
|||
return emotionSurprise; |
|||
default: |
|||
return emotionNeutral; |
|||
} |
|||
}, [assistantEmotion, isAssistantSpeaking]); |
|||
|
|||
// Log current emotion whenever speaking starts
|
|||
useEffect(() => { |
|||
if (isAssistantSpeaking) { |
|||
console.log('Current Assistant Emotion:', assistantEmotion || 'Neutral'); |
|||
} |
|||
}, [assistantEmotion, isAssistantSpeaking]); |
|||
|
|||
// Load GLTF model
|
|||
const { scene: avatarScene, animations } = useGLTF('/kira.glb'); |
|||
const avatarRef = useRef(null); |
|||
const mixerRef = useRef(null); |
|||
|
|||
// Refs for main actions
|
|||
const idleActionRef = useRef(null); |
|||
const talkActionRef = useRef(null); |
|||
const listeningActionRef = useRef(null); |
|||
const wavingActionRef = useRef(null); |
|||
const currentActionRef = useRef(null); |
|||
|
|||
// Morph references
|
|||
const headMeshRef = useRef(null); |
|||
const teethMeshRef = useRef(null); |
|||
const eyeLeftMeshRef = useRef(null); |
|||
const eyeRightMeshRef = useRef(null); |
|||
|
|||
// MOUTH + LIPSYNC references
|
|||
const mouthPuckerIndexRef = useRef(null); |
|||
const mouthCloseIndexRef = useRef(null); |
|||
const jawOpenIndexRef = useRef(null); |
|||
const jawOpenIndexTeethRef = useRef(null); |
|||
|
|||
// [NEW] We'll store the previous lipsync values for smoothing:
|
|||
const prevPuckerRef = useRef(0); |
|||
const prevCloseRef = useRef(0); |
|||
const prevJawRef = useRef(0); |
|||
|
|||
// Full references for eyes, brows, cheeks, etc.
|
|||
const eyeBlinkLeftIndexRef = useRef(null); |
|||
const eyeLookDownLeftIndexRef = useRef(null); |
|||
const eyeLookInLeftIndexRef = useRef(null); |
|||
const eyeLookOutLeftIndexRef = useRef(null); |
|||
const eyeLookUpLeftIndexRef = useRef(null); |
|||
const eyeSquintLeftIndexRef = useRef(null); |
|||
const eyeWideLeftIndexRef = useRef(null); |
|||
|
|||
const eyeBlinkRightIndexRef = useRef(null); |
|||
const eyeLookDownRightIndexRef = useRef(null); |
|||
const eyeLookInRightIndexRef = useRef(null); |
|||
const eyeLookOutRightIndexRef = useRef(null); |
|||
const eyeLookUpRightIndexRef = useRef(null); |
|||
const eyeSquintRightIndexRef = useRef(null); |
|||
const eyeWideRightIndexRef = useRef(null); |
|||
|
|||
const jawForwardIndexRef = useRef(null); |
|||
const jawLeftIndexRef = useRef(null); |
|||
const jawRightIndexRef = useRef(null); |
|||
const mouthFunnelIndexRef = useRef(null); |
|||
const mouthLeftIndexRef = useRef(null); |
|||
const mouthRightIndexRef = useRef(null); |
|||
const mouthSmileLeftIndexRef = useRef(null); |
|||
const mouthSmileRightIndexRef = useRef(null); |
|||
const mouthFrownLeftIndexRef = useRef(null); |
|||
const mouthFrownRightIndexRef = useRef(null); |
|||
const mouthDimpleLeftIndexRef = useRef(null); |
|||
const mouthDimpleRightIndexRef = useRef(null); |
|||
const mouthStretchLeftIndexRef = useRef(null); |
|||
const mouthStretchRightIndexRef = useRef(null); |
|||
const mouthRollLowerIndexRef = useRef(null); |
|||
const mouthRollUpperIndexRef = useRef(null); |
|||
const mouthShrugLowerIndexRef = useRef(null); |
|||
const mouthShrugUpperIndexRef = useRef(null); |
|||
const mouthPressLeftIndexRef = useRef(null); |
|||
const mouthPressRightIndexRef = useRef(null); |
|||
const mouthLowerDownLeftIndexRef = useRef(null); |
|||
const mouthLowerDownRightIndexRef = useRef(null); |
|||
const mouthUpperUpLeftIndexRef = useRef(null); |
|||
const mouthUpperUpRightIndexRef = useRef(null); |
|||
|
|||
const browDownLeftIndexRef = useRef(null); |
|||
const browDownRightIndexRef = useRef(null); |
|||
const browInnerUpIndexRef = useRef(null); |
|||
const browOuterUpLeftIndexRef = useRef(null); |
|||
const browOuterUpRightIndexRef = useRef(null); |
|||
const cheekPuffIndexRef = useRef(null); |
|||
const cheekSquintLeftIndexRef = useRef(null); |
|||
const cheekSquintRightIndexRef = useRef(null); |
|||
const noseSneerLeftIndexRef = useRef(null); |
|||
const noseSneerRightIndexRef = useRef(null); |
|||
const tongueOutIndexRef = useRef(null); |
|||
|
|||
// We'll cycle frames in the JSON-based emotion file each frame
|
|||
const currentFrameRef = useRef(0); // keep the frame counter in a ref so it survives re-renders |
|||
|
|||
// Crossfade helper
|
|||
const fadeToAction = useCallback((action, duration = 1.0) => { |
|||
if (!mixerRef.current || !action) return; |
|||
const previousAction = currentActionRef.current; |
|||
|
|||
if (!previousAction) { |
|||
action.reset().setEffectiveTimeScale(1).setEffectiveWeight(1).play(); |
|||
currentActionRef.current = action; |
|||
return; |
|||
} |
|||
if (previousAction !== action) { |
|||
action.reset().setEffectiveTimeScale(1).setEffectiveWeight(1).play(); |
|||
action.crossFadeFrom(previousAction, duration, false); |
|||
|
|||
setTimeout(() => { |
|||
if (previousAction !== action) { |
|||
previousAction.stop(); |
|||
} |
|||
}, duration * 1000); |
|||
|
|||
currentActionRef.current = action; |
|||
} |
|||
}, []); |
|||
|
|||
// On mount, setup animations + morph references
|
|||
useEffect(() => { |
|||
if (!avatarScene || !animations || animations.length === 0) return; |
|||
avatarRef.current = avatarScene; |
|||
|
|||
mixerRef.current = new THREE.AnimationMixer(avatarScene); |
|||
|
|||
// find clips containing "idle" or "talk" or "listen" or "wave"
|
|||
const idleClip = animations.find((clip) => |
|||
clip.name.toLowerCase().includes('idle') |
|||
); |
|||
const talkClip = animations.find((clip) => |
|||
clip.name.toLowerCase().includes('talk') |
|||
); |
|||
const listeningClip = animations.find((clip) => |
|||
clip.name.toLowerCase().includes('listen') |
|||
); |
|||
const wavingClip = animations.find((clip) => |
|||
clip.name.toLowerCase().includes('waving') |
|||
); |
|||
|
|||
if (!idleClip) { |
|||
console.warn("Could not find any animation containing 'idle'."); |
|||
} |
|||
if (!talkClip) { |
|||
console.warn("Could not find any animation containing 'talk'."); |
|||
} |
|||
if (!listeningClip) { |
|||
console.warn("Could not find any animation containing 'listen'."); |
|||
} |
|||
if (!wavingClip) { |
|||
console.warn("Could not find any animation containing 'wave'."); |
|||
} |
|||
|
|||
// create actions if found
|
|||
if (idleClip) { |
|||
const idleAction = mixerRef.current.clipAction(idleClip, avatarScene); |
|||
idleAction.loop = THREE.LoopRepeat; |
|||
idleActionRef.current = idleAction; |
|||
// start idle by default
|
|||
idleAction.reset().setEffectiveWeight(1).play(); |
|||
currentActionRef.current = idleAction; |
|||
} |
|||
|
|||
if (talkClip) { |
|||
const talkAction = mixerRef.current.clipAction(talkClip, avatarScene); |
|||
talkAction.loop = THREE.LoopRepeat; |
|||
talkActionRef.current = talkAction; |
|||
} |
|||
|
|||
if (listeningClip) { |
|||
const la = mixerRef.current.clipAction(listeningClip, avatarScene); |
|||
la.loop = THREE.LoopRepeat; |
|||
listeningActionRef.current = la; |
|||
} |
|||
|
|||
if (wavingClip) { |
|||
const wa = mixerRef.current.clipAction(wavingClip, avatarScene); |
|||
wa.loop = THREE.LoopRepeat; |
|||
wavingActionRef.current = wa; |
|||
} |
|||
|
|||
// Positioning
|
|||
avatarScene.position.set(0, -4.6, 3.6); |
|||
avatarScene.scale.set(3, 3, 3); |
|||
|
|||
// Find references
|
|||
const headMesh = avatarScene.getObjectByName('Wolf3D_Head'); |
|||
const teethMesh = avatarScene.getObjectByName('Wolf3D_Teeth'); |
|||
const eyeLeftMesh = avatarScene.getObjectByName('EyeLeft'); |
|||
const eyeRightMesh = avatarScene.getObjectByName('EyeRight'); |
|||
|
|||
if (headMesh && headMesh.morphTargetDictionary) { |
|||
headMeshRef.current = headMesh; |
|||
// map references
|
|||
eyeBlinkLeftIndexRef.current = headMesh.morphTargetDictionary['eyeBlinkLeft']; |
|||
eyeBlinkRightIndexRef.current = headMesh.morphTargetDictionary['eyeBlinkRight']; |
|||
eyeLookDownLeftIndexRef.current = headMesh.morphTargetDictionary['eyeLookDownLeft']; |
|||
eyeLookDownRightIndexRef.current = headMesh.morphTargetDictionary['eyeLookDownRight']; |
|||
eyeLookInLeftIndexRef.current = headMesh.morphTargetDictionary['eyeLookInLeft']; |
|||
eyeLookInRightIndexRef.current = headMesh.morphTargetDictionary['eyeLookInRight']; |
|||
eyeLookOutLeftIndexRef.current = headMesh.morphTargetDictionary['eyeLookOutLeft']; |
|||
eyeLookOutRightIndexRef.current = headMesh.morphTargetDictionary['eyeLookOutRight']; |
|||
eyeLookUpLeftIndexRef.current = headMesh.morphTargetDictionary['eyeLookUpLeft']; |
|||
eyeLookUpRightIndexRef.current = headMesh.morphTargetDictionary['eyeLookUpRight']; |
|||
eyeSquintLeftIndexRef.current = headMesh.morphTargetDictionary['eyeSquintLeft']; |
|||
eyeSquintRightIndexRef.current = headMesh.morphTargetDictionary['eyeSquintRight']; |
|||
eyeWideLeftIndexRef.current = headMesh.morphTargetDictionary['eyeWideLeft']; |
|||
eyeWideRightIndexRef.current = headMesh.morphTargetDictionary['eyeWideRight']; |
|||
|
|||
browDownLeftIndexRef.current = headMesh.morphTargetDictionary['browDownLeft']; |
|||
browDownRightIndexRef.current = headMesh.morphTargetDictionary['browDownRight']; |
|||
browInnerUpIndexRef.current = headMesh.morphTargetDictionary['browInnerUp']; |
|||
browOuterUpLeftIndexRef.current = headMesh.morphTargetDictionary['browOuterUpLeft']; |
|||
browOuterUpRightIndexRef.current = headMesh.morphTargetDictionary['browOuterUpRight']; |
|||
cheekPuffIndexRef.current = headMesh.morphTargetDictionary['cheekPuff']; |
|||
cheekSquintLeftIndexRef.current = headMesh.morphTargetDictionary['cheekSquintLeft']; |
|||
cheekSquintRightIndexRef.current = headMesh.morphTargetDictionary['cheekSquintRight']; |
|||
noseSneerLeftIndexRef.current = headMesh.morphTargetDictionary['noseSneerLeft']; |
|||
noseSneerRightIndexRef.current = headMesh.morphTargetDictionary['noseSneerRight']; |
|||
|
|||
jawForwardIndexRef.current = headMesh.morphTargetDictionary['jawForward']; |
|||
jawLeftIndexRef.current = headMesh.morphTargetDictionary['jawLeft']; |
|||
jawRightIndexRef.current = headMesh.morphTargetDictionary['jawRight']; |
|||
jawOpenIndexRef.current = headMesh.morphTargetDictionary['jawOpen']; |
|||
|
|||
mouthCloseIndexRef.current = headMesh.morphTargetDictionary['mouthClose']; |
|||
mouthFunnelIndexRef.current = headMesh.morphTargetDictionary['mouthFunnel']; |
|||
mouthPuckerIndexRef.current = headMesh.morphTargetDictionary['mouthPucker']; |
|||
mouthLeftIndexRef.current = headMesh.morphTargetDictionary['mouthLeft']; |
|||
mouthRightIndexRef.current = headMesh.morphTargetDictionary['mouthRight']; |
|||
mouthSmileLeftIndexRef.current = headMesh.morphTargetDictionary['mouthSmileLeft']; |
|||
mouthSmileRightIndexRef.current = headMesh.morphTargetDictionary['mouthSmileRight']; |
|||
mouthFrownLeftIndexRef.current = headMesh.morphTargetDictionary['mouthFrownLeft']; |
|||
mouthFrownRightIndexRef.current = headMesh.morphTargetDictionary['mouthFrownRight']; |
|||
mouthDimpleLeftIndexRef.current = headMesh.morphTargetDictionary['mouthDimpleLeft']; |
|||
mouthDimpleRightIndexRef.current = headMesh.morphTargetDictionary['mouthDimpleRight']; |
|||
mouthStretchLeftIndexRef.current = headMesh.morphTargetDictionary['mouthStretchLeft']; |
|||
mouthStretchRightIndexRef.current = headMesh.morphTargetDictionary['mouthStretchRight']; |
|||
mouthRollLowerIndexRef.current = headMesh.morphTargetDictionary['mouthRollLower']; |
|||
mouthRollUpperIndexRef.current = headMesh.morphTargetDictionary['mouthRollUpper']; |
|||
mouthShrugLowerIndexRef.current = headMesh.morphTargetDictionary['mouthShrugLower']; |
|||
mouthShrugUpperIndexRef.current = headMesh.morphTargetDictionary['mouthShrugUpper']; |
|||
mouthPressLeftIndexRef.current = headMesh.morphTargetDictionary['mouthPressLeft']; |
|||
mouthPressRightIndexRef.current = headMesh.morphTargetDictionary['mouthPressRight']; |
|||
mouthLowerDownLeftIndexRef.current = headMesh.morphTargetDictionary['mouthLowerDownLeft']; |
|||
mouthLowerDownRightIndexRef.current = headMesh.morphTargetDictionary['mouthLowerDownRight']; |
|||
mouthUpperUpLeftIndexRef.current = headMesh.morphTargetDictionary['mouthUpperUpLeft']; |
|||
mouthUpperUpRightIndexRef.current = headMesh.morphTargetDictionary['mouthUpperUpRight']; |
|||
|
|||
tongueOutIndexRef.current = headMesh.morphTargetDictionary['tongueOut']; |
|||
} |
|||
|
|||
if (teethMesh && teethMesh.morphTargetDictionary) { |
|||
teethMeshRef.current = teethMesh; |
|||
jawOpenIndexTeethRef.current = teethMesh.morphTargetDictionary['jawOpen']; |
|||
} |
|||
|
|||
if (eyeLeftMesh && eyeLeftMesh.morphTargetDictionary) { |
|||
eyeLeftMeshRef.current = eyeLeftMesh; |
|||
} |
|||
if (eyeRightMesh && eyeRightMesh.morphTargetDictionary) { |
|||
eyeRightMeshRef.current = eyeRightMesh; |
|||
} |
|||
|
|||
return () => { |
|||
if (mixerRef.current) { |
|||
mixerRef.current.stopAllAction(); |
|||
} |
|||
}; |
|||
}, [avatarScene, animations, fadeToAction]); |
|||
|
|||
// This effect decides which animation to play
|
|||
useEffect(() => { |
|||
if (!mixerRef.current) return; |
|||
|
|||
// If forced wave => do that first
|
|||
if (shouldWave && wavingActionRef.current) { |
|||
fadeToAction(wavingActionRef.current, 0.8); |
|||
return; |
|||
} |
|||
|
|||
// If assistant is speaking => talk
|
|||
if (isAssistantSpeaking && talkActionRef.current) { |
|||
fadeToAction(talkActionRef.current, 1.0); |
|||
return; |
|||
} |
|||
|
|||
// If userSpeaking => listening
|
|||
if (isUserSpeaking && listeningActionRef.current) { |
|||
fadeToAction(listeningActionRef.current, 1.0); |
|||
return; |
|||
} |
|||
|
|||
// else => idle
|
|||
if (idleActionRef.current) { |
|||
fadeToAction(idleActionRef.current, 1.0); |
|||
} |
|||
}, [isVoiceActive, isAssistantSpeaking, shouldWave, isUserSpeaking, fadeToAction]); |
|||
|
|||
// Safely set a single morph target influence by LERPing from the current to the new value.
|
|||
function setVal(mesh, idx, targetVal) { |
|||
if (mesh && idx != null) { |
|||
const currentVal = mesh.morphTargetInfluences[idx] ?? 0; |
|||
// Adjust the lerp speed to taste. Lower = slower fade.
|
|||
const lerpFactor = 0.1; |
|||
mesh.morphTargetInfluences[idx] = THREE.MathUtils.lerp( |
|||
currentVal, |
|||
targetVal, |
|||
lerpFactor |
|||
); |
|||
} |
|||
} |
|||
|
|||
// Applies a single frame from the chosen emotionFile
|
|||
function applyAnimationFrame(file, frame) { |
|||
if (!file || !file.weightMat) return; |
|||
const shapes = file.weightMat[frame]; |
|||
if (!shapes) return; |
|||
|
|||
const headMesh = headMeshRef.current; |
|||
const teethMesh = teethMeshRef.current; |
|||
const eyeLeftMesh = eyeLeftMeshRef.current; |
|||
const eyeRightMesh = eyeRightMeshRef.current; |
|||
|
|||
// Example usage: setVal(...) with LERP
|
|||
setVal(headMesh, eyeBlinkLeftIndexRef.current, shapes[0]); |
|||
setVal(eyeLeftMesh, eyeLookDownLeftIndexRef.current, shapes[1]); |
|||
setVal(eyeLeftMesh, eyeLookInLeftIndexRef.current, shapes[2]); |
|||
setVal(eyeLeftMesh, eyeLookOutLeftIndexRef.current, shapes[3]); |
|||
setVal(eyeLeftMesh, eyeLookUpLeftIndexRef.current, shapes[4]); |
|||
setVal(headMesh, eyeSquintLeftIndexRef.current, shapes[5]); |
|||
setVal(headMesh, eyeWideLeftIndexRef.current, shapes[6]); |
|||
setVal(headMesh, eyeBlinkRightIndexRef.current, shapes[7]); |
|||
setVal(eyeRightMesh, eyeLookDownRightIndexRef.current, shapes[8]); |
|||
setVal(eyeRightMesh, eyeLookInRightIndexRef.current, shapes[9]); |
|||
setVal(eyeRightMesh, eyeLookOutRightIndexRef.current, shapes[10]); |
|||
setVal(eyeRightMesh, eyeLookUpRightIndexRef.current, shapes[11]); |
|||
setVal(headMesh, eyeSquintRightIndexRef.current, shapes[12]); |
|||
setVal(headMesh, eyeWideRightIndexRef.current, shapes[13]); |
|||
setVal(headMesh, jawForwardIndexRef.current, shapes[14]); |
|||
setVal(teethMesh, jawForwardIndexRef.current, shapes[14]); |
|||
setVal(headMesh, jawLeftIndexRef.current, shapes[15]); |
|||
setVal(teethMesh, jawLeftIndexRef.current, shapes[15]); |
|||
setVal(headMesh, jawRightIndexRef.current, shapes[16]); |
|||
setVal(teethMesh, jawRightIndexRef.current, shapes[16]); |
|||
// The next lines were commented out in your original code for lipsync overlay:
|
|||
// setVal(headMesh, jawOpenIndexRef.current, shapes[17]);
|
|||
// setVal(teethMesh, jawOpenIndexTeethRef.current, shapes[17]);
|
|||
// setVal(headMesh, mouthCloseIndexRef.current, shapes[18]);
|
|||
|
|||
// setVal(headMesh, mouthFunnelIndexRef.current, shapes[19]);
|
|||
// setVal(headMesh, mouthPuckerIndexRef.current, shapes[20]);
|
|||
// setVal(headMesh, mouthLeftIndexRef.current, shapes[21]);
|
|||
// setVal(headMesh, mouthRightIndexRef.current, shapes[22]);
|
|||
setVal(headMesh, mouthSmileLeftIndexRef.current, shapes[23]); |
|||
setVal(headMesh, mouthSmileRightIndexRef.current, shapes[24]); |
|||
setVal(headMesh, mouthFrownLeftIndexRef.current, shapes[25]); |
|||
setVal(headMesh, mouthFrownRightIndexRef.current, shapes[26]); |
|||
setVal(headMesh, mouthDimpleLeftIndexRef.current, shapes[27]); |
|||
setVal(headMesh, mouthDimpleRightIndexRef.current, shapes[28]); |
|||
setVal(headMesh, mouthStretchLeftIndexRef.current, shapes[29]); |
|||
setVal(headMesh, mouthStretchRightIndexRef.current, shapes[30]); |
|||
// setVal(headMesh, mouthRollLowerIndexRef.current, shapes[31]);
|
|||
// setVal(headMesh, mouthRollUpperIndexRef.current, shapes[32]);
|
|||
setVal(headMesh, mouthShrugLowerIndexRef.current, shapes[33]); |
|||
setVal(headMesh, mouthShrugUpperIndexRef.current, shapes[34]); |
|||
// setVal(headMesh, mouthPressLeftIndexRef.current, shapes[35]);
|
|||
// setVal(headMesh, mouthPressRightIndexRef.current, shapes[36]);
|
|||
// setVal(headMesh, mouthLowerDownLeftIndexRef.current, shapes[37]);
|
|||
// setVal(headMesh, mouthLowerDownRightIndexRef.current, shapes[38]);
|
|||
// setVal(headMesh, mouthUpperUpLeftIndexRef.current, shapes[39]);
|
|||
// setVal(headMesh, mouthUpperUpRightIndexRef.current, shapes[40]);
|
|||
|
|||
setVal(headMesh, browDownLeftIndexRef.current, shapes[41]); |
|||
setVal(headMesh, browDownRightIndexRef.current, shapes[42]); |
|||
setVal(headMesh, browInnerUpIndexRef.current, shapes[43]); |
|||
setVal(headMesh, browOuterUpLeftIndexRef.current, shapes[44]); |
|||
setVal(headMesh, browOuterUpRightIndexRef.current, shapes[45]); |
|||
setVal(headMesh, cheekPuffIndexRef.current, shapes[46]); |
|||
setVal(headMesh, cheekSquintLeftIndexRef.current, shapes[47]); |
|||
setVal(headMesh, cheekSquintRightIndexRef.current, shapes[48]); |
|||
setVal(headMesh, noseSneerLeftIndexRef.current, shapes[49]); |
|||
setVal(headMesh, noseSneerRightIndexRef.current, shapes[50]); |
|||
setVal(teethMesh, tongueOutIndexRef.current, shapes[51]); |
|||
} |
|||
|
|||
// on each frame
|
|||
useFrame((_, delta) => { |
|||
// update animation mixer
|
|||
if (mixerRef.current) { |
|||
mixerRef.current.update(delta); |
|||
} |
|||
|
|||
// If we have a valid emotionFile, cycle its frames
|
|||
if (emotionFile && emotionFile.weightMat) { |
|||
if (currentFrameRef.current >= emotionFile.numFrames) { |
|||
currentFrameRef.current = 0; |
|||
} |
|||
applyAnimationFrame(emotionFile, currentFrameRef.current); |
|||
currentFrameRef.current += 1; |
|||
} |
|||
|
|||
// Lipsync overlay for mouth if speaking
|
|||
if (isAssistantSpeaking && lipsyncRef && lipsyncRef.current) { |
|||
const values = lipsyncRef.current.update(); |
|||
if (values) { |
|||
let [puckerTarget, closeTarget, jawTarget] = values; |
|||
|
|||
// The close can't exceed jaw - 0.05
|
|||
const maxClose = jawTarget - 0.05; |
|||
if (closeTarget > maxClose) { |
|||
closeTarget = maxClose; |
|||
} |
|||
|
|||
// [NEW] Lerp to these new lipsync values from the old frame
|
|||
const lerpSpeed = 0.3; // tweak to taste
|
|||
const newPucker = THREE.MathUtils.lerp(prevPuckerRef.current, puckerTarget, lerpSpeed); |
|||
const newClose = THREE.MathUtils.lerp(prevCloseRef.current, closeTarget, lerpSpeed); |
|||
const newJaw = THREE.MathUtils.lerp(prevJawRef.current, jawTarget, lerpSpeed); |
|||
|
|||
// store them for next frame
|
|||
prevPuckerRef.current = newPucker; |
|||
prevCloseRef.current = newClose; |
|||
prevJawRef.current = newJaw; |
|||
|
|||
// apply to morph influences
|
|||
if (headMeshRef.current) { |
|||
if (mouthPuckerIndexRef.current != null) |
|||
headMeshRef.current.morphTargetInfluences[mouthPuckerIndexRef.current] = newPucker; |
|||
if (mouthCloseIndexRef.current != null) |
|||
headMeshRef.current.morphTargetInfluences[mouthCloseIndexRef.current] = newClose; |
|||
if (jawOpenIndexRef.current != null) |
|||
headMeshRef.current.morphTargetInfluences[jawOpenIndexRef.current] = newJaw; |
|||
} |
|||
if (teethMeshRef.current && jawOpenIndexTeethRef.current != null) { |
|||
teethMeshRef.current.morphTargetInfluences[jawOpenIndexTeethRef.current] = newJaw; |
|||
} |
|||
} else { |
|||
// if no lipsync values => smoothly go to 0
|
|||
const lerpSpeed = 0.3; |
|||
const newPucker = THREE.MathUtils.lerp(prevPuckerRef.current, 0, lerpSpeed); |
|||
const newClose = THREE.MathUtils.lerp(prevCloseRef.current, 0, lerpSpeed); |
|||
const newJaw = THREE.MathUtils.lerp(prevJawRef.current, 0, lerpSpeed); |
|||
|
|||
prevPuckerRef.current = newPucker; |
|||
prevCloseRef.current = newClose; |
|||
prevJawRef.current = newJaw; |
|||
|
|||
if (headMeshRef.current) { |
|||
if (mouthPuckerIndexRef.current != null) |
|||
headMeshRef.current.morphTargetInfluences[mouthPuckerIndexRef.current] = newPucker; |
|||
if (mouthCloseIndexRef.current != null) |
|||
headMeshRef.current.morphTargetInfluences[mouthCloseIndexRef.current] = newClose; |
|||
if (jawOpenIndexRef.current != null) |
|||
headMeshRef.current.morphTargetInfluences[jawOpenIndexRef.current] = newJaw; |
|||
} |
|||
if (teethMeshRef.current && jawOpenIndexTeethRef.current != null) { |
|||
teethMeshRef.current.morphTargetInfluences[jawOpenIndexTeethRef.current] = newJaw; |
|||
} |
|||
} |
|||
} else { |
|||
// Not speaking => smoothly go to 0
|
|||
const lerpSpeed = 0.3; |
|||
const newPucker = THREE.MathUtils.lerp(prevPuckerRef.current, 0, lerpSpeed); |
|||
const newClose = THREE.MathUtils.lerp(prevCloseRef.current, 0, lerpSpeed); |
|||
const newJaw = THREE.MathUtils.lerp(prevJawRef.current, 0, lerpSpeed); |
|||
|
|||
prevPuckerRef.current = newPucker; |
|||
prevCloseRef.current = newClose; |
|||
prevJawRef.current = newJaw; |
|||
|
|||
if (headMeshRef.current) { |
|||
if (mouthPuckerIndexRef.current != null) |
|||
headMeshRef.current.morphTargetInfluences[mouthPuckerIndexRef.current] = newPucker; |
|||
if (mouthCloseIndexRef.current != null) |
|||
headMeshRef.current.morphTargetInfluences[mouthCloseIndexRef.current] = newClose; |
|||
if (jawOpenIndexRef.current != null) |
|||
headMeshRef.current.morphTargetInfluences[jawOpenIndexRef.current] = newJaw; |
|||
} |
|||
if (teethMeshRef.current && jawOpenIndexTeethRef.current != null) { |
|||
teethMeshRef.current.morphTargetInfluences[jawOpenIndexTeethRef.current] = newJaw; |
|||
} |
|||
} |
|||
}); |
|||
|
|||
return <primitive object={avatarScene} />; |
|||
} |
|||
|
|||
export default Avatar; |
|||
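The shared `useStore` from `src/store.js` is not included in this excerpt. From the selectors used in `Avatar.js` above and the setters used in `ChatWindow.js` below, a minimal compatible sketch would be the following (the defaults and the `setAssistantEmotion` setter are assumptions):

```js
// src/store.js — sketch inferred from usage, not the actual file in this commit
import { create } from 'zustand';

const useStore = create((set) => ({
  isAssistantSpeaking: false,
  setIsAssistantSpeaking: (v) => set({ isAssistantSpeaking: v }),
  isUserSpeaking: false,
  setIsUserSpeaking: (v) => set({ isUserSpeaking: v }),
  assistantEmotion: null,          // e.g. 'Happiness' | 'Sadness' | ... | 'Unknown'
  setAssistantEmotion: (e) => set({ assistantEmotion: e }),   // assumed setter
  shouldWave: false,
  setShouldWave: (v) => set({ shouldWave: v }),
}));

export default useStore;
```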
@@ -0,0 +1,425 @@ |
|||
// src/components/ChatWindow.js
|
|||
|
|||
import React, { |
|||
useState, |
|||
useRef, |
|||
useEffect, |
|||
useLayoutEffect, |
|||
useCallback, |
|||
} from 'react'; |
|||
import '../styles/ChatWindow.css'; |
|||
import { FaPaperPlane, FaMicrophone } from 'react-icons/fa'; |
|||
import Lipsync from './Lipsync'; |
|||
import useAudio from '../hooks/useAudio'; |
|||
import useChat from '../hooks/useChat'; |
|||
import useStore from '../store'; |
|||
import VoiceRecordingIndicator from './VoiceRecordingIndicator'; |
|||
import EmotionBubble from './EmotionBubble'; |
|||
|
|||
/** |
|||
* Reads ?name=John from the current URL if present, |
|||
* returns that string or null if not present. |
|||
*/ |
|||
function getUrlNameParam() { |
|||
const params = new URLSearchParams(window.location.search); |
|||
return params.get('name'); // e.g. "Bob" or null
|
|||
} |
|||
|
|||
function ChatWindow({ |
|||
lipsyncRef, |
|||
// The default welcome message if no prop is passed
|
|||
initialMessage = "Please introduce yourself and end your greeting with a suitable, open question", |
|||
// controlling idle timeouts
|
|||
startIdleTimers, |
|||
cancelIdleTimers, |
|||
}) { |
|||
const messagesEndRef = useRef(null); |
|||
const userManuallyOffRef = useRef(false); |
|||
|
|||
// from store
|
|||
const { |
|||
isAssistantSpeaking, |
|||
setIsAssistantSpeaking, |
|||
setIsUserSpeaking, |
|||
// We'll read assistantEmotion so we can spawn emoticon bubbles
|
|||
assistantEmotion, |
|||
// [NEW] We'll also get/set the shouldWave state
|
|||
setShouldWave, |
|||
} = useStore(); |
|||
|
|||
// ========== TEXT CHAT DISABLE LOGIC (while user is speaking) ==========
|
|||
const [disableTextChat, setDisableTextChat] = useState(false); |
|||
const [hasWavedWelcome, setHasWavedWelcome] = useState(false); |
|||
|
|||
/** |
|||
* (NEW) We'll build a custom initial message if the URL has ?name=... |
|||
* e.g. if name=Alice => |
|||
* "Please introduce yourself... You will now be talking to Alice, so address them using their name." |
|||
*/ |
|||
const nameParam = getUrlNameParam(); |
|||
const customInitialMsg = nameParam |
|||
? `${initialMessage} You will now be talking to ${nameParam}, so please address them directly with their name when communicating.` |
|||
: initialMessage; |
|||
|
|||
// from useChat
|
|||
const { |
|||
messages, |
|||
setMessages, |
|||
sendUserMessage, |
|||
sendAudioMessage, |
|||
sendIdleSystemMessage, |
|||
} = useChat({ |
|||
initialMessage: customInitialMsg, // pass the possibly extended message
|
|||
onMp3Chunk: (blob) => handleMp3Chunk(blob), |
|||
}); |
|||
|
|||
// handle idle timers with stable reference
|
|||
const onUserActivity = useCallback(() => { |
|||
cancelIdleTimers?.(); |
|||
startIdleTimers?.(sendIdleSystemMessage); |
|||
}, [cancelIdleTimers, startIdleTimers, sendIdleSystemMessage]); |
|||
|
|||
// ========== A) USER VOICE RECORDING ==========
|
|||
const { |
|||
isRecording, |
|||
isVoiceActive, |
|||
handleSpeakButton, |
|||
startListening, |
|||
stopListening, |
|||
elapsedTime, |
|||
currentRms, |
|||
} = useAudio({ |
|||
onAudioCaptured: (audioBlob) => { |
|||
setMessages((prev) => [ |
|||
...prev, |
|||
{ text: '🎤 (You sent audio)', sender: 'user', isAudio: true }, |
|||
]); |
|||
|
|||
const reader = new FileReader(); |
|||
reader.readAsDataURL(audioBlob); |
|||
reader.onloadend = () => { |
|||
const base64data = reader.result.split(',')[1]; |
|||
sendAudioMessage(base64data); |
|||
}; |
|||
|
|||
onUserActivity(); |
|||
}, |
|||
onError: (err) => { |
|||
console.error(err); |
|||
}, |
|||
silentStopTime: 3000, |
|||
onAutoStop: () => { |
|||
// Auto-restart after avatar finishes, unless user forcibly toggled off
|
|||
}, |
|||
onManualStop: () => { |
|||
setDisableTextChat(false); |
|||
}, |
|||
}); |
|||
|
|||
// Whenever isVoiceActive changes or assistant starts speaking => disable chat
|
|||
useEffect(() => { |
|||
if (isVoiceActive || isAssistantSpeaking) { |
|||
setDisableTextChat(true); |
|||
console.log('Started recording user voice or avatar is speaking.'); |
|||
setIsUserSpeaking(true); |
|||
} else if (!isRecording) { |
|||
setDisableTextChat(false); |
|||
console.log('Stopped recording user voice & avatar not speaking.'); |
|||
setIsUserSpeaking(false); |
|||
} |
|||
}, [isVoiceActive, isAssistantSpeaking, isRecording, setIsUserSpeaking]); |
|||
|
|||
// ========== B) AVATAR MP3 CHUNK QUEUE LOGIC ==========
|
|||
const audioQueueRef = useRef([]); |
|||
const audioElementRef = useRef(null); |
|||
const isPlayingRef = useRef(false); |
|||
const [isMessageFinalized, setIsMessageFinalized] = useState(false); |
|||
const messageTimeoutRef = useRef(null); |
|||
|
|||
const finalizeCurrentMessage = useCallback(() => { |
|||
console.log('No chunks arrived recently => finalizing message.'); |
|||
setIsMessageFinalized(true); |
|||
}, []); |
|||
|
|||
const playNextInQueue = useCallback(() => { |
|||
const audioEl = audioElementRef.current; |
|||
if (!audioEl) return; |
|||
|
|||
if (audioQueueRef.current.length === 0) { |
|||
isPlayingRef.current = false; |
|||
setIsAssistantSpeaking(false); |
|||
|
|||
if (!userManuallyOffRef.current && isVoiceActive && !isRecording) { |
|||
startListening(); |
|||
} |
|||
|
|||
if (isMessageFinalized) { |
|||
console.log('Done playing all chunks => clearing entire queue now.'); |
|||
audioQueueRef.current = []; |
|||
setIsMessageFinalized(false); |
|||
} |
|||
return; |
|||
} |
|||
|
|||
isPlayingRef.current = true; |
|||
setIsAssistantSpeaking(true); |
|||
|
|||
const blob = audioQueueRef.current[0]; |
|||
const url = URL.createObjectURL(blob); |
|||
audioEl.src = url; |
|||
|
|||
audioEl |
|||
.play() |
|||
.then(() => { |
|||
// once it starts playing, do nothing special
|
|||
}) |
|||
.catch((err) => { |
|||
console.warn('Audio play() blocked or errored:', err); |
|||
}); |
|||
}, [ |
|||
setIsAssistantSpeaking, |
|||
isMessageFinalized, |
|||
isRecording, |
|||
isVoiceActive, |
|||
startListening, |
|||
]); |
|||
|
|||
const handleMp3Chunk = useCallback( |
|||
(blob) => { |
|||
if (isMessageFinalized) { |
|||
console.log('Ignoring new chunk; message is finalized.'); |
|||
return; |
|||
} |
|||
|
|||
if (messageTimeoutRef.current) { |
|||
clearTimeout(messageTimeoutRef.current); |
|||
} |
|||
|
|||
audioQueueRef.current.push(blob); |
|||
console.log( |
|||
'Pushed new chunk into queue. Queue length:', |
|||
audioQueueRef.current.length |
|||
); |
|||
|
|||
if (!isPlayingRef.current) { |
|||
playNextInQueue(); |
|||
} |
|||
|
|||
messageTimeoutRef.current = setTimeout(() => { |
|||
finalizeCurrentMessage(); |
|||
}, 2000); |
|||
}, |
|||
[isMessageFinalized, finalizeCurrentMessage, playNextInQueue] |
|||
); |
|||
|
|||
useEffect(() => { |
|||
const audioEl = audioElementRef.current; |
|||
if (!audioEl) return; |
|||
|
|||
const handlePlaying = () => { |
|||
// debug
|
|||
}; |
|||
|
|||
const handleEnded = () => { |
|||
const old = audioQueueRef.current.shift(); |
|||
console.log('Finished chunk => removing from queue.', old); |
|||
URL.revokeObjectURL(audioEl.src); |
|||
audioEl.src = ''; |
|||
|
|||
playNextInQueue(); |
|||
}; |
|||
|
|||
const handlePause = () => { |
|||
// if forcibly paused => treat as ended
|
|||
handleEnded(); |
|||
}; |
|||
|
|||
audioEl.addEventListener('playing', handlePlaying); |
|||
audioEl.addEventListener('ended', handleEnded); |
|||
audioEl.addEventListener('pause', handlePause); |
|||
|
|||
return () => { |
|||
audioEl.removeEventListener('playing', handlePlaying); |
|||
audioEl.removeEventListener('ended', handleEnded); |
|||
audioEl.removeEventListener('pause', handlePause); |
|||
}; |
|||
}, [playNextInQueue]); |
|||
|
|||
// ========== C) AUDIOCONTEXT + LIPSYNC HOOKUP ==========
|
|||
const audioContextRef = useRef(null); |
|||
|
|||
useEffect(() => { |
|||
if (!audioElementRef.current) return; |
|||
|
|||
if (!audioContextRef.current) { |
|||
audioContextRef.current = |
|||
new (window.AudioContext || window.webkitAudioContext)(); |
|||
|
|||
const source = audioContextRef.current.createMediaElementSource( |
|||
audioElementRef.current |
|||
); |
|||
source.connect(audioContextRef.current.destination); |
|||
|
|||
if (lipsyncRef && !lipsyncRef.current) { |
|||
lipsyncRef.current = new Lipsync(0.5, 0.6, 1.0, audioContextRef.current); |
|||
} |
|||
if (lipsyncRef?.current?.connectAudioNode) { |
|||
lipsyncRef.current.connectAudioNode(source); |
|||
} |
|||
} |
|||
}, [lipsyncRef]); |
|||
|
|||
// ====== (E) EMOTION BUBBLE LOGIC ======
|
|||
const [emotionBubbles, setEmotionBubbles] = useState([]); |
|||
|
|||
useEffect(() => { |
|||
if (!assistantEmotion) return; |
|||
if (assistantEmotion === 'Unknown') return; |
|||
setEmotionBubbles((prev) => [ |
|||
...prev, |
|||
{ id: Date.now(), emotion: assistantEmotion }, |
|||
]); |
|||
}, [assistantEmotion]); |
|||
|
|||
// ====== (F) TRIGGER WAVE: FIRST & GOODBYE ======
|
|||
useEffect(() => { |
|||
if (messages.length === 0) return; |
|||
|
|||
const assistantMessages = messages.filter((m) => m.sender === 'assistant'); |
|||
if (assistantMessages.length === 0) return; |
|||
|
|||
// 1) If exactly 1 assistant message => we just got the first => wave
|
|||
if (assistantMessages.length === 1 && !hasWavedWelcome) { |
|||
setShouldWave(true); |
|||
setTimeout(() => setShouldWave(false), 1200); |
|||
setHasWavedWelcome(true); // mark that we've waved
|
|||
} |
|||
|
|||
// 2) Check if the latest assistant message includes "goodbye"
|
|||
const lastMsg = assistantMessages[assistantMessages.length - 1]; |
|||
if (lastMsg && /goodbye/i.test(lastMsg.text)) { |
|||
setShouldWave(true); |
|||
setTimeout(() => setShouldWave(false), 1200); |
|||
setHasWavedWelcome(false); // If we re-start the APP, we want to be greeted again
|
|||
} |
|||
}, [messages, hasWavedWelcome, setShouldWave]); |
|||
|
|||
// Show input after first assistant message
|
|||
const [showInput, setShowInput] = useState(false); |
|||
useEffect(() => { |
|||
if (!showInput && messages.some((m) => m.sender === 'assistant')) { |
|||
setShowInput(true); |
|||
} |
|||
}, [messages, showInput]); |
|||
|
|||
const [input, setInput] = useState(''); |
|||
const handleSendText = useCallback(() => { |
|||
if (disableTextChat) return; |
|||
if (!input.trim()) return; |
|||
sendUserMessage(input); |
|||
setInput(''); |
|||
onUserActivity(); |
|||
}, [disableTextChat, input, sendUserMessage, onUserActivity]); |
|||
|
|||
useLayoutEffect(() => { |
|||
if (messagesEndRef.current) { |
|||
messagesEndRef.current.scrollIntoView({ behavior: 'smooth' }); |
|||
} |
|||
}, [messages]); |
|||
|
|||
const handleToggleSpeak = () => { |
|||
if (isVoiceActive) { |
|||
userManuallyOffRef.current = true; |
|||
} else { |
|||
userManuallyOffRef.current = false; |
|||
} |
|||
handleSpeakButton(); |
|||
onUserActivity(); |
|||
}; |
|||
|
|||
return ( |
|||
<> |
|||
<div className="chat-window"> |
|||
<div className="messages"> |
|||
{messages.map((msg, idx) => ( |
|||
<div |
|||
key={idx} |
|||
className={`message ${ |
|||
msg.sender === 'user' ? 'user' : 'backend' |
|||
}`}
|
|||
> |
|||
{msg.text} |
|||
</div> |
|||
))} |
|||
<div ref={messagesEndRef} /> |
|||
</div> |
|||
|
|||
{showInput && ( |
|||
<div className="input-area fade-in"> |
|||
<input |
|||
type="text" |
|||
placeholder="Write here" |
|||
value={input} |
|||
onChange={(e) => setInput(e.target.value)} |
|||
onKeyDown={(e) => { |
|||
if (e.key === 'Enter') { |
|||
handleSendText(); |
|||
} |
|||
}} |
|||
disabled={disableTextChat} |
|||
/> |
|||
<button |
|||
className="send-button" |
|||
onClick={handleSendText} |
|||
disabled={disableTextChat} |
|||
> |
|||
<FaPaperPlane /> |
|||
<span>Send</span> |
|||
</button> |
|||
|
|||
<button |
|||
className={`speak-button ${isRecording ? 'recording' : ''}`} |
|||
onClick={handleToggleSpeak} |
|||
disabled={isAssistantSpeaking} |
|||
> |
|||
<FaMicrophone /> |
|||
<span>{isVoiceActive ? 'Stop' : 'Speak'}</span> |
|||
</button> |
|||
</div> |
|||
)} |
|||
</div> |
|||
|
|||
{/* Single <audio> for the queue-based approach */} |
|||
<audio |
|||
ref={audioElementRef} |
|||
style={{ display: 'none' }} |
|||
controls={false} |
|||
preload="auto" |
|||
/> |
|||
|
|||
{isVoiceActive && ( |
|||
<div className="voice-indicator-floating"> |
|||
<VoiceRecordingIndicator |
|||
isRecording={isRecording} |
|||
elapsedTime={elapsedTime} |
|||
maxTime={Number(process.env.REACT_APP_MAX_VOICE_TIME || 30)} |
|||
rmsValue={currentRms || 0} |
|||
/> |
|||
</div> |
|||
)} |
|||
|
|||
{/* (F) RENDER EMOTION BUBBLES */} |
|||
{emotionBubbles.map((b) => ( |
|||
<EmotionBubble |
|||
key={b.id} |
|||
emotion={b.emotion} |
|||
onAnimationEnd={() => { |
|||
setEmotionBubbles((prev) => prev.filter((x) => x.id !== b.id)); |
|||
}} |
|||
/> |
|||
))} |
|||
</> |
|||
); |
|||
} |
|||
|
|||
export default ChatWindow; |
|||
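The `useChat` and `useAudio` hooks live under `src/hooks/` and their diffs are suppressed in this excerpt; from the call sites in `ChatWindow.js` above, their contracts are roughly the following (shapes inferred from usage, not the real implementations):

```js
// Contracts inferred from ChatWindow.js call sites; field semantics are assumptions.
//
//   useChat({ initialMessage, onMp3Chunk })
//     -> { messages, setMessages, sendUserMessage, sendAudioMessage, sendIdleSystemMessage }
//
//   useAudio({ onAudioCaptured, onError, silentStopTime, onAutoStop, onManualStop })
//     -> { isRecording, isVoiceActive, handleSpeakButton, startListening,
//          stopListening, elapsedTime, currentRms }
//
//   onAudioCaptured receives the recorded audio Blob; silentStopTime is the
//   silence window in ms before auto-stop (3000 in the code above).
```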
@@ -0,0 +1,51 @@ |
|||
// src/components/EmotionBubble.js
|
|||
import React, { useEffect } from 'react'; |
|||
import '../styles/EmotionBubble.css'; |
|||
|
|||
function emotionToEmoji(emotion) { |
|||
switch (emotion) { |
|||
case 'Happiness': |
|||
return '😊'; |
|||
case 'Sadness': |
|||
return '😢'; |
|||
case 'Anger': |
|||
return '😡'; |
|||
case 'Fear': |
|||
return '😨'; |
|||
case 'Disgust': |
|||
return '🤢'; |
|||
case 'Surprise': |
|||
return '😲'; |
|||
default: |
|||
// If backend returns 'Unknown' or anything else => no emoji
|
|||
return null; |
|||
} |
|||
} |
|||
|
|||
export default function EmotionBubble({ emotion, onAnimationEnd }) { |
|||
const emoji = emotionToEmoji(emotion); |
|||
|
|||
// Always call useEffect unconditionally:
|
|||
useEffect(() => { |
|||
// If no valid emoji => skip animation logic
|
|||
if (!emoji) return; |
|||
|
|||
const duration = 1500; |
|||
const timer = setTimeout(() => { |
|||
onAnimationEnd && onAnimationEnd(); |
|||
}, duration); |
|||
|
|||
return () => clearTimeout(timer); |
|||
}, [emoji, onAnimationEnd]); |
|||
|
|||
// If there's no emoji => return null
|
|||
if (!emoji) { |
|||
return null; |
|||
} |
|||
|
|||
return ( |
|||
<div className="emotion-bubble"> |
|||
{emoji} |
|||
</div> |
|||
); |
|||
} |
|||
@@ -0,0 +1,163 @@ |
|||
// src/components/Lipsync.js
|
|||
// Standalone lipsync class suitable for use in a React environment as a module.
|
|||
|
|||
const AudioContextClass = window.AudioContext || window.webkitAudioContext; |
|||
|
|||
class Lipsync { |
|||
constructor(threshold = 0.5, smoothness = 0.6, pitch = 1, audioContext) { |
|||
if (!audioContext) { |
|||
console.error("No AudioContext provided to Lipsync."); |
|||
return; |
|||
} |
|||
this.context = audioContext; |
|||
this.threshold = threshold; |
|||
this.smoothness = smoothness; |
|||
this.pitch = pitch; |
|||
|
|||
// Reference frequency bins
|
|||
this.refFBins = [0, 500, 700, 3000, 6000]; |
|||
this.fBins = [...this.refFBins]; |
|||
this.defineFBins(this.pitch); |
|||
|
|||
this.energy = [0,0,0,0,0,0,0,0]; |
|||
this.lipsyncBSW = [0,0,0]; |
|||
|
|||
this.sample = null; |
|||
this.stream = null; |
|||
this.analyser = null; |
|||
this.gainNode = null; |
|||
this.data = null; |
|||
this.working = false; |
|||
|
|||
this.init(); |
|||
} |
|||
|
|||
defineFBins(pitch) { |
|||
for (let i = 0; i < this.refFBins.length; i++) |
|||
this.fBins[i] = this.refFBins[i] * pitch; |
|||
} |
|||
|
|||
init() { |
|||
this.gainNode = this.context.createGain(); |
|||
this.analyser = this.context.createAnalyser(); |
|||
this.analyser.fftSize = 1024; |
|||
this.analyser.smoothingTimeConstant = this.smoothness; |
|||
this.data = new Float32Array(this.analyser.frequencyBinCount); |
|||
} |
|||
|
|||
connectAudioNode(audioNode) { |
|||
if (!this.context || !this.analyser) return; |
|||
audioNode.connect(this.analyser); |
|||
this.working = true; |
|||
} |
|||
|
|||
startMic() { |
|||
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) { |
|||
console.error("getUserMedia not supported."); |
|||
return; |
|||
} |
|||
this.stopSample(); |
|||
navigator.mediaDevices.getUserMedia({ audio: true }) |
|||
.then(stream => { |
|||
this.stream = stream; |
|||
this.sample = this.context.createMediaStreamSource(stream); |
|||
this.sample.connect(this.analyser); |
|||
this.working = true; |
|||
}) |
|||
.catch(e => console.error("ERROR: getUserMedia:", e)); |
|||
} |
|||
|
|||
startSample(URL) { |
|||
this.stopSample(); |
|||
const request = new XMLHttpRequest(); |
|||
request.open('GET', URL, true); |
|||
request.responseType = 'arraybuffer'; |
|||
request.onload = () => { |
|||
this.context.decodeAudioData(request.response, buffer => { |
|||
this.stopSample(); |
|||
this.sample = this.context.createBufferSource(); |
|||
this.sample.buffer = buffer; |
|||
this.playSample(); |
|||
}, e => console.error("Failed to load audio.", URL)); |
|||
}; |
|||
request.send(); |
|||
} |
|||
|
|||
playSample() { |
|||
if (!this.sample || !this.analyser) return; |
|||
this.sample.connect(this.analyser); |
|||
this.sample.onended = () => { this.working = false; }; |
|||
this.sample.start(0); |
|||
this.working = true; |
|||
} |
|||
|
|||
update() { |
|||
if (!this.working || !this.analyser) return null; |
|||
this.analyser.getFloatFrequencyData(this.data); |
|||
this.binAnalysis(); |
|||
this.lipAnalysis(); |
|||
return this.lipsyncBSW; |
|||
} |
|||
|
|||
stop() { |
|||
this.stopSample(); |
|||
this.working = false; |
|||
} |
|||
|
|||
stopSample() { |
|||
if (this.sample && this.sample.buffer) { |
|||
try { |
|||
this.sample.stop(0); |
|||
} catch (e) {} |
|||
} |
|||
if (this.stream) { |
|||
const tracks = this.stream.getTracks(); |
|||
tracks.forEach(t => t.stop()); |
|||
this.stream = null; |
|||
} |
|||
} |
|||
|
|||
binAnalysis() { |
|||
const nfft = this.analyser.frequencyBinCount; |
|||
const fs = this.context.sampleRate; |
|||
const fBins = this.fBins; |
|||
const energy = this.energy; |
|||
|
|||
for (let binInd = 0; binInd < fBins.length - 1; binInd++) { |
|||
const indxIn = Math.round(fBins[binInd] * nfft / (fs/2)); |
|||
const indxEnd = Math.round(fBins[binInd+1] * nfft / (fs/2)); |
|||
energy[binInd] = 0; |
|||
for (let i = indxIn; i < indxEnd; i++) { |
|||
let value = this.threshold + (this.data[i] + 20) / 140; |
|||
value = value > 0 ? value : 0; |
|||
energy[binInd] += value; |
|||
} |
|||
energy[binInd] /= (indxEnd - indxIn); |
|||
} |
|||
} |
|||
|
|||
lipAnalysis() { |
|||
const energy = this.energy; |
|||
if (!energy) return; |
|||
let value = 0; |
|||
|
|||
// Kiss
|
|||
value = (0.5 - (energy[2])) * 2; |
|||
if (energy[1] < 0.2) |
|||
value = value * (energy[1]*5); |
|||
value = Math.max(0, Math.min(value, 1)); |
|||
this.lipsyncBSW[0] = value * 0.8; // scale down to avoid exaggerated "fish lips"
|
|||
|
|||
// Lips closed
|
|||
value = energy[3]*3; |
|||
value = Math.max(0, Math.min(value, 1)); |
|||
this.lipsyncBSW[1] = value * 0.6; // scale down so the lips press together less
|
|||
|
|||
// Jaw
|
|||
value = energy[1]*0.8 - energy[3]*0.8; |
|||
value = Math.max(0, Math.min(value, 1)); |
|||
this.lipsyncBSW[2] = value * 1.4; // scale up so the jaw opens more
|
|||
} |
|||
} |
|||
|
|||
export default Lipsync; |
|||
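For orientation, here is a minimal sketch of how the blend-shape weights returned by `update()` could drive an avatar's morph targets inside a `useFrame` loop. The hook name, the ref wiring, and the morph target names (`MouthKiss`, `MouthClosed`, `JawOpen`) are placeholders, not part of this commit — the actual Avatar component is wired elsewhere and may use different names.

```js
// Sketch only: assumes lipsyncRef.current holds a Lipsync instance already
// connected to an audio source, and meshRef.current is a mesh with morph
// targets. All names below are hypothetical.
import { useFrame } from '@react-three/fiber';

export function useLipsyncBlendShapes(lipsyncRef, meshRef) {
  useFrame(() => {
    const lipsync = lipsyncRef.current;
    const mesh = meshRef.current;
    if (!lipsync || !mesh) return;

    // update() returns [kiss, lipsClosed, jawOpen], or null when not working
    const bsw = lipsync.update();
    if (!bsw) return;

    const dict = mesh.morphTargetDictionary;
    const influences = mesh.morphTargetInfluences;
    if (!dict || !influences) return;

    ['MouthKiss', 'MouthClosed', 'JawOpen'].forEach((name, i) => {
      const idx = dict[name];
      if (idx !== undefined) influences[idx] = Math.min(1, bsw[i]);
    });
  });
}
```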
@ -0,0 +1,20 @@ |
|||
// Loader.js
|
|||
import React from 'react'; |
|||
import { useProgress } from '@react-three/drei'; |
|||
import '../styles/Loader.css'; |
|||
|
|||
function Loader() { |
|||
const { progress } = useProgress(); |
|||
// progress = 0..100 as assets load
|
|||
|
|||
return ( |
|||
<div className="loader-overlay"> |
|||
<div className="loader-content"> |
|||
<div className="spinner" /> |
|||
<p>Loading Conversation... {Math.floor(progress)}%</p> |
|||
</div> |
|||
</div> |
|||
); |
|||
} |
|||
|
|||
export default Loader; |
|||
@ -0,0 +1,185 @@ |
|||
// src/components/Scene.js
|
|||
|
|||
import React, { Suspense, useRef, useState, useEffect, useCallback } from 'react'; |
|||
import { Canvas, useLoader, useFrame, useThree } from '@react-three/fiber'; |
|||
import * as THREE from 'three'; |
|||
import { |
|||
EffectComposer, |
|||
Noise, |
|||
Vignette, |
|||
FXAA, |
|||
} from '@react-three/postprocessing'; |
|||
import Avatar from './Avatar'; |
|||
import Loader from './Loader'; |
|||
|
|||
function BackgroundSphere() { |
|||
const gradientTexture = useLoader(THREE.TextureLoader, '/gradient.jpg'); |
|||
return ( |
|||
<mesh scale={[-20, 20, 20]}> |
|||
<sphereGeometry args={[1, 16, 16]} /> |
|||
<meshBasicMaterial map={gradientTexture} side={THREE.BackSide} /> |
|||
</mesh> |
|||
); |
|||
} |
|||
|
|||
function SceneContent({ lipsyncRef }) { |
|||
const headBoneRef = useRef(null); |
|||
|
|||
return ( |
|||
<> |
|||
<directionalLight color="#FFC999" intensity={2.5} position={[-5, 5, 5]} castShadow /> |
|||
<directionalLight color="#99C9FF" intensity={1.5} position={[5, 3, 5]} /> |
|||
<directionalLight color="#ffffff" intensity={2.0} position={[0, 5, -5]} /> |
|||
|
|||
<BackgroundSphere /> |
|||
|
|||
<EffectComposer> |
|||
<FXAA /> |
|||
<Noise opacity={0.04} /> |
|||
<Vignette eskil={false} offset={0.1} darkness={1.1} /> |
|||
</EffectComposer> |
|||
|
|||
<Avatar lipsyncRef={lipsyncRef} /> |
|||
|
|||
<FindHeadBone headBoneRef={headBoneRef} /> |
|||
|
|||
<FollowCam headBoneRef={headBoneRef} /> |
|||
</> |
|||
); |
|||
} |
|||
|
|||
// Finds the head bone in the scene
|
|||
function FindHeadBone({ headBoneRef }) { |
|||
const { scene } = useThree(); |
|||
useEffect(() => { |
|||
if (!scene) return; |
|||
// Attempt to find your bone. Adjust name if needed
|
|||
const bone = scene.getObjectByName('main-armature: head') |
|||
|| scene.getObjectByName('head') |
|||
|| scene.getObjectByName('Head'); |
|||
|
|||
if (bone) { |
|||
console.log('Found head bone:', bone.name); |
|||
headBoneRef.current = bone; |
|||
} else { |
|||
console.warn("Couldn't find head bone named 'main-armature: head'!"); |
|||
} |
|||
}, [scene, headBoneRef]); |
|||
return null; |
|||
} |
|||
|
|||
// The camera that softly follows the head
|
|||
function FollowCam({ headBoneRef }) { |
|||
const { camera } = useThree(); |
|||
|
|||
const offset = new THREE.Vector3(0, 0.45, 2); |
|||
const followSpeed = 0.005; |
|||
|
|||
// This factor controls how much vertical pitch we adopt from the avatar's head
|
|||
const verticalFactor = 0.05; |
|||
|
|||
useFrame(() => { |
|||
const headBone = headBoneRef.current; |
|||
if (!headBone) return; |
|||
|
|||
// 1) get bone world pos + orientation
|
|||
const targetPos = new THREE.Vector3(); |
|||
headBone.getWorldPosition(targetPos); |
|||
|
|||
const boneWorldQuat = new THREE.Quaternion(); |
|||
headBone.getWorldQuaternion(boneWorldQuat); |
|||
|
|||
// 2) offset in local space => behind/above the head
|
|||
const offsetWorld = offset.clone().applyQuaternion(boneWorldQuat); |
|||
const desiredPos = targetPos.clone().add(offsetWorld); |
|||
|
|||
// 3) Lerp position
|
|||
camera.position.lerp(desiredPos, followSpeed); |
|||
|
|||
// 4) Partial rotation
|
|||
const currentQuat = camera.quaternion.clone(); |
|||
|
|||
// do a full lookAt => see final orientation
|
|||
camera.lookAt(targetPos); |
|||
const finalQuat = camera.quaternion.clone(); |
|||
|
|||
// revert camera
|
|||
camera.quaternion.copy(currentQuat); |
|||
|
|||
// read eulers from finalQuat
|
|||
const finalEuler = new THREE.Euler().setFromQuaternion(finalQuat, 'YXZ'); |
|||
const currentEuler = new THREE.Euler().setFromQuaternion(currentQuat, 'YXZ'); |
|||
|
|||
// partial pitch
|
|||
const newPitch = THREE.MathUtils.lerp(currentEuler.x, finalEuler.x, verticalFactor); |
|||
// full yaw
|
|||
const newYaw = finalEuler.y; |
|||
// full roll (or partial if you prefer)
|
|||
const newRoll = finalEuler.z; |
|||
|
|||
finalEuler.set(newPitch, newYaw, newRoll, 'YXZ'); |
|||
|
|||
const partialQuat = new THREE.Quaternion().setFromEuler(finalEuler); |
|||
|
|||
// slerp from current to partial
|
|||
camera.quaternion.slerp(partialQuat, followSpeed); |
|||
}); |
|||
|
|||
return null; |
|||
} |
|||
|
|||
function PerformanceCheck({ conversationStarted, dpr, setDpr }) { |
|||
const framesCountRef = useRef(0); |
|||
const lastCheckRef = useRef(performance.now()); |
|||
|
|||
useFrame(() => { |
|||
if (conversationStarted) return; |
|||
framesCountRef.current++; |
|||
const now = performance.now(); |
|||
const elapsed = now - lastCheckRef.current; |
|||
if (elapsed >= 3000) { |
|||
const fps = (framesCountRef.current * 1000) / elapsed; |
|||
framesCountRef.current = 0; |
|||
lastCheckRef.current = now; |
|||
|
|||
console.log(`FPS (avg over 3s): ${fps.toFixed(1)}, DPR: ${dpr.toFixed(2)}`); |
|||
|
|||
if (fps < 30 && dpr > 0.5) { |
|||
const newDpr = Math.max(0.5, dpr * 0.9); |
|||
setDpr(newDpr); |
|||
console.log(`Low FPS => Reducing DPR to ${newDpr.toFixed(2)}`); |
|||
} else if (fps > 45 && dpr < 1.0) { |
|||
const newDpr = Math.min(1.0, dpr * 1.1); |
|||
setDpr(newDpr); |
|||
console.log(`High FPS => Increasing DPR to ${newDpr.toFixed(2)}`); |
|||
} |
|||
} |
|||
}); |
|||
|
|||
return null; |
|||
} |
|||
|
|||
function Scene({ lipsyncRef, conversationStarted }) { |
|||
const [dpr, setDpr] = useState(1.0); |
|||
|
|||
useEffect(() => { |
|||
if (conversationStarted) { |
|||
console.log(`Conversation started. Locking DPR at ${dpr.toFixed(2)}`); |
|||
} |
|||
}, [conversationStarted, dpr]); |
|||
|
|||
return ( |
|||
<div style={{ width: '100vw', height: '100vh' }}> |
|||
<Canvas |
|||
dpr={dpr} |
|||
camera={{ fov: 40, position: [-0.6, 0, 5], rotation: [0, 0, 0], near: 0.1, far: 100 }} |
|||
style={{ width: '100%', height: '100%' }} |
|||
> |
|||
<PerformanceCheck conversationStarted={conversationStarted} dpr={dpr} setDpr={setDpr} /> |
|||
<SceneContent lipsyncRef={lipsyncRef} /> |
|||
</Canvas> |
|||
</div> |
|||
); |
|||
} |
|||
|
|||
export default Scene; |
|||
@ -0,0 +1,68 @@ |
|||
// src/components/VoiceRecordingIndicator.js
|
|||
import React, { useMemo } from 'react'; |
|||
import { CircularProgressbar, buildStyles } from 'react-circular-progressbar'; |
|||
import { FaMicrophone } from 'react-icons/fa'; |
|||
import 'react-circular-progressbar/dist/styles.css'; |
|||
import '../styles/VoiceRecordingIndicator.css'; |
|||
|
|||
function VoiceRecordingIndicator({ |
|||
isRecording = false, |
|||
elapsedTime = 0, |
|||
maxTime = 30, |
|||
rmsValue = 0, // NEW: pass in from props
|
|||
}) { |
|||
// compute the % remaining
|
|||
const pct = useMemo(() => { |
|||
const remain = maxTime - elapsedTime; |
|||
if (remain <= 0) return 0; |
|||
return (remain / maxTime) * 100; |
|||
}, [elapsedTime, maxTime]); |
|||
|
|||
// Scale the mic icon according to RMS
|
|||
// For example, an RMS of 9 maps to a scale of roughly 1.45
|
|||
// Clamp the scale so it never goes below 1.0 or above 1.5
|
|||
const scale = useMemo(() => { |
|||
// typical RMS might go up to 10 if user is speaking loudly
|
|||
// map 0..10 => 1..1.5
|
|||
const minRms = 0; |
|||
const maxRms = 10; |
|||
const minScale = 1.0; |
|||
const maxScale = 1.5; |
|||
|
|||
const clampedRms = Math.min(Math.max(rmsValue, minRms), maxRms); |
|||
const ratio = (clampedRms - minRms) / (maxRms - minRms); // 0..1
|
|||
return minScale + ratio * (maxScale - minScale); // 1..1.5
|
|||
}, [rmsValue]); |
|||
|
|||
// Decide icon color. If user is definitely speaking, color red, else grey.
|
|||
// We can pick a threshold, e.g. RMS > 3 = speaking
|
|||
const color = rmsValue > 3 ? 'red' : 'grey'; |
|||
|
|||
return ( |
|||
<div className={`voice-indicator-container ${isRecording ? 'fade-in' : 'fade-out'}`}> |
|||
<div className="voice-indicator-inner"> |
|||
<CircularProgressbar |
|||
value={pct} |
|||
background |
|||
backgroundPadding={6} |
|||
styles={buildStyles({ |
|||
backgroundColor: '#fff', |
|||
textColor: '#fff', |
|||
pathColor: '#007AFF', |
|||
})} |
|||
/> |
|||
<div |
|||
className="voice-indicator-icon-center" |
|||
style={{ |
|||
transform: `translate(-50%, -50%) scale(${scale})`, |
|||
color: color, |
|||
}} |
|||
> |
|||
<FaMicrophone /> |
|||
</div> |
|||
</div> |
|||
</div> |
|||
); |
|||
} |
|||
|
|||
export default VoiceRecordingIndicator; |
|||
@ -0,0 +1,29 @@ |
|||
// src/components/audioWorker.js
|
|||
/* eslint-disable no-restricted-globals */ |
|||
|
|||
// This worker receives a base64-encoded string, decodes it into an ArrayBuffer,
|
|||
// and sends it back to the main thread.
|
|||
|
|||
onmessage = (e) => { |
|||
const { type, base64data } = e.data; |
|||
|
|||
if (type === 'DECODE_BASE64') { |
|||
try { |
|||
// Decode base64 to a binary string
|
|||
const binaryString = atob(base64data); |
|||
// Convert binary string to a typed array
|
|||
const len = binaryString.length; |
|||
const bytes = new Uint8Array(len); |
|||
|
|||
for (let i = 0; i < len; i++) { |
|||
bytes[i] = binaryString.charCodeAt(i); |
|||
} |
|||
|
|||
// Post the ArrayBuffer back to the main thread
|
|||
postMessage({ success: true, buffer: bytes.buffer }, [bytes.buffer]); |
|||
} catch (err) { |
|||
console.error('Worker: Error decoding base64 data', err); |
|||
postMessage({ success: false, error: 'Decoding error' }); |
|||
} |
|||
} |
|||
}; |
|||
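A minimal sketch of how the main thread could talk to this worker, assuming webpack 5's `new URL(...)` worker syntax (supported by Create React App 5). The `decodeBase64` wrapper and its usage are illustrative and not part of this commit.

```js
// Hypothetical caller for audioWorker.js; not part of this commit.
const audioWorker = new Worker(new URL('./audioWorker.js', import.meta.url));

function decodeBase64(base64data) {
  return new Promise((resolve, reject) => {
    audioWorker.onmessage = (e) => {
      if (e.data.success) {
        resolve(e.data.buffer); // ArrayBuffer (transferred, zero-copy)
      } else {
        reject(new Error(e.data.error));
      }
    };
    audioWorker.postMessage({ type: 'DECODE_BASE64', base64data });
  });
}

// Usage: turn a base64 MP3 chunk into an ArrayBuffer off the main thread,
// then hand it to an AudioContext for decoding/playback.
// decodeBase64(chunkBase64).then((buf) => audioCtx.decodeAudioData(buf));
```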
@ -0,0 +1,261 @@ |
|||
import { useState, useRef, useCallback, useEffect } from 'react'; |
|||
import { toast } from 'react-toastify'; |
|||
|
|||
export default function useAudio({ |
|||
onAudioCaptured, |
|||
onError, |
|||
silentStopTime = 3000, |
|||
onAutoStop, |
|||
onManualStop, |
|||
}) { |
|||
const [isRecording, setIsRecording] = useState(false); |
|||
const [isVoiceActive, setIsVoiceActive] = useState(false); |
|||
|
|||
// Max time from .env or fallback 30
|
|||
const maxRecordingTime = parseInt(process.env.REACT_APP_MAX_VOICE_TIME || '30', 10); |
|||
|
|||
// For the circular countdown
|
|||
const [elapsedTime, setElapsedTime] = useState(0); |
|||
const startTimeRef = useRef(null); |
|||
|
|||
// RMS -> so we can animate the icon
|
|||
const [currentRms, setCurrentRms] = useState(0); |
|||
|
|||
// REFS for min-speak & silence detection
|
|||
const minSpeakTimeBeforeSilenceMs = 1000; |
|||
const accumulatedSpeakMsRef = useRef(0); |
|||
const hasSpokenForMinTimeRef = useRef(false); |
|||
|
|||
// [NEW] We enforce a 4s minimum total record time
|
|||
const minRecordTimeBeforeSilenceMs = 4000; |
|||
|
|||
// [NEW] We also define a minimum "speech duration" to consider this file "valid"
|
|||
// If user never accumulates e.g. 500ms of speech above threshold, it's "empty."
|
|||
const minSpeechDurationMs = 500; |
|||
|
|||
const isVoiceActiveRef = useRef(false); |
|||
useEffect(() => { |
|||
isVoiceActiveRef.current = isVoiceActive; |
|||
}, [isVoiceActive]); |
|||
|
|||
const audioStreamRef = useRef(null); |
|||
const mediaRecorderRef = useRef(null); |
|||
const audioContextAnalyzingRef = useRef(null); |
|||
const isManualStopRef = useRef(false); |
|||
|
|||
// ------------------ STOP LISTENING ------------------
|
|||
const stopListening = useCallback( |
|||
(discard = false) => { |
|||
setIsRecording(false); |
|||
|
|||
if (mediaRecorderRef.current && mediaRecorderRef.current.state !== 'inactive') { |
|||
mediaRecorderRef.current.stop(); |
|||
} |
|||
if (audioStreamRef.current) { |
|||
audioStreamRef.current.getTracks().forEach((track) => track.stop()); |
|||
audioStreamRef.current = null; |
|||
} |
|||
if (audioContextAnalyzingRef.current) { |
|||
audioContextAnalyzingRef.current.close(); |
|||
audioContextAnalyzingRef.current = null; |
|||
} |
|||
|
|||
if (discard) { |
|||
onManualStop && onManualStop(); |
|||
} |
|||
}, |
|||
[onManualStop] |
|||
); |
|||
|
|||
// ------------------ START LISTENING ------------------
|
|||
const startListening = useCallback(() => { |
|||
setIsRecording(true); |
|||
isManualStopRef.current = false; |
|||
|
|||
navigator.mediaDevices |
|||
.getUserMedia({ audio: true }) |
|||
.then((stream) => { |
|||
audioStreamRef.current = stream; |
|||
mediaRecorderRef.current = new MediaRecorder(stream); |
|||
|
|||
const chunks = []; |
|||
mediaRecorderRef.current.ondataavailable = (event) => { |
|||
chunks.push(event.data); |
|||
}; |
|||
|
|||
mediaRecorderRef.current.onstop = () => { |
|||
// If user manually pressed stop => discard
|
|||
if (isManualStopRef.current) { |
|||
isManualStopRef.current = false; |
|||
stopListening(true); |
|||
return; |
|||
} |
|||
|
|||
// [NEW] Check if we have enough speech duration
|
|||
if (accumulatedSpeakMsRef.current < minSpeechDurationMs) { |
|||
console.log('No meaningful speech detected => discarding the recording.'); |
|||
stopListening(); // also release the mic stream and close the analysing AudioContext |
|||
return; |
|||
} |
|||
|
|||
// Otherwise => create final Blob to send
|
|||
const audioBlob = new Blob(chunks, { type: 'audio/wav' }); |
|||
onAudioCaptured && onAudioCaptured(audioBlob); |
|||
onAutoStop && onAutoStop(); |
|||
stopListening(); // stop the mic stream + analyser now that the blob has been handed off |
|||
}; |
|||
|
|||
mediaRecorderRef.current.start(); |
|||
|
|||
// Setup for continuous analysis
|
|||
if (!audioContextAnalyzingRef.current) { |
|||
audioContextAnalyzingRef.current = |
|||
new (window.AudioContext || window.webkitAudioContext)(); |
|||
} |
|||
const source = audioContextAnalyzingRef.current.createMediaStreamSource(stream); |
|||
const analyser = audioContextAnalyzingRef.current.createAnalyser(); |
|||
analyser.fftSize = 2048; |
|||
source.connect(analyser); |
|||
|
|||
const dataArray = new Uint8Array(analyser.fftSize); |
|||
|
|||
let lastFrameTime = performance.now(); |
|||
let silenceStart = null; |
|||
let totalRecordedMs = 0; // track total record time
|
|||
|
|||
accumulatedSpeakMsRef.current = 0; // reset speech accumulation each start
|
|||
hasSpokenForMinTimeRef.current = false; |
|||
|
|||
const monitor = () => { |
|||
if (!isVoiceActiveRef.current) { |
|||
return; // user canceled or manualStop
|
|||
} |
|||
|
|||
const now = performance.now(); |
|||
const delta = now - lastFrameTime; // ms since last frame
|
|||
lastFrameTime = now; |
|||
totalRecordedMs += delta; |
|||
|
|||
// measure amplitude
|
|||
analyser.getByteTimeDomainData(dataArray); |
|||
|
|||
let sum = 0; |
|||
for (let i = 0; i < dataArray.length; i++) { |
|||
const sample = dataArray[i] - 128; |
|||
sum += sample * sample; |
|||
} |
|||
const meanSq = sum / dataArray.length; |
|||
const rms = Math.sqrt(meanSq); |
|||
|
|||
setCurrentRms(rms); |
|||
|
|||
// We'll define a threshold for "speaking" vs "silence"
|
|||
const silenceThreshold = 3; |
|||
const isAboveThreshold = rms > silenceThreshold; |
|||
|
|||
if (isAboveThreshold) { |
|||
// user is speaking
|
|||
silenceStart = null; |
|||
accumulatedSpeakMsRef.current += delta; |
|||
if ( |
|||
!hasSpokenForMinTimeRef.current && |
|||
accumulatedSpeakMsRef.current >= minSpeakTimeBeforeSilenceMs |
|||
) { |
|||
hasSpokenForMinTimeRef.current = true; |
|||
} |
|||
} else { |
|||
// user is silent
|
|||
if ( |
|||
hasSpokenForMinTimeRef.current && |
|||
totalRecordedMs >= minRecordTimeBeforeSilenceMs |
|||
) { |
|||
if (!silenceStart) { |
|||
silenceStart = now; |
|||
} else { |
|||
const silenceElapsed = now - silenceStart; |
|||
if (silenceElapsed > silentStopTime) { |
|||
if (mediaRecorderRef.current) { |
|||
mediaRecorderRef.current.stop(); // triggers onstop => final blob
|
|||
} |
|||
return; |
|||
} |
|||
} |
|||
} |
|||
} |
|||
|
|||
requestAnimationFrame(monitor); |
|||
}; |
|||
|
|||
requestAnimationFrame(monitor); |
|||
}) |
|||
.catch((error) => { |
|||
console.error('Error accessing microphone:', error); |
|||
setIsVoiceActive(false); |
|||
setIsRecording(false); |
|||
|
|||
// Show an "Error" toast that doesn't auto-close
|
|||
toast.error( |
|||
'Microphone permission is missing. Please allow microphone access.', |
|||
{ autoClose: false } |
|||
); |
|||
|
|||
onError && onError(error); |
|||
}); |
|||
}, [ |
|||
onAudioCaptured, |
|||
onError, |
|||
onAutoStop, |
|||
silentStopTime, |
|||
stopListening, |
|||
]); |
|||
|
|||
// ------------------ HANDLE SPEAK BUTTON ------------------
|
|||
const handleSpeakButton = useCallback(() => { |
|||
if (!isVoiceActive) { |
|||
// Reset state for a fresh session
|
|||
accumulatedSpeakMsRef.current = 0; |
|||
hasSpokenForMinTimeRef.current = false; |
|||
|
|||
setIsVoiceActive(true); |
|||
startListening(); |
|||
} else { |
|||
isManualStopRef.current = true; |
|||
setIsVoiceActive(false); |
|||
stopListening(true); |
|||
} |
|||
}, [isVoiceActive, startListening, stopListening]); |
|||
|
|||
// ------------------ ELAPSED TIME (for UI countdown) ------------------
|
|||
useEffect(() => { |
|||
let intervalId; |
|||
if (isRecording) { |
|||
startTimeRef.current = Date.now(); |
|||
intervalId = setInterval(() => { |
|||
const diffSeconds = Math.floor((Date.now() - startTimeRef.current) / 1000); |
|||
setElapsedTime(diffSeconds); |
|||
|
|||
// Force stop if we exceed maxRecordingTime
|
|||
if (diffSeconds >= maxRecordingTime && mediaRecorderRef.current) { |
|||
mediaRecorderRef.current.stop(); // triggers onstop => final blob
|
|||
} |
|||
}, 1000); |
|||
} else { |
|||
setElapsedTime(0); |
|||
} |
|||
|
|||
return () => { |
|||
if (intervalId) clearInterval(intervalId); |
|||
}; |
|||
}, [isRecording, maxRecordingTime]); |
|||
|
|||
return { |
|||
isRecording, |
|||
isVoiceActive, |
|||
setIsVoiceActive, |
|||
startListening, |
|||
stopListening, |
|||
handleSpeakButton, |
|||
elapsedTime, |
|||
currentRms, |
|||
}; |
|||
} |
|||
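For context, a rough sketch of how the Blob handed to `onAudioCaptured` could be base64-encoded and passed to `sendAudioMessage` from `useChat` (the next file below). The component wiring and the `blobToBase64` helper are hypothetical; the real handlers live in ChatWindow and may differ.

```js
// Sketch only: converts the recorded Blob to a base64 payload.
function blobToBase64(blob) {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onloadend = () => {
      // reader.result is a data URL like "data:audio/wav;base64,AAAA..."
      resolve(String(reader.result).split(',')[1]);
    };
    reader.onerror = reject;
    reader.readAsDataURL(blob);
  });
}

// Hypothetical wiring inside a component such as ChatWindow:
// const { sendAudioMessage } = useChat({ onMp3Chunk, initialMessage });
// const { handleSpeakButton, isRecording } = useAudio({
//   onAudioCaptured: async (blob) => sendAudioMessage(await blobToBase64(blob)),
//   onError: (err) => console.error(err),
// });
```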
@ -0,0 +1,241 @@ |
|||
// src/hooks/useChat.js
|
|||
|
|||
import { useState, useEffect, useRef, useCallback } from 'react'; |
|||
import useStore from '../store'; // Zustand store for isAssistantSpeaking, setAssistantEmotion
|
|||
import useLangStore from '../langstore'; |
|||
import { toast } from 'react-toastify'; |
|||
import Cookies from 'js-cookie'; |
|||
import useWebSocketStore from '../store/useWebSocketStore'; |
|||
|
|||
export default function useChat({ |
|||
onMp3Chunk, // callback for receiving MP3 data
|
|||
initialMessage, |
|||
}) { |
|||
const { isAssistantSpeaking, setIsAssistantSpeaking, setAssistantEmotion } = useStore(); |
|||
const { currentLanguage } = useLangStore(); |
|||
|
|||
const [messages, setMessages] = useState([]); |
|||
const [threadId, setThreadId] = useState(null); |
|||
|
|||
// The Zustand-based WS store
|
|||
const { ws, isWsOpen, connect, disconnect } = useWebSocketStore(); |
|||
|
|||
const [hasSentInitialMessage, setHasSentInitialMessage] = useState(false); |
|||
const initialMessageRef = useRef(initialMessage); |
|||
|
|||
// We'll store a user ID in this ref
|
|||
const userIdRef = useRef(null); |
|||
|
|||
// On mount, decide if we generate or persist user ID
|
|||
useEffect(() => { |
|||
initialMessageRef.current = initialMessage; |
|||
}, [initialMessage]); |
|||
|
|||
useEffect(() => { |
|||
const persistEnabled = process.env.REACT_APP_PERSIST_USER_ID === 'true'; |
|||
|
|||
if (persistEnabled) { |
|||
const existingId = Cookies.get('myUserId'); |
|||
if (existingId) { |
|||
console.log('Reusing user ID from cookie:', existingId); |
|||
userIdRef.current = existingId; |
|||
} else { |
|||
const newId = generateRandomUserId(); |
|||
userIdRef.current = newId; |
|||
Cookies.set('myUserId', newId, { expires: 365 }); |
|||
console.log('Generated new user ID and stored in cookie:', newId); |
|||
} |
|||
} else { |
|||
const newId = generateRandomUserId(); |
|||
userIdRef.current = newId; |
|||
console.log('Generated ephemeral user ID:', newId); |
|||
} |
|||
}, []); |
|||
|
|||
function generateRandomUserId() { |
|||
return `user_${Math.random().toString(36).substring(2, 10)}`; |
|||
} |
|||
|
|||
// 1) On mount => connect
|
|||
useEffect(() => { |
|||
const wsUrl = |
|||
process.env.REACT_APP_WS_TALK_ENDPOINT || |
|||
'ws://localhost:3000/talkToAvatarElevenlabs'; |
|||
connect(wsUrl); |
|||
|
|||
return () => { |
|||
// On unmount => disconnect
|
|||
disconnect(); |
|||
}; |
|||
// connect/disconnect are stable store actions, so this effectively runs once
|
|||
}, [connect, disconnect]); |
|||
|
|||
// 2) Attach onmessage once ws is non-null
|
|||
useEffect(() => { |
|||
if (!ws) return; |
|||
|
|||
ws.onmessage = (event) => { |
|||
if (typeof event.data === 'string') { |
|||
// Probably JSON => parse
|
|||
try { |
|||
const data = JSON.parse(event.data); |
|||
|
|||
// textDelta => partial text
|
|||
if (data.textDelta) { |
|||
setMessages((prev) => { |
|||
const last = prev[prev.length - 1]; |
|||
if (last && last.sender === 'assistant' && !last.isAudio) { |
|||
return [ |
|||
...prev.slice(0, -1), |
|||
{ ...last, text: last.text + data.textDelta }, |
|||
]; |
|||
} |
|||
return [ |
|||
...prev, |
|||
{ text: data.textDelta, sender: 'assistant', isAudio: false }, |
|||
]; |
|||
}); |
|||
} |
|||
|
|||
// userTranscript => what the user said (transcribed from their audio)
|
|||
if (data.userTranscript) { |
|||
setMessages((prev) => [ |
|||
...prev, |
|||
{ text: data.userTranscript, sender: 'user', isAudio: true }, |
|||
]); |
|||
} |
|||
|
|||
// emotion => e.g. "Happiness"
|
|||
if (data.emotion) { |
|||
console.log('Assistant Emotion:', data.emotion); |
|||
setAssistantEmotion(data.emotion); |
|||
} |
|||
|
|||
// threadId => store for subsequent messages
|
|||
if (data.threadId) { |
|||
setThreadId(data.threadId); |
|||
} |
|||
|
|||
// error => log
|
|||
if (data.error) { |
|||
console.error('Error in talkToAvatarElevenlabs:', data.error); |
|||
} |
|||
} catch (err) { |
|||
console.warn('Failed to parse WS message as JSON:', event.data, err); |
|||
} |
|||
} else { |
|||
// BINARY => MP3 chunk
|
|||
if (onMp3Chunk && event.data instanceof Blob) { |
|||
onMp3Chunk(event.data); |
|||
} |
|||
} |
|||
}; |
|||
}, [ws, onMp3Chunk, setMessages, setAssistantEmotion]); |
|||
|
|||
// 3) Once ws is open => if we have not yet sent the initial message => do so
|
|||
useEffect(() => { |
|||
if (isWsOpen && !hasSentInitialMessage && initialMessageRef.current) { |
|||
sendUserMessage(initialMessageRef.current, false); |
|||
setHasSentInitialMessage(true); |
|||
} |
|||
}, [isWsOpen, hasSentInitialMessage]); |
|||
|
|||
// --- HELPER: send text
|
|||
const sendUserMessage = useCallback( |
|||
(messageText, display = true) => { |
|||
if (!ws || ws.readyState !== WebSocket.OPEN) { |
|||
console.warn('WS not open; cannot send text message.'); |
|||
return; |
|||
} |
|||
if (!messageText.trim()) return; |
|||
|
|||
if (display) { |
|||
setMessages((prev) => [...prev, { text: messageText, sender: 'user' }]); |
|||
} |
|||
|
|||
ws.send( |
|||
JSON.stringify({ |
|||
userId: userIdRef.current || 'unknown_user', |
|||
assistantId: 'asst_QWWv7T2CB9CzI8dpFAXM1DJR', |
|||
voiceId: 'IZz3GqmkMBrbhPzhbz1W', |
|||
language_code: currentLanguage, |
|||
voice_settings: { |
|||
stability: 0.5, |
|||
similarity_boost: 0.8, |
|||
use_speaker_boost: false, |
|||
}, |
|||
threadId, |
|||
message: messageText, |
|||
}) |
|||
); |
|||
}, |
|||
[ws, setMessages, currentLanguage, threadId] |
|||
); |
|||
|
|||
// --- HELPER: send idle system messages (stable callback)
|
|||
const sendIdleSystemMessage = useCallback( |
|||
(messageText) => { |
|||
if (!ws || ws.readyState !== WebSocket.OPEN) { |
|||
console.warn('WS not open; cannot send system idle message.'); |
|||
return; |
|||
} |
|||
if (!messageText.trim()) return; |
|||
|
|||
ws.send( |
|||
JSON.stringify({ |
|||
userId: userIdRef.current || 'unknown_user', |
|||
assistantId: 'asst_QWWv7T2CB9CzI8dpFAXM1DJR', |
|||
voiceId: 'IZz3GqmkMBrbhPzhbz1W', |
|||
language_code: currentLanguage, |
|||
voice_settings: { |
|||
stability: 0.5, |
|||
similarity_boost: 0.8, |
|||
use_speaker_boost: false, |
|||
}, |
|||
threadId, |
|||
message: messageText, |
|||
}) |
|||
); |
|||
}, |
|||
[ws, currentLanguage, threadId] |
|||
); |
|||
|
|||
// --- HELPER: send user’s microphone audio
|
|||
const sendAudioMessage = useCallback( |
|||
(base64Audio) => { |
|||
if (!ws || ws.readyState !== WebSocket.OPEN) { |
|||
console.warn('WS not open; cannot send audio message.'); |
|||
return; |
|||
} |
|||
if (!base64Audio) return; |
|||
|
|||
ws.send( |
|||
JSON.stringify({ |
|||
userId: userIdRef.current || 'unknown_user', |
|||
assistantId: 'asst_QWWv7T2CB9CzI8dpFAXM1DJR', |
|||
voiceId: 'IZz3GqmkMBrbhPzhbz1W', |
|||
language_code: currentLanguage, |
|||
voice_settings: { |
|||
stability: 0.5, |
|||
similarity_boost: 0.8, |
|||
use_speaker_boost: false, |
|||
}, |
|||
threadId, |
|||
audio: base64Audio, |
|||
}) |
|||
); |
|||
}, |
|||
[ws, currentLanguage, threadId] |
|||
); |
|||
|
|||
return { |
|||
messages, |
|||
setMessages, |
|||
threadId, |
|||
isAssistantSpeaking, |
|||
setIsAssistantSpeaking, |
|||
sendUserMessage, |
|||
sendAudioMessage, |
|||
sendIdleSystemMessage, // <-- returned so ChatWindow can pass it to the idle timers
|
|||
}; |
|||
} |
|||
@ -0,0 +1,13 @@ |
|||
body { |
|||
margin: 0; |
|||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', |
|||
'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', |
|||
sans-serif; |
|||
-webkit-font-smoothing: antialiased; |
|||
-moz-osx-font-smoothing: grayscale; |
|||
} |
|||
|
|||
code { |
|||
font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New', |
|||
monospace; |
|||
} |
|||
@ -0,0 +1,9 @@ |
|||
// src/index.js
|
|||
import React from 'react'; |
|||
import ReactDOM from 'react-dom/client'; |
|||
import App from './App'; |
|||
import './styles/App.css'; |
|||
import './styles/Loader.css'; |
|||
|
|||
const root = ReactDOM.createRoot(document.getElementById('root')); |
|||
root.render(<App />); |
|||
@ -0,0 +1,9 @@ |
|||
// src/langstore.js
|
|||
import { create } from 'zustand'; |
|||
|
|||
const useLangStore = create((set) => ({ |
|||
currentLanguage: 'en', // Default
|
|||
setCurrentLanguage: (lang) => set({ currentLanguage: lang }), |
|||
})); |
|||
|
|||
export default useLangStore; |
|||
|
@ -0,0 +1,14 @@ |
|||
// src/reportWebVitals.js
|
|||
const reportWebVitals = onPerfEntry => { |
|||
if (onPerfEntry && onPerfEntry instanceof Function) { |
|||
import('web-vitals').then(({ getCLS, getFID, getFCP, getLCP, getTTFB }) => { |
|||
getCLS(onPerfEntry); |
|||
getFID(onPerfEntry); |
|||
getFCP(onPerfEntry); |
|||
getLCP(onPerfEntry); |
|||
getTTFB(onPerfEntry); |
|||
}); |
|||
} |
|||
}; |
|||
|
|||
export default reportWebVitals; |
|||
@ -0,0 +1,2 @@ |
|||
// src/setupTests.js
|
|||
import '@testing-library/jest-dom'; |
|||
@ -0,0 +1,22 @@ |
|||
// src/store.js
|
|||
import { create } from 'zustand'; |
|||
|
|||
const useStore = create((set) => ({ |
|||
// Existing states
|
|||
isAssistantSpeaking: false, |
|||
setIsAssistantSpeaking: (val) => set({ isAssistantSpeaking: val }), |
|||
|
|||
// NEW:
|
|||
assistantEmotion: null, |
|||
setAssistantEmotion: (emotion) => set({ assistantEmotion: emotion }), |
|||
|
|||
// [ADDED] For user speaking state (used for "listening" animation)
|
|||
isUserSpeaking: false, // default = false
|
|||
setIsUserSpeaking: (val) => set({ isUserSpeaking: val }), |
|||
|
|||
// new wave state
|
|||
shouldWave: false, |
|||
setShouldWave: (val) => set({ shouldWave: val }), |
|||
})); |
|||
|
|||
export default useStore; |
|||
@ -0,0 +1,58 @@ |
|||
// src/store/useWebSocketStore.js
|
|||
import { create } from 'zustand'; |
|||
import { toast } from 'react-toastify'; |
|||
|
|||
const useWebSocketStore = create((set, get) => ({ |
|||
ws: null, |
|||
isWsOpen: false, |
|||
|
|||
// This method opens the WS only if not already open
|
|||
connect: (wsUrl) => { |
|||
const { ws, isWsOpen } = get(); |
|||
if (ws && isWsOpen) { |
|||
console.log('[WS] Already connected.'); |
|||
return; |
|||
} |
|||
|
|||
console.log('[WS] Connecting to', wsUrl); |
|||
const newWs = new WebSocket(wsUrl); |
|||
// We set the binary type to 'blob' so we can receive MP3 chunks
|
|||
newWs.binaryType = 'blob'; |
|||
|
|||
newWs.onopen = () => { |
|||
console.log('[WS] onopen => WebSocket is open.'); |
|||
set({ isWsOpen: true }); |
|||
}; |
|||
|
|||
// We won't set `onmessage` here directly.
|
|||
// We'll let the consuming code define it as needed.
|
|||
// That way, we can handle JSON vs Blob in the hooking code.
|
|||
|
|||
newWs.onerror = (err) => { |
|||
console.error('[WS] onerror =>', err); |
|||
toast.error('Server connection error.', { autoClose: false }); |
|||
}; |
|||
|
|||
newWs.onclose = () => { |
|||
console.log('[WS] onclose => WebSocket closed.'); |
|||
set({ isWsOpen: false, ws: null }); |
|||
toast.error('WebSocket closed. Please refresh or reconnect.', { |
|||
autoClose: 10000, // 10 seconds
|
|||
}); |
|||
}; |
|||
|
|||
// Finally, store the newly created WS
|
|||
set({ ws: newWs }); |
|||
}, |
|||
|
|||
disconnect: () => { |
|||
const { ws } = get(); |
|||
if (ws) { |
|||
console.log('[WS] disconnect => closing WebSocket.'); |
|||
ws.close(); |
|||
} |
|||
set({ ws: null, isWsOpen: false }); |
|||
}, |
|||
})); |
|||
|
|||
export default useWebSocketStore; |
|||
@ -0,0 +1,104 @@ |
|||
/* src/styles/App.css */ |
|||
body, html, #root { |
|||
margin: 0; |
|||
padding: 0; |
|||
height: 100%; |
|||
overflow: hidden; |
|||
} |
|||
|
|||
.App { |
|||
position: relative; |
|||
width: 100vw; |
|||
height: 100vh; |
|||
} |
|||
|
|||
/* The overall container that holds the language selector + the start-conversation button side by side */ |
|||
.start-conversation-button-wrapper { |
|||
left: 0px; |
|||
display: flex; |
|||
align-items: center; |
|||
justify-content: center; |
|||
gap: 1rem; |
|||
position: relative; /* so absolute-positioned items can drop up */ |
|||
bottom: 5%; |
|||
} |
|||
|
|||
/* The "Start Conversation" button styling (unchanged from your existing design) */ |
|||
.start-conversation-button { |
|||
border: 1px solid #ccc; |
|||
border-radius: 20px; |
|||
background: #fff; |
|||
padding: 0.5rem 1.25rem; |
|||
cursor: pointer; |
|||
display: inline-flex; |
|||
align-items: center; |
|||
font-size: 1rem; |
|||
gap: 0.4rem; |
|||
transition: background 0.2s, box-shadow 0.2s; |
|||
} |
|||
.start-conversation-button:hover { |
|||
background: #f5f5f5; |
|||
} |
|||
.start-conversation-button:active { |
|||
box-shadow: inset 0 0 4px rgba(0,0,0,0.15); |
|||
} |
|||
|
|||
/* ====================== |
|||
Language Selector |
|||
======================*/ |
|||
|
|||
/* The parent container that wraps the language button & drop-up */ |
|||
.language-selector-container { |
|||
position: relative; |
|||
} |
|||
|
|||
/* Make the language-selector-button match the style of .start-conversation-button */ |
|||
.language-selector-button { |
|||
border: 1px solid #ccc; |
|||
border-radius: 20px; |
|||
background: #fff; |
|||
padding: 0.5rem 1.25rem; |
|||
cursor: pointer; |
|||
display: inline-flex; |
|||
align-items: center; |
|||
font-size: 1rem; |
|||
gap: 0.4rem; |
|||
transition: background 0.2s, box-shadow 0.2s; |
|||
} |
|||
.language-selector-button:hover { |
|||
background: #f5f5f5; |
|||
} |
|||
.language-selector-button:active { |
|||
box-shadow: inset 0 0 4px rgba(0,0,0,0.15); |
|||
} |
|||
|
|||
/* The drop-up menu itself */ |
|||
.language-dropup { |
|||
position: absolute; |
|||
bottom: 3rem; /* so it pops up above the button */ |
|||
left: 0; |
|||
background: #fff; |
|||
border: 1px solid #ccc; |
|||
border-radius: 10px; |
|||
box-shadow: 0 2px 6px rgba(0,0,0,0.2); |
|||
padding: 0.25rem 0; |
|||
z-index: 9999; |
|||
min-width: 8rem; /* ensure it's wide enough for all language labels */ |
|||
} |
|||
|
|||
/* Each language option inside the drop-up menu */ |
|||
.language-option { |
|||
display: block; |
|||
width: 100%; |
|||
text-align: left; |
|||
padding: 0.5rem 1rem; |
|||
background: transparent; |
|||
border: none; |
|||
cursor: pointer; |
|||
font-size: 0.9rem; |
|||
} |
|||
.language-option:hover { |
|||
background: #f5f5f5; |
|||
} |
|||
|
|||
|
|||
@ -0,0 +1,242 @@ |
|||
/* src/styles/ChatWindow.css */ |
|||
|
|||
.chat-window { |
|||
position: absolute; |
|||
bottom: 5%; |
|||
left: 3%; |
|||
width: 25%; |
|||
max-height: 30%; |
|||
background-color: rgba(0, 0, 0, 0.7); |
|||
display: flex; |
|||
flex-direction: column; |
|||
font-family: 'Tahoma', sans-serif; |
|||
color: white; |
|||
border-radius: 10px; |
|||
overflow: hidden; /* Ensures child elements don't overflow */ |
|||
} |
|||
|
|||
@media (max-width: 768px) { |
|||
.chat-window { |
|||
width: 90%; |
|||
left: 3%; |
|||
} |
|||
} |
|||
|
|||
.messages { |
|||
flex: 1; |
|||
overflow-y: auto; /* Enables vertical scrolling */ |
|||
padding: 10px 15px; |
|||
display: flex; |
|||
flex-direction: column; |
|||
scrollbar-width: thin; |
|||
scrollbar-color: rgba(255, 255, 255, 0.5) transparent; |
|||
scroll-behavior: smooth; /* Smooth scrolling behavior */ |
|||
} |
|||
|
|||
.messages::-webkit-scrollbar { |
|||
width: 3px; |
|||
} |
|||
|
|||
.messages::-webkit-scrollbar-track { |
|||
background: transparent; |
|||
margin: 5px 0; |
|||
} |
|||
|
|||
.messages::-webkit-scrollbar-thumb { |
|||
background-color: rgba(255, 255, 255, 0.5); |
|||
border-radius: 10px; |
|||
} |
|||
|
|||
.messages::-webkit-scrollbar-button { |
|||
display: none; |
|||
} |
|||
|
|||
.message { |
|||
width: fit-content; |
|||
max-width: 80%; |
|||
margin-bottom: 10px; |
|||
padding: 10px; |
|||
border-radius: 10px; |
|||
word-wrap: break-word; |
|||
position: relative; |
|||
clear: both; |
|||
} |
|||
|
|||
.message.user { |
|||
align-self: flex-end; |
|||
background-color: rgba(0, 122, 255, 0.3); |
|||
text-align: right; |
|||
margin-right: 5px; |
|||
} |
|||
|
|||
.message.user::after { |
|||
content: ''; |
|||
position: absolute; |
|||
right: -10px; |
|||
top: 10px; |
|||
border-width: 10px 0 10px 10px; |
|||
border-style: solid; |
|||
border-color: transparent transparent transparent rgba(0, 122, 255, 0.3); |
|||
} |
|||
|
|||
.message.backend { |
|||
align-self: flex-start; |
|||
background-color: rgba(200, 200, 200, 0.5); |
|||
margin-left: 5px; |
|||
} |
|||
|
|||
.message.backend::before { |
|||
content: ''; |
|||
position: absolute; |
|||
left: -10px; |
|||
top: 10px; |
|||
border-width: 10px 10px 10px 0; |
|||
border-style: solid; |
|||
border-color: transparent rgba(200, 200, 200, 0.5) transparent transparent; |
|||
} |
|||
|
|||
.input-area { |
|||
display: flex; |
|||
padding: 10px; |
|||
background-color: rgba(0, 0, 0, 0.5); |
|||
} |
|||
|
|||
.input-area input { |
|||
flex: 1; |
|||
padding: 10px; |
|||
border: none; |
|||
border-radius: 20px; |
|||
margin-right: 10px; |
|||
font-size: 1em; |
|||
background-color: rgba(255, 255, 255, 0.1); |
|||
color: white; |
|||
} |
|||
|
|||
.input-area input:disabled { |
|||
background-color: rgba(255, 255, 255, 0.05); |
|||
cursor: not-allowed; /* Indicate disabled state */ |
|||
} |
|||
|
|||
.input-area button { |
|||
display: flex; |
|||
align-items: center; |
|||
padding: 10px 15px; |
|||
border: none; |
|||
color: white; |
|||
border-radius: 20px; |
|||
cursor: pointer; |
|||
font-size: 1em; |
|||
transition: background-color 0.3s; |
|||
} |
|||
|
|||
.input-area button:disabled { |
|||
cursor: not-allowed; /* Indicate disabled state */ |
|||
opacity: 0.6; /* Visual feedback for disabled state */ |
|||
} |
|||
|
|||
.send-button { |
|||
background-color: #007AFF; |
|||
} |
|||
|
|||
.send-button:hover:not(:disabled) { |
|||
background-color: #005bb5; |
|||
} |
|||
|
|||
.speak-button { |
|||
background-color: #FF3B30; |
|||
margin-left: 10px; |
|||
transition: background-color 1s ease-in-out; |
|||
} |
|||
|
|||
.speak-button.recording { |
|||
animation: fadeRed 2s infinite; |
|||
} |
|||
|
|||
@keyframes fadeRed { |
|||
0% { |
|||
background-color: #FF3B30; |
|||
} |
|||
50% { |
|||
background-color: #FF8A80; |
|||
} |
|||
100% { |
|||
background-color: #FF3B30; |
|||
} |
|||
} |
|||
|
|||
.input-area input:focus { |
|||
outline: none; |
|||
} |
|||
|
|||
.input-area button:focus { |
|||
outline: none; |
|||
} |
|||
|
|||
.input-area svg { |
|||
margin-right: 5px; |
|||
} |
|||
|
|||
|
|||
.start-conversation-button-wrapper { |
|||
position: absolute; |
|||
bottom: 20px; |
|||
left: 20px; |
|||
z-index: 9999; |
|||
} |
|||
|
|||
.start-conversation-button { |
|||
display: flex; |
|||
align-items: center; |
|||
background: #fff; |
|||
border: 2px solid #ccc; |
|||
border-radius: 50px; |
|||
padding: 10px 20px; |
|||
cursor: pointer; |
|||
font-size: 0.9em; |
|||
font-weight: 500; |
|||
color: #333; |
|||
} |
|||
|
|||
.start-conversation-button svg { |
|||
margin-right: 8px; |
|||
} |
|||
|
|||
.fade-in { |
|||
animation: fadeIn 3s ease forwards; |
|||
} |
|||
|
|||
@keyframes fadeIn { |
|||
0% { |
|||
opacity: 0; |
|||
} |
|||
100% { |
|||
opacity: 1; |
|||
} |
|||
} |
|||
|
|||
/* The floating wrapper for the voice indicator: |
|||
Position it near the chat window. |
|||
You can tweak left/bottom to get the exact offset you want. */ |
|||
.voice-indicator-floating { |
|||
position: absolute; |
|||
bottom: 100px; /* example: 80px or 100px above the bottom so it doesn't overlap the chat */ |
|||
left: 45%; /* offset horizontally from the chat window; adjust as needed */ |
|||
z-index: 9999; /* ensure it floats above other elements */ |
|||
} |
|||
|
|||
/* Example styling if you want a transparent background */ |
|||
.voice-indicator-floating { |
|||
background: none; /* or transparent, to avoid any white box or push */ |
|||
padding: 0; /* remove default spacing if any */ |
|||
} |
|||
|
|||
/* If you want it to respond on smaller screens, |
|||
you can add a media query, for instance: */ |
|||
@media (max-width: 768px) { |
|||
.voice-indicator-floating { |
|||
bottom: 120px; |
|||
left: 20px; |
|||
/* or place it somewhere else on smaller screens */ |
|||
} |
|||
} |
|||
|
|||
@ -0,0 +1,30 @@ |
|||
.emotion-bubble { |
|||
position: absolute; |
|||
/* Place at the lower-right corner by default */ |
|||
right: 10px; |
|||
bottom: 10px; |
|||
|
|||
font-size: 2rem; /* Adjust if you want bigger/smaller emoji */ |
|||
z-index: 9999; /* So it floats above other UI elements */ |
|||
|
|||
/* Animation config */ |
|||
animation: floatEmotion 1.5s ease-in forwards; |
|||
/* We fade in/out, float up in that 1.5s */ |
|||
} |
|||
|
|||
@keyframes floatEmotion { |
|||
0% { |
|||
opacity: 0; |
|||
transform: translateY(0); |
|||
} |
|||
15% { |
|||
opacity: 1; |
|||
} |
|||
85% { |
|||
opacity: 1; |
|||
} |
|||
100% { |
|||
opacity: 0; |
|||
transform: translateY(-50px); |
|||
} |
|||
} |
|||
@ -0,0 +1,45 @@ |
|||
.loader-overlay { |
|||
position: fixed; |
|||
top: 0; |
|||
left: 0; |
|||
width: 100%; |
|||
height: 100%; |
|||
background-color: white; |
|||
display: flex; |
|||
justify-content: center; |
|||
align-items: center; |
|||
z-index: 9999; |
|||
opacity: 1; |
|||
transition: opacity 1s ease-in-out; |
|||
} |
|||
|
|||
.loader-overlay.fade-out { |
|||
opacity: 0; |
|||
pointer-events: none; |
|||
} |
|||
|
|||
.loader-content { |
|||
text-align: center; |
|||
} |
|||
|
|||
.spinner { |
|||
border: 8px solid #f3f3f3; /* Light grey */ |
|||
border-top: 8px solid rgba(0, 122, 255, 0.8); /* Blue color (same as user message background) */ |
|||
border-radius: 50%; |
|||
width: 60px; |
|||
height: 60px; |
|||
animation: spin 1s linear infinite; |
|||
margin: 0 auto 20px auto; |
|||
} |
|||
|
|||
.loader-content p { |
|||
font-size: 1.2em; |
|||
color: rgba(0, 122, 255, 0.8); /* Same blue color */ |
|||
font-family: 'Tahoma', sans-serif; |
|||
} |
|||
|
|||
@keyframes spin { |
|||
0% { transform: rotate(0deg); } |
|||
100% { transform: rotate(360deg); } |
|||
} |
|||
|
|||
@ -0,0 +1,39 @@ |
|||
.voice-indicator-container { |
|||
position: relative; |
|||
width: 80px; |
|||
height: 80px; |
|||
margin-left: 16px; /* space to the left of the chat window */ |
|||
opacity: 0; |
|||
pointer-events: none; |
|||
transition: opacity 0.3s ease-in-out; |
|||
} |
|||
|
|||
/* Fade in means we make it visible, pointer events available */ |
|||
.voice-indicator-container.fade-in { |
|||
opacity: 1; |
|||
pointer-events: all; |
|||
} |
|||
|
|||
.voice-indicator-inner { |
|||
position: relative; |
|||
width: 100%; |
|||
height: 100%; |
|||
} |
|||
|
|||
/* The microphone icon in the center of the circle */ |
|||
.voice-indicator-icon-center { |
|||
position: absolute; |
|||
top: 50%; |
|||
left: 50%; |
|||
transform: translate(-50%, -50%); |
|||
font-size: 1.4rem; |
|||
color: #333; /* or any color you'd like for the icon */ |
|||
} |
|||
|
|||
/* Example responsive breakpoints to stack with the chat window */ |
|||
@media (max-width: 768px) { |
|||
.voice-indicator-container { |
|||
margin-left: 0; |
|||
margin-top: 16px; |
|||
} |
|||
} |
|||