import React, { useState, useEffect } from "react";
import { ScrollView, Text, TouchableHighlight, View } from "react-native";
import { Conversation, Message } from "@/app/lib/conversation";
import MessageBubble from "@/components/ui/MessageBubble";
import { WhisperContext } from "whisper.rn";
import { NavigationProp, ParamListBase } from "@react-navigation/native";
import {
  CachedTranslator,
  language_matrix_entry,
  Translator,
} from "@/app/i18n/api";
import { getDb } from "@/app/lib/db";
import LiveAudioStream from "react-native-live-audio-stream";

const lasOptions = {
  sampleRate: 32000, // default is 44100, but 32000 is adequate for accurate voice recognition
  channels: 1, // 1 or 2, default 1
  bitsPerSample: 16, // 8 or 16, default 16
  audioSource: 6, // Android only: 6 = MediaRecorder.AudioSource.VOICE_RECOGNITION
  bufferSize: 4096, // default is 2048
};

// LiveAudioStream.init(lasOptions as any);

interface ConversationThreadProps {
  conversation: Conversation;
  whisperContext: WhisperContext;
  onGoBack?: () => any;
}

const ConversationThread = (p: ConversationThreadProps) => {
  const [messages, setMessages] = useState<Message[]>([]);
  const [guestSpeak, setGuestSpeak] = useState<string>();
  const [guestSpeakLoaded, setGuestSpeakLoaded] = useState(false); // reserved, not yet used
  const ct = new CachedTranslator("en", p.conversation.guest.language);

  // Re-render whenever the conversation gains a message or a translation lands.
  useEffect(() => {
    const updateMessages = (c: Conversation) => {
      setMessages([...c]);
    };
    p.conversation.onAddMessage = updateMessages;
    p.conversation.onTranslationDone = updateMessages;
    return () => {
      p.conversation.onAddMessage = undefined;
      p.conversation.onTranslationDone = undefined;
    };
  }, [p.conversation]);

  // Fetch the translated guest-facing "Speak" label once per guest language.
  useEffect(() => {
    const fetchData = async () => {
      setGuestSpeak(await ct.translate("Speak"));
    };
    fetchData();
  }, [p.conversation.guest.language]);

  const renderMessages = () =>
    messages.map((message, index) => (
      <MessageBubble key={index} message={message} />
    ));

  function onGoBack() {
    p.onGoBack && p.onGoBack();
  }

  return (
    <View>
      <ScrollView>{renderMessages()}</ScrollView>
      {/* Host-side control, in English */}
      {/* TODO: wire up speech capture (see startSpeaking sketch below) */}
      <TouchableHighlight>
        <Text>Speak</Text>
      </TouchableHighlight>
      <TouchableHighlight onPress={onGoBack}>
        <Text>Go Back</Text>
      </TouchableHighlight>
      {/* Guest-side control, labeled in the guest's language once translated */}
      <TouchableHighlight>
        <Text>{guestSpeak ? guestSpeak : "Speak"}</Text>
      </TouchableHighlight>
    </View>
  );
};

export default ConversationThread;
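
// ---------------------------------------------------------------------------
// Sketch (an assumption, not part of the component above): the "Speak"
// buttons still lack press handlers. One way to wire them is whisper.rn's
// realtime API, which captures microphone audio itself; the LiveAudioStream
// setup at the top would only be needed to buffer raw PCM chunks and feed
// Whisper manually. transcribeRealtime/subscribe/stop are documented
// whisper.rn calls; startSpeaking and onFinalTranscript are illustrative
// names introduced here.
// ---------------------------------------------------------------------------
async function startSpeaking(
  whisperContext: WhisperContext,
  language: string,
  onFinalTranscript: (text: string) => void, // e.g. append to the Conversation here
) {
  const { stop, subscribe } = await whisperContext.transcribeRealtime({
    language,
  });
  subscribe((event) => {
    // Events stream in while capturing; event.data.result holds the
    // transcript so far. Act once capture has ended and a result exists.
    if (!event.isCapturing && event.data) {
      onFinalTranscript(event.data.result);
    }
  });
  return stop; // call to end capture early, e.g. from a Stop button
}
// Possible usage from the host-side button:
//   onPress={() => startSpeaking(p.whisperContext, "en", (text) => { /* add message */ })}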