import { useEffect, useMemo, useRef, useState } from 'react'
|
|
import './App.css'
|
|
import TranscriptPanel from './components/TranscriptPanel'
|
|
import AnswerPanel from './components/AnswerPanel'
|
|
import MeetingList from './components/MeetingList'
|
|
import {
|
|
createMeeting,
|
|
deleteMeetings,
|
|
endMeeting,
|
|
fetchAnswerSuggestions,
|
|
fetchMeeting,
|
|
fetchMeetings,
|
|
saveAnswers,
|
|
saveUtterance,
|
|
} from './lib/api'
|
|
|
|
function App() {
|
|
const [isRecording, setIsRecording] = useState(false)
|
|
const [isEditMode, setIsEditMode] = useState(false)
|
|
const [currentMeetingId, setCurrentMeetingId] = useState<number | null>(null)
|
|
const [transcriptLines, setTranscriptLines] = useState<
|
|
{ id: number; ts: string; text: string; isFinal: boolean }[]
|
|
>([])
|
|
const [answerSuggestions, setAnswerSuggestions] = useState<string[]>([])
|
|
const [meetingsList, setMeetingsList] = useState<
|
|
{ id: number; started_at: string; ended_at: string | null; title: string | null }[]
|
|
>([])
|
|
const [selectedMeetingIds, setSelectedMeetingIds] = useState<Set<number>>(
|
|
new Set()
|
|
)
|
|
const [errorMessage, setErrorMessage] = useState<string | null>(null)
|
|
|
|
const recognitionRef = useRef<SpeechRecognition | null>(null)
|
|
const liveTextRef = useRef('')
|
|
const lineIdRef = useRef(1)
|
|
const meetingIdRef = useRef<number | null>(null)
|
|
const isRecordingRef = useRef(false)
|
|
const lastResultAtRef = useRef<number>(Date.now())
|
|
const restartLockRef = useRef(false)
|
|
const finalizeTimerRef = useRef<number | null>(null)
|
|
|
|
const hasSpeechRecognition = useMemo(() => {
|
|
return 'SpeechRecognition' in window || 'webkitSpeechRecognition' in window
|
|
}, [])
|
|
|
|
useEffect(() => {
|
|
fetchMeetings()
|
|
.then(setMeetingsList)
|
|
.catch((err) => setErrorMessage(err.message))
|
|
}, [])
|
|
|
|
useEffect(() => {
|
|
if (!isRecording) return
|
|
const intervalId = window.setInterval(() => {
|
|
if (!isRecordingRef.current) return
|
|
const now = Date.now()
|
|
if (now - lastResultAtRef.current > 4000) {
|
|
void safeRestartRecognition()
|
|
}
|
|
}, 2000)
|
|
return () => window.clearInterval(intervalId)
|
|
}, [isRecording])
|
|
|
|
const commitLiveIfAny = async () => {
|
|
if (!meetingIdRef.current) return
|
|
const text = liveTextRef.current.trim()
|
|
if (!text) return
|
|
const ts = new Date().toISOString()
|
|
setTranscriptLines((prev) => {
|
|
const last = prev[prev.length - 1]
|
|
if (last && !last.isFinal) {
|
|
return [
|
|
...prev.slice(0, -1),
|
|
{ ...last, text, ts, isFinal: true },
|
|
]
|
|
}
|
|
return [...prev, { id: lineIdRef.current++, ts, text, isFinal: true }]
|
|
})
|
|
try {
|
|
await saveUtterance(meetingIdRef.current, text, ts)
|
|
} catch (err) {
|
|
setErrorMessage((err as Error).message)
|
|
}
|
|
}
|
|
|
|
const detectQuestion = (text: string) => {
|
|
const trimmed = text.trim()
|
|
if (!trimmed) return false
|
|
if (trimmed.includes('?')) return true
|
|
const patterns = [
|
|
'어때',
|
|
'할까',
|
|
'인가',
|
|
'있나',
|
|
'맞지',
|
|
'좋을까',
|
|
'생각은',
|
|
'어떻게',
|
|
'왜',
|
|
'뭐',
|
|
'언제',
|
|
'어디',
|
|
'누가',
|
|
]
|
|
return patterns.some((pattern) => trimmed.includes(pattern))
|
|
}
|
|
|
|
const startRecognition = () => {
|
|
const SpeechRecognitionConstructor =
|
|
window.SpeechRecognition || window.webkitSpeechRecognition
|
|
|
|
if (!SpeechRecognitionConstructor) {
|
|
setErrorMessage('이 브라우저에서는 STT를 지원하지 않습니다. Chrome을 사용해 주세요.')
|
|
return
|
|
}
|
|
|
|
const recognition = new SpeechRecognitionConstructor()
|
|
recognition.lang = 'ko-KR'
|
|
recognition.interimResults = true
|
|
recognition.continuous = true
|
|
recognition.maxAlternatives = 3
|
|
|
|
recognition.onresult = (event) => {
|
|
lastResultAtRef.current = Date.now()
|
|
let interim = ''
|
|
for (let i = event.resultIndex; i < event.results.length; i += 1) {
|
|
const result = event.results[i]
|
|
const text = result[0].transcript
|
|
if (result.isFinal) {
|
|
handleFinalTranscript(text)
|
|
} else {
|
|
interim += text
|
|
}
|
|
}
|
|
const interimText = interim.trim()
|
|
if (interimText) {
|
|
liveTextRef.current = interimText
|
|
setTranscriptLines((prev) => {
|
|
const last = prev[prev.length - 1]
|
|
if (last && !last.isFinal) {
|
|
return [...prev.slice(0, -1), { ...last, text: interimText }]
|
|
}
|
|
return [
|
|
...prev,
|
|
{
|
|
id: lineIdRef.current++,
|
|
ts: new Date().toISOString(),
|
|
text: interimText,
|
|
isFinal: false,
|
|
},
|
|
]
|
|
})
|
|
if (finalizeTimerRef.current) {
|
|
window.clearTimeout(finalizeTimerRef.current)
|
|
}
|
|
finalizeTimerRef.current = window.setTimeout(() => {
|
|
void commitLiveIfAny()
|
|
finalizeTimerRef.current = null
|
|
}, 1200)
|
|
}
|
|
}
|
|
|
|
recognition.onerror = () => {
|
|
setErrorMessage('음성 인식 중 오류가 발생했습니다.')
|
|
}
|
|
|
|
recognition.onend = () => {
|
|
if (finalizeTimerRef.current) {
|
|
window.clearTimeout(finalizeTimerRef.current)
|
|
finalizeTimerRef.current = null
|
|
}
|
|
void commitLiveIfAny()
|
|
liveTextRef.current = ''
|
|
if (isRecordingRef.current) {
|
|
window.setTimeout(() => {
|
|
void safeRestartRecognition()
|
|
}, 200)
|
|
} else {
|
|
setIsRecording(false)
|
|
}
|
|
}
|
|
|
|
recognitionRef.current = recognition
|
|
recognition.start()
|
|
}
|
|
|
|
const handleFinalTranscript = async (text: string) => {
|
|
if (!meetingIdRef.current) return
|
|
const trimmed = text.trim()
|
|
if (!trimmed) return
|
|
if (finalizeTimerRef.current) {
|
|
window.clearTimeout(finalizeTimerRef.current)
|
|
finalizeTimerRef.current = null
|
|
}
|
|
lastResultAtRef.current = Date.now()
|
|
const ts = new Date().toISOString()
|
|
liveTextRef.current = ''
|
|
let nextLines: { id: number; ts: string; text: string; isFinal: boolean }[] = []
|
|
setTranscriptLines((prev) => {
|
|
const last = prev[prev.length - 1]
|
|
if (last && !last.isFinal) {
|
|
nextLines = [
|
|
...prev.slice(0, -1),
|
|
{ ...last, text: trimmed, ts, isFinal: true },
|
|
]
|
|
return nextLines
|
|
}
|
|
nextLines = [
|
|
...prev,
|
|
{ id: lineIdRef.current++, ts, text: trimmed, isFinal: true },
|
|
]
|
|
return nextLines
|
|
})
|
|
|
|
try {
|
|
await saveUtterance(meetingIdRef.current, trimmed, ts)
|
|
} catch (err) {
|
|
setErrorMessage((err as Error).message)
|
|
}
|
|
|
|
if (detectQuestion(trimmed)) {
|
|
try {
|
|
const context = nextLines.slice(-20).map((line) => line.text)
|
|
const result = await fetchAnswerSuggestions(context, trimmed)
|
|
setAnswerSuggestions(result.suggestions)
|
|
await saveAnswers(meetingIdRef.current, trimmed, result.suggestions)
|
|
} catch (err) {
|
|
setErrorMessage((err as Error).message)
|
|
}
|
|
}
|
|
}
|
|
|
|
const handleStart = async () => {
|
|
setErrorMessage(null)
|
|
try {
|
|
const result = await createMeeting(new Date().toISOString())
|
|
meetingIdRef.current = result.id
|
|
setCurrentMeetingId(result.id)
|
|
lineIdRef.current = 1
|
|
setTranscriptLines([])
|
|
setAnswerSuggestions([])
|
|
setIsRecording(true)
|
|
isRecordingRef.current = true
|
|
lastResultAtRef.current = Date.now()
|
|
startRecognition()
|
|
} catch (err) {
|
|
setErrorMessage((err as Error).message)
|
|
}
|
|
}
|
|
|
|
const handleStop = async () => {
|
|
if (!meetingIdRef.current) return
|
|
setErrorMessage(null)
|
|
recognitionRef.current?.stop()
|
|
await commitLiveIfAny()
|
|
liveTextRef.current = ''
|
|
setIsRecording(false)
|
|
isRecordingRef.current = false
|
|
try {
|
|
await endMeeting(meetingIdRef.current, new Date().toISOString())
|
|
const list = await fetchMeetings()
|
|
setMeetingsList(list)
|
|
} catch (err) {
|
|
setErrorMessage((err as Error).message)
|
|
}
|
|
}
|
|
|
|
const safeRestartRecognition = async () => {
|
|
if (!recognitionRef.current || restartLockRef.current) return
|
|
restartLockRef.current = true
|
|
try {
|
|
recognitionRef.current.stop()
|
|
recognitionRef.current.start()
|
|
lastResultAtRef.current = Date.now()
|
|
} catch {
|
|
// ignore restart errors
|
|
} finally {
|
|
window.setTimeout(() => {
|
|
restartLockRef.current = false
|
|
}, 500)
|
|
}
|
|
}
|
|
|
|
const handleSave = async () => {
|
|
if (!meetingIdRef.current) return
|
|
setErrorMessage(null)
|
|
try {
|
|
await endMeeting(meetingIdRef.current, new Date().toISOString())
|
|
} catch (err) {
|
|
setErrorMessage((err as Error).message)
|
|
}
|
|
}
|
|
|
|
const handleSelectMeeting = async (id: number) => {
|
|
setErrorMessage(null)
|
|
try {
|
|
const data = await fetchMeeting(id)
|
|
meetingIdRef.current = id
|
|
setCurrentMeetingId(id)
|
|
lineIdRef.current = 1
|
|
setTranscriptLines(
|
|
data.utterances.map((utterance) => ({
|
|
id: lineIdRef.current++,
|
|
ts: utterance.ts,
|
|
text: utterance.text,
|
|
isFinal: true,
|
|
}))
|
|
)
|
|
const lastAnswer = data.answers[data.answers.length - 1]
|
|
setAnswerSuggestions(lastAnswer?.suggestions || [])
|
|
} catch (err) {
|
|
setErrorMessage((err as Error).message)
|
|
}
|
|
}
|
|
|
|
const handleToggleEdit = () => {
|
|
setIsEditMode(true)
|
|
}
|
|
|
|
const handleCancelEdit = () => {
|
|
setIsEditMode(false)
|
|
setSelectedMeetingIds(new Set())
|
|
}
|
|
|
|
const handleToggleSelect = (id: number) => {
|
|
const next = new Set(selectedMeetingIds)
|
|
if (next.has(id)) {
|
|
next.delete(id)
|
|
} else {
|
|
next.add(id)
|
|
}
|
|
setSelectedMeetingIds(next)
|
|
}
|
|
|
|
const handleDelete = async () => {
|
|
if (selectedMeetingIds.size === 0) return
|
|
setErrorMessage(null)
|
|
const ids = Array.from(selectedMeetingIds)
|
|
try {
|
|
await deleteMeetings(ids)
|
|
const updated = meetingsList.filter((meeting) => !selectedMeetingIds.has(meeting.id))
|
|
setMeetingsList(updated)
|
|
setSelectedMeetingIds(new Set())
|
|
setIsEditMode(false)
|
|
if (currentMeetingId && ids.includes(currentMeetingId)) {
|
|
setCurrentMeetingId(null)
|
|
meetingIdRef.current = null
|
|
setTranscriptLines([])
|
|
setAnswerSuggestions([])
|
|
}
|
|
} catch (err) {
|
|
setErrorMessage((err as Error).message)
|
|
}
|
|
}
|
|
|
|
return (
|
|
<div className="app">
|
|
<div className="left-panel">
|
|
{errorMessage && <div className="error-banner">{errorMessage}</div>}
|
|
<TranscriptPanel transcriptLines={transcriptLines} />
|
|
<AnswerPanel suggestions={answerSuggestions} />
|
|
<div className="controls">
|
|
<button
|
|
type="button"
|
|
className={`record-btn ${isRecording ? 'recording' : ''}`}
|
|
onClick={handleStart}
|
|
disabled={isRecording || !hasSpeechRecognition}
|
|
>
|
|
녹음
|
|
</button>
|
|
<button type="button" className="stop-btn" onClick={handleStop} disabled={!isRecording}>
|
|
중지
|
|
</button>
|
|
<button type="button" className="save-btn" onClick={handleSave} disabled={!currentMeetingId}>
|
|
저장
|
|
</button>
|
|
</div>
|
|
{!hasSpeechRecognition && (
|
|
<div className="hint">Chrome에서만 Web Speech API가 안정적으로 동작합니다.</div>
|
|
)}
|
|
</div>
|
|
<div className="right-panel">
|
|
<div className="panel-title">대화 리스트</div>
|
|
<MeetingList
|
|
meetings={meetingsList}
|
|
isEditMode={isEditMode}
|
|
selectedIds={selectedMeetingIds}
|
|
onToggleSelect={handleToggleSelect}
|
|
onSelectMeeting={handleSelectMeeting}
|
|
/>
|
|
<div className="list-controls">
|
|
{!isEditMode ? (
|
|
<button type="button" className="edit-btn" onClick={handleToggleEdit}>
|
|
편집
|
|
</button>
|
|
) : (
|
|
<>
|
|
<button type="button" className="delete-btn" onClick={handleDelete}>
|
|
삭제
|
|
</button>
|
|
<button type="button" className="cancel-btn" onClick={handleCancelEdit}>
|
|
취소
|
|
</button>
|
|
</>
|
|
)}
|
|
</div>
|
|
</div>
|
|
</div>
|
|
)
|
|
}
|
|
|
|
// Root component of the app; default export consumed by the entry module.
export default App
|