mirror of
https://github.com/MODSetter/SurfSense.git
synced 2026-05-01 20:03:30 +02:00
109 lines
No EOL
3.1 KiB
TypeScript
"use client";
|
|
|
|
import { useState, useRef } from "react";
|
|
import { Button } from "@/components/ui/button";
|
|
import { Mic, Square, Upload } from "lucide-react";
|
|
|
|
/** Props for the {@link AudioRecorder} component. */
interface AudioRecorderProps {
	/** Called with the transcribed text after a successful transcription. */
	onTranscription: (text: string) => void;
	/** Base URL of the speech-to-text API. Defaults to "/api/v1/stt". */
	apiUrl?: string;
}
|
|
|
|
export function AudioRecorder({ onTranscription, apiUrl = "/api/v1/stt" }: AudioRecorderProps) {
|
|
const [isRecording, setIsRecording] = useState(false);
|
|
const [isTranscribing, setIsTranscribing] = useState(false);
|
|
const mediaRecorderRef = useRef<MediaRecorder | null>(null);
|
|
const chunksRef = useRef<Blob[]>([]);
|
|
|
|
const startRecording = async () => {
|
|
try {
|
|
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
|
const mediaRecorder = new MediaRecorder(stream);
|
|
mediaRecorderRef.current = mediaRecorder;
|
|
chunksRef.current = [];
|
|
|
|
mediaRecorder.ondataavailable = (event) => {
|
|
chunksRef.current.push(event.data);
|
|
};
|
|
|
|
mediaRecorder.onstop = async () => {
|
|
const audioBlob = new Blob(chunksRef.current, { type: "audio/wav" });
|
|
await transcribeAudio(audioBlob);
|
|
stream.getTracks().forEach(track => track.stop());
|
|
};
|
|
|
|
mediaRecorder.start();
|
|
setIsRecording(true);
|
|
} catch (error) {
|
|
console.error("Error starting recording:", error);
|
|
}
|
|
};
|
|
|
|
const stopRecording = () => {
|
|
if (mediaRecorderRef.current && isRecording) {
|
|
mediaRecorderRef.current.stop();
|
|
setIsRecording(false);
|
|
}
|
|
};
|
|
|
|
const transcribeAudio = async (audioBlob: Blob) => {
|
|
setIsTranscribing(true);
|
|
|
|
const formData = new FormData();
|
|
formData.append("audio", audioBlob, "recording.wav");
|
|
|
|
try {
|
|
const response = await fetch(`${apiUrl}/transcribe`, {
|
|
method: "POST",
|
|
body: formData,
|
|
});
|
|
|
|
if (!response.ok) throw new Error("Transcription failed");
|
|
|
|
const result = await response.json();
|
|
onTranscription(result.transcription);
|
|
} catch (error) {
|
|
console.error("Transcription error:", error);
|
|
} finally {
|
|
setIsTranscribing(false);
|
|
}
|
|
};
|
|
|
|
const handleFileUpload = async (event: React.ChangeEvent<HTMLInputElement>) => {
|
|
const file = event.target.files?.[0];
|
|
if (!file) return;
|
|
|
|
await transcribeAudio(file);
|
|
};
|
|
|
|
return (
|
|
<div className="flex gap-2 items-center">
|
|
<Button
|
|
onClick={isRecording ? stopRecording : startRecording}
|
|
disabled={isTranscribing}
|
|
variant={isRecording ? "destructive" : "default"}
|
|
size="sm"
|
|
>
|
|
{isRecording ? <Square className="w-4 h-4" /> : <Mic className="w-4 h-4" />}
|
|
{isRecording ? "Stop" : "Record"}
|
|
</Button>
|
|
|
|
<label>
|
|
<Button variant="outline" size="sm" disabled={isTranscribing} asChild>
|
|
<span>
|
|
<Upload className="w-4 h-4" />
|
|
Upload
|
|
</span>
|
|
</Button>
|
|
<input
|
|
type="file"
|
|
accept="audio/*"
|
|
onChange={handleFileUpload}
|
|
className="hidden"
|
|
/>
|
|
</label>
|
|
|
|
{isTranscribing && <span className="text-sm text-muted-foreground">Transcribing...</span>}
|
|
</div>
|
|
);
|
|
} |