From b27de8ab2f59d3057f07b396665ca136e73d0749 Mon Sep 17 00:00:00 2001 From: Serhii <snow55mountain@gmail.com> Date: Sun, 15 Sep 2024 19:35:00 +0300 Subject: [PATCH] chore:audio_reload_bug --- frontend/src/pages/Collection.tsx | 10 ++++++++-- linguaphoto/apprunner.yaml | 2 ++ linguaphoto/crud/image.py | 4 +--- linguaphoto/settings.py | 4 ++++ 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/frontend/src/pages/Collection.tsx b/frontend/src/pages/Collection.tsx index 155a339..14062ae 100644 --- a/frontend/src/pages/Collection.tsx +++ b/frontend/src/pages/Collection.tsx @@ -5,7 +5,7 @@ import Modal from "components/modal"; import UploadContent from "components/UploadContent"; import { useAuth } from "contexts/AuthContext"; import { useLoading } from "contexts/LoadingContext"; -import React, { useEffect, useMemo, useState } from "react"; +import React, { useEffect, useMemo, useRef, useState } from "react"; import { Col, Row } from "react-bootstrap"; import { ArrowLeft, @@ -31,6 +31,7 @@ const CollectionPage: React.FC = () => { const { startLoading, stopLoading } = useLoading(); const [showModal, setShowModal] = useState(false); const [images, setImages] = useState<Array<Image> | null>([]); + const audioRef = useRef<HTMLAudioElement | null>(null); const apiClient: AxiosInstance = useMemo( () => @@ -101,6 +102,11 @@ const CollectionPage: React.FC = () => { asyncfunction(); } }, [collection]); + useEffect(() => { + if (audioRef.current) { + audioRef.current.load(); + } + }, [currentTranscriptionIndex, currentImageIndex]); const handleCreate = async (e: React.FormEvent) => { e.preventDefault(); startLoading(); @@ -327,7 +333,7 @@ const CollectionPage: React.FC = () => { .translation } </p> - <audio controls className="mt-4 w-full"> + <audio controls className="mt-4 w-full" ref={audioRef}> <source src={ currentImage.transcriptions[currentTranscriptionIndex] diff --git a/linguaphoto/apprunner.yaml b/linguaphoto/apprunner.yaml index 4879ca6..7273259 100644 --- 
a/linguaphoto/apprunner.yaml +++ b/linguaphoto/apprunner.yaml @@ -29,3 +29,5 @@ run: value-from: "arn:aws:secretsmanager:us-east-1:725596835855:secret:linguaphoto-u59FHw:id::" - name: AWS_SECRET_ACCESS_KEY value-from: "arn:aws:secretsmanager:us-east-1:725596835855:secret:linguaphoto-u59FHw:key::" + - name: OPENAI_API_KEY + value-from: "arn:aws:secretsmanager:us-east-1:725596835855:secret:linguaphoto-u59FHw:openai_key::" diff --git a/linguaphoto/crud/image.py b/linguaphoto/crud/image.py index ee47e00..586138b 100644 --- a/linguaphoto/crud/image.py +++ b/linguaphoto/crud/image.py @@ -82,9 +82,7 @@ async def translate(self, images: List[str], user_id: str) -> List[Image]: if response.status_code == 200: img_source = BytesIO(response.content) # Initialize OpenAI client for transcription and speech synthesis - client = AsyncOpenAI( - api_key="sk-svcacct-PFETCFHtqmHOmIpP_IAyQfBGz5LOpvC6Zudj7d5Wcdp9WjJT4ImAxuotGcpyT3BlbkFJRbtswQqIxYHam9TN13mCM04_OTZE-v8z-Rw1WEcwzyZqW_GcK0PNNyFp6BcA" - ) + client = AsyncOpenAI(api_key=settings.openai_key) transcription_response = await transcribe_image(img_source, client) # Process each transcription and generate corresponding audio for i, transcription in enumerate(transcription_response.transcriptions): diff --git a/linguaphoto/settings.py b/linguaphoto/settings.py index 89a4621..4735394 100644 --- a/linguaphoto/settings.py +++ b/linguaphoto/settings.py @@ -25,6 +25,10 @@ class Settings: aws_region_name = os.getenv("AWS_REGION") aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID") aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY") + openai_key = os.getenv( + "OPENAI_API_KEY", + "",  # no hard-coded fallback — key is injected via App Runner secrets; rotate the previously committed key + ) settings = Settings()