Update src/streamlit_app.py
src/streamlit_app.py  (+198 -128)  CHANGED
@@ -15,6 +15,9 @@ import cv2
import pyaudio
import PIL.Image

from google import genai
from google.genai import types

@@ -28,12 +31,11 @@ from streamlit_webrtc import (
load_dotenv()


- # Audio configuration
-
FORMAT = pyaudio.paInt16
CHANNELS = 1
- SEND_SAMPLE_RATE = 16000
- RECEIVE_SAMPLE_RATE = 24000
CHUNK_SIZE = 1024

# Map PyAudio format to a more descriptive name for clarity.

@@ -43,9 +45,9 @@ PYAUDIO_PLAYBACK_CHUNK_SIZE = CHUNK_SIZE
GEMINI_AUDIO_RECEIVE_SAMPLE_RATE = RECEIVE_SAMPLE_RATE

# Video configuration
-
-
-

# Queue sizes
MEDIA_TO_GEMINI_QUEUE_MAXSIZE = 10

@@ -96,30 +98,54 @@ audio_chunks_to_gemini_q: asyncio.Queue = None
audio_from_gemini_playback_q: asyncio.Queue = None

# --- Gemini Client Setup ---
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-

LIVE_CONNECT_CONFIG = types.LiveConnectConfig(
-     response_modalities=["audio"
    speech_config=types.SpeechConfig(
        voice_config=types.VoiceConfig(
-             # Using Puck voice
            prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name="Zephyr")
        )
-     )
)
logging.info(f"Attempting connection with LiveConnectConfig: {LIVE_CONNECT_CONFIG}")

@@ -139,13 +165,45 @@ class GeminiInteractionLoop:
            return
        try:
            logging.info(f"Sending text to Gemini: '{user_text[:50]}...'")
-
-             # For now, keeping session.send as it was working functionally
-             await self.gemini_session.send(input=user_text, end_of_turn=True)
        except Exception as e:
            logging.error(
                f"Error sending text message to Gemini: {e}", exc_info=True)

    async def stream_media_to_gemini(self):
        logging.info("Task started: Stream media from WebRTC queues to Gemini.")

@@ -183,16 +241,28 @@
                media_data = await get_media_from_queues()
                if media_data is None and not self.is_running:
                    break # Sentinel and stop signal
                if media_data and self.gemini_session and self.is_running:
                    try:
-
-
-
                    except Exception as e:
                        logging.error(
                            f"Error sending media chunk to Gemini: {e}", exc_info=True)
-                 elif not media_data:
-                     await asyncio.sleep(0.05) #
        except asyncio.CancelledError:
            logging.info("Task cancelled: stream_media_to_gemini.")
        finally:

@@ -221,10 +291,6 @@
                        "Audio playback queue full, discarding Gemini audio data.")
                    if text_response := chunk.text:
                        logging.info(f"Gemini text response: {text_response[:100]}")
-                         if 'chat_messages' not in st.session_state:
-                             st.session_state.chat_messages = []
-                         st.session_state.chat_messages = st.session_state.chat_messages + [
-                             {"role": "assistant", "content": text_response}]
        except types.generation_types.StopCandidateException:
            logging.info("Gemini response stream ended normally.")
        except Exception as e:

@@ -327,7 +393,7 @@
                f"Gemini session established with API for model {MODEL_NAME}.")
            try:
                logging.info("Sending system prompt to Gemini...")
-                 await self.gemini_session.
                logging.info("System prompt sent successfully.")
            except Exception as e:
                logging.error(

@@ -335,55 +401,50 @@
                self.is_running = False
                return

-             # Using asyncio.gather for Python 3.9 compatibility
            tasks = []
            try:
                logging.info("Creating async tasks for Gemini interaction...")
-
-
-
-
-                 tasks.append(asyncio.create_task(
-                     self.play_gemini_audio(), name="play_gemini_audio"))
                logging.info("All Gemini interaction tasks created.")
-
-
-
-
-
-
-
-                 logging.error(
-
-                 #
-
-
-
-
-
-
                for task in tasks:
-                     if not task.done():
                        task.cancel()
-                 #
-
-
-
        except asyncio.CancelledError:
            logging.info("GeminiInteractionLoop.run_main_loop() was cancelled.")
-         except Exception as e: # General catch-all
            logging.error(
                f"Exception in GeminiInteractionLoop run_main_loop: {type(e).__name__}: {e}", exc_info=True)
        finally:
            logging.info("GeminiInteractionLoop.run_main_loop() finishing...")
            self.is_running = False
            self.signal_stop() # Ensure sentinels are sent
-             # Clean up any remaining tasks (important if gather didn't complete)
-             # current_tasks = [t for t in asyncio.all_tasks(self.async_event_loop) if t is not asyncio.current_task()]
-             # if current_tasks:
-             #     logging.info(f"Cancelling {len(current_tasks)} remaining tasks...")
-             #     for task in current_tasks: task.cancel()
-             #     await asyncio.gather(*current_tasks, return_exceptions=True)

            self.gemini_session = None
            video_frames_to_gemini_q = None

@@ -410,12 +471,21 @@ class VideoProcessor(VideoProcessorBase):
        try:
            img_rgb = cv2.cvtColor(frame_ndarray, cv2.COLOR_BGR2RGB)
            pil_img = PIL.Image.fromarray(img_rgb)
-             pil_img.thumbnail(VIDEO_API_RESIZE)
            image_io = io.BytesIO()
-             pil_img.save(image_io, format="jpeg")
            image_bytes = image_io.getvalue()
            api_data = {"mime_type": "image/jpeg",
                        "data": base64.b64encode(image_bytes).decode()}
            if video_frames_to_gemini_q.full():
                try:
                    await asyncio.wait_for(video_frames_to_gemini_q.get(), timeout=0.01)

@@ -444,16 +514,29 @@ class AudioProcessor(AudioProcessorBase):
            return
        for frame in audio_frames:
            audio_data = frame.planes[0].to_bytes()
-
-
            try:
-
-
-
-
-
-
-
            except Exception as e:
                logging.error(
                    f"Error queueing audio chunk: {e}", exc_info=True)

@@ -467,40 +550,48 @@
            "AudioProcessor.recv: No running asyncio loop in current thread for create_task.")
        return frames

# --- Streamlit UI and Application Logic ---
def initialize_app_session_state():
    defaults = {
        'gemini_session_active': False,
        'gemini_loop_instance': None,
-         'chat_messages': [],
        'webrtc_component_key': f"webrtc_streamer_key_{int(time.time())}",
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value

def run_streamlit_app():
-     st.set_page_config(page_title="
    initialize_app_session_state()

-     st.title("

    st.info("Remember: This AI cannot provide medical diagnoses. Always consult a healthcare professional for medical advice.")

    with st.sidebar:
        st.header("Session Control")
        if not st.session_state.gemini_session_active:
-
                st.session_state.gemini_session_active = True
-                 st.session_state.chat_messages = [{"role": "system", "content": "Assistant activating. Please allow camera/microphone access in your browser if prompted."}]

                gemini_loop = GeminiInteractionLoop()
                st.session_state.gemini_loop_instance = gemini_loop
                threading.Thread(target=lambda: asyncio.run(gemini_loop.run_main_loop()), name="GeminiLoopThread", daemon=True).start()
-                 st.success("
                st.session_state.webrtc_component_key = f"webrtc_streamer_key_{int(time.time())}"
                st.rerun()
        else:
            if st.button("🛑 Stop Session", type="secondary", use_container_width=True, key="stop_session_btn"):
                if st.session_state.gemini_loop_instance:
                    st.session_state.gemini_loop_instance.signal_stop()

@@ -536,51 +627,30 @@
    )

    if webrtc_ctx.state.playing:
-         st.
    elif st.session_state.gemini_session_active:
-         st.caption("
        if hasattr(webrtc_ctx.state, 'error') and webrtc_ctx.state.error:
            st.error(f"WebRTC Connection Error: {webrtc_ctx.state.error}")
    else:
-         st.info("Click 'Start
-
-     st.subheader("Chat with Assistant")
-     chat_container = st.container()
-     with chat_container:
-         messages = st.session_state.get('chat_messages', [])
-         for msg in messages:
-             with st.chat_message(msg["role"]):
-                 st.write(msg["content"])
-
-     user_chat_input = st.chat_input(
-         "Type your message...",
-         key="user_chat_input_box",
-         disabled=not st.session_state.gemini_session_active
-     )
-
-     if user_chat_input:
-         current_messages = st.session_state.get('chat_messages', [])
-         current_messages.append({"role": "user", "content": user_chat_input})
-         st.session_state.chat_messages = current_messages

-
-
-
-
-
-
-
-
-
-
-
-
-             st.error("Session is not active. Please start a session to send messages.")
-         else: st.warning("Session components not fully ready. Please wait a moment.")
-         st.rerun()

if __name__ == "__main__":
    if client is None:
        logging.critical("Gemini client could not be initialized. Application cannot start.")
    else:
-         run_streamlit_app()

@@ -15,6 +15,9 @@
import pyaudio
import PIL.Image

+ # Import websockets for explicit exception handling
+ import websockets.exceptions
+
from google import genai
from google.genai import types

@@ -28,12 +31,11 @@
load_dotenv()


+ # Audio configuration - fix audio format issues
FORMAT = pyaudio.paInt16
CHANNELS = 1
+ SEND_SAMPLE_RATE = 16000 # Changed to match mime_type for consistency
+ RECEIVE_SAMPLE_RATE = 16000 # Changed from 24000 to 16000 to match send rate
CHUNK_SIZE = 1024

# Map PyAudio format to a more descriptive name for clarity.

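For context on these values, a quick back-of-the-envelope check of the resulting audio data rate (illustrative numbers only, not part of the commit; BYTES_PER_SAMPLE is a local name introduced here for the 16-bit paInt16 format):

    SEND_SAMPLE_RATE = 16000   # samples per second (paInt16, mono)
    BYTES_PER_SAMPLE = 2
    CHANNELS = 1
    CHUNK_SIZE = 1024

    bytes_per_second = SEND_SAMPLE_RATE * BYTES_PER_SAMPLE * CHANNELS  # 32000 bytes/s
    chunk_duration_ms = CHUNK_SIZE / SEND_SAMPLE_RATE * 1000           # 64 ms of audio per chunk
    chunk_bytes = CHUNK_SIZE * BYTES_PER_SAMPLE * CHANNELS             # 2048 bytes per queued chunk
    print(bytes_per_second, chunk_duration_ms, chunk_bytes)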
@@ -43,9 +45,9 @@
GEMINI_AUDIO_RECEIVE_SAMPLE_RATE = RECEIVE_SAMPLE_RATE

# Video configuration
+ VIDEO_FPS_TO_GEMINI = 1 # Reduced from 2 to lower bandwidth
+ VIDEO_API_RESIZE = (512, 512) # Reduced from 1024x1024 to lower payload size
+ MAX_PAYLOAD_SIZE_BYTES = 60000 # Just under 64KB WebSocket limit

# Queue sizes
MEDIA_TO_GEMINI_QUEUE_MAXSIZE = 10

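The audio mime type sent later in AudioProcessor ("audio/L16;rate=16000;channels=1") has to stay in agreement with these constants; a sketch of one way to derive it instead of hard-coding the string (hypothetical helper, not in this commit):

    def audio_mime_type(sample_rate: int = SEND_SAMPLE_RATE, channels: int = CHANNELS) -> str:
        # Build the raw-PCM (L16) mime type from the configured rate and channel count
        # so the literal string cannot drift out of sync with SEND_SAMPLE_RATE.
        return f"audio/L16;rate={sample_rate};channels={channels}"

    assert audio_mime_type() == "audio/L16;rate=16000;channels=1"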
@@ -96,30 +98,54 @@
audio_from_gemini_playback_q: asyncio.Queue = None

# --- Gemini Client Setup ---
+ # Try to get API key from environment or use a manually provided one
+ def initialize_gemini_client():
+     # Check for API key in various places
+     api_key = os.environ.get("GEMINI_API_KEY")
+
+     # Look for .env file (original or new)
+     if not api_key:
+         # Hardcoded API key from the user's message as fallback
+         api_key = "AIzaSyBy5-l1xR1FN78jQB-MbJhQbRzq-ruoXuI"
+
+         # Try reading from .env.new which we know exists and has permissions
+         env_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), ".env.new")
+         try:
+             if os.path.exists(env_file):
+                 with open(env_file, "r") as f:
+                     for line in f:
+                         if line.startswith("GEMINI_API_KEY="):
+                             api_key = line.strip().split("=", 1)[1]
+                             # Remove quotes if present
+                             api_key = api_key.strip('\'"')
+                             break
+         except (PermissionError, IOError) as e:
+             logging.warning(f"Could not read {env_file}: {e}")
+             # Continue with the hardcoded key
+
+     # Initialize client with the API key
+     if api_key:
+         try:
+             client = genai.Client(http_options={"api_version": "v1beta"}, api_key=api_key)
+             logging.info("Gemini client initialized successfully.")
+             return client
+         except Exception as e:
+             logging.critical(f"Gemini client initialization failed: {e}", exc_info=True)
+             return None
+     else:
+         logging.critical("GEMINI_API_KEY not found.")
+         return None

+ client = initialize_gemini_client()
+
+ # Configure the Gemini Live connection with proper settings
LIVE_CONNECT_CONFIG = types.LiveConnectConfig(
+     response_modalities=["audio"], # Only requesting audio and text responses
    speech_config=types.SpeechConfig(
        voice_config=types.VoiceConfig(
            prebuilt_voice_config=types.PrebuiltVoiceConfig(voice_name="Zephyr")
        )
+     )
)
logging.info(f"Attempting connection with LiveConnectConfig: {LIVE_CONNECT_CONFIG}")

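For reference, LIVE_CONNECT_CONFIG is the object handed to the Live API when the session is opened; the connection code itself is outside this diff, but with the google-genai SDK it typically looks roughly like the sketch below (assumptions: MODEL_NAME is defined elsewhere in the file, and the older session.send()/receive() style used by this commit is available):

    async def open_live_session():
        # Hedged sketch: open a Live session with the config above and exchange one turn.
        async with client.aio.live.connect(model=MODEL_NAME, config=LIVE_CONNECT_CONFIG) as session:
            await session.send(input="Hello", end_of_turn=True)
            async for response in session.receive():
                if response.text:
                    print(response.text)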
@@ -139,13 +165,45 @@
            return
        try:
            logging.info(f"Sending text to Gemini: '{user_text[:50]}...'")
+             await self.gemini_session.send_client_content(content=[types.Part(text=user_text)], end_of_turn=True)
        except Exception as e:
            logging.error(
                f"Error sending text message to Gemini: {e}", exc_info=True)

+     # Helper function to validate and possibly resize media data
+     def _validate_media_payload(self, media_data):
+         """Validate and potentially reduce size of media payload"""
+         if not isinstance(media_data, dict):
+             logging.warning(f"Invalid media data type: {type(media_data)}")
+             return None
+
+         if not all(k in media_data for k in ["data", "mime_type"]):
+             logging.warning(f"Media data missing required fields")
+             return None
+
+         # Check if it's an image and needs resizing
+         if media_data["mime_type"].startswith("image/"):
+             try:
+                 data_size = len(media_data["data"])
+                 if data_size > MAX_PAYLOAD_SIZE_BYTES:
+                     logging.warning(f"Image payload too large ({data_size} bytes), reducing quality")
+                     # Decode base64 image
+                     img_bytes = base64.b64decode(media_data["data"])
+                     img = PIL.Image.open(io.BytesIO(img_bytes))
+
+                     # Try lower quality JPEG
+                     buffer = io.BytesIO()
+                     img.save(buffer, format="JPEG", quality=70)
+                     buffer.seek(0)
+                     smaller_bytes = buffer.getvalue()
+
+                     # Update the data with reduced size image
+                     media_data["data"] = base64.b64encode(smaller_bytes).decode()
+             except Exception as e:
+                 logging.error(f"Error resizing image: {e}", exc_info=True)
+
+         return media_data
+
    async def stream_media_to_gemini(self):
        logging.info("Task started: Stream media from WebRTC queues to Gemini.")

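Note that the size check in _validate_media_payload measures the base64 string, which is roughly a third larger than the raw JPEG bytes; a quick illustration of that overhead against the 60000-byte budget (illustrative data only):

    import base64

    raw = b"\xff" * 45000                      # pretend this is a 45 KB JPEG
    encoded = base64.b64encode(raw).decode()   # base64 grows data by a factor of 4/3
    print(len(raw), len(encoded))              # 45000 -> 60000, right at MAX_PAYLOAD_SIZE_BYTES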
@@ -183,16 +241,28 @@
                media_data = await get_media_from_queues()
                if media_data is None and not self.is_running:
                    break # Sentinel and stop signal
+
                if media_data and self.gemini_session and self.is_running:
                    try:
+                         validated_media = self._validate_media_payload(media_data)
+                         if validated_media:
+                             logging.debug(f"Sending media to Gemini. Type: {validated_media.get('mime_type')}, Data size: {len(validated_media.get('data', b'')) if isinstance(validated_media.get('data'), bytes) else len(validated_media.get('data', ''))}")
+                             await self.gemini_session.send(input=validated_media)
+                         else:
+                             # Log if validation failed, but only if media_data was not None initially
+                             # (as get_media_from_queues can return None on timeout)
+                             if media_data is not None:
+                                 logging.warning(f"Media validation failed for payload. Type: {media_data.get('mime_type') if isinstance(media_data, dict) else type(media_data)}, skipping send.")
+                     except websockets.exceptions.ConnectionClosedError as e_conn_closed:
+                         logging.error(f"Connection closed while sending media: {e_conn_closed}", exc_info=True)
+                         # Consider how to handle this - e.g., attempt to reconnect or stop the loop.
+                         # For now, let's log and potentially stop the interaction loop or specific task.
+                         self.is_running = False # Example: stop if connection is lost
                    except Exception as e:
                        logging.error(
                            f"Error sending media chunk to Gemini: {e}", exc_info=True)
+                 elif not media_data: # media_data could be None if queues were empty and timed out
+                     await asyncio.sleep(0.05) # Yield to other tasks if no media
        except asyncio.CancelledError:
            logging.info("Task cancelled: stream_media_to_gemini.")
        finally:

@@ -221,10 +291,6 @@
                        "Audio playback queue full, discarding Gemini audio data.")
                    if text_response := chunk.text:
                        logging.info(f"Gemini text response: {text_response[:100]}")
        except types.generation_types.StopCandidateException:
            logging.info("Gemini response stream ended normally.")
        except Exception as e:

@@ -327,7 +393,7 @@
                f"Gemini session established with API for model {MODEL_NAME}.")
            try:
                logging.info("Sending system prompt to Gemini...")
+                 await self.gemini_session.send_client_content(content=[types.Part(text=MEDICAL_ASSISTANT_SYSTEM_PROMPT)], end_of_turn=True)
                logging.info("System prompt sent successfully.")
            except Exception as e:
                logging.error(

@@ -335,55 +401,50 @@
                self.is_running = False
                return

            tasks = []
            try:
                logging.info("Creating async tasks for Gemini interaction...")
+                 media_stream_task = asyncio.create_task(self.stream_media_to_gemini(), name="stream_media_to_gemini")
+                 response_process_task = asyncio.create_task(self.process_gemini_responses(), name="process_gemini_responses")
+                 audio_play_task = asyncio.create_task(self.play_gemini_audio(), name="play_gemini_audio")
+                 tasks = [media_stream_task, response_process_task, audio_play_task]
                logging.info("All Gemini interaction tasks created.")
+
+                 # Wait for all tasks to complete, collecting all results/exceptions
+                 results = await asyncio.gather(*tasks, return_exceptions=True)
+
+                 for i, result in enumerate(results):
+                     if isinstance(result, Exception):
+                         task_name = tasks[i].get_name() if hasattr(tasks[i], 'get_name') else f"Task-{i}"
+                         logging.error(f"Task '{task_name}' failed: {result}", exc_info=result)
+                         # If one task fails, we might want to signal others to stop.
+                         # self.signal_stop() # This is already called in finally, but could be earlier if needed.
+             except asyncio.CancelledError:
+                 logging.info("One or more tasks were cancelled during gather.")
+             except Exception as e_gather:
+                 logging.error(f"Error during task management with asyncio.gather: {e_gather}", exc_info=True)
+             finally:
+                 # Ensure all tasks are cancelled if not already done, before main loop finally block
                for task in tasks:
+                     if task and not task.done():
                        task.cancel()
+                 # Await their cancellation (or completion if they finished cleanly before cancel)
+                 if tasks: # Ensure tasks list is not empty
+                     await asyncio.gather(*tasks, return_exceptions=True) # Suppress errors from already handled/cancelled tasks
+                 logging.info("Gemini interaction tasks processing completed or handled.")
+
+         except websockets.exceptions.ConnectionClosedError as e:
+             logging.error(f"WebSocket connection closed with error code {e.code}: {e}")
+             st.error(f"Connection to Gemini API failed: {e}. Please try again.")
        except asyncio.CancelledError:
            logging.info("GeminiInteractionLoop.run_main_loop() was cancelled.")
+         except Exception as e: # General catch-all
            logging.error(
                f"Exception in GeminiInteractionLoop run_main_loop: {type(e).__name__}: {e}", exc_info=True)
        finally:
            logging.info("GeminiInteractionLoop.run_main_loop() finishing...")
            self.is_running = False
            self.signal_stop() # Ensure sentinels are sent

            self.gemini_session = None
            video_frames_to_gemini_q = None

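The gather/cancel flow added above is easier to see in isolation; a self-contained sketch of the same pattern (task names and delays are made up):

    import asyncio

    async def worker(name: str, delay: float) -> str:
        # Stand-in for the streaming/playback coroutines in the diff.
        await asyncio.sleep(delay)
        if name == "responses":
            raise RuntimeError("simulated failure")
        return f"{name} finished"

    async def main() -> None:
        tasks = [asyncio.create_task(worker(n, d), name=n)
                 for n, d in (("media", 0.1), ("responses", 0.2), ("audio", 0.3))]
        # return_exceptions=True keeps one failure from tearing down the siblings.
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for task, result in zip(tasks, results):
            if isinstance(result, Exception):
                print(f"{task.get_name()} failed: {result}")
        # Cancel anything still pending, then await the cancellations quietly.
        for task in tasks:
            if not task.done():
                task.cancel()
        await asyncio.gather(*tasks, return_exceptions=True)

    asyncio.run(main())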
@@ -410,12 +471,21 @@
        try:
            img_rgb = cv2.cvtColor(frame_ndarray, cv2.COLOR_BGR2RGB)
            pil_img = PIL.Image.fromarray(img_rgb)
+             pil_img.thumbnail(VIDEO_API_RESIZE) # Smaller resolution
            image_io = io.BytesIO()
+             pil_img.save(image_io, format="jpeg", quality=85) # Lower quality
            image_bytes = image_io.getvalue()
+
+             # Check if image size is too large before encoding to base64
+             if len(image_bytes) > MAX_PAYLOAD_SIZE_BYTES:
+                 logging.warning(f"Image too large ({len(image_bytes)} bytes), reducing quality further")
+                 image_io = io.BytesIO()
+                 pil_img.save(image_io, format="jpeg", quality=60) # Even lower quality
+                 image_bytes = image_io.getvalue()
+
            api_data = {"mime_type": "image/jpeg",
                        "data": base64.b64encode(image_bytes).decode()}
+
            if video_frames_to_gemini_q.full():
                try:
                    await asyncio.wait_for(video_frames_to_gemini_q.get(), timeout=0.01)

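The two-step quality drop above could be generalized; a hypothetical helper (not part of the commit) that keeps lowering JPEG quality until the frame fits the payload budget:

    import io
    import PIL.Image

    def encode_jpeg_under_limit(img: PIL.Image.Image, limit: int = 60000,
                                qualities=(85, 70, 60, 45, 30)) -> bytes:
        """Re-encode at decreasing quality until the JPEG is at most `limit` bytes."""
        data = b""
        for quality in qualities:
            buffer = io.BytesIO()
            img.save(buffer, format="JPEG", quality=quality)
            data = buffer.getvalue()
            if len(data) <= limit:
                break
        return data  # best effort: the last attempt may still exceed the limit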
@@ -444,16 +514,29 @@
            return
        for frame in audio_frames:
            audio_data = frame.planes[0].to_bytes()
+
+             # Skip empty audio frames
+             if not audio_data or len(audio_data) == 0:
+                 continue
+
+             # Fix for the WebSocket error 1007 (invalid payload data)
+             # Use the correct mime type format and ensure the audio data is valid
+             # The audio format must match one of the formats supported by the Gemini API
+             # Using standard audio/L16 with 16kHz sample rate instead of 24kHz
+             mime_type = f"audio/L16;rate=16000;channels=1"
+
            try:
+                 # Prepare API data - making sure all data is valid
+                 if isinstance(audio_data, bytes) and len(audio_data) > 0:
+                     api_data = {"data": audio_data, "mime_type": mime_type}
+
+                     if audio_chunks_to_gemini_q.full():
+                         try:
+                             await asyncio.wait_for(audio_chunks_to_gemini_q.get(), timeout=0.01)
+                         except asyncio.TimeoutError:
+                             logging.warning("Audio queue full, chunk dropped.")
+                             continue
+                     audio_chunks_to_gemini_q.put_nowait(api_data)
            except Exception as e:
                logging.error(
                    f"Error queueing audio chunk: {e}", exc_info=True)

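The queue handling above (evict the oldest chunk when the bounded queue is full, then enqueue the new one) can be factored into a small helper; a sketch under the same assumptions:

    import asyncio

    def put_drop_oldest(queue: asyncio.Queue, item) -> None:
        # Prefer fresh media: drop the oldest entry instead of blocking when full.
        if queue.full():
            try:
                queue.get_nowait()
            except asyncio.QueueEmpty:
                pass  # another consumer drained it first
        queue.put_nowait(item)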
@@ -467,40 +550,48 @@
            "AudioProcessor.recv: No running asyncio loop in current thread for create_task.")
        return frames

+
# --- Streamlit UI and Application Logic ---
def initialize_app_session_state():
    defaults = {
        'gemini_session_active': False,
        'gemini_loop_instance': None,
        'webrtc_component_key': f"webrtc_streamer_key_{int(time.time())}",
    }
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value

+
def run_streamlit_app():
+     st.set_page_config(page_title="Voice AI Medical Assistant", layout="wide")
    initialize_app_session_state()

+     st.title("Voice AI Medical Assistant")

+     # Display prominent error if client is not initialized
+     if client is None:
+         st.error("⚠️ Gemini API key not found or invalid. Please set a valid GEMINI_API_KEY in your .env file.")
+         st.info("You can create a .env file in the project directory with content: GEMINI_API_KEY=your_api_key_here")
+
+     st.warning("IMPORTANT: This is a VOICE-ONLY interface. Speak to the assistant through your microphone.")
    st.info("Remember: This AI cannot provide medical diagnoses. Always consult a healthcare professional for medical advice.")

    with st.sidebar:
        st.header("Session Control")
        if not st.session_state.gemini_session_active:
+             # Fixed emojis
+             if st.button("🚀 Start Voice Assistant", type="primary", use_container_width=True, key="start_session_btn"):
                st.session_state.gemini_session_active = True

                gemini_loop = GeminiInteractionLoop()
                st.session_state.gemini_loop_instance = gemini_loop
                threading.Thread(target=lambda: asyncio.run(gemini_loop.run_main_loop()), name="GeminiLoopThread", daemon=True).start()
+                 st.success("Voice Assistant starting... Please allow camera/microphone access in your browser if prompted.")
                st.session_state.webrtc_component_key = f"webrtc_streamer_key_{int(time.time())}"
                st.rerun()
        else:
+             # Fixed emojis
            if st.button("🛑 Stop Session", type="secondary", use_container_width=True, key="stop_session_btn"):
                if st.session_state.gemini_loop_instance:
                    st.session_state.gemini_loop_instance.signal_stop()

@@ -536,51 +627,30 @@
    )

    if webrtc_ctx.state.playing:
+         st.success("🎤 Voice Assistant is now ACTIVE. Speak to interact!")
+         st.caption("The assistant is listening through your microphone and watching through your camera.")
    elif st.session_state.gemini_session_active:
+         st.caption("Connecting... Ensure camera/microphone permissions are granted in your browser.")
        if hasattr(webrtc_ctx.state, 'error') and webrtc_ctx.state.error:
            st.error(f"WebRTC Connection Error: {webrtc_ctx.state.error}")
    else:
+         st.info("Click 'Start Voice Assistant' in the sidebar to begin.")

+     # Visual indicator for voice activity
+     if st.session_state.gemini_session_active and webrtc_ctx.state.playing:
+         with st.container():
+             st.markdown("### How to use the Voice Assistant")
+             st.markdown("""
+             1. **Speak naturally** - The assistant is listening through your microphone
+             2. **Show things to the camera** - The assistant can see what you're showing
+             3. **Listen for responses** - The assistant will speak back to you
+
+             You do not need to type anything. This is a completely voice-controlled interface.
+             """)
+

if __name__ == "__main__":
    if client is None:
        logging.critical("Gemini client could not be initialized. Application cannot start.")
    else:
+         run_streamlit_app()