Fixed some frontend error handling

This commit is contained in:
Andreas Fruhwirt 2025-04-02 22:17:05 +02:00
parent 29142e3b04
commit 4dd0d91de2
4 changed files with 86 additions and 44 deletions

1
.gitignore vendored
View File

@@ -1,2 +1,3 @@
__pycache__
.env
venv

View File

@@ -42,12 +42,15 @@ def handle_disconnect(client : Client):
info(f"Client disconnected: {client.id}")
if client.id in client_to_session_id:
session_id = client_to_session_id[client.id]
if session_id in session_to_websocket:
cnt = 0
for cid in client_to_session_id:
if client_to_session_id[cid] == session_id:
cnt = cnt + 1
if session_id in session_to_websocket and cnt <= 1:
info(f"Deleting session_id from session_to_websocket")
del session_to_websocket[session_id]
del client_to_session_id[client.id]
app.on_disconnect(handle_disconnect)
def start_recording(ui_elements):
if not refresh_ui_enabled(ui_elements):
return
@@ -105,7 +108,12 @@ def enable_ui(ui_elements, state = 0):
def after_end_audio(ui_elements, output):
debug("after_end_audio")
if output["status"] != "ok":
if output["status"] == "end_audio_failure":
ui_elements["websocket"].generating = False
refresh_ui_enabled(ui_elements)
with ui_elements["main"]:
ui.notify("Luna didn\'t understand that correctly, please try again!", close_button='OK')
elif output["status"] != "ok":
refresh_ui_enabled(ui_elements)
return
@@ -150,6 +158,10 @@ def after_handle_upstream(ui_elements, response):
with ui_elements["main"]:
ui_elements["audio"].set_source(f"data:audio/webm;base64,{response['audio']}")
start_playback(ui_elements["id"])
if "show" in response:
with ui_elements["main"]:
ui_elements["markdown"].set_content(response["show"])
ui_elements["markdown"].update()
pass
@app.post('/api/v1/upload')
@@ -158,7 +170,10 @@ async def handle_audio(credentials: Annotated[HTTPBasicCredentials, Depends(secu
if not is_authorized(credentials):
return Response({"status": "Unauthorized"}, status_code=401)
try:
debug(session_to_websocket)
debug(client_to_session_id)
session_id = request.session["id"]
debug("session_id = " + session_id)
if session_id not in session_to_websocket:
return {"status": "nok"}
websocket = session_to_websocket[session_id]
@@ -184,7 +199,6 @@ async def main_page(credentials: Annotated[HTTPBasicCredentials, Depends(securit
global main_containers, mic_buttons, text_containers
if not is_authorized(credentials):
return Response("Get outta here män :(", status_code=401)
#await ui.context.client.connected()
session_id = request.session["id"]
ui.add_head_html(f"<script type='text/javascript' src='/static/record.js'></script>")
ui.add_head_html(f"<link type='text/tailwindcss' rel='stylesheet' href='/static/style.css'>")
@@ -204,13 +218,27 @@ async def main_page(credentials: Annotated[HTTPBasicCredentials, Depends(securit
button.on('touchstart', lambda: start_recording(ui_elements))
button.on('touchend', lambda: stop_recording(ui_elements))
ui_elements["micbutton"] = button
ui_elements["markdown"] = ui.markdown().props("id=markdown")
audio = ui.audio(src="").classes("disabled opacity-0").props("id=audioout")
audio = ui.audio(src="").props("id=audioout")
audio.on('ended', lambda: stop_playback(ui_elements))
ui_elements["audio"] = audio
ui_elements["websocket"] = WebSocketClient(ui_elements, after_handle_upstream)
ui_elements["websocket"].start_thread(after_websocket_init)
client_to_session_id[client.id] = session_id
session_to_websocket[session_id] = ui_elements["websocket"]
ui.run(show=False)
if (session_id not in session_to_websocket) or (session_id in session_to_websocket and session_to_websocket[session_id].enabled is False):
if session_id in session_to_websocket and session_to_websocket[session_id].enabled is False:
del session_to_websocket[session_id]
ui_elements["websocket"] = WebSocketClient(ui_elements, after_handle_upstream)
ui_elements["websocket"].start_thread(after_websocket_init)
debug("assigned client_to_session_id = " + client.id + ", " + session_id)
client_to_session_id[client.id] = session_id
session_to_websocket[session_id] = ui_elements["websocket"]
else:
if client.id in client_to_session_id:
del client_to_session_id[client.id]
client_to_session_id[client.id] = session_id
ui_elements["websocket"] = session_to_websocket[session_id]
session_to_websocket[session_id].ui_elements = ui_elements
refresh_ui_enabled(ui_elements)
ui.context.client.on_disconnect(handle_disconnect)
ui.run(show=False, port=8642)

View File

@@ -6,6 +6,11 @@ let dataArray;
let animationId;
let audioOutSource;
console.log("Asking for Audio permissions...");
window.addEventListener("load", async (event) => {
await navigator.mediaDevices.getUserMedia({ audio: true });
});
function startButtonPulse(analyser) {
const button = document.querySelector('#recordbutton');
@@ -52,6 +57,7 @@ const startPlayback = async () => {
analyser.fftSize = 2048;
dataArray = new Uint8Array(analyser.frequencyBinCount);
audioOutSource.connect(analyser);
analyser.connect(audioContext.destination);
}
startButtonPulse(analyser);
@@ -73,41 +79,46 @@ const stopPlayback = async () => {
const startRecording = async () => {
try {
console.log("Started recording!");
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
mediaRecorder = new MediaRecorder(stream);
audioChunks = [];
navigator.permissions.query({ name: "microphone" }).then(async (result) => {
if (result.state === "granted") {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
mediaRecorder = new MediaRecorder(stream);
audioChunks = [];
console.log("Started recording!");
// Audio analysis
audioContext = new AudioContext();
analyser = audioContext.createAnalyser();
const source = audioContext.createMediaStreamSource(stream);
analyser.fftSize = 2048;
dataArray = new Uint8Array(analyser.frequencyBinCount);
source.connect(analyser);
// Audio analysis
audioContext = new AudioContext();
analyser = audioContext.createAnalyser();
const source = audioContext.createMediaStreamSource(stream);
analyser.fftSize = 2048;
dataArray = new Uint8Array(analyser.frequencyBinCount);
source.connect(analyser);
startButtonPulse(analyser);
startButtonPulse(analyser);
mediaRecorder.ondataavailable = async event => {
if (event.data.size > 0) {
audioChunks.push(event.data);
url = "/api/v1/upload";
if (mediaRecorder.state === 'inactive') {
url = "/api/v1/send";
}
fetch(url, {
method: 'POST',
body: event.data,
});
mediaRecorder.ondataavailable = async event => {
if (event.data.size > 0) {
console.log("audio available!");
audioChunks.push(event.data);
url = "/api/v1/upload";
if (mediaRecorder.state === 'inactive') {
url = "/api/v1/send";
}
fetch(url, {
method: 'POST',
body: event.data,
});
}
};
mediaRecorder.onstop = async () => {
console.log("Stopped recording!");
stopButtonPulse();
};
mediaRecorder.start(1000);
}
};
mediaRecorder.onstop = async () => {
console.log("Stopped recording!");
stopButtonPulse();
};
mediaRecorder.start(1000);
});
} catch (err) {
console.error('Microphone access denied or error:', err);
alert('Please allow microphone access to record audio.');

View File

@@ -1,7 +1,6 @@
from asyncio import Queue
from asyncio.exceptions import TimeoutError
import traceback
import websocket
import os
import json
import threading
@@ -27,6 +26,7 @@ class WebSocketClient():
self._handlers = {}
self._audio_source = None
self._after_handle_upstream = after_handle_upstream
self.main_thread = None
def __del__(self):
if hasattr(self, "_websocket"):
@@ -79,6 +79,8 @@ class WebSocketClient():
self.generating = False
self._after_handle_upstream(self.ui_elements, {"audio": b64encode(self._audio_source).decode()})
self._audio_source = None
elif request["method"] == "show":
self._after_handle_upstream(self.ui_elements, {"show": request["show"]})
async def input_loop(self):
try:
@@ -156,7 +158,7 @@ class WebSocketClient():
# start thread
def start_thread(self, after_init_func):
threading.Thread(target=self.main_loop, args=(after_init_func,), daemon=True).start()
self.main_thread = threading.Thread(target=self.main_loop, args=(after_init_func,), daemon=True).start()
async def check_health(self):
try: