mirror of https://github.com/sunnypilot/sunnypilot.git, synced 2026-02-22 08:53:55 +08:00
* WebRTCClient and WebRTCServer abstractions
* webrtc client implementation
* Interactive test scripts
* Send localDescriptions as offer/answer, as they are different
* Tracks need to be added after setting remote description for multi-cam streaming to work (see the sketch after this list)
* Remove WebRTCStreamingMetadata
* Wait for tracks
* Move stuff to separate files, rename some things
* Refactor everything, create WebRTCStreamBuilder for both offers and answers
* ta flight done time to grind
* wait for incoming tracks and channels
* Dummy track and frame reader track. Fix timing.
* dt based on camera type
* first trial of the new api
* Fix audio track
* methods for checking for incoming tracks
* Web migration part 2
* Fixes for stream api
* use rtc description for web.py
* experimental cereal proxy
* remove old code from bodyav
* fix is_started
* serialize session description
* fix audio
* messaging channel wrapper
* fix audiotrack
* h264 codec preference
* Add codec preference to tracks
* override sdp codecs
* add logging
* Move cli stuff to separate file
* slight cleanup
* Fix audio track
* create codec_mime inside force_codec function
* fix incoming media estimation
* move builders to __init__
* stream updates following builders
* Update example script
* web.py support for new builder
* web speaker fixes
* StreamingMediaInfo API
* Move things around
* should_add_data_channel rename
* is_connected_and_ready
* fix linter errors
* make cli executable
* remove dumb comments
* logging support
* fix parse_info_from_offer
* improve type annotations
* satisfy linters
* Support for waiting for disconnection
* Split device tracks into video/audio files. Move audio speaker to audio.py
* default dt for dummy video track
* Fix cli
* new speaker fixes
* Remove almost all functionality from web.py
* webrtcd
* continue refactoring web.py
* after handling joystick reset in controlsd with #30409, controls are not necessary anymore
* ping endpoint
* Update js files to at least support what worked previously
* Fixes after some tests on the body
* Streaming fixes
* Remove the use of WebRTCStreamBuilder. Subclass use is now required
* Add todo
* delete all streams on shutdown
* Replace lastPing with lastChannelMessageTime
* Update ping text only if rtc is still on
* That should affect the chart too
* Fix paths in web
* use protocol in SSLContext
* remove warnings since aiortc is not used directly anymore
* check if task is done in stop
* remove channel handler wrapper, since there's only one channel
* Move things around
* Moved webrtc abstractions to separate repository
* Moved webrtcd to tools/webrtc
* Update imports
* Add bodyrtc as dependency
* Add webrtcd to process_config
* Remove usage of DummyVideoStreamTrack
* Add main to webrtcd
* Move webrtcd to system
* Fix imports
* Move cereal proxy logic outside of runner
* Incoming proxy abstractions
* Add some tests
* Make it executable
* Fix process config
* Fix imports
* Additional tests.
* Add tests to pyproject.toml
* Update poetry lock
* New line
* Bump aiortc to 1.6.0
* Added teleoprtc_repo as submodule, and linked its source dir
* Add init file to webrtc module
* Handle aiortc warnings
* Ignore deprecation warnings
* Ignore resource warning too
* Ignore the warnings
* find free port for test_webrtcd
* Start process inside the test case
* random sleep test
* test 2
* Test endpoint function instead
* Update comment
* Add system/webrtc to release
* default arguments for body fields
* Add teleoprtc to release
* Bump teleoprtc
* Exclude teleoprtc from static analysis
* Use separate event loop for stream session tests
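The offer/answer notes above correspond to roughly the following aiortc flow. This is a minimal sketch, not code from this change; `answer_offer` and its arguments are placeholder names.

import aiortc

async def answer_offer(pc: aiortc.RTCPeerConnection, offer_sdp: str, track: aiortc.MediaStreamTrack) -> str:
  # apply the remote offer first...
  await pc.setRemoteDescription(aiortc.RTCSessionDescription(sdp=offer_sdp, type="offer"))
  # ...and only then add local tracks (needed for multi-cam streaming, per the notes above)
  pc.addTrack(track)
  answer = await pc.createAnswer()
  await pc.setLocalDescription(answer)
  # send back pc.localDescription rather than the raw `answer`; the two can differ
  return pc.localDescription.sdp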
111 lines
3.8 KiB
Python
import asyncio
import io
from typing import Optional, List, Tuple

import aiortc
import av
import numpy as np
import pyaudio


class AudioInputStreamTrack(aiortc.mediastreams.AudioStreamTrack):
  """Microphone capture track: reads PCM chunks from PyAudio and yields them as av.AudioFrame objects."""

  PYAUDIO_TO_AV_FORMAT_MAP = {
    pyaudio.paUInt8: 'u8',
    pyaudio.paInt16: 's16',
    pyaudio.paInt24: 's24',
    pyaudio.paInt32: 's32',
    pyaudio.paFloat32: 'flt',
  }

  def __init__(self, audio_format: int = pyaudio.paInt16, rate: int = 16000, channels: int = 1, packet_time: float = 0.020, device_index: Optional[int] = None):
    super().__init__()

    self.p = pyaudio.PyAudio()
    chunk_size = int(packet_time * rate)
    self.stream = self.p.open(format=audio_format,
                              channels=channels,
                              rate=rate,
                              frames_per_buffer=chunk_size,
                              input=True,
                              input_device_index=device_index)
    self.format = audio_format
    self.rate = rate
    self.channels = channels
    self.packet_time = packet_time
    self.chunk_size = chunk_size
    self.pts = 0

  async def recv(self):
    # read one packet_time worth of samples from the microphone
    mic_data = self.stream.read(self.chunk_size)
    mic_array = np.frombuffer(mic_data, dtype=np.int16)  # interpreted as 16-bit samples, matching the default paInt16 format
    mic_array = np.expand_dims(mic_array, axis=0)
    layout = 'stereo' if self.channels > 1 else 'mono'
    frame = av.AudioFrame.from_ndarray(mic_array, format=self.PYAUDIO_TO_AV_FORMAT_MAP[self.format], layout=layout)
    frame.rate = self.rate
    frame.pts = self.pts
    self.pts += frame.samples

    return frame
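
# Usage sketch (illustrative, not part of the original module): inside a coroutine,
# the microphone track plugs into an aiortc peer connection like any other track, e.g.
#
#   pc = aiortc.RTCPeerConnection()
#   pc.addTrack(AudioInputStreamTrack(rate=16000, channels=1))
#   offer = await pc.createOffer()
#   await pc.setLocalDescription(offer)
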
class AudioOutputSpeaker:
  """Plays incoming aiortc audio tracks through a PyAudio output stream, buffered through a shared BytesIO."""

  def __init__(self, audio_format: int = pyaudio.paInt16, rate: int = 48000, channels: int = 2, packet_time: float = 0.2, device_index: Optional[int] = None):
    chunk_size = int(packet_time * rate)
    self.p = pyaudio.PyAudio()
    self.buffer = io.BytesIO()
    self.channels = channels
    self.stream = self.p.open(format=audio_format,
                              channels=channels,
                              rate=rate,
                              frames_per_buffer=chunk_size,
                              output=True,
                              output_device_index=device_index,
                              stream_callback=self.__pyaudio_callback)
    self.tracks_and_tasks: List[Tuple[aiortc.MediaStreamTrack, Optional[asyncio.Task]]] = []

  def __pyaudio_callback(self, in_data, frame_count, time_info, status):
    if self.buffer.getbuffer().nbytes < frame_count * self.channels * 2:
      # not enough buffered audio yet: output silence
      buff = b'\x00\x00' * frame_count * self.channels
    elif self.buffer.getbuffer().nbytes > 115200:  # 3x the usual read size
      # buffer has grown too large: read two callbacks' worth and keep only the first to catch up
      self.buffer.seek(0)
      buff = self.buffer.read(frame_count * self.channels * 4)
      buff = buff[:frame_count * self.channels * 2]
      self.buffer.seek(2)
    else:
      self.buffer.seek(0)
      buff = self.buffer.read(frame_count * self.channels * 2)
      self.buffer.seek(2)
    return (buff, pyaudio.paContinue)

  async def __consume(self, track):
    while True:
      try:
        frame = await track.recv()
      except aiortc.MediaStreamError:
        return

      self.buffer.write(bytes(frame.planes[0]))

  def hasTrack(self, track: aiortc.MediaStreamTrack) -> bool:
    return any(t == track for t, _ in self.tracks_and_tasks)

  def addTrack(self, track: aiortc.MediaStreamTrack):
    if not self.hasTrack(track):
      self.tracks_and_tasks.append((track, None))

  def start(self):
    for index, (track, task) in enumerate(self.tracks_and_tasks):
      if task is None:
        self.tracks_and_tasks[index] = (track, asyncio.create_task(self.__consume(track)))

  def stop(self):
    for _, task in self.tracks_and_tasks:
      if task is not None:
        task.cancel()

    self.tracks_and_tasks = []
    self.stream.stop_stream()
    self.stream.close()
    self.p.terminate()
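
# Usage sketch (illustrative, assumes a connected aiortc.RTCPeerConnection named `pc`):
# route every incoming remote audio track to the speaker and start playback.
#
#   speaker = AudioOutputSpeaker()
#
#   @pc.on("track")
#   def on_track(track):
#     if track.kind == "audio":
#       speaker.addTrack(track)
#       speaker.start()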