From b3c1c31ccb912962b453e04e36e40ac4153aa97c Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Wed, 6 Mar 2024 13:58:42 -0800 Subject: [PATCH 01/11] encoder bitrate change based on twcc --- samples/Common.c | 14 ++++----- samples/Samples.h | 4 +++ samples/kvsWebRTCClientMasterGstSample.c | 36 ++++++++++++++++++------ 3 files changed, 38 insertions(+), 16 deletions(-) diff --git a/samples/Common.c b/samples/Common.c index 64e9739180..64cbf0ee15 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -554,7 +554,7 @@ STATUS createSampleStreamingSession(PSampleConfiguration pSampleConfiguration, P ATOMIC_STORE_BOOL(&pSampleStreamingSession->terminateFlag, FALSE); ATOMIC_STORE_BOOL(&pSampleStreamingSession->candidateGatheringDone, FALSE); - + pSampleStreamingSession->newVideoBitrate = 0; pSampleStreamingSession->peerConnectionMetrics.peerConnectionStats.peerConnectionStartTime = GETTIME() / HUNDREDS_OF_NANOS_IN_A_MILLISECOND; // Flag to enable SDK to calculate selected ice server, local, remote and candidate pair stats. 
pSampleConfiguration->enableIceStats = FALSE; @@ -709,23 +709,23 @@ VOID sampleBandwidthEstimationHandler(UINT64 customData, DOUBLE maximumBitrate) VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt, UINT64 duration) { - UNUSED_PARAM(customData); UNUSED_PARAM(duration); UNUSED_PARAM(rxBytes); UNUSED_PARAM(txBytes); UINT32 lostPacketsCnt = txPacketsCnt - rxPacketsCnt; UINT32 percentLost = lostPacketsCnt * 100 / txPacketsCnt; - UINT32 bitrate = 1024; + PSampleStreamingSession pSampleStreamingSession = (PSampleStreamingSession) customData; + UINT64 bitrate; if (percentLost < 2) { // increase encoder bitrate by 2 percent - bitrate *= 1.02f; + bitrate = pSampleStreamingSession->currentVideoBitrate * 1.02f; } else if (percentLost > 5) { // decrease encoder bitrate by packet loss percent - bitrate *= (1.0f - percentLost / 100.0f); + bitrate = pSampleStreamingSession->currentVideoBitrate * (1.0f - percentLost / 100.0f); } // otherwise keep bitrate the same - - DLOGV("received sender bitrate estimation: suggested bitrate %u sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", bitrate, + pSampleStreamingSession->newVideoBitrate = bitrate; + DLOGI("received sender bitrate estimation: suggested bitrate %u kbps sent: %u bytes %u packets received: %u bytes %u packets in %lu msec, ", bitrate, txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / 10000ULL); } diff --git a/samples/Samples.h b/samples/Samples.h index 2ab3d41f20..78bed9dbe1 100644 --- a/samples/Samples.h +++ b/samples/Samples.h @@ -208,6 +208,10 @@ struct __SampleStreamingSession { CHAR pPeerConnectionMetricsMessage[MAX_PEER_CONNECTION_METRICS_MESSAGE_SIZE]; CHAR pSignalingClientMetricsMessage[MAX_SIGNALING_CLIENT_METRICS_MESSAGE_SIZE]; CHAR pIceAgentMetricsMessage[MAX_ICE_AGENT_METRICS_MESSAGE_SIZE]; + UINT64 currentVideoBitrate; + UINT64 newVideoBitrate; + UINT64 newAudioBitrate; + UINT64 currentAudioBitrate; }; 
// TODO this should all be in a higher webrtccontext layer above PeerConnection diff --git a/samples/kvsWebRTCClientMasterGstSample.c b/samples/kvsWebRTCClientMasterGstSample.c index 4cbe8e2b9f..aae0e83582 100644 --- a/samples/kvsWebRTCClientMasterGstSample.c +++ b/samples/kvsWebRTCClientMasterGstSample.c @@ -3,7 +3,7 @@ #include extern PSampleConfiguration gSampleConfiguration; - +GstElement *pipeline = NULL; // #define VERBOSE GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) @@ -62,6 +62,26 @@ GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) for (i = 0; i < pSampleConfiguration->streamingSessionCount; ++i) { pSampleStreamingSession = pSampleConfiguration->sampleStreamingSessionList[i]; frame.index = (UINT32) ATOMIC_INCREMENT(&pSampleStreamingSession->frameIndex); + if(pipeline != NULL) { + GstElement *encoder = gst_bin_get_by_name(GST_BIN(pipeline), "my_encoder"); + if(encoder != NULL) { + guint bitrate; + g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL); + pSampleStreamingSession->currentVideoBitrate = (UINT64) bitrate; + DLOGI("Current encoder bitrate: %u kbps", pSampleStreamingSession->currentVideoBitrate); + if(pSampleStreamingSession->newVideoBitrate != 0) { + DLOGI("New bitrate: %d", pSampleStreamingSession->newVideoBitrate); + bitrate = (guint) (pSampleStreamingSession->newVideoBitrate); + pSampleStreamingSession->newVideoBitrate = 0; + g_object_set(G_OBJECT(encoder), "bitrate", bitrate, NULL); + } + + } else { + DLOGI("Encoder not found in pipeline"); + } + } else { + DLOGI("pipeline is null"); + } if (trackid == DEFAULT_AUDIO_TRACK_ID) { pRtcRtpTransceiver = pSampleStreamingSession->pAudioRtcRtpTransceiver; @@ -120,7 +140,7 @@ GstFlowReturn on_new_sample_audio(GstElement* sink, gpointer data) PVOID sendGstreamerAudioVideo(PVOID args) { STATUS retStatus = STATUS_SUCCESS; - GstElement *appsinkVideo = NULL, *appsinkAudio = NULL, *pipeline = NULL; + GstElement *appsinkVideo = NULL, 
*appsinkAudio = NULL; GstBus* bus; GstMessage* msg; GError* error = NULL; @@ -177,10 +197,9 @@ PVOID sendGstreamerAudioVideo(PVOID args) &error); break; } - case DEVICE_SOURCE: { - pipeline = gst_parse_launch("autovideosrc ! queue ! videoconvert ! videoscale ! video/x-raw,width=1280,height=720 ! " - "videorate ! video/x-raw,framerate=25/1 ! " - "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + case DEVICE_SOURCE: {xw + pipeline = gst_parse_launch("autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " "emit-signals=TRUE name=appsink-video", &error); @@ -221,9 +240,8 @@ PVOID sendGstreamerAudioVideo(PVOID args) } case DEVICE_SOURCE: { pipeline = - gst_parse_launch("autovideosrc ! queue ! videoconvert ! videoscale ! video/x-raw,width=1280,height=720 ! " - "videorate ! video/x-raw,framerate=25/1 ! " - "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + gst_parse_launch("autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE emit-signals=TRUE " "name=appsink-video autoaudiosrc ! " "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc ! 
" From 1a3012ee2e9ca7ff5826c186fac44d2774f1184e Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Wed, 13 Mar 2024 14:24:56 -0700 Subject: [PATCH 02/11] Change to 5% inc and dec --- samples/Common.c | 14 ++++++++++---- samples/kvsWebRTCClientMasterGstSample.c | 7 +++---- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/samples/Common.c b/samples/Common.c index 64cbf0ee15..c29065f653 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -716,12 +716,18 @@ VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U UINT32 percentLost = lostPacketsCnt * 100 / txPacketsCnt; PSampleStreamingSession pSampleStreamingSession = (PSampleStreamingSession) customData; UINT64 bitrate; - if (percentLost < 2) { + if (percentLost <= 5) { // increase encoder bitrate by 2 percent - bitrate = pSampleStreamingSession->currentVideoBitrate * 1.02f; + bitrate = pSampleStreamingSession->currentVideoBitrate * 1.05f; } else if (percentLost > 5) { - // decrease encoder bitrate by packet loss percent - bitrate = pSampleStreamingSession->currentVideoBitrate * (1.0f - percentLost / 100.0f); + if(pSampleStreamingSession->currentVideoBitrate >= 1 * 1024) { + // decrease encoder bitrate by packet loss percent + bitrate = pSampleStreamingSession->currentVideoBitrate * 0.95f; + } + else { + DLOGW("Bitrate already too low...maintaining..expect frame packet drops and choppy playback"); + bitrate = pSampleStreamingSession->currentVideoBitrate; + } } // otherwise keep bitrate the same pSampleStreamingSession->newVideoBitrate = bitrate; diff --git a/samples/kvsWebRTCClientMasterGstSample.c b/samples/kvsWebRTCClientMasterGstSample.c index aae0e83582..ce6c3eea0e 100644 --- a/samples/kvsWebRTCClientMasterGstSample.c +++ b/samples/kvsWebRTCClientMasterGstSample.c @@ -189,9 +189,8 @@ PVOID sendGstreamerAudioVideo(PVOID args) switch (pSampleConfiguration->srcType) { case TEST_SOURCE: { pipeline = - gst_parse_launch("videotestsrc is-live=TRUE ! queue ! videoconvert ! 
videoscale ! video/x-raw,width=1280,height=720 ! " - "videorate ! video/x-raw,framerate=25/1 ! " - "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + gst_parse_launch("videotestsrc is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE emit-signals=TRUE " "name=appsink-video", &error); @@ -230,7 +229,7 @@ PVOID sendGstreamerAudioVideo(PVOID args) case TEST_SOURCE: { pipeline = gst_parse_launch("videotestsrc is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " - "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " "emit-signals=TRUE name=appsink-video audiotestsrc is-live=TRUE ! " "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc ! 
" From b318f92dd9fb2d5b7fe48e7daf09f75d0df1e359 Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Thu, 21 Mar 2024 12:58:47 -0700 Subject: [PATCH 03/11] modify percentages --- samples/Common.c | 12 +++++++----- samples/kvsWebRTCClientMasterGstSample.c | 18 ++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/samples/Common.c b/samples/Common.c index c29065f653..73db3907be 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -414,13 +414,12 @@ STATUS initializePeerConnection(PSampleConfiguration pSampleConfiguration, PRtcP configuration.kvsRtcConfiguration.iceSetInterfaceFilterFunc = NULL; // disable TWCC - configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = TRUE; + configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = FALSE; // Set the ICE mode explicitly configuration.iceTransportPolicy = ICE_TRANSPORT_POLICY_ALL; configuration.kvsRtcConfiguration.enableIceStats = pSampleConfiguration->enableIceStats; - configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = TRUE; // Set the STUN server PCHAR pKinesisVideoStunUrlPostFix = KINESIS_VIDEO_STUN_URL_POSTFIX; // If region is in CN, add CN region uri postfix @@ -716,20 +715,23 @@ VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U UINT32 percentLost = lostPacketsCnt * 100 / txPacketsCnt; PSampleStreamingSession pSampleStreamingSession = (PSampleStreamingSession) customData; UINT64 bitrate; + DLOGI("Percent lost: %d", percentLost); if (percentLost <= 5) { - // increase encoder bitrate by 2 percent + // increase encoder bitrate by 5 percent bitrate = pSampleStreamingSession->currentVideoBitrate * 1.05f; } else if (percentLost > 5) { if(pSampleStreamingSession->currentVideoBitrate >= 1 * 1024) { // decrease encoder bitrate by packet loss percent - bitrate = pSampleStreamingSession->currentVideoBitrate * 0.95f; + bitrate = pSampleStreamingSession->currentVideoBitrate * (1.0f - percentLost/100.0f); } 
else { DLOGW("Bitrate already too low...maintaining..expect frame packet drops and choppy playback"); bitrate = pSampleStreamingSession->currentVideoBitrate; } } - // otherwise keep bitrate the same + if(bitrate > 2048000) { + bitrate = pSampleStreamingSession->currentVideoBitrate; + } pSampleStreamingSession->newVideoBitrate = bitrate; DLOGI("received sender bitrate estimation: suggested bitrate %u kbps sent: %u bytes %u packets received: %u bytes %u packets in %lu msec, ", bitrate, txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / 10000ULL); diff --git a/samples/kvsWebRTCClientMasterGstSample.c b/samples/kvsWebRTCClientMasterGstSample.c index ce6c3eea0e..a3685d14ea 100644 --- a/samples/kvsWebRTCClientMasterGstSample.c +++ b/samples/kvsWebRTCClientMasterGstSample.c @@ -68,9 +68,7 @@ GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) guint bitrate; g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL); pSampleStreamingSession->currentVideoBitrate = (UINT64) bitrate; - DLOGI("Current encoder bitrate: %u kbps", pSampleStreamingSession->currentVideoBitrate); if(pSampleStreamingSession->newVideoBitrate != 0) { - DLOGI("New bitrate: %d", pSampleStreamingSession->newVideoBitrate); bitrate = (guint) (pSampleStreamingSession->newVideoBitrate); pSampleStreamingSession->newVideoBitrate = 0; g_object_set(G_OBJECT(encoder), "bitrate", bitrate, NULL); @@ -190,7 +188,7 @@ PVOID sendGstreamerAudioVideo(PVOID args) case TEST_SOURCE: { pipeline = gst_parse_launch("videotestsrc is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! 
appsink sync=TRUE emit-signals=TRUE " "name=appsink-video", &error); @@ -198,7 +196,7 @@ PVOID sendGstreamerAudioVideo(PVOID args) } case DEVICE_SOURCE: {xw pipeline = gst_parse_launch("autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " "emit-signals=TRUE name=appsink-video", &error); @@ -208,7 +206,7 @@ PVOID sendGstreamerAudioVideo(PVOID args) UINT16 stringOutcome = snprintf(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT, "uridecodebin uri=%s ! " "videoconvert ! " - "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! " "appsink sync=TRUE emit-signals=TRUE name=appsink-video ", pSampleConfiguration->rtspUri); @@ -228,8 +226,8 @@ PVOID sendGstreamerAudioVideo(PVOID args) switch (pSampleConfiguration->srcType) { case TEST_SOURCE: { pipeline = - gst_parse_launch("videotestsrc is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + gst_parse_launch("videotestsrc pattern=ball is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " "emit-signals=TRUE name=appsink-video audiotestsrc is-live=TRUE ! 
" "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc ! " @@ -240,7 +238,7 @@ PVOID sendGstreamerAudioVideo(PVOID args) case DEVICE_SOURCE: { pipeline = gst_parse_launch("autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE emit-signals=TRUE " "name=appsink-video autoaudiosrc ! " "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc ! " @@ -251,7 +249,7 @@ PVOID sendGstreamerAudioVideo(PVOID args) case RTSP_SOURCE: { UINT16 stringOutcome = snprintf(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT, "uridecodebin uri=%s name=src ! videoconvert ! " - "x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! " "appsink sync=TRUE emit-signals=TRUE name=appsink-video " "src. ! audioconvert ! 
" @@ -300,8 +298,8 @@ PVOID sendGstreamerAudioVideo(PVOID args) if (bus != NULL) { gst_object_unref(bus); } - gst_element_set_state(pipeline, GST_STATE_NULL); if (pipeline != NULL) { + gst_element_set_state(pipeline, GST_STATE_NULL); gst_object_unref(pipeline); } if (appsinkAudio != NULL) { From 9b9e520114f1540879316b7928e1d7da86767923 Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Thu, 21 Mar 2024 14:02:12 -0700 Subject: [PATCH 04/11] ema based calc --- samples/Common.c | 57 +++++++++++++----------- samples/Samples.h | 19 ++++++-- samples/kvsWebRTCClientMasterGstSample.c | 8 ++-- 3 files changed, 50 insertions(+), 34 deletions(-) diff --git a/samples/Common.c b/samples/Common.c index 73db3907be..f98b79b3c8 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -553,7 +553,7 @@ STATUS createSampleStreamingSession(PSampleConfiguration pSampleConfiguration, P ATOMIC_STORE_BOOL(&pSampleStreamingSession->terminateFlag, FALSE); ATOMIC_STORE_BOOL(&pSampleStreamingSession->candidateGatheringDone, FALSE); - pSampleStreamingSession->newVideoBitrate = 0; + MEMSET(&pSampleStreamingSession->twccMetadata, 0x00, SIZEOF(TwccMetadata)); pSampleStreamingSession->peerConnectionMetrics.peerConnectionStats.peerConnectionStartTime = GETTIME() / HUNDREDS_OF_NANOS_IN_A_MILLISECOND; // Flag to enable SDK to calculate selected ice server, local, remote and candidate pair stats. 
pSampleConfiguration->enableIceStats = FALSE; @@ -705,35 +705,40 @@ VOID sampleBandwidthEstimationHandler(UINT64 customData, DOUBLE maximumBitrate) DLOGV("received bitrate suggestion: %f", maximumBitrate); } -VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt, - UINT64 duration) -{ +void sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt, UINT64 duration) { UNUSED_PARAM(duration); - UNUSED_PARAM(rxBytes); - UNUSED_PARAM(txBytes); UINT32 lostPacketsCnt = txPacketsCnt - rxPacketsCnt; - UINT32 percentLost = lostPacketsCnt * 100 / txPacketsCnt; - PSampleStreamingSession pSampleStreamingSession = (PSampleStreamingSession) customData; - UINT64 bitrate; - DLOGI("Percent lost: %d", percentLost); - if (percentLost <= 5) { - // increase encoder bitrate by 5 percent - bitrate = pSampleStreamingSession->currentVideoBitrate * 1.05f; - } else if (percentLost > 5) { - if(pSampleStreamingSession->currentVideoBitrate >= 1 * 1024) { - // decrease encoder bitrate by packet loss percent - bitrate = pSampleStreamingSession->currentVideoBitrate * (1.0f - percentLost/100.0f); - } - else { - DLOGW("Bitrate already too low...maintaining..expect frame packet drops and choppy playback"); - bitrate = pSampleStreamingSession->currentVideoBitrate; - } + UINT8 percentLost = (txPacketsCnt > 0) ? 
(lostPacketsCnt * 100 / txPacketsCnt) : 0; + + SampleStreamingSession* pSampleStreamingSession = (SampleStreamingSession*) customData; + + // Calculate smoothed packet loss + DOUBLE currentPacketLoss = (DOUBLE) percentLost; + EMA_ACCUMULATOR_GET_NEXT(pSampleStreamingSession->twccMetadata.averagePacketLoss, currentPacketLoss); + + UINT64 currentTimeMs = GETTIME(); + UINT64 timeDiff = currentTimeMs - pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs; + if (timeDiff < ADJUSTMENT_INTERVAL_MS) { + // Too soon for another adjustment + DLOGI("Too soon"); + return; } - if(bitrate > 2048000) { - bitrate = pSampleStreamingSession->currentVideoBitrate; + + UINT64 bitrate = pSampleStreamingSession->twccMetadata.currentVideoBitrate; + if (pSampleStreamingSession->twccMetadata.averagePacketLoss <= 5) { + // increase encoder bitrate by 5 percent with a cap at MAX_BITRATE + bitrate = (UINT64) MIN(bitrate * 1.05f, MAX_BITRATE); + } else { + // decrease encoder bitrate by average packet loss percent, with a cap at MIN_BITRATE + bitrate = (UINT64) MAX(bitrate * (1.0f - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0f), MIN_BITRATE); } - pSampleStreamingSession->newVideoBitrate = bitrate; - DLOGI("received sender bitrate estimation: suggested bitrate %u kbps sent: %u bytes %u packets received: %u bytes %u packets in %lu msec, ", bitrate, + + // Update the session with the new bitrate and adjustment time + pSampleStreamingSession->twccMetadata.newVideoBitrate = bitrate; + pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs = currentTimeMs; + + DLOGI("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, ADJUSTMENT_INTERVAL_MS, timeDiff); + DLOGI("received sender bitrate estimation: suggested bitrate %u sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", bitrate, txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / 10000ULL); } diff --git a/samples/Samples.h 
b/samples/Samples.h index 78bed9dbe1..ae0773f0e9 100644 --- a/samples/Samples.h +++ b/samples/Samples.h @@ -83,6 +83,11 @@ extern "C" { #define MAX_SIGNALING_CLIENT_METRICS_MESSAGE_SIZE 736 // strlen(SIGNALING_CLIENT_METRICS_JSON_TEMPLATE) + 20 * 10 #define MAX_ICE_AGENT_METRICS_MESSAGE_SIZE 113 // strlen(ICE_AGENT_METRICS_JSON_TEMPLATE) + 20 * 2 + +#define ADJUSTMENT_INTERVAL_MS 5 * HUNDREDS_OF_NANOS_IN_A_SECOND +#define MIN_BITRATE 512 +#define MAX_BITRATE 2048000 + typedef enum { SAMPLE_STREAMING_VIDEO_ONLY, SAMPLE_STREAMING_AUDIO_VIDEO, @@ -179,6 +184,15 @@ typedef struct { typedef VOID (*StreamSessionShutdownCallback)(UINT64, PSampleStreamingSession); +typedef struct { + UINT64 lastAdjustmentTimeMs; + UINT64 currentVideoBitrate; + UINT64 newVideoBitrate; + UINT64 currentAudioBitrate; + UINT64 newAudioBitrate; + float averagePacketLoss; +} TwccMetadata, *PTwccMetadata; + struct __SampleStreamingSession { volatile ATOMIC_BOOL terminateFlag; volatile ATOMIC_BOOL candidateGatheringDone; @@ -208,10 +222,7 @@ struct __SampleStreamingSession { CHAR pPeerConnectionMetricsMessage[MAX_PEER_CONNECTION_METRICS_MESSAGE_SIZE]; CHAR pSignalingClientMetricsMessage[MAX_SIGNALING_CLIENT_METRICS_MESSAGE_SIZE]; CHAR pIceAgentMetricsMessage[MAX_ICE_AGENT_METRICS_MESSAGE_SIZE]; - UINT64 currentVideoBitrate; - UINT64 newVideoBitrate; - UINT64 newAudioBitrate; - UINT64 currentAudioBitrate; + TwccMetadata twccMetadata; }; // TODO this should all be in a higher webrtccontext layer above PeerConnection diff --git a/samples/kvsWebRTCClientMasterGstSample.c b/samples/kvsWebRTCClientMasterGstSample.c index a3685d14ea..86dbc92ae6 100644 --- a/samples/kvsWebRTCClientMasterGstSample.c +++ b/samples/kvsWebRTCClientMasterGstSample.c @@ -67,10 +67,10 @@ GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) if(encoder != NULL) { guint bitrate; g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL); - pSampleStreamingSession->currentVideoBitrate = (UINT64) bitrate; - 
if(pSampleStreamingSession->newVideoBitrate != 0) { - bitrate = (guint) (pSampleStreamingSession->newVideoBitrate); - pSampleStreamingSession->newVideoBitrate = 0; + pSampleStreamingSession->twccMetadata.currentVideoBitrate = (UINT64) bitrate; + if(pSampleStreamingSession->twccMetadata.newVideoBitrate != 0) { + bitrate = (guint) (pSampleStreamingSession->twccMetadata.newVideoBitrate); + pSampleStreamingSession->twccMetadata.newVideoBitrate = 0; g_object_set(G_OBJECT(encoder), "bitrate", bitrate, NULL); } From 3adc56b63b5b105489a72f2e10006d219a74d9c9 Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Thu, 21 Mar 2024 14:46:58 -0700 Subject: [PATCH 05/11] EMa fix --- samples/Common.c | 13 +++-- samples/Samples.h | 7 +-- samples/kvsWebRTCClientMasterGstSample.c | 73 ++++++++++++------------ 3 files changed, 49 insertions(+), 44 deletions(-) diff --git a/samples/Common.c b/samples/Common.c index f98b79b3c8..bcc5712b3b 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -705,7 +705,9 @@ VOID sampleBandwidthEstimationHandler(UINT64 customData, DOUBLE maximumBitrate) DLOGV("received bitrate suggestion: %f", maximumBitrate); } -void sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt, UINT64 duration) { +void sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt, + UINT64 duration) +{ UNUSED_PARAM(duration); UINT32 lostPacketsCnt = txPacketsCnt - rxPacketsCnt; UINT8 percentLost = (txPacketsCnt > 0) ? 
(lostPacketsCnt * 100 / txPacketsCnt) : 0; @@ -714,13 +716,13 @@ void sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U // Calculate smoothed packet loss DOUBLE currentPacketLoss = (DOUBLE) percentLost; - EMA_ACCUMULATOR_GET_NEXT(pSampleStreamingSession->twccMetadata.averagePacketLoss, currentPacketLoss); + pSampleStreamingSession->twccMetadata.averagePacketLoss = + EMA_ACCUMULATOR_GET_NEXT(pSampleStreamingSession->twccMetadata.averagePacketLoss, currentPacketLoss); UINT64 currentTimeMs = GETTIME(); UINT64 timeDiff = currentTimeMs - pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs; - if (timeDiff < ADJUSTMENT_INTERVAL_MS) { + if (timeDiff < ADJUSTMENT_INTERVAL_SECONDS) { // Too soon for another adjustment - DLOGI("Too soon"); return; } @@ -737,7 +739,8 @@ void sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U pSampleStreamingSession->twccMetadata.newVideoBitrate = bitrate; pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs = currentTimeMs; - DLOGI("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, ADJUSTMENT_INTERVAL_MS, timeDiff); + DLOGI("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, + ADJUSTMENT_INTERVAL_SECONDS, timeDiff); DLOGI("received sender bitrate estimation: suggested bitrate %u sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", bitrate, txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / 10000ULL); } diff --git a/samples/Samples.h b/samples/Samples.h index ae0773f0e9..48ef431d32 100644 --- a/samples/Samples.h +++ b/samples/Samples.h @@ -83,10 +83,9 @@ extern "C" { #define MAX_SIGNALING_CLIENT_METRICS_MESSAGE_SIZE 736 // strlen(SIGNALING_CLIENT_METRICS_JSON_TEMPLATE) + 20 * 10 #define MAX_ICE_AGENT_METRICS_MESSAGE_SIZE 113 // strlen(ICE_AGENT_METRICS_JSON_TEMPLATE) + 20 * 2 - -#define ADJUSTMENT_INTERVAL_MS 5 * 
HUNDREDS_OF_NANOS_IN_A_SECOND -#define MIN_BITRATE 512 -#define MAX_BITRATE 2048000 +#define ADJUSTMENT_INTERVAL_SECONDS 1 * HUNDREDS_OF_NANOS_IN_A_SECOND +#define MIN_BITRATE 512 +#define MAX_BITRATE 2048000 typedef enum { SAMPLE_STREAMING_VIDEO_ONLY, diff --git a/samples/kvsWebRTCClientMasterGstSample.c b/samples/kvsWebRTCClientMasterGstSample.c index 86dbc92ae6..f7499829d0 100644 --- a/samples/kvsWebRTCClientMasterGstSample.c +++ b/samples/kvsWebRTCClientMasterGstSample.c @@ -3,7 +3,7 @@ #include extern PSampleConfiguration gSampleConfiguration; -GstElement *pipeline = NULL; +GstElement* pipeline = NULL; // #define VERBOSE GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) @@ -62,13 +62,13 @@ GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) for (i = 0; i < pSampleConfiguration->streamingSessionCount; ++i) { pSampleStreamingSession = pSampleConfiguration->sampleStreamingSessionList[i]; frame.index = (UINT32) ATOMIC_INCREMENT(&pSampleStreamingSession->frameIndex); - if(pipeline != NULL) { - GstElement *encoder = gst_bin_get_by_name(GST_BIN(pipeline), "my_encoder"); - if(encoder != NULL) { + if (pipeline != NULL) { + GstElement* encoder = gst_bin_get_by_name(GST_BIN(pipeline), "my_encoder"); + if (encoder != NULL) { guint bitrate; g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL); pSampleStreamingSession->twccMetadata.currentVideoBitrate = (UINT64) bitrate; - if(pSampleStreamingSession->twccMetadata.newVideoBitrate != 0) { + if (pSampleStreamingSession->twccMetadata.newVideoBitrate != 0) { bitrate = (guint) (pSampleStreamingSession->twccMetadata.newVideoBitrate); pSampleStreamingSession->twccMetadata.newVideoBitrate = 0; g_object_set(G_OBJECT(encoder), "bitrate", bitrate, NULL); @@ -194,22 +194,24 @@ PVOID sendGstreamerAudioVideo(PVOID args) &error); break; } - case DEVICE_SOURCE: {xw - pipeline = gst_parse_launch("autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! 
" - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " - "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " - "emit-signals=TRUE name=appsink-video", - &error); + case DEVICE_SOURCE: { + pipeline = + gst_parse_launch("autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " + "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " + "emit-signals=TRUE name=appsink-video", + &error); break; } case RTSP_SOURCE: { - UINT16 stringOutcome = snprintf(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT, - "uridecodebin uri=%s ! " - "videoconvert ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " - "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! " - "appsink sync=TRUE emit-signals=TRUE name=appsink-video ", - pSampleConfiguration->rtspUri); + UINT16 stringOutcome = + snprintf(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT, + "uridecodebin uri=%s ! " + "videoconvert ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " + "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! " + "appsink sync=TRUE emit-signals=TRUE name=appsink-video ", + pSampleConfiguration->rtspUri); if (stringOutcome > RTSP_PIPELINE_MAX_CHAR_COUNT) { DLOGE("[KVS GStreamer Master] ERROR: rtsp uri entered exceeds maximum allowed length set by RTSP_PIPELINE_MAX_CHAR_COUNT"); @@ -225,14 +227,14 @@ PVOID sendGstreamerAudioVideo(PVOID args) case SAMPLE_STREAMING_AUDIO_VIDEO: switch (pSampleConfiguration->srcType) { case TEST_SOURCE: { - pipeline = - gst_parse_launch("videotestsrc pattern=ball is-live=TRUE ! queue ! videoconvert ! 
video/x-raw,width=1280,height=720,framerate=25/1 ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " - "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " - "emit-signals=TRUE name=appsink-video audiotestsrc is-live=TRUE ! " - "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc ! " - "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio", - &error); + pipeline = gst_parse_launch( + "videotestsrc pattern=ball is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " + "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " + "emit-signals=TRUE name=appsink-video audiotestsrc is-live=TRUE ! " + "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc ! " + "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio", + &error); break; } case DEVICE_SOURCE: { @@ -247,15 +249,16 @@ PVOID sendGstreamerAudioVideo(PVOID args) break; } case RTSP_SOURCE: { - UINT16 stringOutcome = snprintf(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT, - "uridecodebin uri=%s name=src ! videoconvert ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " - "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! " - "appsink sync=TRUE emit-signals=TRUE name=appsink-video " - "src. ! audioconvert ! " - "audioresample ! opusenc ! audio/x-opus,rate=48000,channels=2 ! queue ! " - "appsink sync=TRUE emit-signals=TRUE name=appsink-audio", - pSampleConfiguration->rtspUri); + UINT16 stringOutcome = + snprintf(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT, + "uridecodebin uri=%s name=src ! videoconvert ! 
" + "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " + "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! " + "appsink sync=TRUE emit-signals=TRUE name=appsink-video " + "src. ! audioconvert ! " + "audioresample ! opusenc ! audio/x-opus,rate=48000,channels=2 ! queue ! " + "appsink sync=TRUE emit-signals=TRUE name=appsink-audio", + pSampleConfiguration->rtspUri); if (stringOutcome > RTSP_PIPELINE_MAX_CHAR_COUNT) { DLOGE("[KVS GStreamer Master] ERROR: rtsp uri entered exceeds maximum allowed length set by RTSP_PIPELINE_MAX_CHAR_COUNT"); From 6b20b77ab9205d355d6facea19371a0f80214e7b Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Mon, 25 Mar 2024 16:53:19 -0700 Subject: [PATCH 06/11] Nits --- samples/Common.c | 63 ++++++--- samples/Samples.h | 14 +- samples/kvsWebRTCClientMasterGstSample.c | 128 ++++++++++-------- .../kinesis/video/webrtcclient/Include.h | 3 +- 4 files changed, 127 insertions(+), 81 deletions(-) diff --git a/samples/Common.c b/samples/Common.c index bcc5712b3b..e499f241e5 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -414,7 +414,7 @@ STATUS initializePeerConnection(PSampleConfiguration pSampleConfiguration, PRtcP configuration.kvsRtcConfiguration.iceSetInterfaceFilterFunc = NULL; // disable TWCC - configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = FALSE; + configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = pSampleConfiguration->disableTwcc; // Set the ICE mode explicitly configuration.iceTransportPolicy = ICE_TRANSPORT_POLICY_ALL; @@ -557,6 +557,12 @@ STATUS createSampleStreamingSession(PSampleConfiguration pSampleConfiguration, P pSampleStreamingSession->peerConnectionMetrics.peerConnectionStats.peerConnectionStartTime = GETTIME() / HUNDREDS_OF_NANOS_IN_A_MILLISECOND; // Flag to enable SDK to calculate selected ice server, local, remote and candidate pair stats. 
pSampleConfiguration->enableIceStats = FALSE; + pSampleConfiguration->disableTwcc = FALSE; + + if (!pSampleConfiguration->disableTwcc) { + pSampleStreamingSession->twccMetadata.updateLock = MUTEX_CREATE(TRUE); + } + CHK_STATUS(initializePeerConnection(pSampleConfiguration, &pSampleStreamingSession->pPeerConnection)); CHK_STATUS(peerConnectionOnIceCandidate(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession, onIceCandidateHandler)); CHK_STATUS( @@ -655,6 +661,12 @@ STATUS freeSampleStreamingSession(PSampleStreamingSession* ppSampleStreamingSess } MUTEX_UNLOCK(pSampleConfiguration->sampleConfigurationObjLock); + if (!pSampleConfiguration->disableTwcc) { + if (IS_VALID_MUTEX_VALUE(pSampleStreamingSession->twccMetadata.updateLock)) { + MUTEX_FREE(pSampleStreamingSession->twccMetadata.updateLock); + } + } + CHK_LOG_ERR(closePeerConnection(pSampleStreamingSession->pPeerConnection)); CHK_LOG_ERR(freePeerConnection(&pSampleStreamingSession->pPeerConnection)); SAFE_MEMFREE(pSampleStreamingSession); @@ -705,44 +717,63 @@ VOID sampleBandwidthEstimationHandler(UINT64 customData, DOUBLE maximumBitrate) DLOGV("received bitrate suggestion: %f", maximumBitrate); } -void sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt, +VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt, UINT64 duration) { UNUSED_PARAM(duration); + UINT64 videoBitrate, audioBitrate; + UINT64 currentTimeMs, timeDiff; UINT32 lostPacketsCnt = txPacketsCnt - rxPacketsCnt; - UINT8 percentLost = (txPacketsCnt > 0) ? (lostPacketsCnt * 100 / txPacketsCnt) : 0; - + DOUBLE percentLost = (DOUBLE) ((txPacketsCnt > 0) ? 
(lostPacketsCnt * 100 / txPacketsCnt) : 0.0); SampleStreamingSession* pSampleStreamingSession = (SampleStreamingSession*) customData; - // Calculate smoothed packet loss - DOUBLE currentPacketLoss = (DOUBLE) percentLost; + if (pSampleStreamingSession == NULL) { + DLOGW("Invalid streaming session (NULL object)"); + return; + } + + // Calculate packet loss pSampleStreamingSession->twccMetadata.averagePacketLoss = - EMA_ACCUMULATOR_GET_NEXT(pSampleStreamingSession->twccMetadata.averagePacketLoss, currentPacketLoss); + EMA_ACCUMULATOR_GET_NEXT(pSampleStreamingSession->twccMetadata.averagePacketLoss, ((DOUBLE) percentLost)); - UINT64 currentTimeMs = GETTIME(); - UINT64 timeDiff = currentTimeMs - pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs; + currentTimeMs = GETTIME(); + timeDiff = currentTimeMs - pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs; if (timeDiff < ADJUSTMENT_INTERVAL_SECONDS) { // Too soon for another adjustment return; } - UINT64 bitrate = pSampleStreamingSession->twccMetadata.currentVideoBitrate; + MUTEX_LOCK(pSampleStreamingSession->twccMetadata.updateLock); + videoBitrate = pSampleStreamingSession->twccMetadata.currentVideoBitrate; + audioBitrate = pSampleStreamingSession->twccMetadata.currentAudioBitrate; + if (pSampleStreamingSession->twccMetadata.averagePacketLoss <= 5) { // increase encoder bitrate by 5 percent with a cap at MAX_BITRATE - bitrate = (UINT64) MIN(bitrate * 1.05f, MAX_BITRATE); + videoBitrate = (UINT64) MIN(videoBitrate * 1.05f, MAX_VIDEO_BITRATE_KBPS); } else { // decrease encoder bitrate by average packet loss percent, with a cap at MIN_BITRATE - bitrate = (UINT64) MAX(bitrate * (1.0f - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0f), MIN_BITRATE); + videoBitrate = (UINT64) MAX(videoBitrate * (1.0f - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0f), MIN_VIDEO_BITRATE_KBPS); + } + + if (pSampleStreamingSession->twccMetadata.averagePacketLoss <= 5) { + // increase encoder 
bitrate by 5 percent with a cap at MAX_BITRATE + audioBitrate = (UINT64) MIN(audioBitrate * 1.05f, MAX_AUDIO_BITRATE_BPS); + } else { + // decrease encoder bitrate by average packet loss percent, with a cap at MIN_BITRATE + audioBitrate = (UINT64) MAX(audioBitrate * (1.0f - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0f), MIN_AUDIO_BITRATE_BPS); } // Update the session with the new bitrate and adjustment time - pSampleStreamingSession->twccMetadata.newVideoBitrate = bitrate; + pSampleStreamingSession->twccMetadata.newVideoBitrate = videoBitrate; + pSampleStreamingSession->twccMetadata.newAudioBitrate = audioBitrate; + MUTEX_UNLOCK(pSampleStreamingSession->twccMetadata.updateLock); + pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs = currentTimeMs; - DLOGI("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, + DLOGV("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, ADJUSTMENT_INTERVAL_SECONDS, timeDiff); - DLOGI("received sender bitrate estimation: suggested bitrate %u sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", bitrate, - txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / 10000ULL); + DLOGV("Suggested video bitrate %u kbps, suggested audio bitrate: %u bps, sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", + videoBitrate, audioBitrate, txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / 10000ULL); } STATUS handleRemoteCandidate(PSampleStreamingSession pSampleStreamingSession, PSignalingMessage pSignalingMessage) diff --git a/samples/Samples.h b/samples/Samples.h index 48ef431d32..cf8f0cdd3e 100644 --- a/samples/Samples.h +++ b/samples/Samples.h @@ -84,8 +84,10 @@ extern "C" { #define MAX_ICE_AGENT_METRICS_MESSAGE_SIZE 113 // strlen(ICE_AGENT_METRICS_JSON_TEMPLATE) + 20 * 2 #define ADJUSTMENT_INTERVAL_SECONDS 1 * 
HUNDREDS_OF_NANOS_IN_A_SECOND -#define MIN_BITRATE 512 -#define MAX_BITRATE 2048000 +#define MIN_VIDEO_BITRATE_KBPS 512 // Unit kilobits/sec. Value could change based on codec. +#define MAX_VIDEO_BITRATE_KBPS 2048000 // Unit kilobits/sec. Value could change based on codec. +#define MIN_AUDIO_BITRATE_BPS 4000 // Unit bits/sec. Value could change based on codec. +#define MAX_AUDIO_BITRATE_BPS 650000 // Unit bits/sec. Value could change based on codec. typedef enum { SAMPLE_STREAMING_VIDEO_ONLY, @@ -164,6 +166,7 @@ typedef struct { PCHAR rtspUri; UINT32 logLevel; BOOL enableIceStats; + BOOL disableTwcc; } SampleConfiguration, *PSampleConfiguration; typedef struct { @@ -184,12 +187,13 @@ typedef struct { typedef VOID (*StreamSessionShutdownCallback)(UINT64, PSampleStreamingSession); typedef struct { + MUTEX updateLock; UINT64 lastAdjustmentTimeMs; UINT64 currentVideoBitrate; - UINT64 newVideoBitrate; UINT64 currentAudioBitrate; + UINT64 newVideoBitrate; UINT64 newAudioBitrate; - float averagePacketLoss; + DOUBLE averagePacketLoss; } TwccMetadata, *PTwccMetadata; struct __SampleStreamingSession { @@ -211,6 +215,7 @@ struct __SampleStreamingSession { UINT64 startUpLatency; RtcMetricsHistory rtcMetricsHistory; BOOL remoteCanTrickleIce; + TwccMetadata twccMetadata; // this is called when the SampleStreamingSession is being freed StreamSessionShutdownCallback shutdownCallback; @@ -221,7 +226,6 @@ struct __SampleStreamingSession { CHAR pPeerConnectionMetricsMessage[MAX_PEER_CONNECTION_METRICS_MESSAGE_SIZE]; CHAR pSignalingClientMetricsMessage[MAX_SIGNALING_CLIENT_METRICS_MESSAGE_SIZE]; CHAR pIceAgentMetricsMessage[MAX_ICE_AGENT_METRICS_MESSAGE_SIZE]; - TwccMetadata twccMetadata; }; // TODO this should all be in a higher webrtccontext layer above PeerConnection diff --git a/samples/kvsWebRTCClientMasterGstSample.c b/samples/kvsWebRTCClientMasterGstSample.c index f7499829d0..ca5d5c184b 100644 --- a/samples/kvsWebRTCClientMasterGstSample.c +++ 
b/samples/kvsWebRTCClientMasterGstSample.c @@ -3,9 +3,10 @@ #include extern PSampleConfiguration gSampleConfiguration; -GstElement* pipeline = NULL; // #define VERBOSE +GstElement* senderPipeline = NULL; + GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) { GstBuffer* buffer; @@ -22,6 +23,7 @@ GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) PSampleStreamingSession pSampleStreamingSession = NULL; PRtcRtpTransceiver pRtcRtpTransceiver = NULL; UINT32 i; + guint bitrate; CHK_ERR(pSampleConfiguration != NULL, STATUS_NULL_ARG, "NULL sample configuration"); @@ -62,32 +64,42 @@ GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) for (i = 0; i < pSampleConfiguration->streamingSessionCount; ++i) { pSampleStreamingSession = pSampleConfiguration->sampleStreamingSessionList[i]; frame.index = (UINT32) ATOMIC_INCREMENT(&pSampleStreamingSession->frameIndex); - if (pipeline != NULL) { - GstElement* encoder = gst_bin_get_by_name(GST_BIN(pipeline), "my_encoder"); - if (encoder != NULL) { - guint bitrate; - g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL); - pSampleStreamingSession->twccMetadata.currentVideoBitrate = (UINT64) bitrate; - if (pSampleStreamingSession->twccMetadata.newVideoBitrate != 0) { - bitrate = (guint) (pSampleStreamingSession->twccMetadata.newVideoBitrate); - pSampleStreamingSession->twccMetadata.newVideoBitrate = 0; - g_object_set(G_OBJECT(encoder), "bitrate", bitrate, NULL); - } - - } else { - DLOGI("Encoder not found in pipeline"); - } - } else { - DLOGI("pipeline is null"); - } if (trackid == DEFAULT_AUDIO_TRACK_ID) { + if (!pSampleStreamingSession->pSampleConfiguration->disableTwcc && senderPipeline != NULL) { + GstElement* encoder = gst_bin_get_by_name(GST_BIN(senderPipeline), "sampleAudioEncoder"); + if (encoder != NULL) { + g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL); + MUTEX_LOCK(pSampleStreamingSession->twccMetadata.updateLock); + 
pSampleStreamingSession->twccMetadata.currentAudioBitrate = (UINT64) bitrate; + if (pSampleStreamingSession->twccMetadata.newAudioBitrate != 0) { + bitrate = (guint) (pSampleStreamingSession->twccMetadata.newAudioBitrate); + pSampleStreamingSession->twccMetadata.newAudioBitrate = 0; + g_object_set(G_OBJECT(encoder), "bitrate", bitrate, NULL); + } + MUTEX_UNLOCK(pSampleStreamingSession->twccMetadata.updateLock); + } + } pRtcRtpTransceiver = pSampleStreamingSession->pAudioRtcRtpTransceiver; frame.presentationTs = pSampleStreamingSession->audioTimestamp; frame.decodingTs = frame.presentationTs; pSampleStreamingSession->audioTimestamp += SAMPLE_AUDIO_FRAME_DURATION; // assume audio frame size is 20ms, which is default in opusenc } else { + if (!pSampleStreamingSession->pSampleConfiguration->disableTwcc && senderPipeline != NULL) { + GstElement* encoder = gst_bin_get_by_name(GST_BIN(senderPipeline), "sampleVideoEncoder"); + if (encoder != NULL) { + g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL); + MUTEX_LOCK(pSampleStreamingSession->twccMetadata.updateLock); + pSampleStreamingSession->twccMetadata.currentVideoBitrate = (UINT64) bitrate; + if (pSampleStreamingSession->twccMetadata.newVideoBitrate != 0) { + bitrate = (guint) (pSampleStreamingSession->twccMetadata.newVideoBitrate); + pSampleStreamingSession->twccMetadata.newVideoBitrate = 0; + g_object_set(G_OBJECT(encoder), "bitrate", bitrate, NULL); + } + MUTEX_UNLOCK(pSampleStreamingSession->twccMetadata.updateLock); + } + } pRtcRtpTransceiver = pSampleStreamingSession->pVideoRtcRtpTransceiver; frame.presentationTs = pSampleStreamingSession->videoTimestamp; frame.decodingTs = frame.presentationTs; @@ -186,29 +198,29 @@ PVOID sendGstreamerAudioVideo(PVOID args) case SAMPLE_STREAMING_VIDEO_ONLY: switch (pSampleConfiguration->srcType) { case TEST_SOURCE: { - pipeline = - gst_parse_launch("videotestsrc is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! 
" - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " - "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE emit-signals=TRUE " - "name=appsink-video", - &error); + senderPipeline = gst_parse_launch( + "videotestsrc pattern=ball is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE emit-signals=TRUE " + "name=appsink-video", + &error); break; } case DEVICE_SOURCE: { - pipeline = - gst_parse_launch("autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " - "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " - "emit-signals=TRUE name=appsink-video", - &error); + senderPipeline = gst_parse_launch( + "autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " + "emit-signals=TRUE name=appsink-video", + &error); break; } case RTSP_SOURCE: { UINT16 stringOutcome = - snprintf(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT, + SNPRINTF(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT, "uridecodebin uri=%s ! " "videoconvert ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " + "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! 
" "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! " "appsink sync=TRUE emit-signals=TRUE name=appsink-video ", pSampleConfiguration->rtspUri); @@ -217,7 +229,7 @@ PVOID sendGstreamerAudioVideo(PVOID args) DLOGE("[KVS GStreamer Master] ERROR: rtsp uri entered exceeds maximum allowed length set by RTSP_PIPELINE_MAX_CHAR_COUNT"); goto CleanUp; } - pipeline = gst_parse_launch(rtspPipeLineBuffer, &error); + senderPipeline = gst_parse_launch(rtspPipeLineBuffer, &error); break; } @@ -227,36 +239,36 @@ PVOID sendGstreamerAudioVideo(PVOID args) case SAMPLE_STREAMING_AUDIO_VIDEO: switch (pSampleConfiguration->srcType) { case TEST_SOURCE: { - pipeline = gst_parse_launch( + senderPipeline = gst_parse_launch( "videotestsrc pattern=ball is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " + "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " - "emit-signals=TRUE name=appsink-video audiotestsrc is-live=TRUE ! " - "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc ! " + "emit-signals=TRUE name=appsink-video audiotestsrc wave=triangle is-live=TRUE ! " + "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc name=sampleAudioEncoder ! " "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio", &error); break; } case DEVICE_SOURCE: { - pipeline = - gst_parse_launch("autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " - "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! 
appsink sync=TRUE emit-signals=TRUE " - "name=appsink-video autoaudiosrc ! " - "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc ! " - "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio", - &error); + senderPipeline = gst_parse_launch( + "autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " + "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE emit-signals=TRUE " + "name=appsink-video autoaudiosrc ! " + "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc name=sampleAudioEncoder ! " + "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio", + &error); break; } case RTSP_SOURCE: { UINT16 stringOutcome = - snprintf(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT, + SNPRINTF(rtspPipeLineBuffer, RTSP_PIPELINE_MAX_CHAR_COUNT, "uridecodebin uri=%s name=src ! videoconvert ! " - "x264enc name=my_encoder bframes=0 speed-preset=veryfast bitrate=2048 byte-stream=TRUE tune=zerolatency ! " + "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! queue ! " "appsink sync=TRUE emit-signals=TRUE name=appsink-video " "src. ! audioconvert ! " - "audioresample ! opusenc ! audio/x-opus,rate=48000,channels=2 ! queue ! " + "audioresample ! opusenc name=sampleAudioEncoder ! audio/x-opus,rate=48000,channels=2 ! queue ! 
" "appsink sync=TRUE emit-signals=TRUE name=appsink-audio", pSampleConfiguration->rtspUri); @@ -264,7 +276,7 @@ PVOID sendGstreamerAudioVideo(PVOID args) DLOGE("[KVS GStreamer Master] ERROR: rtsp uri entered exceeds maximum allowed length set by RTSP_PIPELINE_MAX_CHAR_COUNT"); goto CleanUp; } - pipeline = gst_parse_launch(rtspPipeLineBuffer, &error); + senderPipeline = gst_parse_launch(rtspPipeLineBuffer, &error); break; } @@ -272,10 +284,10 @@ PVOID sendGstreamerAudioVideo(PVOID args) break; } - CHK_ERR(pipeline != NULL, STATUS_NULL_ARG, "[KVS Gstreamer Master] Pipeline is NULL"); + CHK_ERR(senderPipeline != NULL, STATUS_NULL_ARG, "[KVS Gstreamer Master] Pipeline is NULL"); - appsinkVideo = gst_bin_get_by_name(GST_BIN(pipeline), "appsink-video"); - appsinkAudio = gst_bin_get_by_name(GST_BIN(pipeline), "appsink-audio"); + appsinkVideo = gst_bin_get_by_name(GST_BIN(senderPipeline), "appsink-video"); + appsinkAudio = gst_bin_get_by_name(GST_BIN(senderPipeline), "appsink-audio"); if (!(appsinkVideo != NULL || appsinkAudio != NULL)) { DLOGE("[KVS GStreamer Master] sendGstreamerAudioVideo(): cant find appsink, operation returned status code: 0x%08x", STATUS_INTERNAL_ERROR); @@ -288,10 +300,10 @@ PVOID sendGstreamerAudioVideo(PVOID args) if (appsinkAudio != NULL) { g_signal_connect(appsinkAudio, "new-sample", G_CALLBACK(on_new_sample_audio), (gpointer) pSampleConfiguration); } - gst_element_set_state(pipeline, GST_STATE_PLAYING); + gst_element_set_state(senderPipeline, GST_STATE_PLAYING); /* block until error or EOS */ - bus = gst_element_get_bus(pipeline); + bus = gst_element_get_bus(senderPipeline); msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS); /* Free resources */ @@ -301,9 +313,9 @@ PVOID sendGstreamerAudioVideo(PVOID args) if (bus != NULL) { gst_object_unref(bus); } - if (pipeline != NULL) { - gst_element_set_state(pipeline, GST_STATE_NULL); - gst_object_unref(pipeline); + if (senderPipeline != NULL) { + 
gst_element_set_state(senderPipeline, GST_STATE_NULL); + gst_object_unref(senderPipeline); } if (appsinkAudio != NULL) { gst_object_unref(appsinkAudio); diff --git a/src/include/com/amazonaws/kinesis/video/webrtcclient/Include.h b/src/include/com/amazonaws/kinesis/video/webrtcclient/Include.h index 39ebdc6b22..11b3d36656 100644 --- a/src/include/com/amazonaws/kinesis/video/webrtcclient/Include.h +++ b/src/include/com/amazonaws/kinesis/video/webrtcclient/Include.h @@ -1221,8 +1221,7 @@ typedef struct { //!< would like to whitelist/blacklist specific network interfaces BOOL disableSenderSideBandwidthEstimation; //!< Disable TWCC feedback based sender bandwidth estimation, enabled by default. - //!< You want to set this to TRUE if you are on a very stable connection and want to save 1.2MB of - //!< memory + //!< You want to set this to TRUE if you are on a very stable connection BOOL enableIceStats; //!< Enable ICE stats to be calculated } KvsRtcConfiguration, *PKvsRtcConfiguration; From 94c73e08449df5132cb39d3524f040d308b70260 Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Thu, 28 Mar 2024 13:55:36 -0700 Subject: [PATCH 07/11] Readme --- README.md | 26 ++++++++++++++++++++++++++ samples/Common.c | 21 +++++++++++++-------- 2 files changed, 39 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 1067bbda88..64cd53f79f 100644 --- a/README.md +++ b/README.md @@ -510,6 +510,32 @@ To disable threadpool, run `cmake .. -DENABLE_KVS_THREADPOOL=OFF` ### Thread stack sizes The default thread stack size for the KVS WebRTC SDK is 64 kb. Notable stack sizes that may need to be changed for your specific application will be the ConnectionListener Receiver thread and the media sender threads. Please modify the stack sizes for these media dependent threads to be suitable for the media your application is processing. 
+### Set up TWCC +TWCC is a mechanism in WebRTC designed to enhance the performance and reliability of real-time communication over the internet. TWCC addresses the challenges of network congestion by providing detailed feedback on the transport of packets across the network, enabling adaptive bitrate control and optimization of +media streams in real-time. This feedback mechanism is crucial for maintaining high-quality audio and video communication, as it allows senders to adjust their transmission strategies based on comprehensive information about packet losses, delays, and jitter experienced across the entire transport path. +The importance of TWCC in WebRTC lies in its ability to ensure efficient use of available network bandwidth while minimizing the negative impacts of network congestion. By monitoring the delivery of packets across the network, TWCC helps identify bottlenecks and adjust the media transmission rates accordingly. +This dynamic approach to congestion control is essential for preventing degradation in call quality, such as pixelation, stuttering, or drops in audio and video streams, especially in environments with fluctuating network conditions. To learn more about TWCC, you can refer to the [RFC draft](https://datatracker.ietf.org/doc/html/draft-holmer-rmcat-transport-wide-cc-extensions-01) + +In order to enable TWCC usage in the SDK, 2 things need to be set up: + +1. Set the `disableSenderSideBandwidthEstimation` to FALSE. In our samples, the value is set using `disableTwcc` flag in `pSampleConfiguration` + +```c +pSampleConfiguration->disableTwcc = TRUE; // to disable TWCC +pSampleConfiguration->disableTwcc = FALSE; // to enable TWCC +configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = pSampleConfiguration->disableTwcc; +``` + +2. Set the callback that will have the business logic to modify the bitrate based on packet loss information. The callback can be set using `peerConnectionOnSenderBandwidthEstimation()`. 
+ +```c +CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession, + sampleSenderBandwidthEstimationHandler)); +``` + +By default, our SDK enables the TWCC listener. The SDK has a sample implementation to integrate TWCC into the Gstreamer pipeline via the `sampleSenderBandwidthEstimationHandler` callback. To get more details, look for this specific callback. + + ### Setting ICE related timeouts There are some default timeout values set for different steps in ICE in the [KvsRtcConfiguration](https://awslabs.github.io/amazon-kinesis-video-streams-webrtc-sdk-c/structKvsRtcConfiguration.html). These are configurable in the application. While the defaults are generous, there could be applications that might need more flexibility to improve chances of connection establishment because of poor network. diff --git a/samples/Common.c b/samples/Common.c index e499f241e5..dca4a1e591 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -609,8 +609,10 @@ STATUS createSampleStreamingSession(PSampleConfiguration pSampleConfiguration, P CHK_STATUS(transceiverOnBandwidthEstimation(pSampleStreamingSession->pAudioRtcRtpTransceiver, (UINT64) pSampleStreamingSession, sampleBandwidthEstimationHandler)); // twcc bandwidth estimation - CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession, - sampleSenderBandwidthEstimationHandler)); + if (!pSampleConfiguration->disableTwcc) { + CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession, + sampleSenderBandwidthEstimationHandler)); + } pSampleStreamingSession->startUpLatency = 0; CleanUp: @@ -717,6 +719,9 @@ VOID sampleBandwidthEstimationHandler(UINT64 customData, DOUBLE maximumBitrate) DLOGV("received bitrate suggestion: %f", maximumBitrate); } +// Sample callback for TWCC. Average packet loss is calculated with EMA.
If average packet loss is <= 5%, +// the current bitrate is increased by 5%. If more than 5%, the current bitrate +// is reduced by percent lost. Bitrate update is allowed every second and is increased/decreased up to the limits VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt, UINT64 duration) { @@ -749,18 +754,18 @@ VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U if (pSampleStreamingSession->twccMetadata.averagePacketLoss <= 5) { // increase encoder bitrate by 5 percent with a cap at MAX_BITRATE - videoBitrate = (UINT64) MIN(videoBitrate * 1.05f, MAX_VIDEO_BITRATE_KBPS); + videoBitrate = (UINT64) MIN(videoBitrate * 1.05, MAX_VIDEO_BITRATE_KBPS); } else { // decrease encoder bitrate by average packet loss percent, with a cap at MIN_BITRATE - videoBitrate = (UINT64) MAX(videoBitrate * (1.0f - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0f), MIN_VIDEO_BITRATE_KBPS); + videoBitrate = (UINT64) MAX(videoBitrate * (1.0 - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0), MIN_VIDEO_BITRATE_KBPS); } if (pSampleStreamingSession->twccMetadata.averagePacketLoss <= 5) { // increase encoder bitrate by 5 percent with a cap at MAX_BITRATE - audioBitrate = (UINT64) MIN(audioBitrate * 1.05f, MAX_AUDIO_BITRATE_BPS); + audioBitrate = (UINT64) MIN(audioBitrate * 1.05, MAX_AUDIO_BITRATE_BPS); } else { // decrease encoder bitrate by average packet loss percent, with a cap at MIN_BITRATE - audioBitrate = (UINT64) MAX(audioBitrate * (1.0f - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0f), MIN_AUDIO_BITRATE_BPS); + audioBitrate = (UINT64) MAX(audioBitrate * (1.0 - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0), MIN_AUDIO_BITRATE_BPS); } // Update the session with the new bitrate and adjustment time @@ -770,9 +775,9 @@ VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U
pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs = currentTimeMs; - DLOGV("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, + DLOGI("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, ADJUSTMENT_INTERVAL_SECONDS, timeDiff); - DLOGV("Suggested video bitrate %u kbps, suggested audio bitrate: %u bps, sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", + DLOGI("Suggested video bitrate %u kbps, suggested audio bitrate: %u bps, sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", videoBitrate, audioBitrate, txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / 10000ULL); } From fbb997e0deddb657f90d3c3f50e0754988103e2c Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Thu, 4 Apr 2024 04:14:33 -0700 Subject: [PATCH 08/11] flip --- README.md | 8 +++---- samples/Common.c | 27 +++++++++++------------- samples/Samples.h | 12 +++++------ samples/kvsWebRTCClientMasterGstSample.c | 6 +++--- 4 files changed, 25 insertions(+), 28 deletions(-) diff --git a/README.md b/README.md index 64cd53f79f..14405ae689 100644 --- a/README.md +++ b/README.md @@ -518,12 +518,12 @@ This dynamic approach to congestion control is essential for preventing degradat In order to enable TWCC usage in the SDK, 2 things need to be set up: -1. Set the `disableSenderSideBandwidthEstimation` to FALSE. In our samples, the value is set using `disableTwcc` flag in `pSampleConfiguration` +1. Set the `disableSenderSideBandwidthEstimation` to FALSE. 
In our samples, the value is set using `enableTwcc` flag in `pSampleConfiguration` ```c -pSampleConfiguration->disableTwcc = TRUE; // to disable TWCC -pSampleConfiguration->disableTwcc = FALSE; // to enable TWCC -configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = pSampleConfiguration->disableTwcc; +pSampleConfiguration->enableTwcc = TRUE; // to enable TWCC +pSampleConfiguration->enableTwcc = FALSE; // to disable TWCC +configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = !pSampleConfiguration->enableTwcc; ``` 2. Set the callback that will have the business logic to modify the bitrate based on packet loss information. The callback can be set using `peerConnectionOnSenderBandwidthEstimation()`. diff --git a/samples/Common.c b/samples/Common.c index dca4a1e591..21d6e4580d 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -414,7 +414,8 @@ STATUS initializePeerConnection(PSampleConfiguration pSampleConfiguration, PRtcP configuration.kvsRtcConfiguration.iceSetInterfaceFilterFunc = NULL; // disable TWCC - configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = pSampleConfiguration->disableTwcc; + configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = !(pSampleConfiguration->enableTwcc); + DLOGI("TWCC is : %s", configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation ? "Disabled" : "Enabled"); // Set the ICE mode explicitly configuration.iceTransportPolicy = ICE_TRANSPORT_POLICY_ALL; @@ -557,9 +558,9 @@ STATUS createSampleStreamingSession(PSampleConfiguration pSampleConfiguration, P pSampleStreamingSession->peerConnectionMetrics.peerConnectionStats.peerConnectionStartTime = GETTIME() / HUNDREDS_OF_NANOS_IN_A_MILLISECOND; // Flag to enable SDK to calculate selected ice server, local, remote and candidate pair stats. 
pSampleConfiguration->enableIceStats = FALSE; - pSampleConfiguration->disableTwcc = FALSE; + pSampleConfiguration->enableTwcc = TRUE; - if (!pSampleConfiguration->disableTwcc) { + if (pSampleConfiguration->enableTwcc) { pSampleStreamingSession->twccMetadata.updateLock = MUTEX_CREATE(TRUE); } @@ -609,7 +610,7 @@ STATUS createSampleStreamingSession(PSampleConfiguration pSampleConfiguration, P CHK_STATUS(transceiverOnBandwidthEstimation(pSampleStreamingSession->pAudioRtcRtpTransceiver, (UINT64) pSampleStreamingSession, sampleBandwidthEstimationHandler)); // twcc bandwidth estimation - if (!pSampleConfiguration->disableTwcc) { + if (pSampleConfiguration->enableTwcc) { CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession, sampleSenderBandwidthEstimationHandler)); } @@ -663,7 +664,7 @@ STATUS freeSampleStreamingSession(PSampleStreamingSession* ppSampleStreamingSess } MUTEX_UNLOCK(pSampleConfiguration->sampleConfigurationObjLock); - if (!pSampleConfiguration->disableTwcc) { + if (pSampleConfiguration->enableTwcc) { if (IS_VALID_MUTEX_VALUE(pSampleStreamingSession->twccMetadata.updateLock)) { MUTEX_FREE(pSampleStreamingSession->twccMetadata.updateLock); } @@ -743,7 +744,7 @@ VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U currentTimeMs = GETTIME(); timeDiff = currentTimeMs - pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs; - if (timeDiff < ADJUSTMENT_INTERVAL_SECONDS) { + if (timeDiff < TWCC_BITRATE_ADJUSTMENT_INTERVAL_SECONDS) { // Too soon for another adjustment return; } @@ -755,15 +756,11 @@ VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U if (pSampleStreamingSession->twccMetadata.averagePacketLoss <= 5) { // increase encoder bitrate by 5 percent with a cap at MAX_BITRATE videoBitrate = (UINT64) MIN(videoBitrate * 1.05, MAX_VIDEO_BITRATE_KBPS); - } else { - // decrease encoder bitrate by average packet loss percent, with 
a cap at MIN_BITRATE - videoBitrate = (UINT64) MAX(videoBitrate * (1.0 - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0), MIN_VIDEO_BITRATE_KBPS); - } - - if (pSampleStreamingSession->twccMetadata.averagePacketLoss <= 5) { // increase encoder bitrate by 5 percent with a cap at MAX_BITRATE audioBitrate = (UINT64) MIN(audioBitrate * 1.05, MAX_AUDIO_BITRATE_BPS); } else { + // decrease encoder bitrate by average packet loss percent, with a cap at MIN_BITRATE + videoBitrate = (UINT64) MAX(videoBitrate * (1.0 - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0), MIN_VIDEO_BITRATE_KBPS); // decrease encoder bitrate by average packet loss percent, with a cap at MIN_BITRATE audioBitrate = (UINT64) MAX(audioBitrate * (1.0 - pSampleStreamingSession->twccMetadata.averagePacketLoss / 100.0), MIN_AUDIO_BITRATE_BPS); } @@ -775,9 +772,9 @@ VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs = currentTimeMs; - DLOGI("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, - ADJUSTMENT_INTERVAL_SECONDS, timeDiff); - DLOGI("Suggested video bitrate %u kbps, suggested audio bitrate: %u bps, sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", + DLOGD("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, + TWCC_BITRATE_ADJUSTMENT_INTERVAL_SECONDS, timeDiff); + DLOGD("Suggested video bitrate %u kbps, suggested audio bitrate: %u bps, sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", videoBitrate, audioBitrate, txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / 10000ULL); } diff --git a/samples/Samples.h b/samples/Samples.h index cf8f0cdd3e..7901a58cf7 100644 --- a/samples/Samples.h +++ b/samples/Samples.h @@ -83,11 +83,11 @@ extern "C" { #define MAX_SIGNALING_CLIENT_METRICS_MESSAGE_SIZE 736 
// strlen(SIGNALING_CLIENT_METRICS_JSON_TEMPLATE) + 20 * 10 #define MAX_ICE_AGENT_METRICS_MESSAGE_SIZE 113 // strlen(ICE_AGENT_METRICS_JSON_TEMPLATE) + 20 * 2 -#define ADJUSTMENT_INTERVAL_SECONDS 1 * HUNDREDS_OF_NANOS_IN_A_SECOND -#define MIN_VIDEO_BITRATE_KBPS 512 // Unit kilobits/sec. Value could change based on codec. -#define MAX_VIDEO_BITRATE_KBPS 2048000 // Unit kilobits/sec. Value could change based on codec. -#define MIN_AUDIO_BITRATE_BPS 4000 // Unit bits/sec. Value could change based on codec. -#define MAX_AUDIO_BITRATE_BPS 650000 // Unit bits/sec. Value could change based on codec. +#define TWCC_BITRATE_ADJUSTMENT_INTERVAL_SECONDS 1 * HUNDREDS_OF_NANOS_IN_A_SECOND +#define MIN_VIDEO_BITRATE_KBPS 512 // Unit kilobits/sec. Value could change based on codec. +#define MAX_VIDEO_BITRATE_KBPS 2048000 // Unit kilobits/sec. Value could change based on codec. +#define MIN_AUDIO_BITRATE_BPS 4000 // Unit bits/sec. Value could change based on codec. +#define MAX_AUDIO_BITRATE_BPS 650000 // Unit bits/sec. Value could change based on codec. 
typedef enum { SAMPLE_STREAMING_VIDEO_ONLY, @@ -166,7 +166,7 @@ typedef struct { PCHAR rtspUri; UINT32 logLevel; BOOL enableIceStats; - BOOL disableTwcc; + BOOL enableTwcc; } SampleConfiguration, *PSampleConfiguration; typedef struct { diff --git a/samples/kvsWebRTCClientMasterGstSample.c b/samples/kvsWebRTCClientMasterGstSample.c index ca5d5c184b..f8e77c6450 100644 --- a/samples/kvsWebRTCClientMasterGstSample.c +++ b/samples/kvsWebRTCClientMasterGstSample.c @@ -66,7 +66,7 @@ GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) frame.index = (UINT32) ATOMIC_INCREMENT(&pSampleStreamingSession->frameIndex); if (trackid == DEFAULT_AUDIO_TRACK_ID) { - if (!pSampleStreamingSession->pSampleConfiguration->disableTwcc && senderPipeline != NULL) { + if (pSampleStreamingSession->pSampleConfiguration->enableTwcc && senderPipeline != NULL) { GstElement* encoder = gst_bin_get_by_name(GST_BIN(senderPipeline), "sampleAudioEncoder"); if (encoder != NULL) { g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL); @@ -86,7 +86,7 @@ GstFlowReturn on_new_sample(GstElement* sink, gpointer data, UINT64 trackid) pSampleStreamingSession->audioTimestamp += SAMPLE_AUDIO_FRAME_DURATION; // assume audio frame size is 20ms, which is default in opusenc } else { - if (!pSampleStreamingSession->pSampleConfiguration->disableTwcc && senderPipeline != NULL) { + if (pSampleStreamingSession->pSampleConfiguration->enableTwcc && senderPipeline != NULL) { GstElement* encoder = gst_bin_get_by_name(GST_BIN(senderPipeline), "sampleVideoEncoder"); if (encoder != NULL) { g_object_get(G_OBJECT(encoder), "bitrate", &bitrate, NULL); @@ -243,7 +243,7 @@ PVOID sendGstreamerAudioVideo(PVOID args) "videotestsrc pattern=ball is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! 
" "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " - "emit-signals=TRUE name=appsink-video audiotestsrc wave=triangle is-live=TRUE ! " + "emit-signals=TRUE name=appsink-video audiotestsrc wave=ticks is-live=TRUE ! " "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc name=sampleAudioEncoder ! " "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio", &error); From 8995bf62ab9742e3ae93588fcc12266c5fbfc3cc Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Tue, 30 Apr 2024 10:03:31 -0700 Subject: [PATCH 09/11] memset remove --- samples/Common.c | 1 - 1 file changed, 1 deletion(-) diff --git a/samples/Common.c b/samples/Common.c index 21d6e4580d..1ec523c4c8 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -554,7 +554,6 @@ STATUS createSampleStreamingSession(PSampleConfiguration pSampleConfiguration, P ATOMIC_STORE_BOOL(&pSampleStreamingSession->terminateFlag, FALSE); ATOMIC_STORE_BOOL(&pSampleStreamingSession->candidateGatheringDone, FALSE); - MEMSET(&pSampleStreamingSession->twccMetadata, 0x00, SIZEOF(TwccMetadata)); pSampleStreamingSession->peerConnectionMetrics.peerConnectionStats.peerConnectionStartTime = GETTIME() / HUNDREDS_OF_NANOS_IN_A_MILLISECOND; // Flag to enable SDK to calculate selected ice server, local, remote and candidate pair stats. 
pSampleConfiguration->enableIceStats = FALSE; From aec05b55f70242dd0427d4f264af58d6848ce1aa Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Tue, 30 Apr 2024 11:46:11 -0700 Subject: [PATCH 10/11] Readme update, move enable flags to createSampleConfiguration --- README.md | 27 +++++++++++++++++++-------- samples/Common.c | 9 ++++++--- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 14405ae689..cfc9b965bc 100644 --- a/README.md +++ b/README.md @@ -396,21 +396,32 @@ createLwsIotCredentialProvider( freeIotCredentialProvider(&pSampleConfiguration->pCredentialProvider); ``` -## Use of TWCC -In order to listen in on TWCC reports, the application must set up a callback using the `peerConnectionOnSenderBandwidthEstimation` API. In our samples, it is set up like this: +## TWCC support + +Transport Wide Congestion Control (TWCC) is a mechanism in WebRTC designed to enhance the performance and reliability of real-time communication over the internet. TWCC addresses the challenges of network congestion by providing detailed feedback on the transport of packets across the network, enabling adaptive bitrate control and optimization of media streams in real-time. This feedback mechanism is crucial for maintaining high-quality audio and video communication, as it allows senders to adjust their transmission strategies based on comprehensive information about packet losses, delays, and jitter experienced across the entire transport path. + +The importance of TWCC in WebRTC lies in its ability to ensure efficient use of available network bandwidth while minimizing the negative impacts of network congestion. By monitoring the delivery of packets across the network, TWCC helps identify bottlenecks and adjust the media transmission rates accordingly. 
This dynamic approach to congestion control is essential for preventing degradation in call quality, such as pixelation, stuttering, or drops in audio and video streams, especially in environments with fluctuating network conditions. + +To learn more about TWCC, check [TWCC spec](https://datatracker.ietf.org/doc/html/draft-holmer-rmcat-transport-wide-cc-extensions-01) + +### Enabling TWCC support + +TWCC is enabled by default in the SDK samples (via `pSampleConfiguration->enableTwcc`) flag. In order to disable it, set this flag to `FALSE`. ```c -CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession, - sampleSenderBandwidthEstimationHandler)); +pSampleConfiguration->enableTwcc = FALSE; ``` -Note that TWCC is disabled by default in the SDK samples. In order to enable it, set the `disableSenderSideBandwidthEstimation` flag to FALSE. For example, - +If not using the samples directly, 2 things need to be done to set up Twcc: +1. Set the `disableSenderSideBandwidthEstimation` to `FALSE`: ```c -RtcConfiguration configuration; configuration.kvsRtcConfiguration.disableSenderSideBandwidthEstimation = FALSE; ``` - +2. Set the callback that will have the business logic to modify the bitrate based on packet loss information. The callback can be set using `peerConnectionOnSenderBandwidthEstimation()`: +```c +CHK_STATUS(peerConnectionOnSenderBandwidthEstimation(pSampleStreamingSession->pPeerConnection, (UINT64) pSampleStreamingSession, + sampleSenderBandwidthEstimationHandler)); +``` ## Use Pre-generated Certificates The certificate generating function ([createCertificateAndKey](https://awslabs.github.io/amazon-kinesis-video-streams-webrtc-sdk-c/Dtls__openssl_8c.html#a451c48525b0c0a8919a880d6834c1f7f)) in createDtlsSession() can take between 5 - 15 seconds in low performance embedded devices, it is called for every peer connection creation when KVS WebRTC receives an offer. 
To avoid this extra start-up latency, certificate can be pre-generated and passed in when offer comes. diff --git a/samples/Common.c b/samples/Common.c index 1ec523c4c8..31fcd14e4c 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -555,9 +555,6 @@ STATUS createSampleStreamingSession(PSampleConfiguration pSampleConfiguration, P ATOMIC_STORE_BOOL(&pSampleStreamingSession->terminateFlag, FALSE); ATOMIC_STORE_BOOL(&pSampleStreamingSession->candidateGatheringDone, FALSE); pSampleStreamingSession->peerConnectionMetrics.peerConnectionStats.peerConnectionStartTime = GETTIME() / HUNDREDS_OF_NANOS_IN_A_MILLISECOND; - // Flag to enable SDK to calculate selected ice server, local, remote and candidate pair stats. - pSampleConfiguration->enableIceStats = FALSE; - pSampleConfiguration->enableTwcc = TRUE; if (pSampleConfiguration->enableTwcc) { pSampleStreamingSession->twccMetadata.updateLock = MUTEX_CREATE(TRUE); @@ -963,6 +960,12 @@ STATUS createSampleConfiguration(PCHAR channelName, SIGNALING_CHANNEL_ROLE_TYPE pSampleConfiguration->pregenerateCertTimerId = MAX_UINT32; pSampleConfiguration->signalingClientMetrics.version = SIGNALING_CLIENT_METRICS_CURRENT_VERSION; + // Flag to enable SDK to calculate selected ice server, local, remote and candidate pair stats. 
+ pSampleConfiguration->enableIceStats = FALSE; + + // Flag to enable/disable TWCC + pSampleConfiguration->enableTwcc = TRUE; + ATOMIC_STORE_BOOL(&pSampleConfiguration->interrupted, FALSE); ATOMIC_STORE_BOOL(&pSampleConfiguration->mediaThreadStarted, FALSE); ATOMIC_STORE_BOOL(&pSampleConfiguration->appTerminateFlag, FALSE); From 33db3e3608f61f1f682ef5bd42da961d791625ab Mon Sep 17 00:00:00 2001 From: Divya Sampath Kumar Date: Tue, 30 Apr 2024 13:17:23 -0700 Subject: [PATCH 11/11] Add codecov token --- .github/workflows/codecov.yml | 1 + README.md | 2 +- samples/Common.c | 9 ++++----- samples/Samples.h | 10 +++++----- samples/kvsWebRTCClientMasterGstSample.c | 20 +++++++++++++------- 5 files changed, 24 insertions(+), 18 deletions(-) diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index fcc29b84d1..0db44ee0ab 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -13,6 +13,7 @@ jobs: runs-on: ubuntu-20.04 env: AWS_KVS_LOG_LEVEL: 2 + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} permissions: id-token: write contents: read diff --git a/README.md b/README.md index cfc9b965bc..586c8c0602 100644 --- a/README.md +++ b/README.md @@ -522,7 +522,7 @@ To disable threadpool, run `cmake .. -DENABLE_KVS_THREADPOOL=OFF` The default thread stack size for the KVS WebRTC SDK is 64 kb. Notable stack sizes that may need to be changed for your specific application will be the ConnectionListener Receiver thread and the media sender threads. Please modify the stack sizes for these media dependent threads to be suitable for the media your application is processing. ### Set up TWCC -TWCC is a mechanism in WebRTC designed to enhance the performance and reliability of real-time communication over the internet. 
TWCC addresses the challenges of network congestion by providing detailed feedback on the transport of packets across the network, enabling adaptive bitrate control and optimization of +TWCC is a mechanism in WebRTC designed to enhance the performance and reliability of real-time communication over the Internet. TWCC addresses the challenges of network congestion by providing detailed feedback on the transport of packets across the network, enabling adaptive bitrate control and optimization of media streams in real-time. This feedback mechanism is crucial for maintaining high-quality audio and video communication, as it allows senders to adjust their transmission strategies based on comprehensive information about packet losses, delays, and jitter experienced across the entire transport path. The importance of TWCC in WebRTC lies in its ability to ensure efficient use of available network bandwidth while minimizing the negative impacts of network congestion. By monitoring the delivery of packets across the network, TWCC helps identify bottlenecks and adjust the media transmission rates accordingly. This dynamic approach to congestion control is essential for preventing degradation in call quality, such as pixelation, stuttering, or drops in audio and video streams, especially in environments with fluctuating network conditions. To learn more about TWCC, you can refer to the [RFC draft](https://datatracker.ietf.org/doc/html/draft-holmer-rmcat-transport-wide-cc-extensions-01) diff --git a/samples/Common.c b/samples/Common.c index 31fcd14e4c..0991cc03f0 100644 --- a/samples/Common.c +++ b/samples/Common.c @@ -716,7 +716,7 @@ VOID sampleBandwidthEstimationHandler(UINT64 customData, DOUBLE maximumBitrate) DLOGV("received bitrate suggestion: %f", maximumBitrate); } -// Sample callback for TWCC. Average packet is calculated with EMA. If average packet lost is <= 5%, +// Sample callback for TWCC. Average packet is calculated with exponential moving average (EMA). 
If average packet lost is <= 5%, // the current bitrate is increased by 5%. If more than 5%, the current bitrate // is reduced by percent lost. Bitrate update is allowed every second and is increased/decreased upto the limits VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, UINT32 rxBytes, UINT32 txPacketsCnt, UINT32 rxPacketsCnt, @@ -740,7 +740,7 @@ VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U currentTimeMs = GETTIME(); timeDiff = currentTimeMs - pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs; - if (timeDiff < TWCC_BITRATE_ADJUSTMENT_INTERVAL_SECONDS) { + if (timeDiff < TWCC_BITRATE_ADJUSTMENT_INTERVAL_MS) { // Too soon for another adjustment return; } @@ -768,9 +768,8 @@ VOID sampleSenderBandwidthEstimationHandler(UINT64 customData, UINT32 txBytes, U pSampleStreamingSession->twccMetadata.lastAdjustmentTimeMs = currentTimeMs; - DLOGD("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, - TWCC_BITRATE_ADJUSTMENT_INTERVAL_SECONDS, timeDiff); - DLOGD("Suggested video bitrate %u kbps, suggested audio bitrate: %u bps, sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", + DLOGI("Adjustment made: average packet loss = %.2f%%, timediff: %llu ms", pSampleStreamingSession->twccMetadata.averagePacketLoss, timeDiff); + DLOGI("Suggested video bitrate %u kbps, suggested audio bitrate: %u bps, sent: %u bytes %u packets received: %u bytes %u packets in %lu msec", videoBitrate, audioBitrate, txBytes, txPacketsCnt, rxBytes, rxPacketsCnt, duration / 10000ULL); } diff --git a/samples/Samples.h b/samples/Samples.h index 7901a58cf7..9e3dfa5bd9 100644 --- a/samples/Samples.h +++ b/samples/Samples.h @@ -83,11 +83,11 @@ extern "C" { #define MAX_SIGNALING_CLIENT_METRICS_MESSAGE_SIZE 736 // strlen(SIGNALING_CLIENT_METRICS_JSON_TEMPLATE) + 20 * 10 #define MAX_ICE_AGENT_METRICS_MESSAGE_SIZE 113 // 
strlen(ICE_AGENT_METRICS_JSON_TEMPLATE) + 20 * 2 -#define TWCC_BITRATE_ADJUSTMENT_INTERVAL_SECONDS 1 * HUNDREDS_OF_NANOS_IN_A_SECOND -#define MIN_VIDEO_BITRATE_KBPS 512 // Unit kilobits/sec. Value could change based on codec. -#define MAX_VIDEO_BITRATE_KBPS 2048000 // Unit kilobits/sec. Value could change based on codec. -#define MIN_AUDIO_BITRATE_BPS 4000 // Unit bits/sec. Value could change based on codec. -#define MAX_AUDIO_BITRATE_BPS 650000 // Unit bits/sec. Value could change based on codec. +#define TWCC_BITRATE_ADJUSTMENT_INTERVAL_MS 1000 * HUNDREDS_OF_NANOS_IN_A_MILLISECOND +#define MIN_VIDEO_BITRATE_KBPS 512 // Unit kilobits/sec. Value could change based on codec. +#define MAX_VIDEO_BITRATE_KBPS 2048000 // Unit kilobits/sec. Value could change based on codec. +#define MIN_AUDIO_BITRATE_BPS 4000 // Unit bits/sec. Value could change based on codec. +#define MAX_AUDIO_BITRATE_BPS 650000 // Unit bits/sec. Value could change based on codec. typedef enum { SAMPLE_STREAMING_VIDEO_ONLY, diff --git a/samples/kvsWebRTCClientMasterGstSample.c b/samples/kvsWebRTCClientMasterGstSample.c index f8e77c6450..ff5ffccb35 100644 --- a/samples/kvsWebRTCClientMasterGstSample.c +++ b/samples/kvsWebRTCClientMasterGstSample.c @@ -199,10 +199,13 @@ PVOID sendGstreamerAudioVideo(PVOID args) switch (pSampleConfiguration->srcType) { case TEST_SOURCE: { senderPipeline = gst_parse_launch( - "videotestsrc pattern=ball is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "videotestsrc pattern=ball is-live=TRUE ! " + "queue ! videoconvert ! videoscale ! video/x-raw,width=1280,height=720 ! " + "clockoverlay halignment=right valignment=top time-format=\"%Y-%m-%d %H:%M:%S\" ! " + "videorate ! video/x-raw,framerate=25/1 ! " "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " - "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! 
appsink sync=TRUE emit-signals=TRUE " - "name=appsink-video", + "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! " + "appsink sync=TRUE emit-signals=TRUE name=appsink-video", &error); break; } @@ -210,7 +213,8 @@ PVOID sendGstreamerAudioVideo(PVOID args) senderPipeline = gst_parse_launch( "autovideosrc ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " - "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " + "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! " + " appsink sync=TRUE " "emit-signals=TRUE name=appsink-video", &error); break; @@ -240,10 +244,12 @@ PVOID sendGstreamerAudioVideo(PVOID args) switch (pSampleConfiguration->srcType) { case TEST_SOURCE: { senderPipeline = gst_parse_launch( - "videotestsrc pattern=ball is-live=TRUE ! queue ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "videotestsrc pattern=ball is-live=TRUE ! " + "queue ! videorate ! videoscale ! videoconvert ! video/x-raw,width=1280,height=720,framerate=25/1 ! " + "clockoverlay halignment=right valignment=top time-format=\"%Y-%m-%d %H:%M:%S\" ! " "x264enc name=sampleVideoEncoder bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! " - "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! appsink sync=TRUE " - "emit-signals=TRUE name=appsink-video audiotestsrc wave=ticks is-live=TRUE ! " + "video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! " + "appsink sync=TRUE emit-signals=TRUE name=appsink-video audiotestsrc wave=ticks is-live=TRUE ! " "queue leaky=2 max-size-buffers=400 ! audioconvert ! audioresample ! opusenc name=sampleAudioEncoder ! " "audio/x-opus,rate=48000,channels=2 ! appsink sync=TRUE emit-signals=TRUE name=appsink-audio", &error);