Skip to content

Commit

Permalink
Merge branch 'develop' into twcc-sample
Browse files Browse the repository at this point in the history
  • Loading branch information
disa6302 authored Apr 15, 2024
2 parents 08156ff + a07a25c commit 08a39bc
Show file tree
Hide file tree
Showing 3,014 changed files with 1,280 additions and 101 deletions.
The diff you're trying to view is too large. We only load the first 3000 changed files.
6 changes: 5 additions & 1 deletion .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ on:
- master
jobs:
clang-format-check:
runs-on: macos-latest
runs-on: macos-12
steps:
- name: Clone repository
uses: actions/checkout@v4
Expand Down Expand Up @@ -78,6 +78,7 @@ jobs:
runs-on: macos-13-xlarge
env:
AWS_KVS_LOG_LEVEL: 2
PKG_CONFIG_PATH: /usr/local/opt/pkgconfig
permissions:
id-token: write
contents: read
Expand All @@ -89,6 +90,9 @@ jobs:
with:
role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }}
aws-region: ${{ secrets.AWS_REGION }}
- name: Install dependencies
run: |
brew install gstreamer glib
- name: Build repository
run: |
brew unlink openssl
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/codecov.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ jobs:
run: |
sudo sh -c 'echo 0 > /proc/sys/net/ipv6/conf/all/disable_ipv6'
mkdir build && cd build
cmake .. -DCODE_COVERAGE=TRUE -DBUILD_TEST=TRUE
cmake .. -DCODE_COVERAGE=TRUE -DBUILD_TEST=TRUE -DBUILD_SAMPLE=OFF
make
ulimit -c unlimited -S
- name: Run tests
Expand Down
2 changes: 2 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -461,6 +461,8 @@ if (BUILD_SAMPLE)
# copy sample frames to build folder, in case developer runs sample program with command `samples/kvsWebrtcClientMaster` from `build` dir.
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/samples/opusSampleFrames" DESTINATION .)
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/samples/h264SampleFrames" DESTINATION .)
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/samples/aacSampleFrames" DESTINATION .)
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/samples/h265SampleFrames" DESTINATION .)

add_subdirectory(samples)
endif()
Expand Down
36 changes: 35 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -294,11 +294,45 @@ Pass the desired media and source type when running the sample. The mediaType ca


#### Sample: kvsWebrtcClientViewer
This application accepts sample H264/Opus frames and prints them out. To run:
This application accepts sample H264/Opus frames by default. You can use other supported codecs by changing the value for `videoTrack.codec` and `audioTrack.codec` in _Common.c_. By default, this sample only logs the size of the audio and video buffer it receives. To write these frames to a file using GStreamer, use the _kvsWebrtcClientViewerGstSample_ instead.

To run:
```shell
./samples/kvsWebrtcClientViewer <channelName>
```

#### Sample: kvsWebrtcClientViewerGstSample
This application is similar to the kvsWebrtcClientViewer. However, instead of just logging the media it receives, it writes the media to a file using a GStreamer `filesink` element. Make sure that your device has enough space to write the media to a file. You can also customize the receiving logic by modifying the functions in _GstAudioVideoReceiver.c_.

To run:
```shell
./samples/kvsWebrtcClientViewerGstSample <channelName> <mediaType>
```

#### Sample: Generating sample frames

##### H264
```shell
gst-launch-1.0 videotestsrc pattern=ball num-buffers=1500 ! timeoverlay ! videoconvert ! video/x-raw,format=I420,width=1280,height=720,framerate=25/1 ! queue ! x264enc bframes=0 speed-preset=veryfast bitrate=512 byte-stream=TRUE tune=zerolatency ! video/x-h264,stream-format=byte-stream,alignment=au,profile=baseline ! multifilesink location="frame-%04d.h264" index=1
```

##### H265
```shell
gst-launch-1.0 videotestsrc pattern=ball num-buffers=1500 ! timeoverlay ! videoconvert ! video/x-raw,format=I420,width=1280,height=720,framerate=25/1 ! queue ! x265enc speed-preset=veryfast bitrate=512 tune=zerolatency ! video/x-h265,stream-format=byte-stream,alignment=au,profile=main ! multifilesink location="frame-%04d.h265" index=1
```

##### AAC

###### ADTS LC
```shell
gst-launch-1.0 audiotestsrc num-buffers=1500 ! audioconvert ! audioresample ! faac ! capsfilter caps=audio/mpeg,mpegversion=4,stream-format=adts,base-profile=lc,channels=2,rate=48000 ! multifilesink location="sample-%03d.aac" index=1
```

###### RAW LC
```shell
gst-launch-1.0 audiotestsrc num-buffers=1500 ! audioconvert ! audioresample ! faac ! capsfilter caps=audio/mpeg,mpegversion=4,stream-format=raw,base-profile=lc,channels=2,rate=44100 ! multifilesink location="sample-%03d.aac" index=1
```

### Viewing Master Samples

After running one of the master samples, when the command line application prints "Signaling client connection to socket established", indicating that your signaling channel is created and the connected master is streaming media to it, you can view the stream. To do so, check the media playback viewer on the KVS Signaling Channels console or open the [WebRTC SDK Test Page](https://awslabs.github.io/amazon-kinesis-video-streams-webrtc-sdk-js/examples/index.html).
Expand Down
15 changes: 15 additions & 0 deletions samples/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,8 @@ link_directories(${OPEN_SRC_INSTALL_PREFIX}/lib)
# copy sample frames to this subproject build folder, in case developer runs sample program with command `kvsWebrtcClientMaster` from `build/samples` dir.
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/opusSampleFrames" DESTINATION .)
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/h264SampleFrames" DESTINATION .)
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/aacSampleFrames" DESTINATION .)
file(COPY "${CMAKE_CURRENT_SOURCE_DIR}/h265SampleFrames" DESTINATION .)

add_executable(
kvsWebrtcClientMaster
Expand All @@ -76,13 +78,26 @@ if(GST_FOUND)
add_executable(
kvsWebrtcClientMasterGstSample
Common.c
GstAudioVideoReceiver.c
kvsWebRTCClientMasterGstSample.c
)
target_link_libraries(kvsWebrtcClientMasterGstSample kvsWebrtcClient kvsWebrtcSignalingClient ${EXTRA_DEPS} ${GST_SAMPLE_LIBRARIES} kvsCommonLws kvspicUtils websockets kvssdp)

install(TARGETS kvsWebrtcClientMasterGstSample
RUNTIME DESTINATION bin
)

add_executable(
kvsWebrtcClientViewerGstSample
Common.c
GstAudioVideoReceiver.c
kvsWebRTCClientViewerGstSample.c
)
target_link_libraries(kvsWebrtcClientViewerGstSample kvsWebrtcClient kvsWebrtcSignalingClient ${EXTRA_DEPS} ${GST_SAMPLE_LIBRARIES} kvsCommonLws kvspicUtils websockets kvssdp)

install(TARGETS kvsWebrtcClientViewerGstSample
RUNTIME DESTINATION bin
)
endif()

install(TARGETS kvsWebrtcClientMaster kvsWebrtcClientViewer discoverNatBehavior
Expand Down
5 changes: 5 additions & 0 deletions samples/Common.c
Original file line number Diff line number Diff line change
Expand Up @@ -148,6 +148,11 @@ STATUS handleAnswer(PSampleConfiguration pSampleConfiguration, PSampleStreamingS
CHK_STATUS(deserializeSessionDescriptionInit(pSignalingMessage->payload, pSignalingMessage->payloadLen, pAnswerSessionDescriptionInit));
CHK_STATUS(setRemoteDescription(pSampleStreamingSession->pPeerConnection, pAnswerSessionDescriptionInit));

// The audio video receive routine should be per streaming session
if (pSampleConfiguration->receiveAudioVideoSource != NULL) {
THREAD_CREATE(&pSampleStreamingSession->receiveAudioVideoSenderTid, pSampleConfiguration->receiveAudioVideoSource,
(PVOID) pSampleStreamingSession);
}
CleanUp:

if (pAnswerSessionDescriptionInit != NULL) {
Expand Down
224 changes: 224 additions & 0 deletions samples/GstAudioVideoReceiver.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,224 @@
#include "Samples.h"
#include <gst/gst.h>
#include <gst/app/app.h>
#include <gst/app/gstappsink.h>

// This function is a callback for the transceiver for every single video frame it receives.
// It copies the frame payload into a newly allocated GstBuffer and pushes it to the
// `appsrcVideo` element of the GStreamer pipeline created in `receiveGstreamerAudioVideo`.
// Any logic to modify / discard the frames would go here.
//
// customData: the appsrc GstElement* registered for this transceiver via transceiverOnFrame
// pFrame:     the received frame (payload pointer, size, presentation timestamp)
VOID onGstVideoFrameReady(UINT64 customData, PFrame pFrame)
{
    STATUS retStatus = STATUS_SUCCESS; // written by the CHK_ERR macros on failure (project macro convention)
    GstFlowReturn ret;
    GstBuffer* buffer;
    GstElement* appsrcVideo = (GstElement*) customData;

    CHK_ERR(appsrcVideo != NULL, STATUS_NULL_ARG, "appsrcVideo is null");
    CHK_ERR(pFrame != NULL, STATUS_NULL_ARG, "Video frame is null");

    buffer = gst_buffer_new_allocate(NULL, pFrame->size, NULL);
    CHK_ERR(buffer != NULL, STATUS_NULL_ARG, "Buffer allocation failed");

    DLOGV("Frame size: %d, presentationTs: %llu", pFrame->size, pFrame->presentationTs);
    // Stamp the buffer with the frame's presentation time; the duration assumes a
    // constant frame rate of DEFAULT_FPS_VALUE — adjust if the source differs.
    GST_BUFFER_PTS(buffer) = pFrame->presentationTs;
    GST_BUFFER_DURATION(buffer) = gst_util_uint64_scale(1, GST_SECOND, DEFAULT_FPS_VALUE);
    if (gst_buffer_fill(buffer, 0, pFrame->frameData, pFrame->size) != pFrame->size) {
        // Partial copy — drop the buffer rather than push a truncated frame downstream
        DLOGE("Buffer fill did not complete correctly");
        gst_buffer_unref(buffer);
        return;
    }
    // The "push-buffer" action signal does not take ownership of the buffer,
    // so the local reference is released below regardless of the flow return.
    g_signal_emit_by_name(appsrcVideo, "push-buffer", buffer, &ret);
    if (ret != GST_FLOW_OK) {
        DLOGE("Error pushing buffer: %s", gst_flow_get_name(ret));
    }
    gst_buffer_unref(buffer);

CleanUp:
    return;
}

// This function is a callback for the transceiver for every single audio frame it receives.
// It copies the frame payload into a newly allocated GstBuffer and pushes it to the
// `appsrcAudio` element of the GStreamer pipeline created in `receiveGstreamerAudioVideo`.
// Any logic to modify / discard the frames would go here.
//
// customData: the appsrc GstElement* registered for this transceiver via transceiverOnFrame
// pFrame:     the received frame (payload pointer, size, presentation timestamp)
VOID onGstAudioFrameReady(UINT64 customData, PFrame pFrame)
{
    STATUS retStatus = STATUS_SUCCESS; // written by the CHK_ERR macros on failure (project macro convention)
    GstFlowReturn ret;
    GstBuffer* buffer;
    GstElement* appsrcAudio = (GstElement*) customData;

    CHK_ERR(appsrcAudio != NULL, STATUS_NULL_ARG, "appsrcAudio is null");
    CHK_ERR(pFrame != NULL, STATUS_NULL_ARG, "Audio frame is null");

    buffer = gst_buffer_new_allocate(NULL, pFrame->size, NULL);
    CHK_ERR(buffer != NULL, STATUS_NULL_ARG, "Buffer allocation failed");

    DLOGV("Audio Frame size: %d, presentationTs: %llu", pFrame->size, pFrame->presentationTs);
    GST_BUFFER_PTS(buffer) = pFrame->presentationTs;

    // Duration is derived as size / byte-rate, assuming the default Opus
    // sample-rate/channels/bits-per-sample. Recalculate the byte-rate if not
    // using the default values.
    GST_BUFFER_DURATION(buffer) = gst_util_uint64_scale(pFrame->size, GST_SECOND, DEFAULT_AUDIO_OPUS_BYTE_RATE);
    if (gst_buffer_fill(buffer, 0, pFrame->frameData, pFrame->size) != pFrame->size) {
        // Partial copy — drop the buffer rather than push a truncated frame downstream
        DLOGE("Buffer fill did not complete correctly");
        gst_buffer_unref(buffer);
        return;
    }
    // The "push-buffer" action signal does not take ownership of the buffer,
    // so the local reference is released below regardless of the flow return.
    g_signal_emit_by_name(appsrcAudio, "push-buffer", buffer, &ret);
    if (ret != GST_FLOW_OK) {
        DLOGE("Error pushing buffer: %s", gst_flow_get_name(ret));
    }
    gst_buffer_unref(buffer);

CleanUp:
    return;
}

// Streaming-session shutdown callback: posts an end-of-stream event on the
// GStreamer pipeline so the blocking bus wait in `receiveGstreamerAudioVideo`
// returns and the application can exit.
VOID onSampleStreamingSessionShutdown(UINT64 customData, PSampleStreamingSession pSampleStreamingSession)
{
    GstElement* receivePipeline;

    // The session pointer is unused here; the pipeline arrives through customData.
    (void) (pSampleStreamingSession);

    receivePipeline = (GstElement*) customData;
    gst_element_send_event(receivePipeline, gst_event_new_eos());
}

// Thread routine (one per streaming session) that receives the session's audio
// and video frames and feeds them into a GStreamer pipeline (filesink-based by
// default). Registers per-frame transceiver callbacks, then blocks until the
// pipeline posts an error or EOS (EOS is triggered by the session shutdown
// callback).
//
// args: PSampleStreamingSession for the session whose media is received.
// Returns the final STATUS value cast to PVOID.
PVOID receiveGstreamerAudioVideo(PVOID args)
{
    STATUS retStatus = STATUS_SUCCESS;
    GstElement *pipeline = NULL, *appsrcAudio = NULL, *appsrcVideo = NULL;
    GstBus* bus = NULL;
    GstMessage* msg = NULL;
    GError* error = NULL;
    // NULL-initialized so the cleanup path can tell whether caps were ever
    // created (the original left these uninitialized and could pass garbage
    // pointers to g_object_set on unsupported-codec paths).
    GstCaps *audiocaps = NULL, *videocaps = NULL;
    PSampleStreamingSession pSampleStreamingSession = (PSampleStreamingSession) args;
    PSampleConfiguration pSampleConfiguration = NULL;
    PCHAR roleType = "Viewer";
    gchar *videoDescription = "", *audioDescription = "", *audioVideoDescription = NULL;

    // Validate the argument BEFORE dereferencing it (the original read
    // pSampleStreamingSession->pSampleConfiguration ahead of its NULL check).
    CHK_ERR(pSampleStreamingSession != NULL, STATUS_NULL_ARG, "[KVS %s] Sample streaming session is NULL", roleType);
    pSampleConfiguration = pSampleStreamingSession->pSampleConfiguration;
    CHK_ERR(pSampleConfiguration != NULL, STATUS_NULL_ARG, "[KVS %s] Sample configuration is NULL", roleType);

    if (pSampleConfiguration->channelInfo.channelRoleType == SIGNALING_CHANNEL_ROLE_TYPE_MASTER) {
        roleType = "Master";
    }

    // NOTE: the original format string contained "%s" with no matching argument
    CHK_ERR(gst_init_check(NULL, NULL, &error), STATUS_INTERNAL_ERROR, "[KVS %s] GStreamer initialization failed", roleType);

    // It is advised to modify the pipeline and the caps as per the source of the media. Customers can also modify this pipeline to
    // use any other sinks instead of `filesink` like `autovideosink` and `autoaudiosink`. The existing pipelines are not complex enough to
    // change caps and properties dynamically, more complex logic may be needed to support the same.
    switch (pSampleStreamingSession->pVideoRtcRtpTransceiver->receiver.track.codec) {
        case RTC_CODEC_H264_PROFILE_42E01F_LEVEL_ASYMMETRY_ALLOWED_PACKETIZATION_MODE:
            videoDescription = "appsrc name=appsrc-video ! queue ! h264parse ! queue ! matroskamux name=mux ! queue ! filesink location=video.mkv";
            videocaps = gst_caps_new_simple("video/x-h264", "stream-format", G_TYPE_STRING, "byte-stream", "alignment", G_TYPE_STRING, "au",
                                            "profile", G_TYPE_STRING, "baseline", "height", G_TYPE_INT, DEFAULT_VIDEO_HEIGHT_PIXELS, "width",
                                            G_TYPE_INT, DEFAULT_VIDEO_WIDTH_PIXELS, NULL);
            break;

        case RTC_CODEC_H265:
            videoDescription = "appsrc name=appsrc-video ! queue ! h265parse ! queue ! matroskamux name=mux ! queue ! filesink location=video.mkv ";
            videocaps = gst_caps_new_simple("video/x-h265", "stream-format", G_TYPE_STRING, "byte-stream", "alignment", G_TYPE_STRING, "au",
                                            "profile", G_TYPE_STRING, "main", "height", G_TYPE_INT, DEFAULT_VIDEO_HEIGHT_PIXELS, "width", G_TYPE_INT,
                                            DEFAULT_VIDEO_WIDTH_PIXELS, NULL);
            break;

        // TODO: add a similar pipeline for VP8

        default:
            // Fail fast: the original fell through with an empty pipeline
            // description and uninitialized videocaps, which gst_parse_launch /
            // g_object_set cannot handle.
            CHK_ERR(0, STATUS_INTERNAL_ERROR, "[KVS %s] Unsupported video codec for the GStreamer receive pipeline", roleType);
            break;
    }

    if (pSampleConfiguration->mediaType == SAMPLE_STREAMING_AUDIO_VIDEO) {
        switch (pSampleStreamingSession->pAudioRtcRtpTransceiver->receiver.track.codec) {
            case RTC_CODEC_OPUS:
                audioDescription = "appsrc name=appsrc-audio ! queue ! opusparse ! queue ! mux.";
                audiocaps = gst_caps_new_simple("audio/x-opus", "rate", G_TYPE_INT, DEFAULT_AUDIO_OPUS_SAMPLE_RATE_HZ, "channel-mapping-family",
                                                G_TYPE_INT, 1, NULL);
                break;

            case RTC_CODEC_AAC:
                audioDescription = "appsrc name=appsrc-audio ! queue ! aacparse ! mux.";
                audiocaps = gst_caps_new_simple("audio/mpeg", "mpegversion", G_TYPE_INT, 4, "rate", G_TYPE_INT, DEFAULT_AUDIO_AAC_SAMPLE_RATE_HZ,
                                                "channels", G_TYPE_INT, DEFAULT_AUDIO_AAC_CHANNELS, "stream-format", G_TYPE_STRING, "adts",
                                                "base-profile", G_TYPE_STRING, "lc", NULL);
                break;

            // TODO: make sure this pipeline works. Figure out the caps for this
            // (audiocaps intentionally stays NULL for this branch; see the
            // guarded g_object_set below).
            case RTC_CODEC_MULAW:
            case RTC_CODEC_ALAW:
                audioDescription = "appsrc name=appsrc-audio ! rawaudioparse ! decodebin ! autoaudiosink";
                break;

            default:
                CHK_ERR(0, STATUS_INTERNAL_ERROR, "[KVS %s] Unsupported audio codec for the GStreamer receive pipeline", roleType);
                break;
        }
    }

    audioVideoDescription = g_strjoin(" ", videoDescription, audioDescription, NULL);

    pipeline = gst_parse_launch(audioVideoDescription, &error);
    CHK_ERR(pipeline != NULL, STATUS_INTERNAL_ERROR, "[KVS %s] Pipeline is NULL", roleType);

    appsrcVideo = gst_bin_get_by_name(GST_BIN(pipeline), "appsrc-video");
    CHK_ERR(appsrcVideo != NULL, STATUS_INTERNAL_ERROR, "[KVS %s] Cannot find appsrc video", roleType);
    CHK_STATUS(transceiverOnFrame(pSampleStreamingSession->pVideoRtcRtpTransceiver, (UINT64) appsrcVideo, onGstVideoFrameReady));
    g_object_set(G_OBJECT(appsrcVideo), "caps", videocaps, NULL);
    gst_caps_unref(videocaps);
    videocaps = NULL; // ownership handed over; prevents a double-unref in CleanUp

    if (pSampleConfiguration->mediaType == SAMPLE_STREAMING_AUDIO_VIDEO) {
        appsrcAudio = gst_bin_get_by_name(GST_BIN(pipeline), "appsrc-audio");
        CHK_ERR(appsrcAudio != NULL, STATUS_INTERNAL_ERROR, "[KVS %s] Cannot find appsrc audio", roleType);
        CHK_STATUS(transceiverOnFrame(pSampleStreamingSession->pAudioRtcRtpTransceiver, (UINT64) appsrcAudio, onGstAudioFrameReady));
        // MULAW/ALAW branch builds its pipeline without explicit caps
        if (audiocaps != NULL) {
            g_object_set(G_OBJECT(appsrcAudio), "caps", audiocaps, NULL);
            gst_caps_unref(audiocaps);
            audiocaps = NULL;
        }
    }

    // Let the session-shutdown hook send EOS on the pipeline so the wait below unblocks
    CHK_STATUS(streamingSessionOnShutdown(pSampleStreamingSession, (UINT64) pipeline, onSampleStreamingSessionShutdown));

    gst_element_set_state(pipeline, GST_STATE_PLAYING);

    /* block until error or EOS */
    bus = gst_element_get_bus(pipeline);
    CHK_ERR(bus != NULL, STATUS_INTERNAL_ERROR, "[KVS %s] Bus is NULL", roleType);
    msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE, GST_MESSAGE_ERROR | GST_MESSAGE_EOS);

    if (msg != NULL) {
        switch (GST_MESSAGE_TYPE(msg)) {
            case GST_MESSAGE_ERROR:
                // g_clear_error (not g_error_free) so `error` is NULLed and the
                // CleanUp block below cannot touch a dangling pointer — the
                // original freed it and then dereferenced it in CleanUp.
                gst_message_parse_error(msg, &error, NULL);
                DLOGE("Error received: %s", error->message);
                g_clear_error(&error);
                break;
            case GST_MESSAGE_EOS:
                DLOGI("End of stream");
                break;
            default:
                break;
        }
        gst_message_unref(msg);
    }

CleanUp:
    // All teardown lives after the label so CHK failure paths release their
    // resources too (the original leaked pipeline/bus/appsrc/caps on errors).
    if (error != NULL) {
        DLOGE("[KVS %s] %s", roleType, error->message);
        g_clear_error(&error);
    }
    if (audioVideoDescription != NULL) {
        g_free(audioVideoDescription);
    }
    if (videocaps != NULL) {
        gst_caps_unref(videocaps);
    }
    if (audiocaps != NULL) {
        gst_caps_unref(audiocaps);
    }
    if (appsrcAudio != NULL) {
        gst_object_unref(appsrcAudio);
    }
    if (appsrcVideo != NULL) {
        gst_object_unref(appsrcVideo);
    }
    if (bus != NULL) {
        gst_object_unref(bus);
    }
    if (pipeline != NULL) {
        gst_element_set_state(pipeline, GST_STATE_NULL);
        gst_object_unref(pipeline);
    }

    gst_deinit();

    return (PVOID) (ULONG_PTR) retStatus;
}
12 changes: 12 additions & 0 deletions samples/Samples.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,23 @@ extern "C" {
#define NUMBER_OF_H264_FRAME_FILES 1500
#define NUMBER_OF_OPUS_FRAME_FILES 618
#define DEFAULT_FPS_VALUE 25
#define DEFAULT_VIDEO_HEIGHT_PIXELS 720
#define DEFAULT_VIDEO_WIDTH_PIXELS 1280
#define DEFAULT_AUDIO_OPUS_CHANNELS 2
#define DEFAULT_AUDIO_AAC_CHANNELS 2
#define DEFAULT_AUDIO_OPUS_SAMPLE_RATE_HZ 48000
#define DEFAULT_AUDIO_AAC_SAMPLE_RATE_HZ 16000
#define DEFAULT_AUDIO_OPUS_BITS_PER_SAMPLE 16
#define DEFAULT_AUDIO_AAC_BITS_PER_SAMPLE 16
#define DEFAULT_MAX_CONCURRENT_STREAMING_SESSION 10

#define SAMPLE_MASTER_CLIENT_ID "ProducerMaster"
#define SAMPLE_VIEWER_CLIENT_ID "ConsumerViewer"
#define SAMPLE_CHANNEL_NAME (PCHAR) "ScaryTestChannel"

#define DEFAULT_AUDIO_OPUS_BYTE_RATE (DEFAULT_AUDIO_OPUS_SAMPLE_RATE_HZ * DEFAULT_AUDIO_OPUS_CHANNELS * DEFAULT_AUDIO_OPUS_BITS_PER_SAMPLE) / 8
#define DEFAULT_AUDIO_AAC_BYTE_RATE (DEFAULT_AUDIO_AAC_SAMPLE_RATE_HZ * DEFAULT_AUDIO_AAC_CHANNELS * DEFAULT_AUDIO_AAC_BITS_PER_SAMPLE) / 8

#define SAMPLE_AUDIO_FRAME_DURATION (20 * HUNDREDS_OF_NANOS_IN_A_MILLISECOND)
#define SAMPLE_STATS_DURATION (60 * HUNDREDS_OF_NANOS_IN_A_SECOND)
#define SAMPLE_VIDEO_FRAME_DURATION (HUNDREDS_OF_NANOS_IN_A_SECOND / DEFAULT_FPS_VALUE)
Expand Down Expand Up @@ -228,6 +239,7 @@ typedef struct {

VOID sigintHandler(INT32);
STATUS readFrameFromDisk(PBYTE, PUINT32, PCHAR);
PVOID receiveGstreamerAudioVideo(PVOID);
PVOID sendVideoPackets(PVOID);
PVOID sendAudioPackets(PVOID);
PVOID sendGstreamerAudioVideo(PVOID);
Expand Down
Binary file added samples/aacSampleFrames/sample-001.aac
Binary file not shown.
1 change: 1 addition & 0 deletions samples/aacSampleFrames/sample-002.aac
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
��L�_�!LH����������YbDHsf ��"C�0I6X�"قH9j狍��F���@�v���3������h���P٫�`J���� �F�̽k_���P�t����+�,x=�?F笇�C�!���yd<(N��_Q+m_�ѮՁ*�5��bY�]/-DK
Loading

0 comments on commit 08a39bc

Please sign in to comment.