Mirror of https://github.com/mollyim/webrtc.git (synced 2025-05-13 05:40:42 +01:00)
Remove multiplex codec.

The feature isn't in use by Google and has proven to contain security
issues. It's time to remove it.

Bug: b/324864439
Change-Id: I80344eb2f2060469d2d69a54dc4519fdd02ab4ea
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/340324
Reviewed-by: Stefan Holmer <stefan@webrtc.org>
Commit-Queue: Markus Handell <handellm@webrtc.org>
Reviewed-by: Björn Terelius <terelius@webrtc.org>
Cr-Commit-Position: refs/heads/main@{#41808}

commit 97df932ecc (parent db2f52ba88)
54 changed files with 5 additions and 3922 deletions
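For embedders tracking this branch, the visible API change is that kVideoCodecMultiplex disappears from the VideoCodecType enum, so any exhaustive switch over the enum stops compiling until the stale case is dropped. A minimal sketch of the post-removal shape; the helper function and its policy are hypothetical, only the enum values and header path come from upstream:

#include "api/video/video_codec_type.h"

// Hypothetical embedder helper. After this commit a switch over
// webrtc::VideoCodecType must not mention kVideoCodecMultiplex.
bool MayUsePlatformDecoder(webrtc::VideoCodecType type) {
  switch (type) {
    case webrtc::kVideoCodecVP8:
    case webrtc::kVideoCodecVP9:
    case webrtc::kVideoCodecAV1:
    case webrtc::kVideoCodecH264:
    case webrtc::kVideoCodecH265:
      return true;  // Real codecs may be backed by hardware decoders.
    case webrtc::kVideoCodecGeneric:
      return false;
  }
  // No default branch: the compiler now flags any leftover multiplex case.
  return false;
}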
@@ -21,7 +21,6 @@ enum VideoCodecType {
   kVideoCodecVP9,
   kVideoCodecAV1,
   kVideoCodecH264,
-  kVideoCodecMultiplex,
   kVideoCodecH265,
 };
 
@@ -28,7 +28,6 @@ constexpr char kPayloadNameAv1[] = "AV1";
 constexpr char kPayloadNameAv1x[] = "AV1X";
 constexpr char kPayloadNameH264[] = "H264";
 constexpr char kPayloadNameGeneric[] = "Generic";
-constexpr char kPayloadNameMultiplex[] = "Multiplex";
 constexpr char kPayloadNameH265[] = "H265";
 }  // namespace
 
@@ -153,8 +152,6 @@ const char* CodecTypeToPayloadString(VideoCodecType type) {
       return kPayloadNameAv1;
     case kVideoCodecH264:
       return kPayloadNameH264;
-    case kVideoCodecMultiplex:
-      return kPayloadNameMultiplex;
     case kVideoCodecGeneric:
       return kPayloadNameGeneric;
     case kVideoCodecH265:
@@ -173,8 +170,6 @@ VideoCodecType PayloadStringToCodecType(const std::string& name) {
     return kVideoCodecAV1;
   if (absl::EqualsIgnoreCase(name, kPayloadNameH264))
     return kVideoCodecH264;
-  if (absl::EqualsIgnoreCase(name, kPayloadNameMultiplex))
-    return kVideoCodecMultiplex;
  if (absl::EqualsIgnoreCase(name, kPayloadNameH265))
     return kVideoCodecH265;
   return kVideoCodecGeneric;
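A side effect of the mapping change above: the removed payload name no longer round-trips, because the lookup now falls through to the final `return kVideoCodecGeneric;`. A hedged sketch of the new behaviour, assuming the two helpers are still declared in api/video_codecs/video_codec.h (their upstream home at this revision):

#include <cassert>
#include <string>

#include "api/video_codecs/video_codec.h"

int main() {
  // A stale "Multiplex" payload name now silently degrades to generic
  // instead of selecting the removed codec.
  assert(webrtc::PayloadStringToCodecType("Multiplex") ==
         webrtc::kVideoCodecGeneric);
  // Surviving names still round-trip through both helpers.
  assert(webrtc::PayloadStringToCodecType(
             webrtc::CodecTypeToPayloadString(webrtc::kVideoCodecH265)) ==
         webrtc::kVideoCodecH265);
  return 0;
}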
@@ -171,10 +171,6 @@ void VideoDecoderSoftwareFallbackWrapper::UpdateFallbackDecoderHistograms() {
       RTC_HISTOGRAM_COUNTS_100000(kFallbackHistogramsUmaPrefix + "H264",
                                   hw_decoded_frames_since_last_fallback_);
       break;
-    case kVideoCodecMultiplex:
-      RTC_HISTOGRAM_COUNTS_100000(kFallbackHistogramsUmaPrefix + "Multiplex",
-                                  hw_decoded_frames_since_last_fallback_);
-      break;
     case kVideoCodecH265:
       RTC_HISTOGRAM_COUNTS_100000(kFallbackHistogramsUmaPrefix + "H265",
                                   hw_decoded_frames_since_last_fallback_);
@@ -96,7 +96,6 @@ void PopulateRtpWithCodecSpecifics(const CodecSpecificInfo& info,
           info.codecSpecific.H264.packetization_mode;
       return;
     }
-    case kVideoCodecMultiplex:
     case kVideoCodecGeneric:
       rtp->codec = kVideoCodecGeneric;
       return;

@@ -340,8 +339,6 @@ void RtpPayloadParams::SetGeneric(const CodecSpecificInfo* codec_specific_info,
                    is_keyframe, rtp_video_header);
       }
       return;
-    case VideoCodecType::kVideoCodecMultiplex:
-      return;
     case VideoCodecType::kVideoCodecH265:
       // TODO(bugs.webrtc.org/13485): Implement H265 to generic descriptor.
       return;

@@ -407,7 +404,6 @@ absl::optional<FrameDependencyStructure> RtpPayloadParams::GenericStructure(
     case VideoCodecType::kVideoCodecAV1:
     case VideoCodecType::kVideoCodecH264:
     case VideoCodecType::kVideoCodecH265:
-    case VideoCodecType::kVideoCodecMultiplex:
       return absl::nullopt;
   }
   RTC_DCHECK_NOTREACHED() << "Unsupported codec.";
@@ -60,10 +60,6 @@ group("examples") {
       deps += [ ":peerconnection_client" ]
     }
   }
-
-  if (is_android || is_win) {
-    deps += [ ":webrtc_unity_plugin" ]
-  }
 }

 rtc_library("read_auth_file") {
@@ -838,94 +834,7 @@ if (is_linux || is_chromeos || is_win) {
   }
 }

-if (is_win || is_android) {
-  rtc_shared_library("webrtc_unity_plugin") {
-    testonly = true
-    sources = [
-      "unityplugin/simple_peer_connection.cc",
-      "unityplugin/simple_peer_connection.h",
-      "unityplugin/unity_plugin_apis.cc",
-      "unityplugin/unity_plugin_apis.h",
-      "unityplugin/video_observer.cc",
-      "unityplugin/video_observer.h",
-    ]
-
-    if (is_android) {
-      sources += [
-        "unityplugin/class_reference_holder.cc",
-        "unityplugin/class_reference_holder.h",
-        "unityplugin/jni_onload.cc",
-      ]
-      suppressed_configs += [ "//build/config/android:hide_all_but_jni_onload" ]
-    }
-
-    if (is_win) {
-      configs += [ "//build/config/win:windowed" ]
-    }
-    deps = [
-      "../api:create_peerconnection_factory",
-      "../api:libjingle_peerconnection_api",
-      "../api:media_stream_interface",
-      "../api/audio_codecs:builtin_audio_decoder_factory",
-      "../api/audio_codecs:builtin_audio_encoder_factory",
-      "../api/video:video_frame",
-      "../api/video:video_rtp_headers",
-      "../media:rtc_audio_video",
-      "../media:rtc_internal_video_codecs",
-      "../media:rtc_media",
-      "../media:rtc_media_base",
-      "../modules/audio_device",
-      "../modules/audio_processing",
-      "../modules/audio_processing:api",
-      "../modules/video_capture:video_capture_module",
-      "../pc:libjingle_peerconnection",
-      "../pc:video_track_source",
-      "../rtc_base:ssl",
-      "../test:platform_video_capturer",
-      "../test:video_test_common",
-      "//third_party/abseil-cpp/absl/memory",
-    ]
-    if (is_android) {
-      deps += [
-        "../modules/utility",
-        "../sdk/android:libjingle_peerconnection_jni",
-        "../sdk/android:native_api_jni",
-      ]
-    }
-  }
-}
-
 if (is_android) {
-  rtc_android_library("webrtc_unity_java") {
-    sources = [ "unityplugin/java/src/org/webrtc/UnityUtility.java" ]
-    deps = [
-      "../rtc_base:base_java",
-      "../sdk/android:camera_java",
-      "../sdk/android:libjingle_peerconnection_java",
-      "../sdk/android:peerconnection_java",
-      "../sdk/android:video_api_java",
-      "../sdk/android:video_java",
-      "//third_party/androidx:androidx_annotation_annotation_java",
-    ]
-  }
-
-  # TODO(https://bugs.webrtc.org/15095) - Fix or remove this target.
-  #dist_jar("libwebrtc_unity") {
-  #  _target_dir_name = get_label_info(":$target_name", "dir")
-  #  output = "${root_out_dir}/lib.java${_target_dir_name}/${target_name}.jar"
-  #  direct_deps_only = false
-  #  use_interface_jars = false
-  #  use_unprocessed_jars = false
-  #  requires_android = true
-  #  deps = [
-  #    ":webrtc_unity_java",
-  #    "../rtc_base:base_java",
-  #    "../sdk/android:libjingle_peerconnection_java",
-  #    "../sdk/android:libjingle_peerconnection_metrics_default_java",
-  #    "//third_party/androidx:androidx_annotation_annotation_java",
-  #  ]
-  #}
-
   robolectric_binary("android_examples_junit_tests") {
     sources = [
       "androidjunit/src/org/appspot/apprtc/BluetoothManagerTest.java",
@@ -1,33 +0,0 @@
-Instruction of running webrtc_unity_plugin on Android Unity
-
-1. On Linux machine, compile target webrtc_unity_plugin.
-   Checkout WebRTC codebase: fetch --nohooks webrtc_android
-   If you already have a checkout for linux, add target_os="android" into .gclient file.
-   Run gclient sync
-   Run gn args out/Android, and again set target_os="android" in the args.gn
-   Run ninja -C out/Android webrtc_unity_plugin
-
-2. On Linux machine, build target libwebrtc_unity under webrtc checkout. This is the java code for webrtc to work on Android.
-
-3. Copy libwebrtc_unity.jar and libwebrtc_unity_plugin.so into Unity project folder, under Assets/Plugins/Android folder.
-
-4. Rename libwebrtc_unity_plugin.so to libjingle_peerconnection_so.so. This is hacky, and the purpose is to let the java code in libwebrtc_unity.jar to find their JNI implementations. Simultaneously, in your C# wrapper script for the native plugin libjingle_peerconnection_so.so, the dll_path should be set to "jingle_peerconnection_so".
-
-5. In the Unity Main Scene's Start method, write the following code to initialize the Java environment for webrtc (otherwise, webrtc will not be able to access audio device or camera from C++ code):
-
-#if UNITY_ANDROID
-        AndroidJavaClass playerClass = new AndroidJavaClass("com.unity3d.player.UnityPlayer");
-        AndroidJavaObject activity = playerClass.GetStatic<AndroidJavaObject>("currentActivity");
-        AndroidJavaClass utilityClass = new AndroidJavaClass("org.webrtc.UnityUtility");
-        utilityClass.CallStatic("InitializePeerConncectionFactory", new object[1] { activity });
-#endif
-
-6. Compile the unity project into an APK, and decompile the apk using apktool that you can download from https://ibotpeaches.github.io/Apktool/
-   Run apktool d apkname.apk.
-   Then copy the AndroidManifest.xml in the decompiled folder to the Assets/Plugins/Android folder, and add two lines:
-   <uses-permission android:name="android.permission.RECORD_AUDIO" />
-   <uses-permission android:name="android.permission.CAMERA" />
-
-   The purpose of using apktool is to get a well-written android manifest xml file. If you know how to write manifest file from scratch, you can skip using apktool.
-
-7. Compile the unity project into an APK again and deploy it to an android device.
@@ -1,4 +0,0 @@
-include_rules = [
-  "+modules/utility",
-  "+sdk",
-]
@@ -1,309 +0,0 @@
-This directory contains an example Unity native plugin for Windows OS and Android.
-
-The APIs use Platform Invoke (P/Invoke) technology as required by Unity native plugin.
-This plugin dll can also be used by Windows C# applications other than Unity.
-
-For detailed build instruction on Android, see ANDROID_INSTRUCTION
-
-An example of wrapping native plugin into a C# managed class in Unity is given as following:
-
-using System;
-using System.Collections.Generic;
-using System.Runtime.InteropServices;
-
-namespace SimplePeerConnectionM {
-  // A class for ice candidate.
-  public class IceCandidate {
-    public IceCandidate(string candidate, int sdpMlineIndex, string sdpMid) {
-      mCandidate = candidate;
-      mSdpMlineIndex = sdpMlineIndex;
-      mSdpMid = sdpMid;
-    }
-    string mCandidate;
-    int mSdpMlineIndex;
-    string mSdpMid;
-
-    public string Candidate {
-      get { return mCandidate; }
-      set { mCandidate = value; }
-    }
-
-    public int SdpMlineIndex {
-      get { return mSdpMlineIndex; }
-      set { mSdpMlineIndex = value; }
-    }
-
-    public string SdpMid {
-      get { return mSdpMid; }
-      set { mSdpMid = value; }
-    }
-  }
-
-  // A managed wrapper up class for the native c style peer connection APIs.
-  public class PeerConnectionM {
-    private const string dllPath = "webrtc_unity_plugin";
-
-    //create a peerconnection with turn servers
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern int CreatePeerConnection(string[] turnUrls, int noOfUrls,
-        string username, string credential);
-
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool ClosePeerConnection(int peerConnectionId);
-
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool AddStream(int peerConnectionId, bool audioOnly);
-
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool AddDataChannel(int peerConnectionId);
-
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool CreateOffer(int peerConnectionId);
-
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool CreateAnswer(int peerConnectionId);
-
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool SendDataViaDataChannel(int peerConnectionId, string data);
-
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool SetAudioControl(int peerConnectionId, bool isMute, bool isRecord);
-
-    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
-    private delegate void LocalDataChannelReadyInternalDelegate();
-    public delegate void LocalDataChannelReadyDelegate(int id);
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool RegisterOnLocalDataChannelReady(
-        int peerConnectionId, LocalDataChannelReadyInternalDelegate callback);
-
-    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
-    private delegate void DataFromDataChannelReadyInternalDelegate(string s);
-    public delegate void DataFromDataChannelReadyDelegate(int id, string s);
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool RegisterOnDataFromDataChannelReady(
-        int peerConnectionId, DataFromDataChannelReadyInternalDelegate callback);
-
-    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
-    private delegate void FailureMessageInternalDelegate(string msg);
-    public delegate void FailureMessageDelegate(int id, string msg);
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool RegisterOnFailure(int peerConnectionId,
-        FailureMessageInternalDelegate callback);
-
-    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
-    private delegate void AudioBusReadyInternalDelegate(IntPtr data, int bitsPerSample,
-        int sampleRate, int numberOfChannels, int numberOfFrames);
-    public delegate void AudioBusReadyDelegate(int id, IntPtr data, int bitsPerSample,
-        int sampleRate, int numberOfChannels, int numberOfFrames);
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool RegisterOnAudioBusReady(int peerConnectionId,
-        AudioBusReadyInternalDelegate callback);
-
-    // Video callbacks.
-    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
-    private delegate void I420FrameReadyInternalDelegate(
-        IntPtr dataY, IntPtr dataU, IntPtr dataV,
-        int strideY, int strideU, int strideV,
-        uint width, uint height);
-    public delegate void I420FrameReadyDelegate(int id,
-        IntPtr dataY, IntPtr dataU, IntPtr dataV,
-        int strideY, int strideU, int strideV,
-        uint width, uint height);
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool RegisterOnLocalI420FrameReady(int peerConnectionId,
-        I420FrameReadyInternalDelegate callback);
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool RegisterOnRemoteI420FrameReady(int peerConnectionId,
-        I420FrameReadyInternalDelegate callback);
-
-    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
-    private delegate void LocalSdpReadytoSendInternalDelegate(string type, string sdp);
-    public delegate void LocalSdpReadytoSendDelegate(int id, string type, string sdp);
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool RegisterOnLocalSdpReadytoSend(int peerConnectionId,
-        LocalSdpReadytoSendInternalDelegate callback);
-
-    [UnmanagedFunctionPointer(CallingConvention.Cdecl)]
-    private delegate void IceCandidateReadytoSendInternalDelegate(
-        string candidate, int sdpMlineIndex, string sdpMid);
-    public delegate void IceCandidateReadytoSendDelegate(
-        int id, string candidate, int sdpMlineIndex, string sdpMid);
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool RegisterOnIceCandidateReadytoSend(
-        int peerConnectionId, IceCandidateReadytoSendInternalDelegate callback);
-
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool SetRemoteDescription(int peerConnectionId, string type, string sdp);
-
-    [DllImport(dllPath, CallingConvention = CallingConvention.Cdecl)]
-    private static extern bool AddIceCandidate(int peerConnectionId, string sdp,
-        int sdpMlineindex, string sdpMid);
-
-    public PeerConnectionM(List<string> turnUrls, string username, string credential) {
-      string[] urls = turnUrls != null ? turnUrls.ToArray() : null;
-      int length = turnUrls != null ? turnUrls.Count : 0;
-      mPeerConnectionId = CreatePeerConnection(urls, length, username, credential);
-      RegisterCallbacks();
-    }
-
-    public void ClosePeerConnection() {
-      ClosePeerConnection(mPeerConnectionId);
-      mPeerConnectionId = -1;
-    }
-
-    // Return -1 if Peerconnection is not available.
-    public int GetUniqueId() {
-      return mPeerConnectionId;
-    }
-
-    public void AddStream(bool audioOnly) {
-      AddStream(mPeerConnectionId, audioOnly);
-    }
-
-    public void AddDataChannel() {
-      AddDataChannel(mPeerConnectionId);
-    }
-
-    public void CreateOffer() {
-      CreateOffer(mPeerConnectionId);
-    }
-
-    public void CreateAnswer() {
-      CreateAnswer(mPeerConnectionId);
-    }
-
-    public void SendDataViaDataChannel(string data) {
-      SendDataViaDataChannel(mPeerConnectionId, data);
-    }
-
-    public void SetAudioControl(bool isMute, bool isRecord) {
-      SetAudioControl(mPeerConnectionId, isMute, isRecord);
-    }
-
-    public void SetRemoteDescription(string type, string sdp) {
-      SetRemoteDescription(mPeerConnectionId, type, sdp);
-    }
-
-    public void AddIceCandidate(string candidate, int sdpMlineindex, string sdpMid) {
-      AddIceCandidate(mPeerConnectionId, candidate, sdpMlineindex, sdpMid);
-    }
-
-    private void RegisterCallbacks() {
-      localDataChannelReadyDelegate = new LocalDataChannelReadyInternalDelegate(
-          RaiseLocalDataChannelReady);
-      RegisterOnLocalDataChannelReady(mPeerConnectionId, localDataChannelReadyDelegate);
-
-      dataFromDataChannelReadyDelegate = new DataFromDataChannelReadyInternalDelegate(
-          RaiseDataFromDataChannelReady);
-      RegisterOnDataFromDataChannelReady(mPeerConnectionId, dataFromDataChannelReadyDelegate);
-
-      failureMessageDelegate = new FailureMessageInternalDelegate(RaiseFailureMessage);
-      RegisterOnFailure(mPeerConnectionId, failureMessageDelegate);
-
-      audioBusReadyDelegate = new AudioBusReadyInternalDelegate(RaiseAudioBusReady);
-      RegisterOnAudioBusReady(mPeerConnectionId, audioBusReadyDelegate);
-
-      localI420FrameReadyDelegate = new I420FrameReadyInternalDelegate(
-          RaiseLocalVideoFrameReady);
-      RegisterOnLocalI420FrameReady(mPeerConnectionId, localI420FrameReadyDelegate);
-
-      remoteI420FrameReadyDelegate = new I420FrameReadyInternalDelegate(
-          RaiseRemoteVideoFrameReady);
-      RegisterOnRemoteI420FrameReady(mPeerConnectionId, remoteI420FrameReadyDelegate);
-
-      localSdpReadytoSendDelegate = new LocalSdpReadytoSendInternalDelegate(
-          RaiseLocalSdpReadytoSend);
-      RegisterOnLocalSdpReadytoSend(mPeerConnectionId, localSdpReadytoSendDelegate);
-
-      iceCandidateReadytoSendDelegate =
-          new IceCandidateReadytoSendInternalDelegate(RaiseIceCandidateReadytoSend);
-      RegisterOnIceCandidateReadytoSend(
-          mPeerConnectionId, iceCandidateReadytoSendDelegate);
-    }
-
-    private void RaiseLocalDataChannelReady() {
-      if (OnLocalDataChannelReady != null)
-        OnLocalDataChannelReady(mPeerConnectionId);
-    }
-
-    private void RaiseDataFromDataChannelReady(string data) {
-      if (OnDataFromDataChannelReady != null)
-        OnDataFromDataChannelReady(mPeerConnectionId, data);
-    }
-
-    private void RaiseFailureMessage(string msg) {
-      if (OnFailureMessage != null)
-        OnFailureMessage(mPeerConnectionId, msg);
-    }
-
-    private void RaiseAudioBusReady(IntPtr data, int bitsPerSample,
-        int sampleRate, int numberOfChannels, int numberOfFrames) {
-      if (OnAudioBusReady != null)
-        OnAudioBusReady(mPeerConnectionId, data, bitsPerSample, sampleRate,
-            numberOfChannels, numberOfFrames);
-    }
-
-    private void RaiseLocalVideoFrameReady(
-        IntPtr dataY, IntPtr dataU, IntPtr dataV,
-        int strideY, int strideU, int strideV,
-        uint width, uint height) {
-      if (OnLocalVideoFrameReady != null)
-        OnLocalVideoFrameReady(mPeerConnectionId, dataY, dataU, dataV, strideY, strideU, strideV,
-            width, height);
-    }
-
-    private void RaiseRemoteVideoFrameReady(
-        IntPtr dataY, IntPtr dataU, IntPtr dataV,
-        int strideY, int strideU, int strideV,
-        uint width, uint height) {
-      if (OnRemoteVideoFrameReady != null)
-        OnRemoteVideoFrameReady(mPeerConnectionId, dataY, dataU, dataV, strideY, strideU, strideV,
-            width, height);
-    }
-
-
-    private void RaiseLocalSdpReadytoSend(string type, string sdp) {
-      if (OnLocalSdpReadytoSend != null)
-        OnLocalSdpReadytoSend(mPeerConnectionId, type, sdp);
-    }
-
-    private void RaiseIceCandidateReadytoSend(string candidate, int sdpMlineIndex, string sdpMid) {
-      if (OnIceCandidateReadytoSend != null)
-        OnIceCandidateReadytoSend(mPeerConnectionId, candidate, sdpMlineIndex, sdpMid);
-    }
-
-    public void AddQueuedIceCandidate(List<IceCandidate> iceCandidateQueue) {
-      if (iceCandidateQueue != null) {
-        foreach (IceCandidate ic in iceCandidateQueue) {
-          AddIceCandidate(mPeerConnectionId, ic.Candidate, ic.SdpMlineIndex, ic.SdpMid);
-        }
-      }
-    }
-
-    private LocalDataChannelReadyInternalDelegate localDataChannelReadyDelegate = null;
-    public event LocalDataChannelReadyDelegate OnLocalDataChannelReady;
-
-    private DataFromDataChannelReadyInternalDelegate dataFromDataChannelReadyDelegate = null;
-    public event DataFromDataChannelReadyDelegate OnDataFromDataChannelReady;
-
-    private FailureMessageInternalDelegate failureMessageDelegate = null;
-    public event FailureMessageDelegate OnFailureMessage;
-
-    private AudioBusReadyInternalDelegate audioBusReadyDelegate = null;
-    public event AudioBusReadyDelegate OnAudioBusReady;
-
-    private I420FrameReadyInternalDelegate localI420FrameReadyDelegate = null;
-    public event I420FrameReadyDelegate OnLocalVideoFrameReady;
-
-    private I420FrameReadyInternalDelegate remoteI420FrameReadyDelegate = null;
-    public event I420FrameReadyDelegate OnRemoteVideoFrameReady;
-
-    private LocalSdpReadytoSendInternalDelegate localSdpReadytoSendDelegate = null;
-    public event LocalSdpReadytoSendDelegate OnLocalSdpReadytoSend;
-
-    private IceCandidateReadytoSendInternalDelegate iceCandidateReadytoSendDelegate = null;
-    public event IceCandidateReadytoSendDelegate OnIceCandidateReadytoSend;
-
-    private int mPeerConnectionId = -1;
-  }
-}
@@ -1,88 +0,0 @@
-/*
- *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-#include "examples/unityplugin/class_reference_holder.h"
-
-#include <utility>
-
-#include "sdk/android/src/jni/jni_helpers.h"
-
-namespace unity_plugin {
-
-// ClassReferenceHolder holds global reference to Java classes in app/webrtc.
-class ClassReferenceHolder {
- public:
-  explicit ClassReferenceHolder(JNIEnv* jni);
-  ~ClassReferenceHolder();
-
-  void FreeReferences(JNIEnv* jni);
-  jclass GetClass(const std::string& name);
-
-  void LoadClass(JNIEnv* jni, const std::string& name);
-
- private:
-  std::map<std::string, jclass> classes_;
-};
-
-// Allocated in LoadGlobalClassReferenceHolder(),
-// freed in FreeGlobalClassReferenceHolder().
-static ClassReferenceHolder* g_class_reference_holder = nullptr;
-
-void LoadGlobalClassReferenceHolder() {
-  RTC_CHECK(g_class_reference_holder == nullptr);
-  g_class_reference_holder = new ClassReferenceHolder(webrtc::jni::GetEnv());
-}
-
-void FreeGlobalClassReferenceHolder() {
-  g_class_reference_holder->FreeReferences(
-      webrtc::jni::AttachCurrentThreadIfNeeded());
-  delete g_class_reference_holder;
-  g_class_reference_holder = nullptr;
-}
-
-ClassReferenceHolder::ClassReferenceHolder(JNIEnv* jni) {
-  LoadClass(jni, "org/webrtc/UnityUtility");
-}
-
-ClassReferenceHolder::~ClassReferenceHolder() {
-  RTC_CHECK(classes_.empty()) << "Must call FreeReferences() before dtor!";
-}
-
-void ClassReferenceHolder::FreeReferences(JNIEnv* jni) {
-  for (std::map<std::string, jclass>::const_iterator it = classes_.begin();
-       it != classes_.end(); ++it) {
-    jni->DeleteGlobalRef(it->second);
-  }
-  classes_.clear();
-}
-
-jclass ClassReferenceHolder::GetClass(const std::string& name) {
-  std::map<std::string, jclass>::iterator it = classes_.find(name);
-  RTC_CHECK(it != classes_.end()) << "Unexpected GetClass() call for: " << name;
-  return it->second;
-}
-
-void ClassReferenceHolder::LoadClass(JNIEnv* jni, const std::string& name) {
-  jclass localRef = jni->FindClass(name.c_str());
-  CHECK_EXCEPTION(jni) << "error during FindClass: " << name;
-  RTC_CHECK(localRef) << name;
-  jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
-  CHECK_EXCEPTION(jni) << "error during NewGlobalRef: " << name;
-  RTC_CHECK(globalRef) << name;
-  bool inserted = classes_.insert(std::make_pair(name, globalRef)).second;
-  RTC_CHECK(inserted) << "Duplicate class name: " << name;
-}
-
-// Returns a global reference guaranteed to be valid for the lifetime of the
-// process.
-jclass FindClass(JNIEnv* jni, const char* name) {
-  return g_class_reference_holder->GetClass(name);
-}
-
-}  // namespace unity_plugin
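The holder deleted above exists because a plain JNIEnv::FindClass resolves against the system class loader and fails for app classes when called from a native (non-Java) thread; caching a global reference at JNI_OnLoad time sidesteps that. A minimal sketch of how plugin code consumed it; the calling function is hypothetical, while FindClass and the StopCamera signature come from the deleted sources:

#include <jni.h>

#include "examples/unityplugin/class_reference_holder.h"
#include "sdk/android/src/jni/jni_helpers.h"

// Hypothetical caller: resolve the cached UnityUtility class from an
// arbitrary native thread, where JNIEnv::FindClass would return null.
static jmethodID GetStopCameraMethod() {
  JNIEnv* env = webrtc::jni::AttachCurrentThreadIfNeeded();
  jclass utility_class =
      unity_plugin::FindClass(env, "org/webrtc/UnityUtility");
  return env->GetStaticMethodID(utility_class, "StopCamera",
                                "(Lorg/webrtc/VideoCapturer;)V");
}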
@@ -1,38 +0,0 @@
-/*
- *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-// This is a supplement of webrtc::jni::ClassReferenceHolder.
-// The purpose of this ClassReferenceHolder is to load the example
-// specific java class into JNI c++ side, so that our c++ code can
-// call those java functions.
-
-#ifndef EXAMPLES_UNITYPLUGIN_CLASS_REFERENCE_HOLDER_H_
-#define EXAMPLES_UNITYPLUGIN_CLASS_REFERENCE_HOLDER_H_
-
-#include <jni.h>
-
-#include <map>
-#include <string>
-#include <vector>
-
-namespace unity_plugin {
-
-// LoadGlobalClassReferenceHolder must be called in JNI_OnLoad.
-void LoadGlobalClassReferenceHolder();
-// FreeGlobalClassReferenceHolder must be called in JNI_UnLoad.
-void FreeGlobalClassReferenceHolder();
-
-// Returns a global reference guaranteed to be valid for the lifetime of the
-// process.
-jclass FindClass(JNIEnv* jni, const char* name);
-
-}  // namespace unity_plugin
-
-#endif  // EXAMPLES_UNITYPLUGIN_CLASS_REFERENCE_HOLDER_H_
@@ -1,68 +0,0 @@
-/*
- *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-package org.webrtc;
-
-import android.content.Context;
-import androidx.annotation.Nullable;
-
-public class UnityUtility {
-  private static final String VIDEO_CAPTURER_THREAD_NAME = "VideoCapturerThread";
-
-  public static SurfaceTextureHelper LoadSurfaceTextureHelper() {
-    final SurfaceTextureHelper surfaceTextureHelper =
-        SurfaceTextureHelper.create(VIDEO_CAPTURER_THREAD_NAME, null);
-    return surfaceTextureHelper;
-  }
-
-  private static boolean useCamera2() {
-    return Camera2Enumerator.isSupported(ContextUtils.getApplicationContext());
-  }
-
-  private static @Nullable VideoCapturer createCameraCapturer(CameraEnumerator enumerator) {
-    final String[] deviceNames = enumerator.getDeviceNames();
-
-    for (String deviceName : deviceNames) {
-      if (enumerator.isFrontFacing(deviceName)) {
-        VideoCapturer videoCapturer = enumerator.createCapturer(deviceName, null);
-
-        if (videoCapturer != null) {
-          return videoCapturer;
-        }
-      }
-    }
-
-    return null;
-  }
-
-  public static VideoCapturer LinkCamera(
-      long nativeTrackSource, SurfaceTextureHelper surfaceTextureHelper) {
-    VideoCapturer capturer =
-        createCameraCapturer(new Camera2Enumerator(ContextUtils.getApplicationContext()));
-
-    VideoSource videoSource = new VideoSource(nativeTrackSource);
-
-    capturer.initialize(surfaceTextureHelper, ContextUtils.getApplicationContext(),
-        videoSource.getCapturerObserver());
-
-    capturer.startCapture(720, 480, 30);
-    return capturer;
-  }
-
-  public static void StopCamera(VideoCapturer camera) throws InterruptedException {
-    camera.stopCapture();
-    camera.dispose();
-  }
-
-  public static void InitializePeerConncectionFactory(Context context) throws InterruptedException {
-    PeerConnectionFactory.initialize(
-        PeerConnectionFactory.InitializationOptions.builder(context).createInitializationOptions());
-  }
-}
@@ -1,42 +0,0 @@
-/*
- *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <jni.h>
-#undef JNIEXPORT
-#define JNIEXPORT __attribute__((visibility("default")))
-
-#include "examples/unityplugin/class_reference_holder.h"
-#include "rtc_base/ssl_adapter.h"
-#include "sdk/android/native_api/jni/class_loader.h"
-#include "sdk/android/src/jni/jni_helpers.h"
-
-namespace webrtc {
-namespace jni {
-
-extern "C" jint JNIEXPORT JNICALL JNI_OnLoad(JavaVM* jvm, void* reserved) {
-  jint ret = InitGlobalJniVariables(jvm);
-  RTC_DCHECK_GE(ret, 0);
-  if (ret < 0)
-    return -1;
-
-  RTC_CHECK(rtc::InitializeSSL()) << "Failed to InitializeSSL()";
-  webrtc::InitClassLoader(GetEnv());
-  unity_plugin::LoadGlobalClassReferenceHolder();
-
-  return ret;
-}
-
-extern "C" void JNIEXPORT JNICALL JNI_OnUnLoad(JavaVM* jvm, void* reserved) {
-  unity_plugin::FreeGlobalClassReferenceHolder();
-  RTC_CHECK(rtc::CleanupSSL()) << "Failed to CleanupSSL()";
-}
-
-}  // namespace jni
-}  // namespace webrtc
@@ -1,586 +0,0 @@
-/*
- *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "examples/unityplugin/simple_peer_connection.h"
-
-#include <utility>
-
-#include "absl/memory/memory.h"
-#include "api/audio_codecs/builtin_audio_decoder_factory.h"
-#include "api/audio_codecs/builtin_audio_encoder_factory.h"
-#include "api/create_peerconnection_factory.h"
-#include "media/engine/internal_decoder_factory.h"
-#include "media/engine/internal_encoder_factory.h"
-#include "media/engine/multiplex_codec_factory.h"
-#include "modules/audio_device/include/audio_device.h"
-#include "modules/audio_processing/include/audio_processing.h"
-#include "modules/video_capture/video_capture_factory.h"
-#include "pc/video_track_source.h"
-#include "test/vcm_capturer.h"
-
-#if defined(WEBRTC_ANDROID)
-#include "examples/unityplugin/class_reference_holder.h"
-#include "modules/utility/include/helpers_android.h"
-#include "sdk/android/src/jni/android_video_track_source.h"
-#include "sdk/android/src/jni/jni_helpers.h"
-#endif
-
-// Names used for media stream ids.
-const char kAudioLabel[] = "audio_label";
-const char kVideoLabel[] = "video_label";
-const char kStreamId[] = "stream_id";
-
-namespace {
-static int g_peer_count = 0;
-static std::unique_ptr<rtc::Thread> g_worker_thread;
-static std::unique_ptr<rtc::Thread> g_signaling_thread;
-static rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface>
-    g_peer_connection_factory;
-#if defined(WEBRTC_ANDROID)
-// Android case: the video track does not own the capturer, and it
-// relies on the app to dispose the capturer when the peerconnection
-// shuts down.
-static jobject g_camera = nullptr;
-#else
-class CapturerTrackSource : public webrtc::VideoTrackSource {
- public:
-  static rtc::scoped_refptr<CapturerTrackSource> Create() {
-    const size_t kWidth = 640;
-    const size_t kHeight = 480;
-    const size_t kFps = 30;
-    const size_t kDeviceIndex = 0;
-    std::unique_ptr<webrtc::test::VcmCapturer> capturer = absl::WrapUnique(
-        webrtc::test::VcmCapturer::Create(kWidth, kHeight, kFps, kDeviceIndex));
-    if (!capturer) {
-      return nullptr;
-    }
-    return rtc::make_ref_counted<CapturerTrackSource>(std::move(capturer));
-  }
-
- protected:
-  explicit CapturerTrackSource(
-      std::unique_ptr<webrtc::test::VcmCapturer> capturer)
-      : VideoTrackSource(/*remote=*/false), capturer_(std::move(capturer)) {}
-
- private:
-  rtc::VideoSourceInterface<webrtc::VideoFrame>* source() override {
-    return capturer_.get();
-  }
-  std::unique_ptr<webrtc::test::VcmCapturer> capturer_;
-};
-
-#endif
-
-std::string GetEnvVarOrDefault(const char* env_var_name,
-                               const char* default_value) {
-  std::string value;
-  const char* env_var = getenv(env_var_name);
-  if (env_var)
-    value = env_var;
-
-  if (value.empty())
-    value = default_value;
-
-  return value;
-}
-
-std::string GetPeerConnectionString() {
-  return GetEnvVarOrDefault("WEBRTC_CONNECT", "stun:stun.l.google.com:19302");
-}
-
-class DummySetSessionDescriptionObserver
-    : public webrtc::SetSessionDescriptionObserver {
- public:
-  static rtc::scoped_refptr<DummySetSessionDescriptionObserver> Create() {
-    return rtc::make_ref_counted<DummySetSessionDescriptionObserver>();
-  }
-  virtual void OnSuccess() { RTC_LOG(LS_INFO) << __FUNCTION__; }
-  virtual void OnFailure(webrtc::RTCError error) {
-    RTC_LOG(LS_INFO) << __FUNCTION__ << " " << ToString(error.type()) << ": "
-                     << error.message();
-  }
-
- protected:
-  DummySetSessionDescriptionObserver() {}
-  ~DummySetSessionDescriptionObserver() {}
-};
-
-}  // namespace
-
-bool SimplePeerConnection::InitializePeerConnection(const char** turn_urls,
-                                                    const int no_of_urls,
-                                                    const char* username,
-                                                    const char* credential,
-                                                    bool is_receiver) {
-  RTC_DCHECK(peer_connection_.get() == nullptr);
-
-  if (g_peer_connection_factory == nullptr) {
-    g_worker_thread = rtc::Thread::Create();
-    g_worker_thread->Start();
-    g_signaling_thread = rtc::Thread::Create();
-    g_signaling_thread->Start();
-
-    g_peer_connection_factory = webrtc::CreatePeerConnectionFactory(
-        g_worker_thread.get(), g_worker_thread.get(), g_signaling_thread.get(),
-        nullptr, webrtc::CreateBuiltinAudioEncoderFactory(),
-        webrtc::CreateBuiltinAudioDecoderFactory(),
-        std::unique_ptr<webrtc::VideoEncoderFactory>(
-            new webrtc::MultiplexEncoderFactory(
-                std::make_unique<webrtc::InternalEncoderFactory>())),
-        std::unique_ptr<webrtc::VideoDecoderFactory>(
-            new webrtc::MultiplexDecoderFactory(
-                std::make_unique<webrtc::InternalDecoderFactory>())),
-        nullptr, nullptr);
-  }
-  if (!g_peer_connection_factory.get()) {
-    DeletePeerConnection();
-    return false;
-  }
-
-  g_peer_count++;
-  if (!CreatePeerConnection(turn_urls, no_of_urls, username, credential)) {
-    DeletePeerConnection();
-    return false;
-  }
-
-  mandatory_receive_ = is_receiver;
-  return peer_connection_.get() != nullptr;
-}
-
-bool SimplePeerConnection::CreatePeerConnection(const char** turn_urls,
-                                                const int no_of_urls,
-                                                const char* username,
-                                                const char* credential) {
-  RTC_DCHECK(g_peer_connection_factory.get() != nullptr);
-  RTC_DCHECK(peer_connection_.get() == nullptr);
-
-  local_video_observer_.reset(new VideoObserver());
-  remote_video_observer_.reset(new VideoObserver());
-
-  // Add the turn server.
-  if (turn_urls != nullptr) {
-    if (no_of_urls > 0) {
-      webrtc::PeerConnectionInterface::IceServer turn_server;
-      for (int i = 0; i < no_of_urls; i++) {
-        std::string url(turn_urls[i]);
-        if (url.length() > 0)
-          turn_server.urls.push_back(turn_urls[i]);
-      }
-
-      std::string user_name(username);
-      if (user_name.length() > 0)
-        turn_server.username = username;
-
-      std::string password(credential);
-      if (password.length() > 0)
-        turn_server.password = credential;
-
-      config_.servers.push_back(turn_server);
-    }
-  }
-
-  // Add the stun server.
-  webrtc::PeerConnectionInterface::IceServer stun_server;
-  stun_server.uri = GetPeerConnectionString();
-  config_.servers.push_back(stun_server);
-
-  auto result = g_peer_connection_factory->CreatePeerConnectionOrError(
-      config_, webrtc::PeerConnectionDependencies(this));
-  if (!result.ok()) {
-    peer_connection_ = nullptr;
-    return false;
-  }
-  peer_connection_ = result.MoveValue();
-  return true;
-}
-
-void SimplePeerConnection::DeletePeerConnection() {
-  g_peer_count--;
-
-#if defined(WEBRTC_ANDROID)
-  if (g_camera) {
-    JNIEnv* env = webrtc::jni::GetEnv();
-    jclass pc_factory_class =
-        unity_plugin::FindClass(env, "org/webrtc/UnityUtility");
-    jmethodID stop_camera_method = webrtc::GetStaticMethodID(
-        env, pc_factory_class, "StopCamera", "(Lorg/webrtc/VideoCapturer;)V");
-
-    env->CallStaticVoidMethod(pc_factory_class, stop_camera_method, g_camera);
-    CHECK_EXCEPTION(env);
-
-    g_camera = nullptr;
-  }
-#endif
-
-  CloseDataChannel();
-  peer_connection_ = nullptr;
-  active_streams_.clear();
-
-  if (g_peer_count == 0) {
-    g_peer_connection_factory = nullptr;
-    g_signaling_thread.reset();
-    g_worker_thread.reset();
-  }
-}
-
-bool SimplePeerConnection::CreateOffer() {
-  if (!peer_connection_.get())
-    return false;
-
-  webrtc::PeerConnectionInterface::RTCOfferAnswerOptions options;
-  if (mandatory_receive_) {
-    options.offer_to_receive_audio = true;
-    options.offer_to_receive_video = true;
-  }
-  peer_connection_->CreateOffer(this, options);
-  return true;
-}
-
-bool SimplePeerConnection::CreateAnswer() {
-  if (!peer_connection_.get())
-    return false;
-
-  webrtc::PeerConnectionInterface::RTCOfferAnswerOptions options;
-  if (mandatory_receive_) {
-    options.offer_to_receive_audio = true;
-    options.offer_to_receive_video = true;
-  }
-  peer_connection_->CreateAnswer(this, options);
-  return true;
-}
-
-void SimplePeerConnection::OnSuccess(
-    webrtc::SessionDescriptionInterface* desc) {
-  peer_connection_->SetLocalDescription(
-      DummySetSessionDescriptionObserver::Create().get(), desc);
-
-  std::string sdp;
-  desc->ToString(&sdp);
-
-  if (OnLocalSdpReady)
-    OnLocalSdpReady(desc->type().c_str(), sdp.c_str());
-}
-
-void SimplePeerConnection::OnFailure(webrtc::RTCError error) {
-  RTC_LOG(LS_ERROR) << ToString(error.type()) << ": " << error.message();
-
-  // TODO(hta): include error.type in the message
-  if (OnFailureMessage)
-    OnFailureMessage(error.message());
-}
-
-void SimplePeerConnection::OnIceCandidate(
-    const webrtc::IceCandidateInterface* candidate) {
-  RTC_LOG(LS_INFO) << __FUNCTION__ << " " << candidate->sdp_mline_index();
-
-  std::string sdp;
-  if (!candidate->ToString(&sdp)) {
-    RTC_LOG(LS_ERROR) << "Failed to serialize candidate";
-    return;
-  }
-
-  if (OnIceCandidateReady)
-    OnIceCandidateReady(sdp.c_str(), candidate->sdp_mline_index(),
-                        candidate->sdp_mid().c_str());
-}
-
-void SimplePeerConnection::RegisterOnLocalI420FrameReady(
-    I420FRAMEREADY_CALLBACK callback) {
-  if (local_video_observer_)
-    local_video_observer_->SetVideoCallback(callback);
-}
-
-void SimplePeerConnection::RegisterOnRemoteI420FrameReady(
-    I420FRAMEREADY_CALLBACK callback) {
-  if (remote_video_observer_)
-    remote_video_observer_->SetVideoCallback(callback);
-}
-
-void SimplePeerConnection::RegisterOnLocalDataChannelReady(
-    LOCALDATACHANNELREADY_CALLBACK callback) {
-  OnLocalDataChannelReady = callback;
-}
-
-void SimplePeerConnection::RegisterOnDataFromDataChannelReady(
-    DATAFROMEDATECHANNELREADY_CALLBACK callback) {
-  OnDataFromDataChannelReady = callback;
-}
-
-void SimplePeerConnection::RegisterOnFailure(FAILURE_CALLBACK callback) {
-  OnFailureMessage = callback;
-}
-
-void SimplePeerConnection::RegisterOnAudioBusReady(
-    AUDIOBUSREADY_CALLBACK callback) {
-  OnAudioReady = callback;
-}
-
-void SimplePeerConnection::RegisterOnLocalSdpReadytoSend(
-    LOCALSDPREADYTOSEND_CALLBACK callback) {
-  OnLocalSdpReady = callback;
-}
-
-void SimplePeerConnection::RegisterOnIceCandidateReadytoSend(
-    ICECANDIDATEREADYTOSEND_CALLBACK callback) {
-  OnIceCandidateReady = callback;
-}
-
-bool SimplePeerConnection::SetRemoteDescription(const char* type,
-                                                const char* sdp) {
-  if (!peer_connection_)
-    return false;
-
-  std::string remote_desc(sdp);
-  std::string desc_type(type);
-  webrtc::SdpParseError error;
-  webrtc::SessionDescriptionInterface* session_description(
-      webrtc::CreateSessionDescription(desc_type, remote_desc, &error));
-  if (!session_description) {
-    RTC_LOG(LS_WARNING) << "Can't parse received session description message. "
-                           "SdpParseError was: "
-                        << error.description;
-    return false;
-  }
-  RTC_LOG(LS_INFO) << " Received session description :" << remote_desc;
-  peer_connection_->SetRemoteDescription(
-      DummySetSessionDescriptionObserver::Create().get(), session_description);
-
-  return true;
-}
-
-bool SimplePeerConnection::AddIceCandidate(const char* candidate,
-                                           const int sdp_mlineindex,
-                                           const char* sdp_mid) {
-  if (!peer_connection_)
-    return false;
-
-  webrtc::SdpParseError error;
-  std::unique_ptr<webrtc::IceCandidateInterface> ice_candidate(
-      webrtc::CreateIceCandidate(sdp_mid, sdp_mlineindex, candidate, &error));
-  if (!ice_candidate.get()) {
-    RTC_LOG(LS_WARNING) << "Can't parse received candidate message. "
-                           "SdpParseError was: "
-                        << error.description;
-    return false;
-  }
-  if (!peer_connection_->AddIceCandidate(ice_candidate.get())) {
-    RTC_LOG(LS_WARNING) << "Failed to apply the received candidate";
-    return false;
-  }
-  RTC_LOG(LS_INFO) << " Received candidate :" << candidate;
-  return true;
-}
-
-void SimplePeerConnection::SetAudioControl(bool is_mute, bool is_record) {
-  is_mute_audio_ = is_mute;
-  is_record_audio_ = is_record;
-
-  SetAudioControl();
-}
-
-void SimplePeerConnection::SetAudioControl() {
-  if (!remote_stream_)
-    return;
-  webrtc::AudioTrackVector tracks = remote_stream_->GetAudioTracks();
-  if (tracks.empty())
-    return;
-
-  rtc::scoped_refptr<webrtc::AudioTrackInterface>& audio_track = tracks[0];
-  if (is_record_audio_)
-    audio_track->AddSink(this);
-  else
-    audio_track->RemoveSink(this);
-
-  for (auto& track : tracks) {
-    if (is_mute_audio_)
-      track->set_enabled(false);
-    else
-      track->set_enabled(true);
-  }
-}
-
-void SimplePeerConnection::OnAddStream(
-    rtc::scoped_refptr<webrtc::MediaStreamInterface> stream) {
-  RTC_LOG(LS_INFO) << __FUNCTION__ << " " << stream->id();
-  remote_stream_ = stream;
-  if (remote_video_observer_ && !remote_stream_->GetVideoTracks().empty()) {
-    remote_stream_->GetVideoTracks()[0]->AddOrUpdateSink(
-        remote_video_observer_.get(), rtc::VideoSinkWants());
-  }
-  SetAudioControl();
-}
-
-void SimplePeerConnection::AddStreams(bool audio_only) {
-  if (active_streams_.find(kStreamId) != active_streams_.end())
-    return;  // Already added.
-
-  rtc::scoped_refptr<webrtc::MediaStreamInterface> stream =
-      g_peer_connection_factory->CreateLocalMediaStream(kStreamId);
-
-  rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
-      g_peer_connection_factory->CreateAudioTrack(
-          kAudioLabel,
-          g_peer_connection_factory->CreateAudioSource(cricket::AudioOptions())
-              .get()));
-  stream->AddTrack(audio_track);
-
-  if (!audio_only) {
-#if defined(WEBRTC_ANDROID)
-    JNIEnv* env = webrtc::jni::GetEnv();
-    jclass pc_factory_class =
-        unity_plugin::FindClass(env, "org/webrtc/UnityUtility");
-    jmethodID load_texture_helper_method = webrtc::GetStaticMethodID(
-        env, pc_factory_class, "LoadSurfaceTextureHelper",
-        "()Lorg/webrtc/SurfaceTextureHelper;");
-    jobject texture_helper = env->CallStaticObjectMethod(
-        pc_factory_class, load_texture_helper_method);
-    CHECK_EXCEPTION(env);
-    RTC_DCHECK(texture_helper != nullptr)
-        << "Cannot get the Surface Texture Helper.";
-
-    auto source = rtc::make_ref_counted<webrtc::jni::AndroidVideoTrackSource>(
-        g_signaling_thread.get(), env, /*is_screencast=*/false,
-        /*align_timestamps=*/true);
-
-    // link with VideoCapturer (Camera);
-    jmethodID link_camera_method = webrtc::GetStaticMethodID(
-        env, pc_factory_class, "LinkCamera",
-        "(JLorg/webrtc/SurfaceTextureHelper;)Lorg/webrtc/VideoCapturer;");
-    jobject camera_tmp =
-        env->CallStaticObjectMethod(pc_factory_class, link_camera_method,
-                                    (jlong)source.get(), texture_helper);
-    CHECK_EXCEPTION(env);
-    g_camera = (jobject)env->NewGlobalRef(camera_tmp);
-
-    rtc::scoped_refptr<webrtc::VideoTrackInterface> video_track(
-        g_peer_connection_factory->CreateVideoTrack(source, kVideoLabel));
-    stream->AddTrack(video_track);
-#else
-    rtc::scoped_refptr<CapturerTrackSource> video_device =
-        CapturerTrackSource::Create();
-    if (video_device) {
-      rtc::scoped_refptr<webrtc::VideoTrackInterface> video_track(
-          g_peer_connection_factory->CreateVideoTrack(video_device,
-                                                      kVideoLabel));
-
-      stream->AddTrack(video_track);
-    }
-#endif
-    if (local_video_observer_ && !stream->GetVideoTracks().empty()) {
-      stream->GetVideoTracks()[0]->AddOrUpdateSink(local_video_observer_.get(),
-                                                   rtc::VideoSinkWants());
-    }
-  }
-
-  if (!peer_connection_->AddStream(stream.get())) {
-    RTC_LOG(LS_ERROR) << "Adding stream to PeerConnection failed";
-  }
-
-  typedef std::pair<std::string,
-                    rtc::scoped_refptr<webrtc::MediaStreamInterface>>
-      MediaStreamPair;
-  active_streams_.insert(MediaStreamPair(stream->id(), stream));
-}
-
-bool SimplePeerConnection::CreateDataChannel() {
-  struct webrtc::DataChannelInit init;
-  init.ordered = true;
-  init.reliable = true;
-  auto result = peer_connection_->CreateDataChannelOrError("Hello", &init);
-  if (result.ok()) {
-    data_channel_ = result.MoveValue();
-    data_channel_->RegisterObserver(this);
-    RTC_LOG(LS_INFO) << "Succeeds to create data channel";
-    return true;
-  } else {
-    RTC_LOG(LS_INFO) << "Fails to create data channel";
-    return false;
-  }
-}
-
-void SimplePeerConnection::CloseDataChannel() {
-  if (data_channel_.get()) {
-    data_channel_->UnregisterObserver();
-    data_channel_->Close();
-  }
-  data_channel_ = nullptr;
-}
-
-bool SimplePeerConnection::SendDataViaDataChannel(const std::string& data) {
-  if (!data_channel_.get()) {
-    RTC_LOG(LS_INFO) << "Data channel is not established";
-    return false;
-  }
-  webrtc::DataBuffer buffer(data);
-  data_channel_->Send(buffer);
-  return true;
-}
-
-// Peerconnection observer
-void SimplePeerConnection::OnDataChannel(
-    rtc::scoped_refptr<webrtc::DataChannelInterface> channel) {
-  channel->RegisterObserver(this);
-}
-
-void SimplePeerConnection::OnStateChange() {
-  if (data_channel_) {
-    webrtc::DataChannelInterface::DataState state = data_channel_->state();
-    if (state == webrtc::DataChannelInterface::kOpen) {
-      if (OnLocalDataChannelReady)
-        OnLocalDataChannelReady();
-      RTC_LOG(LS_INFO) << "Data channel is open";
-    }
-  }
-}
-
-// A data buffer was successfully received.
-void SimplePeerConnection::OnMessage(const webrtc::DataBuffer& buffer) {
-  size_t size = buffer.data.size();
-  char* msg = new char[size + 1];
-  memcpy(msg, buffer.data.data(), size);
-  msg[size] = 0;
-  if (OnDataFromDataChannelReady)
-    OnDataFromDataChannelReady(msg);
-  delete[] msg;
-}
-
-// AudioTrackSinkInterface implementation.
-void SimplePeerConnection::OnData(const void* audio_data,
-                                  int bits_per_sample,
-                                  int sample_rate,
-                                  size_t number_of_channels,
-                                  size_t number_of_frames) {
-  if (OnAudioReady)
-    OnAudioReady(audio_data, bits_per_sample, sample_rate,
-                 static_cast<int>(number_of_channels),
-                 static_cast<int>(number_of_frames));
-}
-
-std::vector<uint32_t> SimplePeerConnection::GetRemoteAudioTrackSsrcs() {
-  std::vector<rtc::scoped_refptr<webrtc::RtpReceiverInterface>> receivers =
-      peer_connection_->GetReceivers();
-
-  std::vector<uint32_t> ssrcs;
-  for (const auto& receiver : receivers) {
-    if (receiver->media_type() != cricket::MEDIA_TYPE_AUDIO)
-      continue;
-
-    std::vector<webrtc::RtpEncodingParameters> params =
-        receiver->GetParameters().encodings;
-
-    for (const auto& param : params) {
-      uint32_t ssrc = param.ssrc.value_or(0);
-      if (ssrc > 0)
-        ssrcs.push_back(ssrc);
-    }
-  }
-
-  return ssrcs;
-}
@@ -1,135 +0,0 @@
-/*
- *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef EXAMPLES_UNITYPLUGIN_SIMPLE_PEER_CONNECTION_H_
-#define EXAMPLES_UNITYPLUGIN_SIMPLE_PEER_CONNECTION_H_
-
-#include <map>
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "api/data_channel_interface.h"
-#include "api/media_stream_interface.h"
-#include "api/peer_connection_interface.h"
-#include "examples/unityplugin/unity_plugin_apis.h"
-#include "examples/unityplugin/video_observer.h"
-
-class SimplePeerConnection : public webrtc::PeerConnectionObserver,
-                             public webrtc::CreateSessionDescriptionObserver,
-                             public webrtc::DataChannelObserver,
-                             public webrtc::AudioTrackSinkInterface {
- public:
-  SimplePeerConnection() {}
-  ~SimplePeerConnection() {}
-
-  bool InitializePeerConnection(const char** turn_urls,
-                                int no_of_urls,
-                                const char* username,
-                                const char* credential,
-                                bool is_receiver);
-  void DeletePeerConnection();
-  void AddStreams(bool audio_only);
-  bool CreateDataChannel();
-  bool CreateOffer();
-  bool CreateAnswer();
-  bool SendDataViaDataChannel(const std::string& data);
-  void SetAudioControl(bool is_mute, bool is_record);
-
-  // Register callback functions.
-  void RegisterOnLocalI420FrameReady(I420FRAMEREADY_CALLBACK callback);
-  void RegisterOnRemoteI420FrameReady(I420FRAMEREADY_CALLBACK callback);
-  void RegisterOnLocalDataChannelReady(LOCALDATACHANNELREADY_CALLBACK callback);
-  void RegisterOnDataFromDataChannelReady(
-      DATAFROMEDATECHANNELREADY_CALLBACK callback);
-  void RegisterOnFailure(FAILURE_CALLBACK callback);
-  void RegisterOnAudioBusReady(AUDIOBUSREADY_CALLBACK callback);
-  void RegisterOnLocalSdpReadytoSend(LOCALSDPREADYTOSEND_CALLBACK callback);
-  void RegisterOnIceCandidateReadytoSend(
-      ICECANDIDATEREADYTOSEND_CALLBACK callback);
-  bool SetRemoteDescription(const char* type, const char* sdp);
-  bool AddIceCandidate(const char* sdp,
-                       int sdp_mlineindex,
-                       const char* sdp_mid);
-
- protected:
-  // create a peerconneciton and add the turn servers info to the configuration.
-  bool CreatePeerConnection(const char** turn_urls,
-                            int no_of_urls,
-                            const char* username,
-                            const char* credential);
-  void CloseDataChannel();
-  void SetAudioControl();
-
-  // PeerConnectionObserver implementation.
-  void OnSignalingChange(
-      webrtc::PeerConnectionInterface::SignalingState new_state) override {}
-  void OnAddStream(
-      rtc::scoped_refptr<webrtc::MediaStreamInterface> stream) override;
-  void OnRemoveStream(
-      rtc::scoped_refptr<webrtc::MediaStreamInterface> stream) override {}
-  void OnDataChannel(
-      rtc::scoped_refptr<webrtc::DataChannelInterface> channel) override;
-  void OnRenegotiationNeeded() override {}
-  void OnIceConnectionChange(
-      webrtc::PeerConnectionInterface::IceConnectionState new_state) override {}
-  void OnIceGatheringChange(
-      webrtc::PeerConnectionInterface::IceGatheringState new_state) override {}
-  void OnIceCandidate(const webrtc::IceCandidateInterface* candidate) override;
-  void OnIceConnectionReceivingChange(bool receiving) override {}
-
-  // CreateSessionDescriptionObserver implementation.
-  void OnSuccess(webrtc::SessionDescriptionInterface* desc) override;
-  void OnFailure(webrtc::RTCError error) override;
-
-  // DataChannelObserver implementation.
-  void OnStateChange() override;
-  void OnMessage(const webrtc::DataBuffer& buffer) override;
-
-  // AudioTrackSinkInterface implementation.
-  void OnData(const void* audio_data,
-              int bits_per_sample,
-              int sample_rate,
-              size_t number_of_channels,
-              size_t number_of_frames) override;
-
-  // Get remote audio tracks ssrcs.
-  std::vector<uint32_t> GetRemoteAudioTrackSsrcs();
-
- private:
-  rtc::scoped_refptr<webrtc::PeerConnectionInterface> peer_connection_;
-  rtc::scoped_refptr<webrtc::DataChannelInterface> data_channel_;
-  std::map<std::string, rtc::scoped_refptr<webrtc::MediaStreamInterface> >
-      active_streams_;
-
-  std::unique_ptr<VideoObserver> local_video_observer_;
-  std::unique_ptr<VideoObserver> remote_video_observer_;
-
-  rtc::scoped_refptr<webrtc::MediaStreamInterface> remote_stream_ = nullptr;
-  webrtc::PeerConnectionInterface::RTCConfiguration config_;
-
-  LOCALDATACHANNELREADY_CALLBACK OnLocalDataChannelReady = nullptr;
-  DATAFROMEDATECHANNELREADY_CALLBACK OnDataFromDataChannelReady = nullptr;
-  FAILURE_CALLBACK OnFailureMessage = nullptr;
-  AUDIOBUSREADY_CALLBACK OnAudioReady = nullptr;
-
-  LOCALSDPREADYTOSEND_CALLBACK OnLocalSdpReady = nullptr;
-  ICECANDIDATEREADYTOSEND_CALLBACK OnIceCandidateReady = nullptr;
-
-  bool is_mute_audio_ = false;
-  bool is_record_audio_ = false;
-  bool mandatory_receive_ = false;
-
-  // disallow copy-and-assign
-  SimplePeerConnection(const SimplePeerConnection&) = delete;
-  SimplePeerConnection& operator=(const SimplePeerConnection&) = delete;
-};
-
-#endif  // EXAMPLES_UNITYPLUGIN_SIMPLE_PEER_CONNECTION_H_
@ -1,196 +0,0 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "examples/unityplugin/unity_plugin_apis.h"

#include <map>
#include <string>

#include "examples/unityplugin/simple_peer_connection.h"

namespace {
static int g_peer_connection_id = 1;
static std::map<int, rtc::scoped_refptr<SimplePeerConnection>>
    g_peer_connection_map;
}  // namespace

int CreatePeerConnection(const char** turn_urls,
                         const int no_of_urls,
                         const char* username,
                         const char* credential,
                         bool mandatory_receive_video) {
  g_peer_connection_map[g_peer_connection_id] =
      rtc::make_ref_counted<SimplePeerConnection>();

  if (!g_peer_connection_map[g_peer_connection_id]->InitializePeerConnection(
          turn_urls, no_of_urls, username, credential, mandatory_receive_video))
    return -1;

  return g_peer_connection_id++;
}

bool ClosePeerConnection(int peer_connection_id) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->DeletePeerConnection();
  g_peer_connection_map.erase(peer_connection_id);
  return true;
}

bool AddStream(int peer_connection_id, bool audio_only) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->AddStreams(audio_only);
  return true;
}

bool AddDataChannel(int peer_connection_id) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  return g_peer_connection_map[peer_connection_id]->CreateDataChannel();
}

bool CreateOffer(int peer_connection_id) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  return g_peer_connection_map[peer_connection_id]->CreateOffer();
}

bool CreateAnswer(int peer_connection_id) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  return g_peer_connection_map[peer_connection_id]->CreateAnswer();
}

bool SendDataViaDataChannel(int peer_connection_id, const char* data) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  std::string s(data);
  g_peer_connection_map[peer_connection_id]->SendDataViaDataChannel(s);

  return true;
}

bool SetAudioControl(int peer_connection_id, bool is_mute, bool is_record) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->SetAudioControl(is_mute,
                                                             is_record);
  return true;
}

bool SetRemoteDescription(int peer_connection_id,
                          const char* type,
                          const char* sdp) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  return g_peer_connection_map[peer_connection_id]->SetRemoteDescription(type,
                                                                         sdp);
}

bool AddIceCandidate(const int peer_connection_id,
                     const char* candidate,
                     const int sdp_mlineindex,
                     const char* sdp_mid) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  return g_peer_connection_map[peer_connection_id]->AddIceCandidate(
      candidate, sdp_mlineindex, sdp_mid);
}

// Register callback functions.
bool RegisterOnLocalI420FrameReady(int peer_connection_id,
                                   I420FRAMEREADY_CALLBACK callback) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->RegisterOnLocalI420FrameReady(
      callback);
  return true;
}

bool RegisterOnRemoteI420FrameReady(int peer_connection_id,
                                    I420FRAMEREADY_CALLBACK callback) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->RegisterOnRemoteI420FrameReady(
      callback);
  return true;
}

bool RegisterOnLocalDataChannelReady(int peer_connection_id,
                                     LOCALDATACHANNELREADY_CALLBACK callback) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->RegisterOnLocalDataChannelReady(
      callback);
  return true;
}

bool RegisterOnDataFromDataChannelReady(
    int peer_connection_id,
    DATAFROMEDATECHANNELREADY_CALLBACK callback) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->RegisterOnDataFromDataChannelReady(
      callback);
  return true;
}

bool RegisterOnFailure(int peer_connection_id, FAILURE_CALLBACK callback) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->RegisterOnFailure(callback);
  return true;
}

bool RegisterOnAudioBusReady(int peer_connection_id,
                             AUDIOBUSREADY_CALLBACK callback) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->RegisterOnAudioBusReady(callback);
  return true;
}

// Signaling channel related functions.
bool RegisterOnLocalSdpReadytoSend(int peer_connection_id,
                                   LOCALSDPREADYTOSEND_CALLBACK callback) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->RegisterOnLocalSdpReadytoSend(
      callback);
  return true;
}

bool RegisterOnIceCandidateReadytoSend(
    int peer_connection_id,
    ICECANDIDATEREADYTOSEND_CALLBACK callback) {
  if (!g_peer_connection_map.count(peer_connection_id))
    return false;

  g_peer_connection_map[peer_connection_id]->RegisterOnIceCandidateReadytoSend(
      callback);
  return true;
}
@ -1,108 +0,0 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

// This file provides an example of unity native plugin APIs.

#ifndef EXAMPLES_UNITYPLUGIN_UNITY_PLUGIN_APIS_H_
#define EXAMPLES_UNITYPLUGIN_UNITY_PLUGIN_APIS_H_

#include <stdint.h>

// Definitions of callback functions.
typedef void (*I420FRAMEREADY_CALLBACK)(const uint8_t* data_y,
                                        const uint8_t* data_u,
                                        const uint8_t* data_v,
                                        const uint8_t* data_a,
                                        int stride_y,
                                        int stride_u,
                                        int stride_v,
                                        int stride_a,
                                        uint32_t width,
                                        uint32_t height);
typedef void (*LOCALDATACHANNELREADY_CALLBACK)();
typedef void (*DATAFROMEDATECHANNELREADY_CALLBACK)(const char* msg);
typedef void (*FAILURE_CALLBACK)(const char* msg);
typedef void (*LOCALSDPREADYTOSEND_CALLBACK)(const char* type, const char* sdp);
typedef void (*ICECANDIDATEREADYTOSEND_CALLBACK)(const char* candidate,
                                                 int sdp_mline_index,
                                                 const char* sdp_mid);
typedef void (*AUDIOBUSREADY_CALLBACK)(const void* audio_data,
                                       int bits_per_sample,
                                       int sample_rate,
                                       int number_of_channels,
                                       int number_of_frames);

#if defined(WEBRTC_WIN)
#define WEBRTC_PLUGIN_API __declspec(dllexport)
#elif defined(WEBRTC_ANDROID)
#define WEBRTC_PLUGIN_API __attribute__((visibility("default")))
#endif
extern "C" {
// Create a peer connection and return a unique peer connection id.
WEBRTC_PLUGIN_API int CreatePeerConnection(const char** turn_urls,
                                           int no_of_urls,
                                           const char* username,
                                           const char* credential,
                                           bool mandatory_receive_video);
// Close a peer connection.
WEBRTC_PLUGIN_API bool ClosePeerConnection(int peer_connection_id);
// Add an audio stream. If audio_only is true, the stream only has an audio
// track and no video track.
WEBRTC_PLUGIN_API bool AddStream(int peer_connection_id, bool audio_only);
// Add a data channel to the peer connection.
WEBRTC_PLUGIN_API bool AddDataChannel(int peer_connection_id);
// Create a peer connection offer.
WEBRTC_PLUGIN_API bool CreateOffer(int peer_connection_id);
// Create a peer connection answer.
WEBRTC_PLUGIN_API bool CreateAnswer(int peer_connection_id);
// Send data through the data channel.
WEBRTC_PLUGIN_API bool SendDataViaDataChannel(int peer_connection_id,
                                              const char* data);
// Set audio control. If is_mute=true, no audio will play out. If
// is_record=true, AUDIOBUSREADY_CALLBACK will be called every 10 ms.
WEBRTC_PLUGIN_API bool SetAudioControl(int peer_connection_id,
                                       bool is_mute,
                                       bool is_record);
// Set remote sdp.
WEBRTC_PLUGIN_API bool SetRemoteDescription(int peer_connection_id,
                                            const char* type,
                                            const char* sdp);
// Add ice candidate.
WEBRTC_PLUGIN_API bool AddIceCandidate(int peer_connection_id,
                                       const char* candidate,
                                       int sdp_mlineindex,
                                       const char* sdp_mid);

// Register callback functions.
WEBRTC_PLUGIN_API bool RegisterOnLocalI420FrameReady(
    int peer_connection_id,
    I420FRAMEREADY_CALLBACK callback);
WEBRTC_PLUGIN_API bool RegisterOnRemoteI420FrameReady(
    int peer_connection_id,
    I420FRAMEREADY_CALLBACK callback);
WEBRTC_PLUGIN_API bool RegisterOnLocalDataChannelReady(
    int peer_connection_id,
    LOCALDATACHANNELREADY_CALLBACK callback);
WEBRTC_PLUGIN_API bool RegisterOnDataFromDataChannelReady(
    int peer_connection_id,
    DATAFROMEDATECHANNELREADY_CALLBACK callback);
WEBRTC_PLUGIN_API bool RegisterOnFailure(int peer_connection_id,
                                         FAILURE_CALLBACK callback);
WEBRTC_PLUGIN_API bool RegisterOnAudioBusReady(int peer_connection_id,
                                               AUDIOBUSREADY_CALLBACK callback);
WEBRTC_PLUGIN_API bool RegisterOnLocalSdpReadytoSend(
    int peer_connection_id,
    LOCALSDPREADYTOSEND_CALLBACK callback);
WEBRTC_PLUGIN_API bool RegisterOnIceCandidateReadytoSend(
    int peer_connection_id,
    ICECANDIDATEREADYTOSEND_CALLBACK callback);
}

#endif  // EXAMPLES_UNITYPLUGIN_UNITY_PLUGIN_APIS_H_
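For reference, a minimal host-side sketch of driving the C API declared above. This is hypothetical caller code, not part of the change; the TURN URL and credentials are placeholders, and real use would run a signaling channel alongside it.

#include <cstdio>

#include "examples/unityplugin/unity_plugin_apis.h"

namespace {
void OnLocalSdp(const char* type, const char* sdp) {
  // Hand the local offer/answer to your signaling channel.
  std::printf("local %s:\n%s\n", type, sdp);
}
}  // namespace

int main() {
  const char* turn_urls[] = {"turn:turn.example.org:3478"};  // placeholder
  int id = CreatePeerConnection(turn_urls, /*no_of_urls=*/1, "user", "pass",
                                /*mandatory_receive_video=*/false);
  if (id < 0)
    return 1;
  RegisterOnLocalSdpReadytoSend(id, &OnLocalSdp);
  AddStream(id, /*audio_only=*/true);
  CreateOffer(id);  // OnLocalSdp fires asynchronously with the local SDP.
  // ... exchange SDP and ICE candidates via signaling, then tear down ...
  ClosePeerConnection(id);
  return 0;
}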
@ -1,44 +0,0 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "examples/unityplugin/video_observer.h"

void VideoObserver::SetVideoCallback(I420FRAMEREADY_CALLBACK callback) {
  std::lock_guard<std::mutex> lock(mutex);
  OnI420FrameReady = callback;
}

void VideoObserver::OnFrame(const webrtc::VideoFrame& frame) {
  std::unique_lock<std::mutex> lock(mutex);
  if (!OnI420FrameReady)
    return;

  rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer(
      frame.video_frame_buffer());

  if (buffer->type() != webrtc::VideoFrameBuffer::Type::kI420A) {
    rtc::scoped_refptr<webrtc::I420BufferInterface> i420_buffer =
        buffer->ToI420();
    OnI420FrameReady(i420_buffer->DataY(), i420_buffer->DataU(),
                     i420_buffer->DataV(), nullptr, i420_buffer->StrideY(),
                     i420_buffer->StrideU(), i420_buffer->StrideV(), 0,
                     frame.width(), frame.height());

  } else {
    // The buffer has an alpha channel.
    const webrtc::I420ABufferInterface* i420a_buffer = buffer->GetI420A();

    OnI420FrameReady(i420a_buffer->DataY(), i420a_buffer->DataU(),
                     i420a_buffer->DataV(), i420a_buffer->DataA(),
                     i420a_buffer->StrideY(), i420a_buffer->StrideU(),
                     i420a_buffer->StrideV(), i420a_buffer->StrideA(),
                     frame.width(), frame.height());
  }
}
@ -1,35 +0,0 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef EXAMPLES_UNITYPLUGIN_VIDEO_OBSERVER_H_
#define EXAMPLES_UNITYPLUGIN_VIDEO_OBSERVER_H_

#include <mutex>

#include "api/media_stream_interface.h"
#include "api/video/video_sink_interface.h"
#include "examples/unityplugin/unity_plugin_apis.h"

class VideoObserver : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  VideoObserver() {}
  ~VideoObserver() {}
  void SetVideoCallback(I420FRAMEREADY_CALLBACK callback);

 protected:
  // VideoSinkInterface implementation
  void OnFrame(const webrtc::VideoFrame& frame) override;

 private:
  I420FRAMEREADY_CALLBACK OnI420FrameReady = nullptr;
  std::mutex mutex;
};

#endif  // EXAMPLES_UNITYPLUGIN_VIDEO_OBSERVER_H_
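A short sketch of how an observer like this gets wired up as a frame sink. The track, observer, and callback here are illustrative stand-ins, not code from this change:

#include "api/media_stream_interface.h"
#include "examples/unityplugin/video_observer.h"

void AttachObserver(webrtc::VideoTrackInterface* track,
                    VideoObserver* observer,
                    I420FRAMEREADY_CALLBACK cb) {
  observer->SetVideoCallback(cb);
  // Register the observer as a sink; a default-constructed VideoSinkWants
  // places no constraints on the source.
  track->AddOrUpdateSink(observer, rtc::VideoSinkWants());
}

Once attached, every decoded frame reaches VideoObserver::OnFrame(), which converts to I420 (or I420A) and forwards the plane pointers to the registered callback.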
@ -106,9 +106,6 @@ rtclog2::FrameDecodedEvents::Codec ConvertToProtoFormat(VideoCodecType codec) {
      return rtclog2::FrameDecodedEvents::CODEC_AV1;
    case VideoCodecType::kVideoCodecH264:
      return rtclog2::FrameDecodedEvents::CODEC_H264;
-    case VideoCodecType::kVideoCodecMultiplex:
-      // This codec type is afaik not used.
-      return rtclog2::FrameDecodedEvents::CODEC_UNKNOWN;
    case VideoCodecType::kVideoCodecH265:
      return rtclog2::FrameDecodedEvents::CODEC_H265;
  }
@ -271,12 +271,11 @@ VideoCodecType GetRuntimeCodecType(rtclog2::FrameDecodedEvents::Codec codec) {
    case rtclog2::FrameDecodedEvents::CODEC_H265:
      return VideoCodecType::kVideoCodecH265;
    case rtclog2::FrameDecodedEvents::CODEC_UNKNOWN:
-      RTC_LOG(LS_ERROR) << "Unknown codec type. Assuming "
-                           "VideoCodecType::kVideoCodecMultiplex";
-      return VideoCodecType::kVideoCodecMultiplex;
+      RTC_LOG(LS_ERROR) << "Unknown codec type. Returning generic.";
+      return VideoCodecType::kVideoCodecGeneric;
  }
  RTC_DCHECK_NOTREACHED();
-  return VideoCodecType::kVideoCodecMultiplex;
+  return VideoCodecType::kVideoCodecGeneric;
}

ParsedRtcEventLog::ParseStatus GetHeaderExtensions(
@ -396,7 +396,6 @@ rtc_library("rtc_internal_video_codecs") {
    "../call:video_stream_api",
    "../modules/video_coding:video_codec_interface",
    "../modules/video_coding:webrtc_h264",
-    "../modules/video_coding:webrtc_multiplex",
    "../modules/video_coding:webrtc_vp8",
    "../modules/video_coding:webrtc_vp9",
    "../rtc_base:checks",
@ -427,8 +426,6 @@
      "engine/internal_decoder_factory.h",
      "engine/internal_encoder_factory.cc",
      "engine/internal_encoder_factory.h",
-      "engine/multiplex_codec_factory.cc",
-      "engine/multiplex_codec_factory.h",
    ]
  }

@ -897,7 +894,6 @@ if (rtc_include_tests) {
      "base/video_common_unittest.cc",
      "engine/internal_decoder_factory_unittest.cc",
      "engine/internal_encoder_factory_unittest.cc",
-      "engine/multiplex_codec_factory_unittest.cc",
      "engine/null_webrtc_video_engine_unittest.cc",
      "engine/payload_type_mapper_unittest.cc",
      "engine/simulcast_encoder_adapter_unittest.cc",
@ -24,7 +24,6 @@ const float kProcessCpuThreshold = 0.10f;

const char kRedCodecName[] = "red";
const char kUlpfecCodecName[] = "ulpfec";
-const char kMultiplexCodecName[] = "multiplex";

// TODO(brandtr): Change this to 'flexfec' when we are confident that the
// header format is not changing anymore.
@ -1,117 +0,0 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "media/engine/multiplex_codec_factory.h"

#include <map>
#include <memory>
#include <string>
#include <utility>

#include "absl/strings/match.h"
#include "api/environment/environment.h"
#include "api/video_codecs/sdp_video_format.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
#include "rtc_base/logging.h"

namespace {

bool IsMultiplexCodec(const cricket::VideoCodec& codec) {
  return absl::EqualsIgnoreCase(codec.name.c_str(),
                                cricket::kMultiplexCodecName);
}

}  // anonymous namespace

namespace webrtc {

constexpr const char* kMultiplexAssociatedCodecName = cricket::kVp9CodecName;

MultiplexEncoderFactory::MultiplexEncoderFactory(
    std::unique_ptr<VideoEncoderFactory> factory,
    bool supports_augmenting_data)
    : factory_(std::move(factory)),
      supports_augmenting_data_(supports_augmenting_data) {}

std::vector<SdpVideoFormat> MultiplexEncoderFactory::GetSupportedFormats()
    const {
  std::vector<SdpVideoFormat> formats = factory_->GetSupportedFormats();
  for (const auto& format : formats) {
    if (absl::EqualsIgnoreCase(format.name, kMultiplexAssociatedCodecName)) {
      SdpVideoFormat multiplex_format = format;
      multiplex_format.parameters[cricket::kCodecParamAssociatedCodecName] =
          format.name;
      multiplex_format.name = cricket::kMultiplexCodecName;
      formats.push_back(multiplex_format);
      break;
    }
  }
  return formats;
}

std::unique_ptr<VideoEncoder> MultiplexEncoderFactory::CreateVideoEncoder(
    const SdpVideoFormat& format) {
  if (!IsMultiplexCodec(cricket::CreateVideoCodec(format)))
    return factory_->CreateVideoEncoder(format);
  const auto& it =
      format.parameters.find(cricket::kCodecParamAssociatedCodecName);
  if (it == format.parameters.end()) {
    RTC_LOG(LS_ERROR) << "No associated codec for multiplex.";
    return nullptr;
  }
  SdpVideoFormat associated_format = format;
  associated_format.name = it->second;
  return std::unique_ptr<VideoEncoder>(new MultiplexEncoderAdapter(
      factory_.get(), associated_format, supports_augmenting_data_));
}

MultiplexDecoderFactory::MultiplexDecoderFactory(
    std::unique_ptr<VideoDecoderFactory> factory,
    bool supports_augmenting_data)
    : factory_(std::move(factory)),
      supports_augmenting_data_(supports_augmenting_data) {}

std::vector<SdpVideoFormat> MultiplexDecoderFactory::GetSupportedFormats()
    const {
  std::vector<SdpVideoFormat> formats = factory_->GetSupportedFormats();
  std::vector<SdpVideoFormat> augmented_formats = formats;
  for (const auto& format : formats) {
    if (absl::EqualsIgnoreCase(format.name, kMultiplexAssociatedCodecName)) {
      SdpVideoFormat multiplex_format = format;
      multiplex_format.parameters[cricket::kCodecParamAssociatedCodecName] =
          format.name;
      multiplex_format.name = cricket::kMultiplexCodecName;
      augmented_formats.push_back(multiplex_format);
    }
  }
  return augmented_formats;
}

std::unique_ptr<VideoDecoder> MultiplexDecoderFactory::Create(
    const Environment& env,
    const SdpVideoFormat& format) {
  if (!IsMultiplexCodec(cricket::CreateVideoCodec(format))) {
    return factory_->Create(env, format);
  }
  auto it = format.parameters.find(cricket::kCodecParamAssociatedCodecName);
  if (it == format.parameters.end()) {
    RTC_LOG(LS_ERROR) << "No associated codec for multiplex.";
    return nullptr;
  }
  SdpVideoFormat associated_format = format;
  associated_format.name = it->second;
  return std::make_unique<MultiplexDecoderAdapter>(
      env, factory_.get(), associated_format, supports_augmenting_data_);
}

}  // namespace webrtc
@ -1,80 +0,0 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_
#define MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_

#include <memory>
#include <vector>

#include "api/environment/environment.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "rtc_base/system/rtc_export.h"

namespace webrtc {
// Multiplex codec is a completely modular/optional codec that allows users to
// send more than a frame's opaque content (RGB/YUV) over video channels.
// - Allows sending the alpha channel over the wire iff the input is an
// I420ABufferInterface. Users can expect to receive I420ABufferInterface as
// the decoded video frame buffer. I420A data is split into YUV/AXX portions,
// encoded/decoded separately, and the bitstreams are concatenated.
// - Allows sending augmenting data over the wire attached to the frame. This
// attached data portion is not encoded in any way and is sent as it is. Users
// can input AugmentedVideoFrameBuffer and can expect the same interface as the
// decoded video frame buffer.
// - Showcases an example of how to add a custom codec in the webrtc video
// channel.
// How to use it end-to-end:
// - Wrap your existing VideoEncoderFactory implementation with
// MultiplexEncoderFactory and your VideoDecoderFactory implementation with
// MultiplexDecoderFactory below. For actual coding, multiplex creates encoder
// and decoder instance(s) using these factories.
// - Use the Multiplex*coderFactory classes in CreatePeerConnectionFactory()
// calls.
// - Select the "multiplex" codec in SDP negotiation.
class RTC_EXPORT MultiplexEncoderFactory : public VideoEncoderFactory {
 public:
  // `supports_augmenting_data` defines if the encoder would support augmenting
  // data. If set, the encoder expects to receive video frame buffers of type
  // AugmentedVideoFrameBuffer.
  MultiplexEncoderFactory(std::unique_ptr<VideoEncoderFactory> factory,
                          bool supports_augmenting_data = false);

  std::vector<SdpVideoFormat> GetSupportedFormats() const override;
  std::unique_ptr<VideoEncoder> CreateVideoEncoder(
      const SdpVideoFormat& format) override;

 private:
  std::unique_ptr<VideoEncoderFactory> factory_;
  const bool supports_augmenting_data_;
};

class RTC_EXPORT MultiplexDecoderFactory : public VideoDecoderFactory {
 public:
  // `supports_augmenting_data` defines if the decoder would support augmenting
  // data. If set, the decoder is expected to output video frame buffers of
  // type AugmentedVideoFrameBuffer.
  MultiplexDecoderFactory(std::unique_ptr<VideoDecoderFactory> factory,
                          bool supports_augmenting_data = false);

  std::vector<SdpVideoFormat> GetSupportedFormats() const override;
  std::unique_ptr<VideoDecoder> Create(const Environment& env,
                                       const SdpVideoFormat& format) override;

 private:
  std::unique_ptr<VideoDecoderFactory> factory_;
  const bool supports_augmenting_data_;
};

}  // namespace webrtc

#endif  // MEDIA_ENGINE_MULTIPLEX_CODEC_FACTORY_H_
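As the header comment above describes, these factories were meant to wrap an existing implementation. A minimal sketch of that wiring follows; the two helper functions are illustrative names, not code from this repository:

#include <memory>

#include "api/video_codecs/video_decoder_factory.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "media/engine/internal_decoder_factory.h"
#include "media/engine/internal_encoder_factory.h"
#include "media/engine/multiplex_codec_factory.h"

std::unique_ptr<webrtc::VideoEncoderFactory> MakeMultiplexEncoderFactory() {
  // Wrapping the stock factory makes "multiplex" appear in
  // GetSupportedFormats() alongside the associated codec (VP9 by default).
  return std::make_unique<webrtc::MultiplexEncoderFactory>(
      std::make_unique<webrtc::InternalEncoderFactory>());
}

std::unique_ptr<webrtc::VideoDecoderFactory> MakeMultiplexDecoderFactory() {
  return std::make_unique<webrtc::MultiplexDecoderFactory>(
      std::make_unique<webrtc::InternalDecoderFactory>());
}

The resulting factories would then be passed to CreatePeerConnectionFactory(), after which "multiplex" could be selected in SDP negotiation; the unit test removed below exercises exactly this construction.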
@ -1,50 +0,0 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "media/engine/multiplex_codec_factory.h"

#include <memory>
#include <utility>

#include "api/environment/environment_factory.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_encoder.h"
#include "media/base/media_constants.h"
#include "media/engine/internal_decoder_factory.h"
#include "media/engine/internal_encoder_factory.h"
#include "test/gtest.h"

namespace webrtc {

TEST(MultiplexDecoderFactoryTest, CreateVideoDecoder) {
  std::unique_ptr<VideoDecoderFactory> internal_factory =
      std::make_unique<InternalDecoderFactory>();
  MultiplexDecoderFactory factory(std::move(internal_factory));
  std::unique_ptr<VideoDecoder> decoder = factory.Create(
      CreateEnvironment(),
      SdpVideoFormat(
          cricket::kMultiplexCodecName,
          {{cricket::kCodecParamAssociatedCodecName, cricket::kVp9CodecName}}));
  EXPECT_TRUE(decoder);
}

TEST(MultiplexEncoderFactory, CreateVideoEncoder) {
  std::unique_ptr<VideoEncoderFactory> internal_factory(
      new InternalEncoderFactory());
  MultiplexEncoderFactory factory(std::move(internal_factory));
  std::unique_ptr<VideoEncoder> encoder =
      factory.CreateVideoEncoder(SdpVideoFormat(
          cricket::kMultiplexCodecName,
          {{cricket::kCodecParamAssociatedCodecName, cricket::kVp9CodecName}}));
  EXPECT_TRUE(encoder);
}

}  // namespace webrtc
@ -2225,7 +2225,6 @@ WebRtcVideoSendChannel::WebRtcVideoSendStream::CreateVideoEncoderConfig(
    case webrtc::kVideoCodecVP9:
    case webrtc::kVideoCodecAV1:
    case webrtc::kVideoCodecGeneric:
-    case webrtc::kVideoCodecMultiplex:
      max_qp = kDefaultVideoMaxQpVpx;
      break;
  }
@ -43,7 +43,6 @@ std::unique_ptr<VideoRtpDepacketizer> CreateVideoRtpDepacketizer(
      return nullptr;
#endif
    case kVideoCodecGeneric:
-    case kVideoCodecMultiplex:
      return std::make_unique<VideoRtpDepacketizerGeneric>();
  }
  RTC_CHECK_NOTREACHED();
@ -512,37 +512,6 @@ rtc_library("webrtc_h264") {
  }
}

-rtc_library("webrtc_multiplex") {
-  sources = [
-    "codecs/multiplex/augmented_video_frame_buffer.cc",
-    "codecs/multiplex/include/augmented_video_frame_buffer.h",
-    "codecs/multiplex/include/multiplex_decoder_adapter.h",
-    "codecs/multiplex/include/multiplex_encoder_adapter.h",
-    "codecs/multiplex/multiplex_decoder_adapter.cc",
-    "codecs/multiplex/multiplex_encoded_image_packer.cc",
-    "codecs/multiplex/multiplex_encoded_image_packer.h",
-    "codecs/multiplex/multiplex_encoder_adapter.cc",
-  ]
-
-  deps = [
-    ":video_codec_interface",
-    ":video_coding_utility",
-    "../../api:fec_controller_api",
-    "../../api:scoped_refptr",
-    "../../api/environment",
-    "../../api/video:encoded_image",
-    "../../api/video:video_frame",
-    "../../api/video:video_rtp_headers",
-    "../../api/video_codecs:video_codecs_api",
-    "../../common_video",
-    "../../media:rtc_media_base",
-    "../../rtc_base:checks",
-    "../../rtc_base:logging",
-    "../../rtc_base/synchronization:mutex",
-    "../rtp_rtcp:rtp_rtcp_format",
-  ]
-}
-
# This target defines a bare-bones interface towards libvpx, used by the
# VP8 and VP9 wrappers below.
rtc_library("webrtc_libvpx_interface") {
@ -1054,7 +1023,6 @@ if (rtc_include_tests) {

    sources = [
      "codecs/h264/test/h264_impl_unittest.cc",
-      "codecs/multiplex/test/multiplex_adapter_unittest.cc",
      "codecs/test/video_encoder_decoder_instantiation_tests.cc",
      "codecs/test/videocodec_test_av1.cc",
      "codecs/test/videocodec_test_libvpx.cc",
@ -1078,7 +1046,6 @@
      ":videocodec_test_impl",
      ":webrtc_h264",
      ":webrtc_libvpx_interface",
-      ":webrtc_multiplex",
      ":webrtc_vp8",
      ":webrtc_vp9",
      ":webrtc_vp9_helpers",
@ -1,65 +0,0 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"

#include <stdint.h>

#include <utility>

#include "api/video/video_frame_buffer.h"

namespace webrtc {

AugmentedVideoFrameBuffer::AugmentedVideoFrameBuffer(
    const rtc::scoped_refptr<VideoFrameBuffer>& video_frame_buffer,
    std::unique_ptr<uint8_t[]> augmenting_data,
    uint16_t augmenting_data_size)
    : augmenting_data_size_(augmenting_data_size),
      augmenting_data_(std::move(augmenting_data)),
      video_frame_buffer_(video_frame_buffer) {}

rtc::scoped_refptr<VideoFrameBuffer>
AugmentedVideoFrameBuffer::GetVideoFrameBuffer() const {
  return video_frame_buffer_;
}

uint8_t* AugmentedVideoFrameBuffer::GetAugmentingData() const {
  return augmenting_data_.get();
}

uint16_t AugmentedVideoFrameBuffer::GetAugmentingDataSize() const {
  return augmenting_data_size_;
}

VideoFrameBuffer::Type AugmentedVideoFrameBuffer::type() const {
  return video_frame_buffer_->type();
}

int AugmentedVideoFrameBuffer::width() const {
  return video_frame_buffer_->width();
}

int AugmentedVideoFrameBuffer::height() const {
  return video_frame_buffer_->height();
}

rtc::scoped_refptr<I420BufferInterface> AugmentedVideoFrameBuffer::ToI420() {
  return video_frame_buffer_->ToI420();
}

const I420BufferInterface* AugmentedVideoFrameBuffer::GetI420() const {
  // TODO(https://crbug.com/webrtc/12021): When AugmentedVideoFrameBuffer is
  // updated to implement the buffer interfaces of relevant
  // VideoFrameBuffer::Types, stop overriding GetI420() as a workaround to
  // AugmentedVideoFrameBuffer not being the type that is returned by type().
  return video_frame_buffer_->GetI420();
}
}  // namespace webrtc
@ -1,62 +0,0 @@
/*
 *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_AUGMENTED_VIDEO_FRAME_BUFFER_H_
#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_AUGMENTED_VIDEO_FRAME_BUFFER_H_

#include <cstdint>
#include <memory>

#include "api/scoped_refptr.h"
#include "api/video/video_frame_buffer.h"

namespace webrtc {
class AugmentedVideoFrameBuffer : public VideoFrameBuffer {
 public:
  AugmentedVideoFrameBuffer(
      const rtc::scoped_refptr<VideoFrameBuffer>& video_frame_buffer,
      std::unique_ptr<uint8_t[]> augmenting_data,
      uint16_t augmenting_data_size);

  // Retrieves the underlying VideoFrameBuffer without the augmented data.
  rtc::scoped_refptr<VideoFrameBuffer> GetVideoFrameBuffer() const;

  // Gets a pointer to the augmenting data and moves ownership to the caller.
  uint8_t* GetAugmentingData() const;

  // Gets the size of the augmenting data.
  uint16_t GetAugmentingDataSize() const;

  // Returns the type of the underlying VideoFrameBuffer.
  Type type() const final;

  // Returns the width of the underlying VideoFrameBuffer.
  int width() const final;

  // Returns the height of the underlying VideoFrameBuffer.
  int height() const final;

  // Gets the I420 buffer from the underlying frame buffer.
  rtc::scoped_refptr<I420BufferInterface> ToI420() final;
  // Returns GetI420() of the underlying VideoFrameBuffer.
  // TODO(hbos): AugmentedVideoFrameBuffer should not return a type (such as
  // kI420) without also implementing that type's interface (i.e.
  // I420BufferInterface). Either implement all possible Type's interfaces or
  // return kNative.
  const I420BufferInterface* GetI420() const final;

 private:
  uint16_t augmenting_data_size_;
  std::unique_ptr<uint8_t[]> augmenting_data_;
  rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer_;
};
}  // namespace webrtc

#endif  // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_AUGMENTED_VIDEO_FRAME_BUFFER_H_
@ -1,82 +0,0 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_

#include <map>
#include <memory>
#include <vector>

#include "api/environment/environment.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"

namespace webrtc {

class MultiplexDecoderAdapter : public VideoDecoder {
 public:
  // `factory` is not owned and expected to outlive this class.
  MultiplexDecoderAdapter(const Environment& env,
                          VideoDecoderFactory* factory,
                          const SdpVideoFormat& associated_format,
                          bool supports_augmenting_data = false);
  virtual ~MultiplexDecoderAdapter();

  // Implements VideoDecoder
  bool Configure(const Settings& settings) override;
  int32_t Decode(const EncodedImage& input_image,
                 int64_t render_time_ms) override;
  int32_t RegisterDecodeCompleteCallback(
      DecodedImageCallback* callback) override;
  int32_t Release() override;

  void Decoded(AlphaCodecStream stream_idx,
               VideoFrame* decoded_image,
               absl::optional<int32_t> decode_time_ms,
               absl::optional<uint8_t> qp);

 private:
  // Wrapper class that redirects Decoded() calls.
  class AdapterDecodedImageCallback;

  // Holds the decoded image output of a frame.
  struct DecodedImageData;

  // Holds the augmenting data of an image.
  struct AugmentingData;

  void MergeAlphaImages(VideoFrame* decoded_image,
                        const absl::optional<int32_t>& decode_time_ms,
                        const absl::optional<uint8_t>& qp,
                        VideoFrame* multiplex_decoded_image,
                        const absl::optional<int32_t>& multiplex_decode_time_ms,
                        const absl::optional<uint8_t>& multiplex_qp,
                        std::unique_ptr<uint8_t[]> augmenting_data,
                        uint16_t augmenting_data_length);

  const Environment env_;
  VideoDecoderFactory* const factory_;
  const SdpVideoFormat associated_format_;
  std::vector<std::unique_ptr<VideoDecoder>> decoders_;
  std::vector<std::unique_ptr<AdapterDecodedImageCallback>> adapter_callbacks_;
  DecodedImageCallback* decoded_complete_callback_;

  // Holds YUV or AXX decode output of a frame that is identified by timestamp.
  std::map<uint32_t /* timestamp */, DecodedImageData> decoded_data_;
  std::map<uint32_t /* timestamp */, AugmentingData> decoded_augmenting_data_;
  const bool supports_augmenting_data_;
};

}  // namespace webrtc

#endif  // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_DECODER_ADAPTER_H_
@ -1,91 +0,0 @@
/*
 *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_

#include <map>
#include <memory>
#include <vector>

#include "api/fec_controller_override.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_encoder.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "rtc_base/synchronization/mutex.h"

namespace webrtc {

enum AlphaCodecStream {
  kYUVStream = 0,
  kAXXStream = 1,
  kAlphaCodecStreams = 2,
};

class MultiplexEncoderAdapter : public VideoEncoder {
 public:
  // `factory` is not owned and expected to outlive this class.
  MultiplexEncoderAdapter(VideoEncoderFactory* factory,
                          const SdpVideoFormat& associated_format,
                          bool supports_augmenting_data = false);
  virtual ~MultiplexEncoderAdapter();

  // Implements VideoEncoder
  void SetFecControllerOverride(
      FecControllerOverride* fec_controller_override) override;
  int InitEncode(const VideoCodec* inst,
                 const VideoEncoder::Settings& settings) override;
  int Encode(const VideoFrame& input_image,
             const std::vector<VideoFrameType>* frame_types) override;
  int RegisterEncodeCompleteCallback(EncodedImageCallback* callback) override;
  void SetRates(const RateControlParameters& parameters) override;
  void OnPacketLossRateUpdate(float packet_loss_rate) override;
  void OnRttUpdate(int64_t rtt_ms) override;
  void OnLossNotification(const LossNotification& loss_notification) override;
  int Release() override;
  EncoderInfo GetEncoderInfo() const override;

  EncodedImageCallback::Result OnEncodedImage(
      AlphaCodecStream stream_idx,
      const EncodedImage& encodedImage,
      const CodecSpecificInfo* codecSpecificInfo);

 private:
  // Wrapper class that redirects OnEncodedImage() calls.
  class AdapterEncodedImageCallback;

  VideoEncoderFactory* const factory_;
  const SdpVideoFormat associated_format_;
  std::vector<std::unique_ptr<VideoEncoder>> encoders_;
  std::vector<std::unique_ptr<AdapterEncodedImageCallback>> adapter_callbacks_;
  EncodedImageCallback* encoded_complete_callback_;

  std::map<uint32_t /* timestamp */, MultiplexImage> stashed_images_
      RTC_GUARDED_BY(mutex_);

  uint16_t picture_index_ = 0;
  std::vector<uint8_t> multiplex_dummy_planes_;

  int key_frame_interval_;
  EncodedImage combined_image_;

  Mutex mutex_;

  const bool supports_augmented_data_;
  int augmenting_data_size_ = 0;

  EncoderInfo encoder_info_;
};

}  // namespace webrtc

#endif  // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_INCLUDE_MULTIPLEX_ENCODER_ADAPTER_H_
@ -1,267 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
|
||||
|
||||
#include "api/environment/environment.h"
|
||||
#include "api/video/encoded_image.h"
|
||||
#include "api/video/i420_buffer.h"
|
||||
#include "api/video/video_frame_buffer.h"
|
||||
#include "common_video/include/video_frame_buffer.h"
|
||||
#include "common_video/libyuv/include/webrtc_libyuv.h"
|
||||
#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
|
||||
#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
|
||||
#include "rtc_base/logging.h"
|
||||
|
||||
namespace webrtc {
|
||||
|
||||
class MultiplexDecoderAdapter::AdapterDecodedImageCallback
|
||||
: public webrtc::DecodedImageCallback {
|
||||
public:
|
||||
AdapterDecodedImageCallback(webrtc::MultiplexDecoderAdapter* adapter,
|
||||
AlphaCodecStream stream_idx)
|
||||
: adapter_(adapter), stream_idx_(stream_idx) {}
|
||||
|
||||
void Decoded(VideoFrame& decoded_image,
|
||||
absl::optional<int32_t> decode_time_ms,
|
||||
absl::optional<uint8_t> qp) override {
|
||||
if (!adapter_)
|
||||
return;
|
||||
adapter_->Decoded(stream_idx_, &decoded_image, decode_time_ms, qp);
|
||||
}
|
||||
int32_t Decoded(VideoFrame& decoded_image) override {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
|
||||
RTC_DCHECK_NOTREACHED();
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
private:
|
||||
MultiplexDecoderAdapter* adapter_;
|
||||
const AlphaCodecStream stream_idx_;
|
||||
};
|
||||
|
||||
struct MultiplexDecoderAdapter::DecodedImageData {
|
||||
explicit DecodedImageData(AlphaCodecStream stream_idx)
|
||||
: stream_idx_(stream_idx),
|
||||
decoded_image_(
|
||||
VideoFrame::Builder()
|
||||
.set_video_frame_buffer(
|
||||
I420Buffer::Create(1 /* width */, 1 /* height */))
|
||||
.set_timestamp_rtp(0)
|
||||
.set_timestamp_us(0)
|
||||
.set_rotation(kVideoRotation_0)
|
||||
.build()) {
|
||||
RTC_DCHECK_EQ(kAXXStream, stream_idx);
|
||||
}
|
||||
DecodedImageData(AlphaCodecStream stream_idx,
|
||||
const VideoFrame& decoded_image,
|
||||
const absl::optional<int32_t>& decode_time_ms,
|
||||
const absl::optional<uint8_t>& qp)
|
||||
: stream_idx_(stream_idx),
|
||||
decoded_image_(decoded_image),
|
||||
decode_time_ms_(decode_time_ms),
|
||||
qp_(qp) {}
|
||||
|
||||
DecodedImageData() = delete;
|
||||
DecodedImageData(const DecodedImageData&) = delete;
|
||||
DecodedImageData& operator=(const DecodedImageData&) = delete;
|
||||
|
||||
const AlphaCodecStream stream_idx_;
|
||||
VideoFrame decoded_image_;
|
||||
const absl::optional<int32_t> decode_time_ms_;
|
||||
const absl::optional<uint8_t> qp_;
|
||||
};
|
||||
|
||||
struct MultiplexDecoderAdapter::AugmentingData {
|
||||
AugmentingData(std::unique_ptr<uint8_t[]> augmenting_data, uint16_t data_size)
|
||||
: data_(std::move(augmenting_data)), size_(data_size) {}
|
||||
AugmentingData() = delete;
|
||||
AugmentingData(const AugmentingData&) = delete;
|
||||
AugmentingData& operator=(const AugmentingData&) = delete;
|
||||
|
||||
std::unique_ptr<uint8_t[]> data_;
|
||||
const uint16_t size_;
|
||||
};
|
||||
|
||||
MultiplexDecoderAdapter::MultiplexDecoderAdapter(
|
||||
const Environment& env,
|
||||
VideoDecoderFactory* factory,
|
||||
const SdpVideoFormat& associated_format,
|
||||
bool supports_augmenting_data)
|
||||
: env_(env),
|
||||
factory_(factory),
|
||||
associated_format_(associated_format),
|
||||
supports_augmenting_data_(supports_augmenting_data) {}
|
||||
|
||||
MultiplexDecoderAdapter::~MultiplexDecoderAdapter() {
|
||||
Release();
|
||||
}
|
||||
|
||||
bool MultiplexDecoderAdapter::Configure(const Settings& settings) {
|
||||
RTC_DCHECK_EQ(settings.codec_type(), kVideoCodecMultiplex);
|
||||
Settings associated_settings = settings;
|
||||
associated_settings.set_codec_type(
|
||||
PayloadStringToCodecType(associated_format_.name));
|
||||
for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
|
||||
std::unique_ptr<VideoDecoder> decoder =
|
||||
factory_->Create(env_, associated_format_);
|
||||
if (!decoder->Configure(associated_settings)) {
|
||||
return false;
|
||||
}
|
||||
adapter_callbacks_.emplace_back(
|
||||
new MultiplexDecoderAdapter::AdapterDecodedImageCallback(
|
||||
this, static_cast<AlphaCodecStream>(i)));
|
||||
decoder->RegisterDecodeCompleteCallback(adapter_callbacks_.back().get());
|
||||
decoders_.emplace_back(std::move(decoder));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
int32_t MultiplexDecoderAdapter::Decode(const EncodedImage& input_image,
|
||||
int64_t render_time_ms) {
|
||||
MultiplexImage image = MultiplexEncodedImagePacker::Unpack(input_image);
|
||||
|
||||
if (supports_augmenting_data_) {
|
||||
RTC_DCHECK(decoded_augmenting_data_.find(input_image.RtpTimestamp()) ==
|
||||
decoded_augmenting_data_.end());
|
||||
decoded_augmenting_data_.emplace(
|
||||
std::piecewise_construct,
|
||||
std::forward_as_tuple(input_image.RtpTimestamp()),
|
||||
std::forward_as_tuple(std::move(image.augmenting_data),
|
||||
image.augmenting_data_size));
|
||||
}
|
||||
|
||||
if (image.component_count == 1) {
|
||||
RTC_DCHECK(decoded_data_.find(input_image.RtpTimestamp()) ==
|
||||
decoded_data_.end());
|
||||
decoded_data_.emplace(std::piecewise_construct,
|
||||
std::forward_as_tuple(input_image.RtpTimestamp()),
|
||||
std::forward_as_tuple(kAXXStream));
|
||||
}
|
||||
int32_t rv = 0;
|
||||
for (size_t i = 0; i < image.image_components.size(); i++) {
|
||||
rv = decoders_[image.image_components[i].component_index]->Decode(
|
||||
image.image_components[i].encoded_image, render_time_ms);
|
||||
if (rv != WEBRTC_VIDEO_CODEC_OK)
|
||||
return rv;
|
||||
}
|
||||
return rv;
|
||||
}
|
||||
|
||||
int32_t MultiplexDecoderAdapter::RegisterDecodeCompleteCallback(
|
||||
DecodedImageCallback* callback) {
|
||||
decoded_complete_callback_ = callback;
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
int32_t MultiplexDecoderAdapter::Release() {
|
||||
for (auto& decoder : decoders_) {
|
||||
const int32_t rv = decoder->Release();
|
||||
if (rv)
|
||||
return rv;
|
||||
}
|
||||
decoders_.clear();
|
||||
adapter_callbacks_.clear();
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
void MultiplexDecoderAdapter::Decoded(AlphaCodecStream stream_idx,
|
||||
VideoFrame* decoded_image,
|
||||
absl::optional<int32_t> decode_time_ms,
|
||||
absl::optional<uint8_t> qp) {
|
||||
const auto& other_decoded_data_it =
|
||||
decoded_data_.find(decoded_image->timestamp());
|
||||
const auto& augmenting_data_it =
|
||||
      decoded_augmenting_data_.find(decoded_image->timestamp());
  const bool has_augmenting_data =
      augmenting_data_it != decoded_augmenting_data_.end();
  if (other_decoded_data_it != decoded_data_.end()) {
    uint16_t augmenting_data_size =
        has_augmenting_data ? augmenting_data_it->second.size_ : 0;
    std::unique_ptr<uint8_t[]> augmenting_data =
        has_augmenting_data ? std::move(augmenting_data_it->second.data_)
                            : nullptr;
    auto& other_image_data = other_decoded_data_it->second;
    if (stream_idx == kYUVStream) {
      RTC_DCHECK_EQ(kAXXStream, other_image_data.stream_idx_);
      MergeAlphaImages(decoded_image, decode_time_ms, qp,
                       &other_image_data.decoded_image_,
                       other_image_data.decode_time_ms_, other_image_data.qp_,
                       std::move(augmenting_data), augmenting_data_size);
    } else {
      RTC_DCHECK_EQ(kYUVStream, other_image_data.stream_idx_);
      RTC_DCHECK_EQ(kAXXStream, stream_idx);
      MergeAlphaImages(&other_image_data.decoded_image_,
                       other_image_data.decode_time_ms_, other_image_data.qp_,
                       decoded_image, decode_time_ms, qp,
                       std::move(augmenting_data), augmenting_data_size);
    }
    decoded_data_.erase(decoded_data_.begin(), other_decoded_data_it);
    if (has_augmenting_data) {
      decoded_augmenting_data_.erase(decoded_augmenting_data_.begin(),
                                     augmenting_data_it);
    }
    return;
  }
  RTC_DCHECK(decoded_data_.find(decoded_image->timestamp()) ==
             decoded_data_.end());
  decoded_data_.emplace(
      std::piecewise_construct,
      std::forward_as_tuple(decoded_image->timestamp()),
      std::forward_as_tuple(stream_idx, *decoded_image, decode_time_ms, qp));
}

void MultiplexDecoderAdapter::MergeAlphaImages(
    VideoFrame* decoded_image,
    const absl::optional<int32_t>& decode_time_ms,
    const absl::optional<uint8_t>& qp,
    VideoFrame* alpha_decoded_image,
    const absl::optional<int32_t>& alpha_decode_time_ms,
    const absl::optional<uint8_t>& alpha_qp,
    std::unique_ptr<uint8_t[]> augmenting_data,
    uint16_t augmenting_data_length) {
  rtc::scoped_refptr<VideoFrameBuffer> merged_buffer;
  if (!alpha_decoded_image->timestamp()) {
    merged_buffer = decoded_image->video_frame_buffer();
  } else {
    rtc::scoped_refptr<webrtc::I420BufferInterface> yuv_buffer =
        decoded_image->video_frame_buffer()->ToI420();
    rtc::scoped_refptr<webrtc::I420BufferInterface> alpha_buffer =
        alpha_decoded_image->video_frame_buffer()->ToI420();
    RTC_DCHECK_EQ(yuv_buffer->width(), alpha_buffer->width());
    RTC_DCHECK_EQ(yuv_buffer->height(), alpha_buffer->height());
    merged_buffer = WrapI420ABuffer(
        yuv_buffer->width(), yuv_buffer->height(), yuv_buffer->DataY(),
        yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(),
        yuv_buffer->DataV(), yuv_buffer->StrideV(), alpha_buffer->DataY(),
        alpha_buffer->StrideY(),
        // To keep references alive.
        [yuv_buffer, alpha_buffer] {});
  }
  if (supports_augmenting_data_) {
    merged_buffer = rtc::make_ref_counted<AugmentedVideoFrameBuffer>(
        merged_buffer, std::move(augmenting_data), augmenting_data_length);
  }

  VideoFrame merged_image = VideoFrame::Builder()
                                .set_video_frame_buffer(merged_buffer)
                                .set_timestamp_rtp(decoded_image->timestamp())
                                .set_timestamp_us(0)
                                .set_rotation(decoded_image->rotation())
                                .set_id(decoded_image->id())
                                .set_packet_infos(decoded_image->packet_infos())
                                .build();
  decoded_complete_callback_->Decoded(merged_image, decode_time_ms, qp);
}

}  // namespace webrtc

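The merge above is zero-copy: the color decoder's I420 planes and the alpha decoder's Y plane are wrapped into a single I420A buffer whose release lambda pins both source buffers. A minimal sketch of the same pattern outside the adapter (the helper name MergeColorAndAlpha is ours, and error handling is omitted):

#include <cstdint>

#include "api/video/video_frame.h"
#include "common_video/include/video_frame_buffer.h"

namespace {

webrtc::VideoFrame MergeColorAndAlpha(
    rtc::scoped_refptr<webrtc::I420BufferInterface> yuv,
    rtc::scoped_refptr<webrtc::I420BufferInterface> alpha,
    uint32_t rtp_timestamp) {
  // The alpha decoder emits a regular I420 frame whose Y plane carries the
  // alpha channel; only that plane is used here. Both buffers are assumed to
  // have identical dimensions.
  rtc::scoped_refptr<webrtc::VideoFrameBuffer> merged = webrtc::WrapI420ABuffer(
      yuv->width(), yuv->height(), yuv->DataY(), yuv->StrideY(), yuv->DataU(),
      yuv->StrideU(), yuv->DataV(), yuv->StrideV(), alpha->DataY(),
      alpha->StrideY(),
      // Keep both source buffers alive for the lifetime of the wrapper.
      [yuv, alpha] {});
  return webrtc::VideoFrame::Builder()
      .set_video_frame_buffer(merged)
      .set_timestamp_rtp(rtp_timestamp)
      .build();
}

}  // namespace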
@ -1,277 +0,0 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"

#include <cstring>
#include <utility>

#include "modules/rtp_rtcp/source/byte_io.h"
#include "rtc_base/checks.h"

namespace webrtc {
int PackHeader(uint8_t* buffer, MultiplexImageHeader header) {
  int offset = 0;
  ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, header.component_count);
  offset += sizeof(uint8_t);

  ByteWriter<uint16_t>::WriteBigEndian(buffer + offset, header.image_index);
  offset += sizeof(uint16_t);

  ByteWriter<uint16_t>::WriteBigEndian(buffer + offset,
                                       header.augmenting_data_size);
  offset += sizeof(uint16_t);

  ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
                                       header.augmenting_data_offset);
  offset += sizeof(uint32_t);

  ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
                                       header.first_component_header_offset);
  offset += sizeof(uint32_t);

  RTC_DCHECK_EQ(offset, kMultiplexImageHeaderSize);
  return offset;
}

MultiplexImageHeader UnpackHeader(const uint8_t* buffer) {
  MultiplexImageHeader header;
  int offset = 0;
  header.component_count = ByteReader<uint8_t>::ReadBigEndian(buffer + offset);
  offset += sizeof(uint8_t);

  header.image_index = ByteReader<uint16_t>::ReadBigEndian(buffer + offset);
  offset += sizeof(uint16_t);

  header.augmenting_data_size =
      ByteReader<uint16_t>::ReadBigEndian(buffer + offset);
  offset += sizeof(uint16_t);

  header.augmenting_data_offset =
      ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
  offset += sizeof(uint32_t);

  header.first_component_header_offset =
      ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
  offset += sizeof(uint32_t);

  RTC_DCHECK_EQ(offset, kMultiplexImageHeaderSize);
  return header;
}

int PackFrameHeader(uint8_t* buffer,
                    MultiplexImageComponentHeader frame_header) {
  int offset = 0;
  ByteWriter<uint32_t>::WriteBigEndian(
      buffer + offset, frame_header.next_component_header_offset);
  offset += sizeof(uint32_t);

  ByteWriter<uint8_t>::WriteBigEndian(buffer + offset,
                                      frame_header.component_index);
  offset += sizeof(uint8_t);

  ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
                                       frame_header.bitstream_offset);
  offset += sizeof(uint32_t);

  ByteWriter<uint32_t>::WriteBigEndian(buffer + offset,
                                       frame_header.bitstream_length);
  offset += sizeof(uint32_t);

  ByteWriter<uint8_t>::WriteBigEndian(buffer + offset, frame_header.codec_type);
  offset += sizeof(uint8_t);

  ByteWriter<uint8_t>::WriteBigEndian(
      buffer + offset, static_cast<uint8_t>(frame_header.frame_type));
  offset += sizeof(uint8_t);

  RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
  return offset;
}

MultiplexImageComponentHeader UnpackFrameHeader(const uint8_t* buffer) {
  MultiplexImageComponentHeader frame_header;
  int offset = 0;

  frame_header.next_component_header_offset =
      ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
  offset += sizeof(uint32_t);

  frame_header.component_index =
      ByteReader<uint8_t>::ReadBigEndian(buffer + offset);
  offset += sizeof(uint8_t);

  frame_header.bitstream_offset =
      ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
  offset += sizeof(uint32_t);

  frame_header.bitstream_length =
      ByteReader<uint32_t>::ReadBigEndian(buffer + offset);
  offset += sizeof(uint32_t);

  // This makes the wire format depend on the numeric values of the
  // VideoCodecType and VideoFrameType enum constants.
  frame_header.codec_type = static_cast<VideoCodecType>(
      ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
  offset += sizeof(uint8_t);

  frame_header.frame_type = static_cast<VideoFrameType>(
      ByteReader<uint8_t>::ReadBigEndian(buffer + offset));
  offset += sizeof(uint8_t);

  RTC_DCHECK_EQ(offset, kMultiplexImageComponentHeaderSize);
  return frame_header;
}

void PackBitstream(uint8_t* buffer, MultiplexImageComponent image) {
  memcpy(buffer, image.encoded_image.data(), image.encoded_image.size());
}

MultiplexImage::MultiplexImage(uint16_t picture_index,
                               uint8_t frame_count,
                               std::unique_ptr<uint8_t[]> augmenting_data,
                               uint16_t augmenting_data_size)
    : image_index(picture_index),
      component_count(frame_count),
      augmenting_data_size(augmenting_data_size),
      augmenting_data(std::move(augmenting_data)) {}

EncodedImage MultiplexEncodedImagePacker::PackAndRelease(
    const MultiplexImage& multiplex_image) {
  MultiplexImageHeader header;
  std::vector<MultiplexImageComponentHeader> frame_headers;

  header.component_count = multiplex_image.component_count;
  header.image_index = multiplex_image.image_index;
  int header_offset = kMultiplexImageHeaderSize;
  header.first_component_header_offset = header_offset;
  header.augmenting_data_offset =
      header_offset +
      kMultiplexImageComponentHeaderSize * header.component_count;
  header.augmenting_data_size = multiplex_image.augmenting_data_size;
  int bitstream_offset =
      header.augmenting_data_offset + header.augmenting_data_size;

  const std::vector<MultiplexImageComponent>& images =
      multiplex_image.image_components;
  EncodedImage combined_image = images[0].encoded_image;
  for (size_t i = 0; i < images.size(); i++) {
    MultiplexImageComponentHeader frame_header;
    header_offset += kMultiplexImageComponentHeaderSize;
    frame_header.next_component_header_offset =
        (i == images.size() - 1) ? 0 : header_offset;
    frame_header.component_index = images[i].component_index;

    frame_header.bitstream_offset = bitstream_offset;
    frame_header.bitstream_length =
        static_cast<uint32_t>(images[i].encoded_image.size());
    bitstream_offset += frame_header.bitstream_length;

    frame_header.codec_type = images[i].codec_type;
    frame_header.frame_type = images[i].encoded_image._frameType;

    // If any component is a delta frame, the combined frame has to be marked
    // as a delta frame too: decoding the whole image without previous frame
    // data requires every component to be a key frame. Only when all
    // components are key frames can the combined frame be marked as a key
    // frame.
    if (frame_header.frame_type == VideoFrameType::kVideoFrameDelta) {
      combined_image._frameType = VideoFrameType::kVideoFrameDelta;
    }

    frame_headers.push_back(frame_header);
  }

  auto buffer = EncodedImageBuffer::Create(bitstream_offset);
  combined_image.SetEncodedData(buffer);

  // Header
  header_offset = PackHeader(buffer->data(), header);
  RTC_DCHECK_EQ(header.first_component_header_offset,
                kMultiplexImageHeaderSize);

  // Frame headers
  for (size_t i = 0; i < images.size(); i++) {
    int relative_offset =
        PackFrameHeader(buffer->data() + header_offset, frame_headers[i]);
    RTC_DCHECK_EQ(relative_offset, kMultiplexImageComponentHeaderSize);

    header_offset = frame_headers[i].next_component_header_offset;
    RTC_DCHECK_EQ(header_offset,
                  (i == images.size() - 1)
                      ? 0
                      : (kMultiplexImageHeaderSize +
                         kMultiplexImageComponentHeaderSize * (i + 1)));
  }

  // Augmenting data
  if (multiplex_image.augmenting_data_size != 0) {
    memcpy(buffer->data() + header.augmenting_data_offset,
           multiplex_image.augmenting_data.get(),
           multiplex_image.augmenting_data_size);
  }

  // Bitstreams
  for (size_t i = 0; i < images.size(); i++) {
    PackBitstream(buffer->data() + frame_headers[i].bitstream_offset,
                  images[i]);
  }

  return combined_image;
}

MultiplexImage MultiplexEncodedImagePacker::Unpack(
    const EncodedImage& combined_image) {
  const MultiplexImageHeader& header = UnpackHeader(combined_image.data());

  std::vector<MultiplexImageComponentHeader> frame_headers;
  int header_offset = header.first_component_header_offset;

  while (header_offset > 0) {
    frame_headers.push_back(
        UnpackFrameHeader(combined_image.data() + header_offset));
    header_offset = frame_headers.back().next_component_header_offset;
  }

  RTC_DCHECK_LE(frame_headers.size(), header.component_count);
  std::unique_ptr<uint8_t[]> augmenting_data = nullptr;
  if (header.augmenting_data_size != 0) {
    augmenting_data =
        std::unique_ptr<uint8_t[]>(new uint8_t[header.augmenting_data_size]);
    memcpy(augmenting_data.get(),
           combined_image.data() + header.augmenting_data_offset,
           header.augmenting_data_size);
  }

  MultiplexImage multiplex_image(header.image_index, header.component_count,
                                 std::move(augmenting_data),
                                 header.augmenting_data_size);

  for (size_t i = 0; i < frame_headers.size(); i++) {
    MultiplexImageComponent image_component;
    image_component.component_index = frame_headers[i].component_index;
    image_component.codec_type = frame_headers[i].codec_type;

    EncodedImage encoded_image = combined_image;
    encoded_image.SetRtpTimestamp(combined_image.RtpTimestamp());
    encoded_image._frameType = frame_headers[i].frame_type;
    encoded_image.SetEncodedData(EncodedImageBuffer::Create(
        combined_image.data() + frame_headers[i].bitstream_offset,
        frame_headers[i].bitstream_length));

    image_component.encoded_image = encoded_image;

    multiplex_image.image_components.push_back(image_component);
  }

  return multiplex_image;
}

}  // namespace webrtc

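Taken together, PackAndRelease() and Unpack() define the (now removed) multiplex wire format: image header, component header chain, optional augmenting data, then the concatenated bitstreams. A minimal sketch of a round trip through the packer, using dummy bytes in place of real encoder output (RoundTripOneComponent is our name, not part of the file):

#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
#include "rtc_base/checks.h"

namespace webrtc {

EncodedImage RoundTripOneComponent() {
  const uint8_t kFakeBitstream[] = {0xde, 0xad, 0xbe, 0xef};

  MultiplexImageComponent component;
  component.component_index = 0;  // The YUV stream.
  component.codec_type = kVideoCodecVP9;
  component.encoded_image.SetEncodedData(
      EncodedImageBuffer::Create(kFakeBitstream, sizeof(kFakeBitstream)));
  component.encoded_image._frameType = VideoFrameType::kVideoFrameKey;

  MultiplexImage image(/*picture_index=*/0, /*component_count=*/1,
                       /*augmenting_data=*/nullptr,
                       /*augmenting_data_size=*/0);
  image.image_components.push_back(component);

  // Pack: one contiguous buffer holding the image header, the component
  // header(s), optional augmenting data, and the component bitstreams.
  EncodedImage packed = MultiplexEncodedImagePacker::PackAndRelease(image);

  // Unpack: recovers the component headers and per-component bitstreams.
  MultiplexImage unpacked = MultiplexEncodedImagePacker::Unpack(packed);
  RTC_DCHECK_EQ(unpacked.component_count, 1);
  return packed;
}

}  // namespace webrtc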
@ -1,120 +0,0 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef MODULES_VIDEO_CODING_CODECS_MULTIPLEX_MULTIPLEX_ENCODED_IMAGE_PACKER_H_
#define MODULES_VIDEO_CODING_CODECS_MULTIPLEX_MULTIPLEX_ENCODED_IMAGE_PACKER_H_

#include <cstdint>
#include <memory>
#include <vector>

#include "api/video/encoded_image.h"
#include "api/video_codecs/video_codec.h"

namespace webrtc {

// Struct describing the whole bundle of multiple frames of an image.
// This struct is expected to be set at the beginning of a picture's
// bitstream data.
struct MultiplexImageHeader {
  // The number of frame components making up the complete picture data.
  // For example, `component_count` = 2 for the case of a YUV frame with an
  // Alpha frame.
  uint8_t component_count;

  // The increasing image ID given by the encoder. Different components of a
  // single picture share the same `image_index`.
  uint16_t image_index;

  // The location of the first MultiplexImageComponentHeader in the bitstream,
  // in terms of bytes from the beginning of the bitstream.
  uint32_t first_component_header_offset;

  // The location of the augmenting data in the bitstream, in terms of bytes
  // from the beginning of the bitstream.
  uint32_t augmenting_data_offset;

  // The size of the augmenting data in the bitstream, in terms of bytes.
  uint16_t augmenting_data_size;
};
const int kMultiplexImageHeaderSize =
    sizeof(uint8_t) + 2 * sizeof(uint16_t) + 2 * sizeof(uint32_t);

// Struct describing an individual image component's content.
struct MultiplexImageComponentHeader {
  // The location of the next MultiplexImageComponentHeader in the bitstream,
  // in terms of bytes from the beginning of the bitstream.
  uint32_t next_component_header_offset;

  // Identifies which component this frame represents, i.e. YUV frame vs Alpha
  // frame.
  uint8_t component_index;

  // The location of the frame's actual encoded image data in the bitstream,
  // in terms of bytes from the beginning of the bitstream.
  uint32_t bitstream_offset;

  // Indicates the number of bytes of the encoded image data.
  uint32_t bitstream_length;

  // Indicates the underlying VideoCodecType of the frame, i.e. VP9 or VP8 etc.
  VideoCodecType codec_type;

  // Indicates whether the underlying frame is a key frame or delta frame.
  VideoFrameType frame_type;
};
const int kMultiplexImageComponentHeaderSize =
    sizeof(uint32_t) + sizeof(uint8_t) + sizeof(uint32_t) + sizeof(uint32_t) +
    sizeof(uint8_t) + sizeof(uint8_t);

// Struct holding the encoded image for one component.
struct MultiplexImageComponent {
  // Indicates the underlying VideoCodecType of the frame, i.e. VP9 or VP8 etc.
  VideoCodecType codec_type;

  // Identifies which component this frame represents, i.e. YUV frame vs Alpha
  // frame.
  uint8_t component_index;

  // Stores the actual frame data of the encoded image.
  EncodedImage encoded_image;
};

// Struct holding the whole frame bundle of components of an image.
struct MultiplexImage {
  uint16_t image_index;
  uint8_t component_count;
  uint16_t augmenting_data_size;
  std::unique_ptr<uint8_t[]> augmenting_data;
  std::vector<MultiplexImageComponent> image_components;

  MultiplexImage(uint16_t picture_index,
                 uint8_t component_count,
                 std::unique_ptr<uint8_t[]> augmenting_data,
                 uint16_t augmenting_data_size);
};

// A utility class providing conversion between two representations of a
// multiplex image frame:
// 1. The packed version is just one encoded image, with all necessary
//    metadata packed into the bitstream as headers.
// 2. The unpacked version is essentially a list of encoded images, one per
//    component.
class MultiplexEncodedImagePacker {
 public:
  // Note: It is the caller's responsibility to release the buffer of the
  // result.
  static EncodedImage PackAndRelease(const MultiplexImage& image);

  // Note: The image components just share the memory with `combined_image`.
  static MultiplexImage Unpack(const EncodedImage& combined_image);
};

}  // namespace webrtc

#endif  // MODULES_VIDEO_CODING_CODECS_MULTIPLEX_MULTIPLEX_ENCODED_IMAGE_PACKER_H_

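The two size constants follow directly from the field lists: 1 + 2 + 2 + 4 + 4 = 13 bytes for MultiplexImageHeader, and 4 + 1 + 4 + 4 + 1 + 1 = 15 bytes per MultiplexImageComponentHeader. A small worked example (ours, not part of the original file) of the resulting offsets for a two-component frame carrying 16 bytes of augmenting data:

#include <cstdio>

int main() {
  const int kImageHeaderSize = 1 + 2 + 2 + 4 + 4;           // = 13 bytes
  const int kComponentHeaderSize = 4 + 1 + 4 + 4 + 1 + 1;   // = 15 bytes
  const int component_count = 2;
  const int augmenting_data_size = 16;

  const int first_component_header_offset = kImageHeaderSize;     // 13
  const int augmenting_data_offset =
      kImageHeaderSize + component_count * kComponentHeaderSize;  // 43
  const int first_bitstream_offset =
      augmenting_data_offset + augmenting_data_size;              // 59

  printf("component headers at %d, augmenting data at %d, bitstreams at %d\n",
         first_component_header_offset, augmenting_data_offset,
         first_bitstream_offset);
  return 0;
}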
@ -1,356 +0,0 @@
/*
 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"

#include <cstring>

#include "api/video/encoded_image.h"
#include "api/video_codecs/video_encoder.h"
#include "common_video/include/video_frame_buffer.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "media/base/video_common.h"
#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
#include "rtc_base/logging.h"

namespace webrtc {

// Callback wrapper that helps distinguish returned results from `encoders_`
// instances.
class MultiplexEncoderAdapter::AdapterEncodedImageCallback
    : public webrtc::EncodedImageCallback {
 public:
  AdapterEncodedImageCallback(webrtc::MultiplexEncoderAdapter* adapter,
                              AlphaCodecStream stream_idx)
      : adapter_(adapter), stream_idx_(stream_idx) {}

  EncodedImageCallback::Result OnEncodedImage(
      const EncodedImage& encoded_image,
      const CodecSpecificInfo* codec_specific_info) override {
    if (!adapter_)
      return Result(Result::OK);
    return adapter_->OnEncodedImage(stream_idx_, encoded_image,
                                    codec_specific_info);
  }

 private:
  MultiplexEncoderAdapter* adapter_;
  const AlphaCodecStream stream_idx_;
};

MultiplexEncoderAdapter::MultiplexEncoderAdapter(
    VideoEncoderFactory* factory,
    const SdpVideoFormat& associated_format,
    bool supports_augmented_data)
    : factory_(factory),
      associated_format_(associated_format),
      encoded_complete_callback_(nullptr),
      key_frame_interval_(0),
      supports_augmented_data_(supports_augmented_data) {}

MultiplexEncoderAdapter::~MultiplexEncoderAdapter() {
  Release();
}

void MultiplexEncoderAdapter::SetFecControllerOverride(
    FecControllerOverride* fec_controller_override) {
  // Ignored.
}

int MultiplexEncoderAdapter::InitEncode(
    const VideoCodec* inst,
    const VideoEncoder::Settings& settings) {
  const size_t buffer_size =
      CalcBufferSize(VideoType::kI420, inst->width, inst->height);
  multiplex_dummy_planes_.resize(buffer_size);
  // It is more expensive to encode 0x00, so use 0x80 instead.
  std::fill(multiplex_dummy_planes_.begin(), multiplex_dummy_planes_.end(),
            0x80);

  RTC_DCHECK_EQ(kVideoCodecMultiplex, inst->codecType);
  VideoCodec video_codec = *inst;
  video_codec.codecType = PayloadStringToCodecType(associated_format_.name);

  // Take over the key frame interval at adapter level, because we have to
  // sync the key frames for both sub-encoders.
  switch (video_codec.codecType) {
    case kVideoCodecVP8:
      key_frame_interval_ = video_codec.VP8()->keyFrameInterval;
      video_codec.VP8()->keyFrameInterval = 0;
      break;
    case kVideoCodecVP9:
      key_frame_interval_ = video_codec.VP9()->keyFrameInterval;
      video_codec.VP9()->keyFrameInterval = 0;
      break;
    case kVideoCodecH264:
      key_frame_interval_ = video_codec.H264()->keyFrameInterval;
      video_codec.H264()->keyFrameInterval = 0;
      break;
    case kVideoCodecH265:
      // TODO(bugs.webrtc.org/13485)
      break;
    default:
      break;
  }

  encoder_info_ = EncoderInfo();
  encoder_info_.implementation_name = "MultiplexEncoderAdapter (";
  encoder_info_.requested_resolution_alignment = 1;
  encoder_info_.apply_alignment_to_all_simulcast_layers = false;
  // This needs to be false so that we can do the split in Encode().
  encoder_info_.supports_native_handle = false;

  for (size_t i = 0; i < kAlphaCodecStreams; ++i) {
    std::unique_ptr<VideoEncoder> encoder =
        factory_->CreateVideoEncoder(associated_format_);
    const int rv = encoder->InitEncode(&video_codec, settings);
    if (rv) {
      RTC_LOG(LS_ERROR) << "Failed to create multiplex codec index " << i;
      return rv;
    }
    adapter_callbacks_.emplace_back(new AdapterEncodedImageCallback(
        this, static_cast<AlphaCodecStream>(i)));
    encoder->RegisterEncodeCompleteCallback(adapter_callbacks_.back().get());

    const EncoderInfo& encoder_impl_info = encoder->GetEncoderInfo();
    encoder_info_.implementation_name += encoder_impl_info.implementation_name;
    if (i != kAlphaCodecStreams - 1) {
      encoder_info_.implementation_name += ", ";
    }
    // Uses hardware support if any of the encoders uses it.
    // For example, if we are having issues with down-scaling due to
    // pipelining delay in HW encoders we need higher encoder usage
    // thresholds in CPU adaptation.
    if (i == 0) {
      encoder_info_.is_hardware_accelerated =
          encoder_impl_info.is_hardware_accelerated;
    } else {
      encoder_info_.is_hardware_accelerated |=
          encoder_impl_info.is_hardware_accelerated;
    }

    encoder_info_.requested_resolution_alignment = cricket::LeastCommonMultiple(
        encoder_info_.requested_resolution_alignment,
        encoder_impl_info.requested_resolution_alignment);

    if (encoder_impl_info.apply_alignment_to_all_simulcast_layers) {
      encoder_info_.apply_alignment_to_all_simulcast_layers = true;
    }

    encoders_.emplace_back(std::move(encoder));
  }
  encoder_info_.implementation_name += ")";

  return WEBRTC_VIDEO_CODEC_OK;
}

int MultiplexEncoderAdapter::Encode(
    const VideoFrame& input_image,
    const std::vector<VideoFrameType>* frame_types) {
  if (!encoded_complete_callback_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }

  // The input image is forwarded as-is, unless it is a native buffer and
  // `supports_augmented_data_` is true, in which case we need to map it in
  // order to access the underlying AugmentedVideoFrameBuffer.
  VideoFrame forwarded_image = input_image;
  if (supports_augmented_data_ &&
      forwarded_image.video_frame_buffer()->type() ==
          VideoFrameBuffer::Type::kNative) {
    auto info = GetEncoderInfo();
    rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer =
        forwarded_image.video_frame_buffer()->GetMappedFrameBuffer(
            info.preferred_pixel_formats);
    if (!mapped_buffer) {
      // Unable to map the buffer.
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    forwarded_image.set_video_frame_buffer(std::move(mapped_buffer));
  }

  std::vector<VideoFrameType> adjusted_frame_types;
  if (key_frame_interval_ > 0 && picture_index_ % key_frame_interval_ == 0) {
    adjusted_frame_types.push_back(VideoFrameType::kVideoFrameKey);
  } else {
    adjusted_frame_types.push_back(VideoFrameType::kVideoFrameDelta);
  }
  const bool has_alpha = forwarded_image.video_frame_buffer()->type() ==
                         VideoFrameBuffer::Type::kI420A;
  std::unique_ptr<uint8_t[]> augmenting_data = nullptr;
  uint16_t augmenting_data_length = 0;
  AugmentedVideoFrameBuffer* augmented_video_frame_buffer = nullptr;
  if (supports_augmented_data_) {
    augmented_video_frame_buffer = static_cast<AugmentedVideoFrameBuffer*>(
        forwarded_image.video_frame_buffer().get());
    augmenting_data_length =
        augmented_video_frame_buffer->GetAugmentingDataSize();
    augmenting_data =
        std::unique_ptr<uint8_t[]>(new uint8_t[augmenting_data_length]);
    memcpy(augmenting_data.get(),
           augmented_video_frame_buffer->GetAugmentingData(),
           augmenting_data_length);
    augmenting_data_size_ = augmenting_data_length;
  }

  {
    MutexLock lock(&mutex_);
    stashed_images_.emplace(
        std::piecewise_construct,
        std::forward_as_tuple(forwarded_image.timestamp()),
        std::forward_as_tuple(
            picture_index_, has_alpha ? kAlphaCodecStreams : 1,
            std::move(augmenting_data), augmenting_data_length));
  }

  ++picture_index_;

  // Encode YUV
  int rv =
      encoders_[kYUVStream]->Encode(forwarded_image, &adjusted_frame_types);

  // If we do not receive an alpha frame, we send a single frame for this
  // `picture_index_`. The receiver will receive `frame_count` as 1, which
  // signals this case.
  if (rv || !has_alpha)
    return rv;

  // Encode AXX
  rtc::scoped_refptr<VideoFrameBuffer> frame_buffer =
      supports_augmented_data_
          ? augmented_video_frame_buffer->GetVideoFrameBuffer()
          : forwarded_image.video_frame_buffer();
  const I420ABufferInterface* yuva_buffer = frame_buffer->GetI420A();
  rtc::scoped_refptr<I420BufferInterface> alpha_buffer =
      WrapI420Buffer(forwarded_image.width(), forwarded_image.height(),
                     yuva_buffer->DataA(), yuva_buffer->StrideA(),
                     multiplex_dummy_planes_.data(), yuva_buffer->StrideU(),
                     multiplex_dummy_planes_.data(), yuva_buffer->StrideV(),
                     // To keep reference alive.
                     [frame_buffer] {});
  VideoFrame alpha_image =
      VideoFrame::Builder()
          .set_video_frame_buffer(alpha_buffer)
          .set_timestamp_rtp(forwarded_image.timestamp())
          .set_timestamp_ms(forwarded_image.render_time_ms())
          .set_rotation(forwarded_image.rotation())
          .set_id(forwarded_image.id())
          .set_packet_infos(forwarded_image.packet_infos())
          .build();
  rv = encoders_[kAXXStream]->Encode(alpha_image, &adjusted_frame_types);
  return rv;
}

int MultiplexEncoderAdapter::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  encoded_complete_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}

void MultiplexEncoderAdapter::SetRates(
    const RateControlParameters& parameters) {
  VideoBitrateAllocation bitrate_allocation(parameters.bitrate);
  bitrate_allocation.SetBitrate(
      0, 0, parameters.bitrate.GetBitrate(0, 0) - augmenting_data_size_);
  for (auto& encoder : encoders_) {
    // TODO(emircan): `framerate` is used to calculate duration in encoder
    // instances. We report the total frame rate to keep real time for now.
    // Remove this after refactoring duration logic.
    encoder->SetRates(RateControlParameters(
        bitrate_allocation,
        static_cast<uint32_t>(encoders_.size() * parameters.framerate_fps),
        parameters.bandwidth_allocation -
            DataRate::BitsPerSec(augmenting_data_size_)));
  }
}

void MultiplexEncoderAdapter::OnPacketLossRateUpdate(float packet_loss_rate) {
  for (auto& encoder : encoders_) {
    encoder->OnPacketLossRateUpdate(packet_loss_rate);
  }
}

void MultiplexEncoderAdapter::OnRttUpdate(int64_t rtt_ms) {
  for (auto& encoder : encoders_) {
    encoder->OnRttUpdate(rtt_ms);
  }
}

void MultiplexEncoderAdapter::OnLossNotification(
    const LossNotification& loss_notification) {
  for (auto& encoder : encoders_) {
    encoder->OnLossNotification(loss_notification);
  }
}

int MultiplexEncoderAdapter::Release() {
  for (auto& encoder : encoders_) {
    const int rv = encoder->Release();
    if (rv)
      return rv;
  }
  encoders_.clear();
  adapter_callbacks_.clear();
  MutexLock lock(&mutex_);
  stashed_images_.clear();

  return WEBRTC_VIDEO_CODEC_OK;
}

VideoEncoder::EncoderInfo MultiplexEncoderAdapter::GetEncoderInfo() const {
  return encoder_info_;
}

EncodedImageCallback::Result MultiplexEncoderAdapter::OnEncodedImage(
    AlphaCodecStream stream_idx,
    const EncodedImage& encodedImage,
    const CodecSpecificInfo* codecSpecificInfo) {
  // Save the image
  MultiplexImageComponent image_component;
  image_component.component_index = stream_idx;
  image_component.codec_type =
      PayloadStringToCodecType(associated_format_.name);
  image_component.encoded_image = encodedImage;

  MutexLock lock(&mutex_);
  const auto& stashed_image_itr =
      stashed_images_.find(encodedImage.RtpTimestamp());
  const auto& stashed_image_next_itr = std::next(stashed_image_itr, 1);
  RTC_DCHECK(stashed_image_itr != stashed_images_.end());
  MultiplexImage& stashed_image = stashed_image_itr->second;
  const uint8_t frame_count = stashed_image.component_count;

  stashed_image.image_components.push_back(image_component);

  if (stashed_image.image_components.size() == frame_count) {
    // Complete case
    for (auto iter = stashed_images_.begin();
         iter != stashed_images_.end() && iter != stashed_image_next_itr;
         iter++) {
      // No image at all, skip.
      if (iter->second.image_components.size() == 0)
        continue;

      // We have to send out those stashed frames, otherwise the delta frame
      // dependency chain is broken.
      combined_image_ =
          MultiplexEncodedImagePacker::PackAndRelease(iter->second);

      CodecSpecificInfo codec_info = *codecSpecificInfo;
      codec_info.codecType = kVideoCodecMultiplex;
      encoded_complete_callback_->OnEncodedImage(combined_image_, &codec_info);
    }

    stashed_images_.erase(stashed_images_.begin(), stashed_image_next_itr);
  }
  return EncodedImageCallback::Result(EncodedImageCallback::Result::OK);
}

}  // namespace webrtc

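For reference, this is roughly how the adapter was instantiated in practice (mirroring the end-to-end tests removed further below). The wrapper function is ours, and the static factory lifetime is a simplification; a real caller would own the factory:

#include <memory>

#include "api/video_codecs/sdp_video_format.h"
#include "media/base/media_constants.h"
#include "media/engine/internal_encoder_factory.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"

std::unique_ptr<webrtc::VideoEncoder> CreateMultiplexVp9Encoder() {
  // The multiplex encoder wraps two instances of an ordinary codec (VP9
  // here), created through a real encoder factory that must outlive it.
  static webrtc::InternalEncoderFactory factory;
  return std::make_unique<webrtc::MultiplexEncoderAdapter>(
      &factory, webrtc::SdpVideoFormat(cricket::kVp9CodecName),
      /*supports_augmented_data=*/false);
}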
@ -1,323 +0,0 @@
/*
 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <stddef.h>

#include <cstdint>
#include <memory>
#include <utility>
#include <vector>

#include "absl/types/optional.h"
#include "api/environment/environment.h"
#include "api/environment/environment_factory.h"
#include "api/scoped_refptr.h"
#include "api/test/mock_video_decoder_factory.h"
#include "api/test/mock_video_encoder_factory.h"
#include "api/video/encoded_image.h"
#include "api/video/video_frame.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "api/video_codecs/sdp_video_format.h"
#include "api/video_codecs/video_codec.h"
#include "api/video_codecs/video_decoder.h"
#include "api/video_codecs/video_encoder.h"
#include "common_video/include/video_frame_buffer.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "media/base/media_constants.h"
#include "modules/video_coding/codecs/multiplex/include/augmented_video_frame_buffer.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
#include "modules/video_coding/codecs/multiplex/multiplex_encoded_image_packer.h"
#include "modules/video_coding/codecs/test/video_codec_unittest.h"
#include "modules/video_coding/codecs/vp9/include/vp9.h"
#include "modules/video_coding/include/video_codec_interface.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/video_codec_settings.h"

using ::testing::_;
using ::testing::Return;

namespace webrtc {

constexpr const char* kMultiplexAssociatedCodecName = cricket::kVp9CodecName;
const VideoCodecType kMultiplexAssociatedCodecType =
    PayloadStringToCodecType(kMultiplexAssociatedCodecName);

class TestMultiplexAdapter : public VideoCodecUnitTest,
                             public ::testing::WithParamInterface<
                                 bool /* supports_augmenting_data */> {
 public:
  TestMultiplexAdapter()
      : decoder_factory_(new webrtc::MockVideoDecoderFactory),
        encoder_factory_(new webrtc::MockVideoEncoderFactory),
        supports_augmenting_data_(GetParam()) {}

 protected:
  std::unique_ptr<VideoDecoder> CreateDecoder() override {
    return std::make_unique<MultiplexDecoderAdapter>(
        env_, decoder_factory_.get(),
        SdpVideoFormat(kMultiplexAssociatedCodecName),
        supports_augmenting_data_);
  }

  std::unique_ptr<VideoEncoder> CreateEncoder() override {
    return std::make_unique<MultiplexEncoderAdapter>(
        encoder_factory_.get(), SdpVideoFormat(kMultiplexAssociatedCodecName),
        supports_augmenting_data_);
  }

  void ModifyCodecSettings(VideoCodec* codec_settings) override {
    webrtc::test::CodecSettings(kMultiplexAssociatedCodecType, codec_settings);
    codec_settings->VP9()->numberOfTemporalLayers = 1;
    codec_settings->VP9()->numberOfSpatialLayers = 1;
    codec_settings->codecType = webrtc::kVideoCodecMultiplex;
  }

  std::unique_ptr<VideoFrame> CreateDataAugmentedInputFrame(
      VideoFrame* video_frame) {
    rtc::scoped_refptr<VideoFrameBuffer> video_buffer =
        video_frame->video_frame_buffer();
    std::unique_ptr<uint8_t[]> data =
        std::unique_ptr<uint8_t[]>(new uint8_t[16]);
    for (int i = 0; i < 16; i++) {
      data[i] = i;
    }
    auto augmented_video_frame_buffer =
        rtc::make_ref_counted<AugmentedVideoFrameBuffer>(video_buffer,
                                                         std::move(data), 16);
    return std::make_unique<VideoFrame>(
        VideoFrame::Builder()
            .set_video_frame_buffer(augmented_video_frame_buffer)
            .set_timestamp_rtp(video_frame->timestamp())
            .set_timestamp_ms(video_frame->render_time_ms())
            .set_rotation(video_frame->rotation())
            .set_id(video_frame->id())
            .build());
  }

  std::unique_ptr<VideoFrame> CreateI420AInputFrame() {
    VideoFrame input_frame = NextInputFrame();
    rtc::scoped_refptr<webrtc::I420BufferInterface> yuv_buffer =
        input_frame.video_frame_buffer()->ToI420();
    rtc::scoped_refptr<I420ABufferInterface> yuva_buffer = WrapI420ABuffer(
        yuv_buffer->width(), yuv_buffer->height(), yuv_buffer->DataY(),
        yuv_buffer->StrideY(), yuv_buffer->DataU(), yuv_buffer->StrideU(),
        yuv_buffer->DataV(), yuv_buffer->StrideV(), yuv_buffer->DataY(),
        yuv_buffer->StrideY(),
        // To keep reference alive.
        [yuv_buffer] {});
    return std::make_unique<VideoFrame>(VideoFrame::Builder()
                                            .set_video_frame_buffer(yuva_buffer)
                                            .set_timestamp_rtp(123)
                                            .set_timestamp_ms(345)
                                            .set_rotation(kVideoRotation_0)
                                            .build());
  }

  std::unique_ptr<VideoFrame> CreateInputFrame(bool contains_alpha) {
    std::unique_ptr<VideoFrame> video_frame;
    if (contains_alpha) {
      video_frame = CreateI420AInputFrame();
    } else {
      VideoFrame next_frame = NextInputFrame();
      video_frame = std::make_unique<VideoFrame>(
          VideoFrame::Builder()
              .set_video_frame_buffer(next_frame.video_frame_buffer())
              .set_timestamp_rtp(next_frame.timestamp())
              .set_timestamp_ms(next_frame.render_time_ms())
              .set_rotation(next_frame.rotation())
              .set_id(next_frame.id())
              .build());
    }
    if (supports_augmenting_data_) {
      video_frame = CreateDataAugmentedInputFrame(video_frame.get());
    }

    return video_frame;
  }

  void CheckData(rtc::scoped_refptr<VideoFrameBuffer> video_frame_buffer) {
    if (!supports_augmenting_data_) {
      return;
    }
    AugmentedVideoFrameBuffer* augmented_buffer =
        static_cast<AugmentedVideoFrameBuffer*>(video_frame_buffer.get());
    EXPECT_EQ(augmented_buffer->GetAugmentingDataSize(), 16);
    uint8_t* data = augmented_buffer->GetAugmentingData();
    for (int i = 0; i < 16; i++) {
      EXPECT_EQ(data[i], i);
    }
  }

  std::unique_ptr<VideoFrame> ExtractAXXFrame(const VideoFrame& video_frame) {
    rtc::scoped_refptr<VideoFrameBuffer> video_frame_buffer =
        video_frame.video_frame_buffer();
    if (supports_augmenting_data_) {
      AugmentedVideoFrameBuffer* augmentedBuffer =
          static_cast<AugmentedVideoFrameBuffer*>(video_frame_buffer.get());
      video_frame_buffer = augmentedBuffer->GetVideoFrameBuffer();
    }
    const I420ABufferInterface* yuva_buffer = video_frame_buffer->GetI420A();
    rtc::scoped_refptr<I420BufferInterface> axx_buffer = WrapI420Buffer(
        yuva_buffer->width(), yuva_buffer->height(), yuva_buffer->DataA(),
        yuva_buffer->StrideA(), yuva_buffer->DataU(), yuva_buffer->StrideU(),
        yuva_buffer->DataV(), yuva_buffer->StrideV(), [video_frame_buffer] {});
    return std::make_unique<VideoFrame>(VideoFrame::Builder()
                                            .set_video_frame_buffer(axx_buffer)
                                            .set_timestamp_rtp(123)
                                            .set_timestamp_ms(345)
                                            .set_rotation(kVideoRotation_0)
                                            .build());
  }

 private:
  void SetUp() override {
    EXPECT_CALL(*decoder_factory_, Die);
    // The decoders/encoders will be owned by the caller of
    // CreateVideoDecoder()/CreateVideoEncoder().
    EXPECT_CALL(*decoder_factory_, Create).Times(2).WillRepeatedly([] {
      return VP9Decoder::Create();
    });

    EXPECT_CALL(*encoder_factory_, Die);
    EXPECT_CALL(*encoder_factory_, CreateVideoEncoder)
        .Times(2)
        .WillRepeatedly([] { return VP9Encoder::Create(); });

    VideoCodecUnitTest::SetUp();
  }

  const Environment env_ = CreateEnvironment();
  const std::unique_ptr<webrtc::MockVideoDecoderFactory> decoder_factory_;
  const std::unique_ptr<webrtc::MockVideoEncoderFactory> encoder_factory_;
  const bool supports_augmenting_data_;
};

// TODO(emircan): Currently VideoCodecUnitTest tests do a complete setup
// step that goes beyond constructing `decoder_`. Simplify these tests to do
// less.
TEST_P(TestMultiplexAdapter, ConstructAndDestructDecoder) {
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Release());
}

TEST_P(TestMultiplexAdapter, ConstructAndDestructEncoder) {
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Release());
}

TEST_P(TestMultiplexAdapter, EncodeDecodeI420Frame) {
  std::unique_ptr<VideoFrame> input_frame = CreateInputFrame(false);
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
  EncodedImage encoded_frame;
  CodecSpecificInfo codec_specific_info;
  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
  EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);

  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, -1));
  std::unique_ptr<VideoFrame> decoded_frame;
  absl::optional<uint8_t> decoded_qp;
  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
  ASSERT_TRUE(decoded_frame);
  EXPECT_GT(I420PSNR(input_frame.get(), decoded_frame.get()), 36);
  CheckData(decoded_frame->video_frame_buffer());
}

TEST_P(TestMultiplexAdapter, EncodeDecodeI420AFrame) {
  std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
  EncodedImage encoded_frame;
  CodecSpecificInfo codec_specific_info;
  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
  EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);

  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, decoder_->Decode(encoded_frame, 0));
  std::unique_ptr<VideoFrame> decoded_frame;
  absl::optional<uint8_t> decoded_qp;
  ASSERT_TRUE(WaitForDecodedFrame(&decoded_frame, &decoded_qp));
  ASSERT_TRUE(decoded_frame);
  EXPECT_GT(I420PSNR(yuva_frame.get(), decoded_frame.get()), 36);

  // Find PSNR for AXX bits.
  std::unique_ptr<VideoFrame> input_axx_frame = ExtractAXXFrame(*yuva_frame);
  std::unique_ptr<VideoFrame> output_axx_frame =
      ExtractAXXFrame(*decoded_frame);
  EXPECT_GT(I420PSNR(input_axx_frame.get(), output_axx_frame.get()), 47);

  CheckData(decoded_frame->video_frame_buffer());
}

TEST_P(TestMultiplexAdapter, CheckSingleFrameEncodedBitstream) {
  std::unique_ptr<VideoFrame> input_frame = CreateInputFrame(false);
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*input_frame, nullptr));
  EncodedImage encoded_frame;
  CodecSpecificInfo codec_specific_info;
  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
  EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
  EXPECT_FALSE(encoded_frame.SpatialIndex());

  const MultiplexImage& unpacked_frame =
      MultiplexEncodedImagePacker::Unpack(encoded_frame);
  EXPECT_EQ(0, unpacked_frame.image_index);
  EXPECT_EQ(1, unpacked_frame.component_count);
  const MultiplexImageComponent& component = unpacked_frame.image_components[0];
  EXPECT_EQ(0, component.component_index);
  EXPECT_NE(nullptr, component.encoded_image.data());
  EXPECT_EQ(VideoFrameType::kVideoFrameKey, component.encoded_image._frameType);
}

TEST_P(TestMultiplexAdapter, CheckDoubleFramesEncodedBitstream) {
  std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
  EncodedImage encoded_frame;
  CodecSpecificInfo codec_specific_info;
  ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
  EXPECT_EQ(kVideoCodecMultiplex, codec_specific_info.codecType);
  EXPECT_FALSE(encoded_frame.SpatialIndex());

  const MultiplexImage& unpacked_frame =
      MultiplexEncodedImagePacker::Unpack(encoded_frame);
  EXPECT_EQ(0, unpacked_frame.image_index);
  EXPECT_EQ(2, unpacked_frame.component_count);
  EXPECT_EQ(unpacked_frame.image_components.size(),
            unpacked_frame.component_count);
  for (int i = 0; i < unpacked_frame.component_count; ++i) {
    const MultiplexImageComponent& component =
        unpacked_frame.image_components[i];
    EXPECT_EQ(i, component.component_index);
    EXPECT_NE(nullptr, component.encoded_image.data());
    EXPECT_EQ(VideoFrameType::kVideoFrameKey,
              component.encoded_image._frameType);
  }
}

TEST_P(TestMultiplexAdapter, ImageIndexIncreases) {
  std::unique_ptr<VideoFrame> yuva_frame = CreateInputFrame(true);
  const size_t expected_num_encoded_frames = 3;
  for (size_t i = 0; i < expected_num_encoded_frames; ++i) {
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, encoder_->Encode(*yuva_frame, nullptr));
    EncodedImage encoded_frame;
    CodecSpecificInfo codec_specific_info;
    ASSERT_TRUE(WaitForEncodedFrame(&encoded_frame, &codec_specific_info));
    const MultiplexImage& unpacked_frame =
        MultiplexEncodedImagePacker::Unpack(encoded_frame);
    EXPECT_EQ(i, unpacked_frame.image_index);
    EXPECT_EQ(
        i ? VideoFrameType::kVideoFrameDelta : VideoFrameType::kVideoFrameKey,
        encoded_frame._frameType);
  }
}

INSTANTIATE_TEST_SUITE_P(TestMultiplexAdapter,
                         TestMultiplexAdapter,
                         ::testing::Bool());

}  // namespace webrtc

@ -34,17 +34,6 @@ namespace webrtc {
bool VideoCodecInitializer::SetupCodec(const VideoEncoderConfig& config,
                                       const std::vector<VideoStream>& streams,
                                       VideoCodec* codec) {
  if (config.codec_type == kVideoCodecMultiplex) {
    VideoEncoderConfig associated_config = config.Copy();
    associated_config.codec_type = kVideoCodecVP9;
    if (!SetupCodec(associated_config, streams, codec)) {
      RTC_LOG(LS_ERROR) << "Failed to create stereo encoder configuration.";
      return false;
    }
    codec->codecType = kVideoCodecMultiplex;
    return true;
  }

  *codec = VideoEncoderConfigToVideoCodec(config, streams);
  return true;
}

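The special case above recursed exactly once: the copied config carries kVideoCodecVP9, so the nested call takes the plain path, and the result is then relabeled as multiplex. A hedged caller-side sketch (the MakeMultiplexCodec helper and the include path are our assumptions):

#include <vector>

#include "modules/video_coding/include/video_codec_initializer.h"

bool MakeMultiplexCodec(const webrtc::VideoEncoderConfig& config,
                        const std::vector<webrtc::VideoStream>& streams,
                        webrtc::VideoCodec* codec_out) {
  // Ask for the multiplex codec; internally this configured the associated
  // VP9 codec and then stamped codecType back to kVideoCodecMultiplex.
  webrtc::VideoEncoderConfig multiplex_config = config.Copy();
  multiplex_config.codec_type = webrtc::kVideoCodecMultiplex;
  return webrtc::VideoCodecInitializer::SetupCodec(multiplex_config, streams,
                                                   codec_out);
}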
@ -86,8 +86,6 @@ class VideoCodecInitializerTest : public ::testing::Test {
    vp9_settings.numberOfTemporalLayers = num_temporal_streams;
    config_.encoder_specific_settings = rtc::make_ref_counted<
        webrtc::VideoEncoderConfig::Vp9EncoderSpecificSettings>(vp9_settings);
  } else if (type != VideoCodecType::kVideoCodecMultiplex) {
    ADD_FAILURE() << "Unexpected codec type: " << type;
  }
}

@ -100,8 +98,6 @@ class VideoCodecInitializerTest : public ::testing::Test {
    bitrate_allocator_ = CreateBuiltinVideoBitrateAllocatorFactory()
                             ->CreateVideoBitrateAllocator(codec_out_);
    RTC_CHECK(bitrate_allocator_);
    if (codec_out_.codecType == VideoCodecType::kVideoCodecMultiplex)
      return true;

    // Make sure temporal layers instances have been created.
    if (codec_out_.codecType == VideoCodecType::kVideoCodecVP8) {

@ -288,12 +284,6 @@ TEST_F(VideoCodecInitializerTest, HighFpsSimulcastVp8Screenshare) {
            bitrate_allocation.GetBitrate(1, 1));
}

TEST_F(VideoCodecInitializerTest, SingleStreamMultiplexCodec) {
  SetUpFor(VideoCodecType::kVideoCodecMultiplex, absl::nullopt, 1, 1, true);
  streams_.push_back(DefaultStream());
  EXPECT_TRUE(InitializeCodec());
}

TEST_F(VideoCodecInitializerTest, Vp9SvcDefaultLayering) {
  SetUpFor(VideoCodecType::kVideoCodecVP9, absl::nullopt, 3, 3, false);
  VideoStream stream = DefaultStream();

@ -70,7 +70,6 @@ TEST(BalancedDegradationSettings, GetsDefaultConfigIfNoList) {
  EXPECT_FALSE(settings.GetQpThresholds(kVideoCodecH264, 1));
  EXPECT_FALSE(settings.GetQpThresholds(kVideoCodecAV1, 1));
  EXPECT_FALSE(settings.GetQpThresholds(kVideoCodecGeneric, 1));
  EXPECT_FALSE(settings.GetQpThresholds(kVideoCodecMultiplex, 1));
}

TEST(BalancedDegradationSettings, GetsConfig) {

@ -399,7 +398,6 @@ TEST(BalancedDegradationSettings, CanAdaptUpWithCodecType) {
  EXPECT_TRUE(s.CanAdaptUp(kVideoCodecAV1, 1000, 77000));
  EXPECT_FALSE(s.CanAdaptUp(kVideoCodecGeneric, 1000, 24000));
  EXPECT_TRUE(s.CanAdaptUp(kVideoCodecGeneric, 1000, 25000));
  EXPECT_TRUE(s.CanAdaptUp(kVideoCodecMultiplex, 1000, 1));  // Not configured.
}

TEST(BalancedDegradationSettings, CanAdaptUpResolution) {

@ -435,8 +433,6 @@ TEST(BalancedDegradationSettings, CanAdaptUpResolutionWithCodecType) {
  EXPECT_TRUE(s.CanAdaptUpResolution(kVideoCodecAV1, 1000, 77000));
  EXPECT_FALSE(s.CanAdaptUpResolution(kVideoCodecGeneric, 1000, 24000));
  EXPECT_TRUE(s.CanAdaptUpResolution(kVideoCodecGeneric, 1000, 25000));
  EXPECT_TRUE(s.CanAdaptUpResolution(kVideoCodecMultiplex, 1000,
                                     1));  // Not configured.
}

TEST(BalancedDegradationSettings, GetsFpsDiff) {

@ -103,7 +103,6 @@ absl::optional<DataRate> GetExperimentalMinVideoBitrate(VideoCodecType type) {
    case kVideoCodecH264:
      return min_bitrate_h264.GetOptional();
    case kVideoCodecGeneric:
    case kVideoCodecMultiplex:
      return absl::nullopt;
  }

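GetExperimentalMinVideoBitrate() returns a value only when the corresponding field trial is set, as the tests below exercise. A sketch of how a caller might fold the experiment into a configured minimum (the helper name and include path are our assumptions):

#include <algorithm>

#include "absl/types/optional.h"
#include "api/units/data_rate.h"
#include "rtc_base/experiments/min_video_bitrate_experiment.h"

int EffectiveMinBitrateBps(webrtc::VideoCodecType type,
                           int configured_min_bps) {
  const absl::optional<webrtc::DataRate> experimental =
      webrtc::GetExperimentalMinVideoBitrate(type);
  if (!experimental)
    return configured_min_bps;
  // When the field trial is active, it raises (never lowers) the minimum in
  // this sketch; a real integration might simply override instead.
  return std::max(configured_min_bps, static_cast<int>(experimental->bps()));
}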
@ -31,9 +31,6 @@ TEST(GetExperimentalMinVideoBitrateTest,
            absl::nullopt);
  EXPECT_EQ(GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecH264),
            absl::nullopt);
  EXPECT_EQ(
      GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecMultiplex),
      absl::nullopt);
}

TEST(GetExperimentalMinVideoBitrateTest,

@ -49,9 +46,6 @@ TEST(GetExperimentalMinVideoBitrateTest,
            absl::nullopt);
  EXPECT_EQ(GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecH264),
            absl::nullopt);
  EXPECT_EQ(
      GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecMultiplex),
      absl::nullopt);
}

TEST(GetExperimentalMinVideoBitrateTest, BrForAllCodecsIfDefined) {

@ -66,9 +60,6 @@ TEST(GetExperimentalMinVideoBitrateTest, BrForAllCodecsIfDefined) {
            absl::make_optional(DataRate::KilobitsPerSec(123)));
  EXPECT_EQ(GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecH264),
            absl::make_optional(DataRate::KilobitsPerSec(123)));
  EXPECT_EQ(
      GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecMultiplex),
      absl::make_optional(DataRate::KilobitsPerSec(123)));
}

TEST(GetExperimentalMinVideoBitrateTest, BrTrumpsSpecificCodecConfigs) {

@ -84,9 +75,6 @@ TEST(GetExperimentalMinVideoBitrateTest, BrTrumpsSpecificCodecConfigs) {
            absl::make_optional(DataRate::KilobitsPerSec(123)));
  EXPECT_EQ(GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecH264),
            absl::make_optional(DataRate::KilobitsPerSec(123)));
  EXPECT_EQ(
      GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecMultiplex),
      absl::make_optional(DataRate::KilobitsPerSec(123)));
}

TEST(GetExperimentalMinVideoBitrateTest,

@ -103,9 +91,6 @@ TEST(GetExperimentalMinVideoBitrateTest,
            absl::nullopt);
  EXPECT_EQ(GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecH264),
            absl::nullopt);
  EXPECT_EQ(
      GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecMultiplex),
      absl::nullopt);
}

TEST(GetExperimentalMinVideoBitrateTest, SpecificCodecConfigsUsedIfExpEnabled) {

@ -121,9 +106,6 @@ TEST(GetExperimentalMinVideoBitrateTest, SpecificCodecConfigsUsedIfExpEnabled) {
            absl::make_optional(DataRate::KilobitsPerSec(200)));
  EXPECT_EQ(GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecH264),
            absl::make_optional(DataRate::KilobitsPerSec(300)));
  EXPECT_EQ(
      GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecMultiplex),
      absl::nullopt);
}

TEST(GetExperimentalMinVideoBitrateTest,

@ -152,9 +134,6 @@ TEST(GetExperimentalMinVideoBitrateTest,
            absl::make_optional(DataRate::KilobitsPerSec(200)));
  EXPECT_EQ(GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecH264),
            absl::make_optional(DataRate::KilobitsPerSec(300)));
  EXPECT_EQ(
      GetExperimentalMinVideoBitrate(VideoCodecType::kVideoCodecMultiplex),
      absl::nullopt);
}

}  // namespace

@ -428,7 +428,6 @@ bool Convert(std::string inputfile,
      {VideoCodecType::kVideoCodecVP9, "VP9"},
      {VideoCodecType::kVideoCodecAV1, "AV1"},
      {VideoCodecType::kVideoCodecH264, "H264"},
      {VideoCodecType::kVideoCodecMultiplex, "MULTIPLEX"},
      {VideoCodecType::kVideoCodecH265, "H265"}};

  fprintf(output,

@ -128,7 +128,6 @@ if (rtc_include_tests && !build_with_chromium) {
    "../../modules/video_coding:video_codec_interface",
    "../../modules/video_coding:video_coding_utility",
    "../../modules/video_coding:webrtc_h264",
    "../../modules/video_coding:webrtc_multiplex",
    "../../modules/video_coding:webrtc_vp8",
    "../../modules/video_coding:webrtc_vp9",
    "../../modules/video_coding/svc:scalability_mode_util",

@ -208,9 +208,6 @@ CreateEncoderSpecificSettings(VideoStreamConfig config) {
    case Codec::kVideoCodecAV1:
    case Codec::kVideoCodecH265:
      return nullptr;
    case Codec::kVideoCodecMultiplex:
      RTC_DCHECK_NOTREACHED();
      return nullptr;
  }
}

@ -1070,7 +1070,6 @@ class Encoder : public EncodedImageCallback {
      vc.qpMax = cricket::kDefaultVideoMaxQpH26x;
      break;
    case kVideoCodecGeneric:
    case kVideoCodecMultiplex:
      RTC_CHECK_NOTREACHED();
      break;
  }

@ -1278,7 +1277,6 @@ void SetDefaultCodecSpecificSettings(VideoCodec* vc, int num_temporal_layers) {
    case kVideoCodecH265:
      break;
    case kVideoCodecGeneric:
    case kVideoCodecMultiplex:
      RTC_CHECK_NOTREACHED();
  }
}

@ -1357,7 +1355,6 @@ SplitBitrateAndUpdateScalabilityMode(std::string codec_type,
      }
      break;
    case kVideoCodecGeneric:
    case kVideoCodecMultiplex:
      RTC_CHECK_NOTREACHED();
  }
} else {

@ -1380,7 +1377,6 @@ SplitBitrateAndUpdateScalabilityMode(std::string codec_type,
      RTC_CHECK(result) << "SetAv1SvcConfig failed";
    } break;
    case kVideoCodecGeneric:
    case kVideoCodecMultiplex:
      RTC_CHECK_NOTREACHED();
  }

@ -549,7 +549,6 @@ if (rtc_include_tests) {
    "../modules/video_coding",
    "../modules/video_coding:video_coding_utility",
    "../modules/video_coding:webrtc_h264",
    "../modules/video_coding:webrtc_multiplex",
    "../modules/video_coding:webrtc_vp8",
    "../modules/video_coding:webrtc_vp9",
    "../rtc_base:macromagic",

@ -908,7 +907,6 @@ if (rtc_include_tests) {
    "../modules/video_coding:video_codec_interface",
    "../modules/video_coding:video_coding_utility",
    "../modules/video_coding:webrtc_h264",
    "../modules/video_coding:webrtc_multiplex",
    "../modules/video_coding:webrtc_vp8",
    "../modules/video_coding:webrtc_vp9",
    "../modules/video_coding:webrtc_vp9_helpers",

@ -273,7 +273,6 @@ void EncoderOvershootDetector::UpdateHistograms() {
                           average_overshoot_percent);
      break;
    case VideoCodecType::kVideoCodecGeneric:
    case VideoCodecType::kVideoCodecMultiplex:
      break;
  }
}

@ -39,8 +39,6 @@ static std::string CodecTypeToHistogramSuffix(VideoCodecType codec) {
      return "H265";
    case kVideoCodecGeneric:
      return "Generic";
    case kVideoCodecMultiplex:
      return "Multiplex";
  }
}

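The hunks above are mostly the same mechanical edit: dropping the kVideoCodecMultiplex case from default-free switches over VideoCodecType. Because those switches have no default: label, removing the enumerator turns every stale case into a compile error, which is how all call sites are found. A minimal illustration of that idiom (ours, not WebRTC code):

// A default-free switch over an enum lets the compiler (e.g. with
// -Werror=switch in GCC/Clang) flag every site that still mentions a removed
// enumerator, and every site that fails to handle a newly added one.
enum class Codec { kVp8, kVp9, kAv1 };

const char* Name(Codec codec) {
  switch (codec) {
    case Codec::kVp8:
      return "VP8";
    case Codec::kVp9:
      return "VP9";
    case Codec::kAv1:
      return "AV1";
  }
  // Unreachable for valid enum values; keeps -Wreturn-type quiet.
  return "";
}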
@@ -21,8 +21,6 @@
 #include "media/engine/internal_decoder_factory.h"
 #include "media/engine/internal_encoder_factory.h"
 #include "modules/video_coding/codecs/h264/include/h264.h"
-#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
-#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
 #include "modules/video_coding/codecs/vp8/include/vp8.h"
 #include "modules/video_coding/codecs/vp9/include/vp9.h"
 #include "test/call_test.h"

@@ -191,48 +189,6 @@ TEST_F(CodecEndToEndTest,
   RunBaseTest(&test);
 }
 
-// Mutiplex tests are using VP9 as the underlying implementation.
-TEST_F(CodecEndToEndTest, SendsAndReceivesMultiplex) {
-  InternalEncoderFactory internal_encoder_factory;
-  InternalDecoderFactory internal_decoder_factory;
-  test::FunctionVideoEncoderFactory encoder_factory(
-      [&internal_encoder_factory]() {
-        return std::make_unique<MultiplexEncoderAdapter>(
-            &internal_encoder_factory, SdpVideoFormat(cricket::kVp9CodecName));
-      });
-  test::FunctionVideoDecoderFactory decoder_factory(
-      [&internal_decoder_factory](const Environment& env,
-                                  const SdpVideoFormat& /*format*/) {
-        return std::make_unique<MultiplexDecoderAdapter>(
-            env, &internal_decoder_factory,
-            SdpVideoFormat(cricket::kVp9CodecName));
-      });
-
-  CodecObserver test(5, kVideoRotation_0, absl::nullopt, "multiplex",
-                     &encoder_factory, &decoder_factory);
-  RunBaseTest(&test);
-}
-
-TEST_F(CodecEndToEndTest, SendsAndReceivesMultiplexVideoRotation90) {
-  InternalEncoderFactory internal_encoder_factory;
-  InternalDecoderFactory internal_decoder_factory;
-  test::FunctionVideoEncoderFactory encoder_factory(
-      [&internal_encoder_factory]() {
-        return std::make_unique<MultiplexEncoderAdapter>(
-            &internal_encoder_factory, SdpVideoFormat(cricket::kVp9CodecName));
-      });
-  test::FunctionVideoDecoderFactory decoder_factory(
-      [&internal_decoder_factory](const Environment& env,
-                                  const SdpVideoFormat& /*format*/) {
-        return std::make_unique<MultiplexDecoderAdapter>(
-            env, &internal_decoder_factory,
-            SdpVideoFormat(cricket::kVp9CodecName));
-      });
-  CodecObserver test(5, kVideoRotation_90, absl::nullopt, "multiplex",
-                     &encoder_factory, &decoder_factory);
-  RunBaseTest(&test);
-}
-
 #endif  // defined(RTC_ENABLE_VP9)
 
 #if defined(WEBRTC_USE_H264)
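Note: the deleted tests only wrapped VP9 in the multiplex adapters; the surviving codecs are exercised through the same fixtures minus the wrapping. A hedged sketch of the equivalent plain-VP9 wiring (test name hypothetical; factory and observer types taken from the deleted code above, and VP9Encoder/VP9Decoder assumed from the vp9 include):

    // Sketch, not a test from this commit: drive the end-to-end fixture
    // with a bare VP9 pair instead of Multiplex-wrapped VP9.
    TEST_F(CodecEndToEndTest, SendsAndReceivesVP9Sketch) {
      test::FunctionVideoEncoderFactory encoder_factory(
          []() { return VP9Encoder::Create(); });
      test::FunctionVideoDecoderFactory decoder_factory(
          [](const Environment& /*env*/, const SdpVideoFormat& /*format*/) {
            return VP9Decoder::Create();
          });
      CodecObserver test(5, kVideoRotation_0, absl::nullopt, "VP9",
                         &encoder_factory, &decoder_factory);
      RunBaseTest(&test);
    }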
@@ -147,33 +147,6 @@ TEST(FullStackTest, Generator_Net_Delay_0_0_Plr_0_VP9Profile2) {
   fixture->RunWithAnalyzer(generator);
 }
 
-TEST(FullStackTest, Foreman_Cif_Net_Delay_0_0_Plr_0_Multiplex) {
-  auto fixture = CreateVideoQualityTestFixture();
-  ParamsWithLogging foreman_cif;
-  foreman_cif.call.send_side_bwe = true;
-  foreman_cif.video[0] = {
-      true,        352,    288,    30,
-      700000,      700000, 700000, false,
-      "multiplex", 1,      0,      0,
-      false,       false,  false,  ClipNameToClipPath("foreman_cif")};
-  foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_Multiplex", 0.0, 0.0,
-                          kFullStackTestDurationSecs};
-  fixture->RunWithAnalyzer(foreman_cif);
-}
-
-TEST(FullStackTest, Generator_Net_Delay_0_0_Plr_0_Multiplex) {
-  auto fixture = CreateVideoQualityTestFixture();
-
-  ParamsWithLogging generator;
-  generator.call.send_side_bwe = true;
-  generator.video[0] = {
-      true,        352, 288, 30,    700000, 700000, 700000, false,
-      "multiplex", 1,   0,   0,     false,  false,  false,  "GeneratorI420A"};
-  generator.analyzer = {"generator_net_delay_0_0_plr_0_Multiplex", 0.0, 0.0,
-                        kFullStackTestDurationSecs};
-  fixture->RunWithAnalyzer(generator);
-}
-
 #endif  // defined(RTC_ENABLE_VP9)
 
 #if defined(WEBRTC_LINUX)
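Note: the brace initializers in these deleted tests fill the fixture's video params positionally, which is hard to read in diff form. An annotated reading of the foreman_cif case (field names are assumptions based on the shape of VideoQualityTest's Params::Video struct, not confirmed by this diff):

    // Sketch: the same initializer with hypothetical field labels.
    foreman_cif.video[0] = {
        /*enabled=*/true, /*width=*/352, /*height=*/288, /*fps=*/30,
        /*min_bitrate_bps=*/700000, /*target_bitrate_bps=*/700000,
        /*max_bitrate_bps=*/700000, /*suspend_below_min_bitrate=*/false,
        /*codec=*/"multiplex", /*num_temporal_layers=*/1,
        /*selected_tl=*/0, /*min_transmit_bps=*/0,
        /*ulpfec=*/false, /*flexfec=*/false, /*automatic_scaling=*/false,
        /*clip_path=*/ClipNameToClipPath("foreman_cif")};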
@@ -217,36 +217,6 @@ TEST(PCFullStackTest, MAYBE_Pc_Generator_Net_Delay_0_0_Plr_0_VP9Profile2) {
   fixture->Run(RunParams(TimeDelta::Seconds(kTestDurationSec)));
 }
 
-/*
-// TODO(bugs.webrtc.org/10639) migrate commented out test, when required
-// functionality will be supported in PeerConnection level framework.
-TEST(PCFullStackTest, ForemanCifWithoutPacketLossMultiplexI420Frame) {
-  auto fixture = CreateVideoQualityTestFixture();
-  ParamsWithLogging foreman_cif;
-  foreman_cif.call.send_side_bwe = true;
-  foreman_cif.video[0] = {
-      true,        352,    288,    30,
-      700000,      700000, 700000, false,
-      "multiplex", 1,      0,      0,
-      false,       false,  false,  ClipNameToClipPath("foreman_cif")};
-  foreman_cif.analyzer = {"foreman_cif_net_delay_0_0_plr_0_Multiplex", 0.0, 0.0,
-                          kTestDurationSec};
-  fixture->RunWithAnalyzer(foreman_cif);
-}
-
-TEST(PCFullStackTest, GeneratorWithoutPacketLossMultiplexI420AFrame) {
-  auto fixture = CreateVideoQualityTestFixture();
-
-  ParamsWithLogging generator;
-  generator.call.send_side_bwe = true;
-  generator.video[0] = {
-      true,        352, 288, 30,    700000, 700000, 700000, false,
-      "multiplex", 1,   0,   0,     false,  false,  false,  "GeneratorI420A"};
-  generator.analyzer = {"generator_net_delay_0_0_plr_0_Multiplex", 0.0, 0.0,
-                        kTestDurationSec};
-  fixture->RunWithAnalyzer(generator);
-}
-*/
 #endif  // defined(RTC_ENABLE_VP9)
 
 TEST(PCFullStackTest, Pc_Net_Delay_0_0_Plr_0) {
@@ -40,8 +40,6 @@
 #include "modules/audio_device/include/audio_device.h"
 #include "modules/audio_mixer/audio_mixer_impl.h"
 #include "modules/video_coding/codecs/h264/include/h264.h"
-#include "modules/video_coding/codecs/multiplex/include/multiplex_decoder_adapter.h"
-#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
 #include "modules/video_coding/codecs/vp8/include/vp8.h"
 #include "modules/video_coding/codecs/vp9/include/vp9.h"
 #include "modules/video_coding/utility/ivf_file_writer.h"
@@ -297,10 +295,7 @@ std::unique_ptr<VideoDecoder> VideoQualityTest::CreateVideoDecoder(
     const Environment& env,
     const SdpVideoFormat& format) {
   std::unique_ptr<VideoDecoder> decoder;
-  if (format.name == "multiplex") {
-    decoder = std::make_unique<MultiplexDecoderAdapter>(
-        env, decoder_factory_.get(), SdpVideoFormat(cricket::kVp9CodecName));
-  } else if (format.name == "FakeCodec") {
+  if (format.name == "FakeCodec") {
     decoder = webrtc::FakeVideoDecoderFactory::CreateVideoDecoder();
   } else {
     decoder = decoder_factory_->Create(env, format);

@@ -323,9 +318,6 @@ std::unique_ptr<VideoEncoder> VideoQualityTest::CreateVideoEncoder(
   if (format.name == "VP8") {
     encoder = std::make_unique<SimulcastEncoderAdapter>(encoder_factory_.get(),
                                                         format);
-  } else if (format.name == "multiplex") {
-    encoder = std::make_unique<MultiplexEncoderAdapter>(
-        encoder_factory_.get(), SdpVideoFormat(cricket::kVp9CodecName));
   } else if (format.name == "FakeCodec") {
     encoder = webrtc::FakeVideoEncoderFactory::CreateVideoEncoder();
   } else {

@@ -724,8 +716,6 @@ void VideoQualityTest::SetupVideo(Transport* send_transport,
     payload_type = test::VideoTestConstants::kPayloadTypeVP8;
   } else if (params_.video[video_idx].codec == "VP9") {
     payload_type = test::VideoTestConstants::kPayloadTypeVP9;
-  } else if (params_.video[video_idx].codec == "multiplex") {
-    payload_type = test::VideoTestConstants::kPayloadTypeVP9;
   } else if (params_.video[video_idx].codec == "FakeCodec") {
     payload_type = test::VideoTestConstants::kFakeVideoSendPayloadType;
   } else {
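Note: with the multiplex branches gone, decoder selection in VideoQualityTest reduces to a fake-codec special case plus the injected-factory path. A sketch of the resulting logic as a free function (illustration only; the real code is a member function that also wraps the returned decoder, as the surrounding context suggests):

    // Sketch: post-removal decoder selection, mirroring the first hunk above.
    std::unique_ptr<webrtc::VideoDecoder> CreateDecoderSketch(
        const webrtc::Environment& env,
        const webrtc::SdpVideoFormat& format,
        webrtc::VideoDecoderFactory& factory) {
      if (format.name == "FakeCodec") {
        // The test-only stand-in codec keeps its dedicated factory.
        return webrtc::FakeVideoDecoderFactory::CreateVideoDecoder();
      }
      // Every real codec now goes through the injected decoder factory.
      return factory.Create(env, format);
    }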
@@ -45,7 +45,6 @@
 #include "media/engine/webrtc_video_engine.h"
 #include "modules/video_coding/codecs/av1/libaom_av1_encoder.h"
 #include "modules/video_coding/codecs/h264/include/h264.h"
-#include "modules/video_coding/codecs/multiplex/include/multiplex_encoder_adapter.h"
 #include "modules/video_coding/codecs/vp8/include/vp8.h"
 #include "modules/video_coding/codecs/vp9/include/vp9.h"
 #include "modules/video_coding/codecs/vp9/include/vp9_globals.h"
@@ -8817,16 +8816,6 @@ class VideoStreamEncoderWithRealEncoderTest
       case kVideoCodecH264:
         encoder = H264Encoder::Create();
         break;
-      case kVideoCodecMultiplex:
-        mock_encoder_factory_for_multiplex_ =
-            std::make_unique<MockVideoEncoderFactory>();
-        EXPECT_CALL(*mock_encoder_factory_for_multiplex_, Die);
-        EXPECT_CALL(*mock_encoder_factory_for_multiplex_, CreateVideoEncoder)
-            .WillRepeatedly([] { return VP8Encoder::Create(); });
-        encoder = std::make_unique<MultiplexEncoderAdapter>(
-            mock_encoder_factory_for_multiplex_.get(), SdpVideoFormat("VP8"),
-            false);
-        break;
       case kVideoCodecH265:
         // TODO(bugs.webrtc.org/13485): Use a fake encoder
         break;
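Note: the multiplex arm was the only one that needed a mock factory and EXPECT_CALL plumbing; every surviving case constructs a concrete encoder directly. An abbreviated sketch of the remaining switch (names as in the hunk; arms not shown in the diff are assumptions):

    // Sketch only: encoder construction per codec type after the removal.
    switch (codec_type_) {
      case kVideoCodecVP8:
        encoder = VP8Encoder::Create();  // Assumed arm, mirrors H264 below.
        break;
      case kVideoCodecH264:
        encoder = H264Encoder::Create();
        break;
      case kVideoCodecH265:
        // Per the source's TODO: use a fake encoder once available.
        break;
      default:
        break;  // Sketch shorthand; the real switch enumerates every value.
    }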
@@ -8908,11 +8897,6 @@ TEST_P(VideoStreamEncoderWithRealEncoderTest, EncoderMapsNativeNV12) {
 }
 
 TEST_P(VideoStreamEncoderWithRealEncoderTest, HandlesLayerToggling) {
-  if (codec_type_ == kVideoCodecMultiplex) {
-    // Multiplex codec here uses wrapped mock codecs, ignore for this test.
-    return;
-  }
-
   const size_t kNumSpatialLayers = 3u;
   const float kDownscaleFactors[] = {4.0, 2.0, 1.0};
   const int kFrameWidth = 1280;
@@ -9045,8 +9029,6 @@ constexpr std::pair<VideoCodecType, bool> kVP9DisallowConversion =
     std::make_pair(kVideoCodecVP9, /*allow_i420_conversion=*/false);
 constexpr std::pair<VideoCodecType, bool> kAV1AllowConversion =
     std::make_pair(kVideoCodecAV1, /*allow_i420_conversion=*/false);
-constexpr std::pair<VideoCodecType, bool> kMultiplexDisallowConversion =
-    std::make_pair(kVideoCodecMultiplex, /*allow_i420_conversion=*/false);
 #if defined(WEBRTC_USE_H264)
 constexpr std::pair<VideoCodecType, bool> kH264AllowConversion =
     std::make_pair(kVideoCodecH264, /*allow_i420_conversion=*/true);

@@ -9060,7 +9042,6 @@ INSTANTIATE_TEST_SUITE_P(
     ::testing::Values(kVP8DisallowConversion,
                       kVP9DisallowConversion,
                       kAV1AllowConversion,
-                      kMultiplexDisallowConversion,
                       kH264AllowConversion),
     TestParametersVideoCodecAndAllowI420ConversionToString);
 #else

@@ -9069,8 +9050,7 @@ INSTANTIATE_TEST_SUITE_P(
     VideoStreamEncoderWithRealEncoderTest,
     ::testing::Values(kVP8DisallowConversion,
                       kVP9DisallowConversion,
-                      kAV1AllowConversion,
-                      kMultiplexDisallowConversion),
+                      kAV1AllowConversion),
     TestParametersVideoCodecAndAllowI420ConversionToString);
 #endif
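Note: after the last two hunks the parameterized suite differs between builds only by the H264 entry. A sketch of the resulting non-H264 instantiation, as implied by the added line (the instantiation prefix is not visible in the hunk and is a placeholder here):

    INSTANTIATE_TEST_SUITE_P(
        All,  // Hypothetical prefix; the real name is outside this hunk.
        VideoStreamEncoderWithRealEncoderTest,
        ::testing::Values(kVP8DisallowConversion,
                          kVP9DisallowConversion,
                          kAV1AllowConversion),
        TestParametersVideoCodecAndAllowI420ConversionToString);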