Mirror of https://github.com/mollyim/webrtc.git, synced 2025-05-19 08:37:54 +01:00
Move class VideoCodec from common_types.h to its own api header file.

Bug: webrtc:7660
Change-Id: I91f19bfc2565461328f30081f8383e136419aefb
Reviewed-on: https://webrtc-review.googlesource.com/79881
Commit-Queue: Niels Moller <nisse@webrtc.org>
Reviewed-by: Rasmus Brandt <brandtr@webrtc.org>
Reviewed-by: Danil Chapovalov <danilchap@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#23544}
This commit is contained in:
parent b616d09e1b
commit efc71e565e
23 changed files with 185 additions and 182 deletions
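
As a quick orientation before the per-file hunks, a hedged sketch of what the move means for callers: code that needs webrtc::VideoCodec can now include the dedicated API header rather than common_types.h. The helper below and its values are illustrative, not part of this commit.

#include "api/video_codecs/video_codec.h"

// Illustrative only: a caller that needs VideoCodec can include the dedicated
// API header. The helper name and all values here are made up.
webrtc::VideoCodec MakeVp8Codec() {
  webrtc::VideoCodec codec;
  codec.codecType = webrtc::kVideoCodecVP8;
  codec.width = 640;
  codec.height = 480;
  codec.maxFramerate = 30;
  codec.startBitrate = 300;  // kilobits/sec, matching the field comment.
  codec.VP8()->numberOfTemporalLayers = 1;  // mutable codec-specific accessor.
  return codec;
}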
BUILD.gn (3 changed lines)

@@ -383,9 +383,8 @@ rtc_source_set("typedefs") {
   ]
 }

-rtc_static_library("webrtc_common") {
+rtc_source_set("webrtc_common") {
   sources = [
     "common_types.cc",
     "common_types.h",
   ]
   deps = [

@@ -17,6 +17,7 @@ rtc_source_set("video_codecs_api") {
   sources = [
     "sdp_video_format.cc",
     "sdp_video_format.h",
+    "video_codec.cc",
     "video_codec.h",
     "video_decoder.cc",
     "video_decoder.h",

@@ -8,11 +8,12 @@
  * be found in the AUTHORS file in the root of the source tree.
  */

 #include "common_types.h"  // NOLINT(build/include)
+#include "api/video_codecs/video_codec.h"

 #include <string.h>
 #include <algorithm>
 #include <limits>
 #include <string>
 #include <type_traits>

 #include "rtc_base/checks.h"

@@ -11,13 +11,155 @@
 #ifndef API_VIDEO_CODECS_VIDEO_CODEC_H_
 #define API_VIDEO_CODECS_VIDEO_CODEC_H_

-// TODO(bugs.webrtc.org/7660): This is an initial place holder file. Downstream
-// users of VideoCodec must be updated to include this file, before contents can
-// be moved out of common_types.h.
+#include <string>

 #include "common_types.h"  // NOLINT(build/include)

+namespace webrtc {
+
+// The VideoCodec class represents an old defacto-api, which we're migrating
+// away from slowly.
+
+// Video codec
+enum VideoCodecComplexity {
+  kComplexityNormal = 0,
+  kComplexityHigh = 1,
+  kComplexityHigher = 2,
+  kComplexityMax = 3
+};
+
+// VP8 specific
+struct VideoCodecVP8 {
+  bool operator==(const VideoCodecVP8& other) const;
+  bool operator!=(const VideoCodecVP8& other) const {
+    return !(*this == other);
+  }
+  VideoCodecComplexity complexity;
+  unsigned char numberOfTemporalLayers;
+  bool denoisingOn;
+  bool automaticResizeOn;
+  bool frameDroppingOn;
+  int keyFrameInterval;
+};
+
+enum class InterLayerPredMode {
+  kOn,       // Allow inter-layer prediction for all frames.
+             // Frame of low spatial layer can be used for
+             // prediction of next spatial layer frame.
+  kOff,      // Encoder produces independent spatial layers.
+  kOnKeyPic  // Allow inter-layer prediction only for frames
+             // within key picture.
+};
+
+// VP9 specific.
+struct VideoCodecVP9 {
+  bool operator==(const VideoCodecVP9& other) const;
+  bool operator!=(const VideoCodecVP9& other) const {
+    return !(*this == other);
+  }
+  VideoCodecComplexity complexity;
+  unsigned char numberOfTemporalLayers;
+  bool denoisingOn;
+  bool frameDroppingOn;
+  int keyFrameInterval;
+  bool adaptiveQpMode;
+  bool automaticResizeOn;
+  unsigned char numberOfSpatialLayers;
+  bool flexibleMode;
+  InterLayerPredMode interLayerPred;
+};
+
+// H264 specific.
+struct VideoCodecH264 {
+  bool operator==(const VideoCodecH264& other) const;
+  bool operator!=(const VideoCodecH264& other) const {
+    return !(*this == other);
+  }
+  bool frameDroppingOn;
+  int keyFrameInterval;
+  // These are NULL/0 if not externally negotiated.
+  const uint8_t* spsData;
+  size_t spsLen;
+  const uint8_t* ppsData;
+  size_t ppsLen;
+  H264::Profile profile;
+};
+
+// Translates from name of codec to codec type and vice versa.
+const char* CodecTypeToPayloadString(VideoCodecType type);
+VideoCodecType PayloadStringToCodecType(const std::string& name);
+
+union VideoCodecUnion {
+  VideoCodecVP8 VP8;
+  VideoCodecVP9 VP9;
+  VideoCodecH264 H264;
+};
+
+enum VideoCodecMode { kRealtimeVideo, kScreensharing };
+
+// Common video codec properties
+class VideoCodec {
+ public:
+  VideoCodec();
+
+  // Public variables. TODO(hta): Make them private with accessors.
+  VideoCodecType codecType;
+  unsigned char plType;
+
+  int width;
+  int height;
+
+  unsigned int startBitrate;   // kilobits/sec.
+  unsigned int maxBitrate;     // kilobits/sec.
+  unsigned int minBitrate;     // kilobits/sec.
+  unsigned int targetBitrate;  // kilobits/sec.
+
+  uint32_t maxFramerate;
+
+  // This enables/disables encoding and sending when there aren't multiple
+  // simulcast streams,by allocating 0 bitrate if inactive.
+  bool active;
+
+  unsigned int qpMax;
+  unsigned char numberOfSimulcastStreams;
+  SimulcastStream simulcastStream[kMaxSimulcastStreams];
+  SpatialLayer spatialLayers[kMaxSpatialLayers];
+
+  VideoCodecMode mode;
+  bool expect_encode_from_texture;
+
+  // Timing frames configuration. There is delay of delay_ms between two
+  // consequent timing frames, excluding outliers. Frame is always made a
+  // timing frame if it's at least outlier_ratio in percent of "ideal" average
+  // frame given bitrate and framerate, i.e. if it's bigger than
+  // |outlier_ratio / 100.0 * bitrate_bps / fps| in bits. This way, timing
+  // frames will not be sent too often usually. Yet large frames will always
+  // have timing information for debug purposes because they are more likely to
+  // cause extra delays.
+  struct TimingFrameTriggerThresholds {
+    int64_t delay_ms;
+    uint16_t outlier_ratio_percent;
+  } timing_frame_thresholds;
+
+  bool operator==(const VideoCodec& other) const = delete;
+  bool operator!=(const VideoCodec& other) const = delete;
+
+  // Accessors for codec specific information.
+  // There is a const version of each that returns a reference,
+  // and a non-const version that returns a pointer, in order
+  // to allow modification of the parameters.
+  VideoCodecVP8* VP8();
+  const VideoCodecVP8& VP8() const;
+  VideoCodecVP9* VP9();
+  const VideoCodecVP9& VP9() const;
+  VideoCodecH264* H264();
+  const VideoCodecH264& H264() const;
+
+ private:
+  // TODO(hta): Consider replacing the union with a pointer type.
+  // This will allow removing the VideoCodec* types from this file.
+  VideoCodecUnion codec_specific_;
+};
+
+}  // namespace webrtc
 #endif  // API_VIDEO_CODECS_VIDEO_CODEC_H_

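The accessor declarations above pair a const overload that returns a reference with a non-const overload that returns a pointer, so codec-specific parameters can be modified in place. A minimal usage sketch follows; the function names are illustrative, not from this commit.

#include "api/video_codecs/video_codec.h"

// Illustrative only: the mutable accessor returns a pointer, the const
// accessor returns a reference, per the comment in the class above.
void ConfigureVp9(webrtc::VideoCodec* codec) {
  codec->codecType = webrtc::kVideoCodecVP9;
  webrtc::VideoCodecVP9* vp9 = codec->VP9();  // non-const overload.
  vp9->numberOfSpatialLayers = 2;
  vp9->flexibleMode = false;
  vp9->interLayerPred = webrtc::InterLayerPredMode::kOnKeyPic;
}

bool UsesFlexibleMode(const webrtc::VideoCodec& codec) {
  return codec.VP9().flexibleMode;  // const overload.
}
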
@@ -27,7 +27,6 @@ namespace webrtc {
 class RTPFragmentationHeader;
 // TODO(pbos): Expose these through a public (root) header or change these APIs.
 struct CodecSpecificInfo;
-class VideoCodec;

 class EncodedImageCallback {
  public:

@@ -15,8 +15,8 @@
 #include <vector>

 #include "api/optional.h"
+#include "api/video_codecs/video_codec.h"
 #include "api/video_codecs/sdp_video_format.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "rtc_base/refcount.h"
 #include "rtc_base/scoped_ref_ptr.h"

@@ -28,8 +28,8 @@ struct VideoStream {
   VideoStream(const VideoStream& other);
   std::string ToString() const;

-  size_t width;
-  size_t height;
+  int width;
+  int height;
   int max_framerate;

   int min_bitrate_bps;

common_types.h (137 changed lines)

@@ -325,55 +325,6 @@ enum class VideoType {
   kBGRA,
 };

-// Video codec
-enum VideoCodecComplexity {
-  kComplexityNormal = 0,
-  kComplexityHigh = 1,
-  kComplexityHigher = 2,
-  kComplexityMax = 3
-};
-
-// VP8 specific
-struct VideoCodecVP8 {
-  bool operator==(const VideoCodecVP8& other) const;
-  bool operator!=(const VideoCodecVP8& other) const {
-    return !(*this == other);
-  }
-  VideoCodecComplexity complexity;
-  unsigned char numberOfTemporalLayers;
-  bool denoisingOn;
-  bool automaticResizeOn;
-  bool frameDroppingOn;
-  int keyFrameInterval;
-};
-
-enum class InterLayerPredMode {
-  kOn,       // Allow inter-layer prediction for all frames.
-             // Frame of low spatial layer can be used for
-             // prediction of next spatial layer frame.
-  kOff,      // Encoder produces independent spatial layers.
-  kOnKeyPic  // Allow inter-layer prediction only for frames
-             // within key picture.
-};
-
-// VP9 specific.
-struct VideoCodecVP9 {
-  bool operator==(const VideoCodecVP9& other) const;
-  bool operator!=(const VideoCodecVP9& other) const {
-    return !(*this == other);
-  }
-  VideoCodecComplexity complexity;
-  unsigned char numberOfTemporalLayers;
-  bool denoisingOn;
-  bool frameDroppingOn;
-  int keyFrameInterval;
-  bool adaptiveQpMode;
-  bool automaticResizeOn;
-  unsigned char numberOfSpatialLayers;
-  bool flexibleMode;
-  InterLayerPredMode interLayerPred;
-};
-
 // TODO(magjed): Move this and other H264 related classes out to their own file.
 namespace H264 {

@@ -387,22 +338,6 @@ enum Profile {

 }  // namespace H264

-// H264 specific.
-struct VideoCodecH264 {
-  bool operator==(const VideoCodecH264& other) const;
-  bool operator!=(const VideoCodecH264& other) const {
-    return !(*this == other);
-  }
-  bool frameDroppingOn;
-  int keyFrameInterval;
-  // These are NULL/0 if not externally negotiated.
-  const uint8_t* spsData;
-  size_t spsLen;
-  const uint8_t* ppsData;
-  size_t ppsLen;
-  H264::Profile profile;
-};
-
 // Video codec types
 enum VideoCodecType {
   // There are various memset(..., 0, ...) calls in the code that rely on

@@ -427,12 +362,6 @@
 const char* CodecTypeToPayloadString(VideoCodecType type);
 VideoCodecType PayloadStringToCodecType(const std::string& name);

-union VideoCodecUnion {
-  VideoCodecVP8 VP8;
-  VideoCodecVP9 VP9;
-  VideoCodecH264 H264;
-};
-
 struct SpatialLayer {
   bool operator==(const SpatialLayer& other) const;
   bool operator!=(const SpatialLayer& other) const { return !(*this == other); }

@@ -451,72 +380,6 @@ struct SpatialLayer {
 // settings such as resolution.
 typedef SpatialLayer SimulcastStream;

-enum VideoCodecMode { kRealtimeVideo, kScreensharing };
-
-// Common video codec properties
-class VideoCodec {
- public:
-  VideoCodec();
-
-  // Public variables. TODO(hta): Make them private with accessors.
-  VideoCodecType codecType;
-  unsigned char plType;
-
-  unsigned short width;
-  unsigned short height;
-
-  unsigned int startBitrate;   // kilobits/sec.
-  unsigned int maxBitrate;     // kilobits/sec.
-  unsigned int minBitrate;     // kilobits/sec.
-  unsigned int targetBitrate;  // kilobits/sec.
-
-  uint32_t maxFramerate;
-
-  // This enables/disables encoding and sending when there aren't multiple
-  // simulcast streams,by allocating 0 bitrate if inactive.
-  bool active;
-
-  unsigned int qpMax;
-  unsigned char numberOfSimulcastStreams;
-  SimulcastStream simulcastStream[kMaxSimulcastStreams];
-  SpatialLayer spatialLayers[kMaxSpatialLayers];
-
-  VideoCodecMode mode;
-  bool expect_encode_from_texture;
-
-  // Timing frames configuration. There is delay of delay_ms between two
-  // consequent timing frames, excluding outliers. Frame is always made a
-  // timing frame if it's at least outlier_ratio in percent of "ideal" average
-  // frame given bitrate and framerate, i.e. if it's bigger than
-  // |outlier_ratio / 100.0 * bitrate_bps / fps| in bits. This way, timing
-  // frames will not be sent too often usually. Yet large frames will always
-  // have timing information for debug purposes because they are more likely to
-  // cause extra delays.
-  struct TimingFrameTriggerThresholds {
-    int64_t delay_ms;
-    uint16_t outlier_ratio_percent;
-  } timing_frame_thresholds;
-
-  bool operator==(const VideoCodec& other) const = delete;
-  bool operator!=(const VideoCodec& other) const = delete;
-
-  // Accessors for codec specific information.
-  // There is a const version of each that returns a reference,
-  // and a non-const version that returns a pointer, in order
-  // to allow modification of the parameters.
-  VideoCodecVP8* VP8();
-  const VideoCodecVP8& VP8() const;
-  VideoCodecVP9* VP9();
-  const VideoCodecVP9& VP9() const;
-  VideoCodecH264* H264();
-  const VideoCodecH264& H264() const;
-
- private:
-  // TODO(hta): Consider replacing the union with a pointer type.
-  // This will allow removing the VideoCodec* types from this file.
-  VideoCodecUnion codec_specific_;
-};
-
 // TODO(sprang): Remove this when downstream projects have been updated.
 using BitrateAllocation = VideoBitrateAllocation;

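The TimingFrameTriggerThresholds comment removed here (and carried over verbatim into the new header) boils down to one inequality: a frame is always marked as a timing frame when it is larger than outlier_ratio_percent / 100.0 * bitrate_bps / fps bits. A hedged sketch of that arithmetic, with made-up names and numbers:

#include <cstddef>
#include <cstdint>

// Illustrative only: the outlier rule spelled out by the
// TimingFrameTriggerThresholds comment. The function name and the example
// numbers are made up.
bool ExceedsTimingFrameOutlierThreshold(size_t frame_size_bits,
                                        uint16_t outlier_ratio_percent,
                                        uint32_t bitrate_bps,
                                        uint32_t framerate_fps) {
  const double ideal_frame_bits =
      static_cast<double>(bitrate_bps) / framerate_fps;
  return frame_size_bits > outlier_ratio_percent / 100.0 * ideal_frame_bits;
}
// Example: at 1000000 bps and 30 fps an "ideal" frame is about 33333 bits,
// so with outlier_ratio_percent = 500 any frame above roughly 166667 bits is
// always sent as a timing frame.
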
@@ -2643,8 +2643,8 @@ TEST_F(WebRtcVideoChannelTest, ReconfiguresEncodersWhenNotSending) {

   // No frames entered.
   std::vector<webrtc::VideoStream> streams = stream->GetVideoStreams();
-  EXPECT_EQ(0u, streams[0].width);
-  EXPECT_EQ(0u, streams[0].height);
+  EXPECT_EQ(0, streams[0].width);
+  EXPECT_EQ(0, streams[0].height);

   FakeVideoCapturerWithTaskQueue capturer;
   EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, nullptr, &capturer));

@@ -2654,9 +2654,8 @@ TEST_F(WebRtcVideoChannelTest, ReconfiguresEncodersWhenNotSending) {

   // Frame entered, should be reconfigured to new dimensions.
   streams = stream->GetVideoStreams();
-  EXPECT_EQ(rtc::checked_cast<size_t>(capture_format.width), streams[0].width);
-  EXPECT_EQ(rtc::checked_cast<size_t>(capture_format.height),
-            streams[0].height);
+  EXPECT_EQ(capture_format.width, streams[0].width);
+  EXPECT_EQ(capture_format.height, streams[0].height);

   EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
 }

@@ -2694,10 +2693,8 @@ TEST_F(WebRtcVideoChannelTest, UsesCorrectSettingsForScreencast) {
   EXPECT_EQ(webrtc::VideoEncoderConfig::ContentType::kRealtimeVideo,
             encoder_config.content_type);
   std::vector<webrtc::VideoStream> streams = send_stream->GetVideoStreams();
-  EXPECT_EQ(rtc::checked_cast<size_t>(capture_format_hd.width),
-            streams.front().width);
-  EXPECT_EQ(rtc::checked_cast<size_t>(capture_format_hd.height),
-            streams.front().height);
+  EXPECT_EQ(capture_format_hd.width, streams.front().width);
+  EXPECT_EQ(capture_format_hd.height, streams.front().height);
   EXPECT_EQ(0, encoder_config.min_transmit_bitrate_bps)
       << "Non-screenshare shouldn't use min-transmit bitrate.";

@@ -2721,10 +2718,8 @@ TEST_F(WebRtcVideoChannelTest, UsesCorrectSettingsForScreencast) {
             encoder_config.min_transmit_bitrate_bps);

   streams = send_stream->GetVideoStreams();
-  EXPECT_EQ(rtc::checked_cast<size_t>(capture_format_hd.width),
-            streams.front().width);
-  EXPECT_EQ(rtc::checked_cast<size_t>(capture_format_hd.height),
-            streams.front().height);
+  EXPECT_EQ(capture_format_hd.width, streams.front().width);
+  EXPECT_EQ(capture_format_hd.height, streams.front().height);
   EXPECT_FALSE(streams[0].num_temporal_layers.has_value());
   EXPECT_TRUE(channel_->SetVideoSend(last_ssrc_, nullptr, nullptr));
 }

@@ -5839,8 +5834,8 @@ class WebRtcVideoChannelSimulcastTest : public testing::Test {
     if (screenshare) {
       for (const webrtc::VideoStream& stream : expected_streams) {
         // Never scale screen content.
-        EXPECT_EQ(stream.width, rtc::checked_cast<size_t>(capture_width));
-        EXPECT_EQ(stream.height, rtc::checked_cast<size_t>(capture_height));
+        EXPECT_EQ(stream.width, capture_width);
+        EXPECT_EQ(stream.height, capture_height);
       }
     }
   } else {

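The test updates above follow directly from VideoStream::width and height becoming int: they now compare against the int capture dimensions without rtc::checked_cast<size_t>. A self-contained illustration with stand-in types (not the real test fixture):

#include <cassert>

// Illustrative only: stand-ins for the stream and capture-format dimensions
// used by the tests. With both sides int after this change, equality checks
// need no rtc::checked_cast<size_t>.
struct StreamDims { int width; int height; };
struct CaptureDims { int width; int height; };

void ExpectSameDims(const StreamDims& stream, const CaptureDims& capture) {
  assert(stream.width == capture.width);
  assert(stream.height == capture.height);
}
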
@@ -198,6 +198,7 @@ rtc_static_library("rtp_rtcp") {
     "../../api:transport_api",
     "../../api/audio_codecs:audio_codecs_api",
     "../../api/video:video_bitrate_allocation",
+    "../../api/video_codecs:video_codecs_api",
     "../../common_video",
     "../../logging:rtc_event_audio",
     "../../logging:rtc_event_log_api",

@@ -411,6 +412,7 @@ if (rtc_include_tests) {
     "../../api:transport_api",
     "../../api/video:video_bitrate_allocation",
     "../../api/video:video_frame",
+    "../../api/video_codecs:video_codecs_api",
     "../../call:rtp_receiver",
     "../../common_video:common_video",
     "../../logging:mocks",

@@ -16,13 +16,12 @@

 #include "api/audio_codecs/audio_format.h"
 #include "api/optional.h"
+#include "api/video_codecs/video_codec.h"
 #include "modules/rtp_rtcp/source/rtp_utility.h"
 #include "rtc_base/criticalsection.h"

 namespace webrtc {

-class VideoCodec;
-
 class RTPPayloadRegistry {
  public:
   RTPPayloadRegistry();

@@ -12,7 +12,6 @@

 #include <algorithm>

-#include "common_types.h"  // NOLINT(build/include)
 #include "modules/audio_coding/codecs/audio_format_conversion.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"

@@ -12,7 +12,7 @@
 #include <memory>
 #include <set>

-#include "common_types.h"  // NOLINT(build/include)
+#include "api/video_codecs/video_codec.h"
 #include "modules/rtp_rtcp/include/rtp_header_parser.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 #include "modules/rtp_rtcp/source/rtcp_packet.h"

@@ -14,7 +14,7 @@
 #include <memory>
 #include <vector>

-#include "common_types.h"  // NOLINT(build/include)
+#include "api/video_codecs/video_codec.h"
 #include "modules/rtp_rtcp/include/rtp_payload_registry.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"

@@ -456,6 +456,7 @@ rtc_static_library("webrtc_vp9_helpers") {
   deps = [
     ":video_codec_interface",
     "../..:webrtc_common",
+    "../../api/video_codecs:video_codecs_api",
     "../../common_video",
     "../../rtc_base:checks",
   ]

@@ -15,8 +15,7 @@
 #include <vector>
 #include <memory>

-#include "common_types.h"  // NOLINT(build/include)
-#include "typedefs.h"  // NOLINT(build/include)
+#include "api/video_codecs/video_codec.h"

 #define VP8_TS_MAX_PERIODICITY 16
 #define VP8_TS_MAX_LAYERS 5

@@ -15,7 +15,7 @@

 #include <vector>

-#include "common_types.h"  // NOLINT(build/include)
+#include "api/video_codecs/video_codec.h"
 #include "common_video/include/video_bitrate_allocator.h"

 namespace webrtc {

@@ -23,6 +23,7 @@

 #include "api/fec_controller.h"
 #include "api/video/video_frame.h"
+#include "api/video_codecs/video_codec.h"
 #include "modules/include/module.h"
 #include "modules/include/module_common_types.h"
 #include "modules/video_coding/include/video_coding_defines.h"

@@ -11,7 +11,7 @@
 #ifndef MODULES_VIDEO_CODING_UTILITY_DEFAULT_VIDEO_BITRATE_ALLOCATOR_H_
 #define MODULES_VIDEO_CODING_UTILITY_DEFAULT_VIDEO_BITRATE_ALLOCATOR_H_

-#include "common_types.h"  // NOLINT(build/include)
+#include "api/video_codecs/video_codec.h"
 #include "common_video/include/video_bitrate_allocator.h"

 namespace webrtc {

@@ -140,9 +140,9 @@ VideoCodec VideoCodecInitializer::VideoEncoderConfigToVideoCodec(
     sim_stream->active = streams[i].active;

     video_codec.width =
-        std::max(video_codec.width, static_cast<uint16_t>(streams[i].width));
+        std::max(video_codec.width, streams[i].width);
     video_codec.height =
-        std::max(video_codec.height, static_cast<uint16_t>(streams[i].height));
+        std::max(video_codec.height, streams[i].height);
     video_codec.minBitrate =
         std::min(static_cast<uint16_t>(video_codec.minBitrate),
                  static_cast<uint16_t>(streams[i].min_bitrate_bps / 1000));

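This hunk works because VideoCodec::width and height changed from unsigned short to int while VideoStream::width and height changed from size_t to int, so both std::max arguments now share a type and the uint16_t narrowing casts can go. A tiny sketch of the resulting call (illustrative only):

#include <algorithm>

// Illustrative only: with both the codec and stream dimensions stored as int,
// std::max deduces a single type and compiles without any explicit narrowing.
int MaxDimension(int codec_width, int stream_width) {
  return std::max(codec_width, stream_width);
}
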
@@ -380,8 +380,8 @@ int32_t MediaCodecVideoDecoder::Decode(

   // Check if encoded frame dimension has changed.
   if ((inputImage._encodedWidth * inputImage._encodedHeight > 0) &&
-      (inputImage._encodedWidth != codec_.width ||
-       inputImage._encodedHeight != codec_.height)) {
+      (static_cast<int>(inputImage._encodedWidth) != codec_.width ||
+       static_cast<int>(inputImage._encodedHeight) != codec_.height)) {
     ALOGW << "Input resolution changed from " <<
         codec_.width << " x " << codec_.height << " to " <<
         inputImage._encodedWidth << " x " << inputImage._encodedHeight;

@@ -41,10 +41,10 @@ std::vector<VideoStream> CreateVideoStreams(
   int bitrate_left_bps = encoder_config.max_bitrate_bps;

   for (size_t i = 0; i < encoder_config.number_of_streams; ++i) {
-    stream_settings[i].width =
-        (i + 1) * width / encoder_config.number_of_streams;
-    stream_settings[i].height =
-        (i + 1) * height / encoder_config.number_of_streams;
+    stream_settings[i].width = static_cast<int>(
+        (i + 1) * width / encoder_config.number_of_streams);
+    stream_settings[i].height = static_cast<int>(
+        (i + 1) * height / encoder_config.number_of_streams);
     stream_settings[i].max_framerate = 30;
     stream_settings[i].min_bitrate_bps =
         DefaultVideoStreamFactory::kDefaultMinBitratePerStream[i];

@@ -1260,8 +1260,8 @@ std::vector<int> VideoQualityTest::ParseCSV(const std::string& str) {
 VideoStream VideoQualityTest::DefaultVideoStream(const Params& params,
                                                  size_t video_idx) {
   VideoStream stream;
-  stream.width = params.video[video_idx].width;
-  stream.height = params.video[video_idx].height;
+  stream.width = static_cast<int>(params.video[video_idx].width);
+  stream.height = static_cast<int>(params.video[video_idx].height);
   stream.max_framerate = params.video[video_idx].fps;
   stream.min_bitrate_bps = params.video[video_idx].min_bitrate_bps;
   stream.target_bitrate_bps = params.video[video_idx].target_bitrate_bps;

@@ -40,6 +40,8 @@ class VideoQualityTest : public test::CallTest {
   } call;
   struct Video {
     bool enabled;
+    // TODO(nisse): Change type of |width| and |height| to int, which is what
+    // the majority of the webrtc code uses for frame dimensions.
     size_t width;
     size_t height;
     int32_t fps;