mirror of
https://github.com/mollyim/webrtc.git
synced 2025-05-13 05:40:42 +01:00

As documented in webrtc:11908 this cleanup is fairly invasive and when a part of a frequently executed code path, can be quite costly in terms of performance overhead. This is currently the case with synchronous calls between threads (Thread) as well with our proxy api classes. With this CL, all code in WebRTC should now either be using MessageHandlerAutoCleanup or calling MessageHandler(false) explicitly. Next steps will be to update external code to either depend on the AutoCleanup variant, or call MessageHandler(false). Changing the proxy classes to use TaskQueue set of concepts instead of MessageHandler. This avoids the perf overhead related to the cleanup above as well as incompatibility with the thread policy checks in Thread that some current external users of the proxies would otherwise run into (if we were to use Thread::Send() for synchronous call). Following this we'll move the cleanup step into the AutoCleanup class and an RTC_DCHECK that all calls to the MessageHandler are setting the flag to false, before eventually removing the flag and make MessageHandler pure virtual. Bug: webrtc:11908 Change-Id: Idf4ff9bcc8438cb8c583777e282005e0bc511c8f Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/183442 Reviewed-by: Artem Titov <titovartem@webrtc.org> Commit-Queue: Tommi <tommi@webrtc.org> Cr-Commit-Position: refs/heads/master@{#32049}
206 lines · 5.7 KiB · C++
/*
 *  Copyright 2020 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "rtc_base/synchronization/mutex.h"

#include <stddef.h>
#include <stdint.h>

#include <atomic>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>

#include "benchmark/benchmark.h"
#include "rtc_base/checks.h"
#include "rtc_base/event.h"
#include "rtc_base/location.h"
#include "rtc_base/message_handler.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/synchronization/yield.h"
#include "rtc_base/thread.h"
#include "test/gtest.h"

namespace webrtc {
namespace {

using ::rtc::Event;
using ::rtc::Message;
using ::rtc::MessageHandler;
using ::rtc::Thread;

// Number of worker threads that contend for the mutex in each test below.
constexpr int kNumThreads = 16;

template <class MutexType>
|
|
class RTC_LOCKABLE RawMutexLocker {
|
|
public:
|
|
explicit RawMutexLocker(MutexType& mutex) : mutex_(mutex) {}
|
|
void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() { mutex_.Lock(); }
|
|
void Unlock() RTC_UNLOCK_FUNCTION() { mutex_.Unlock(); }
|
|
|
|
private:
|
|
MutexType& mutex_;
|
|
};
|
|
|
|
class RTC_LOCKABLE RawMutexTryLocker {
|
|
public:
|
|
explicit RawMutexTryLocker(Mutex& mutex) : mutex_(mutex) {}
|
|
void Lock() RTC_EXCLUSIVE_LOCK_FUNCTION() {
|
|
while (!mutex_.TryLock()) {
|
|
YieldCurrentThread();
|
|
}
|
|
}
|
|
void Unlock() RTC_UNLOCK_FUNCTION() { mutex_.Unlock(); }
|
|
|
|
private:
|
|
Mutex& mutex_;
|
|
};
|
|
|
|
// Locker that drives a scoped-lock type (e.g. MutexLock): Lock() heap-
// allocates the scoped lock (whose constructor takes a pointer to the mutex
// and acquires it) and Unlock() destroys it, releasing the mutex.
template <class MutexType, class MutexLockType>
class MutexLockLocker {
 public:
  explicit MutexLockLocker(MutexType& mutex) : mutex_(mutex) {}
  void Lock() { held_lock_ = std::make_unique<MutexLockType>(&mutex_); }
  void Unlock() { held_lock_.reset(); }

 private:
  MutexType& mutex_;
  std::unique_ptr<MutexLockType> held_lock_;
};

template <class MutexType, class MutexLocker>
|
|
class LockRunner : public rtc::MessageHandlerAutoCleanup {
|
|
public:
|
|
template <typename... Args>
|
|
explicit LockRunner(Args... args)
|
|
: threads_active_(0),
|
|
start_event_(true, false),
|
|
done_event_(true, false),
|
|
shared_value_(0),
|
|
mutex_(args...),
|
|
locker_(mutex_) {}
|
|
|
|
bool Run() {
|
|
// Signal all threads to start.
|
|
start_event_.Set();
|
|
|
|
// Wait for all threads to finish.
|
|
return done_event_.Wait(kLongTime);
|
|
}
|
|
|
|
void SetExpectedThreadCount(int count) { threads_active_ = count; }
|
|
|
|
int shared_value() {
|
|
int shared_value;
|
|
locker_.Lock();
|
|
shared_value = shared_value_;
|
|
locker_.Unlock();
|
|
return shared_value_;
|
|
}
|
|
|
|
void OnMessage(Message* msg) override {
|
|
ASSERT_TRUE(start_event_.Wait(kLongTime));
|
|
locker_.Lock();
|
|
|
|
EXPECT_EQ(0, shared_value_);
|
|
int old = shared_value_;
|
|
|
|
// Use a loop to increase the chance of race. If the |locker_|
|
|
// implementation is faulty, it would be improbable that the error slips
|
|
// through.
|
|
for (int i = 0; i < kOperationsToRun; ++i) {
|
|
benchmark::DoNotOptimize(++shared_value_);
|
|
}
|
|
EXPECT_EQ(old + kOperationsToRun, shared_value_);
|
|
shared_value_ = 0;
|
|
|
|
locker_.Unlock();
|
|
if (threads_active_.fetch_sub(1) == 1) {
|
|
done_event_.Set();
|
|
}
|
|
}
|
|
|
|
private:
|
|
static constexpr int kLongTime = 10000; // 10 seconds
|
|
static constexpr int kOperationsToRun = 1000;
|
|
|
|
std::atomic<int> threads_active_;
|
|
Event start_event_;
|
|
Event done_event_;
|
|
int shared_value_;
|
|
MutexType mutex_;
|
|
MutexLocker locker_;
|
|
};
|
|
|
|
void StartThreads(std::vector<std::unique_ptr<Thread>>& threads,
|
|
MessageHandler* handler) {
|
|
for (int i = 0; i < kNumThreads; ++i) {
|
|
std::unique_ptr<Thread> thread(Thread::Create());
|
|
thread->Start();
|
|
thread->Post(RTC_FROM_HERE, handler);
|
|
threads.push_back(std::move(thread));
|
|
}
|
|
}
|
|
|
|
// Mutex driven through the plain Lock()/Unlock() adapter must provide
// mutual exclusion across all worker threads.
TEST(MutexTest, ProtectsSharedResourceWithMutexAndRawMutexLocker) {
  std::vector<std::unique_ptr<Thread>> workers;
  LockRunner<Mutex, RawMutexLocker<Mutex>> lock_runner;
  StartThreads(workers, &lock_runner);
  lock_runner.SetExpectedThreadCount(kNumThreads);
  EXPECT_TRUE(lock_runner.Run());
  EXPECT_EQ(0, lock_runner.shared_value());
}

// Mutex acquired via the spinning TryLock() adapter must also provide
// mutual exclusion.
TEST(MutexTest, ProtectsSharedResourceWithMutexAndRawMutexTryLocker) {
  std::vector<std::unique_ptr<Thread>> workers;
  LockRunner<Mutex, RawMutexTryLocker> lock_runner;
  StartThreads(workers, &lock_runner);
  lock_runner.SetExpectedThreadCount(kNumThreads);
  EXPECT_TRUE(lock_runner.Run());
  EXPECT_EQ(0, lock_runner.shared_value());
}

// Mutex used through the scoped MutexLock type must provide mutual
// exclusion.
TEST(MutexTest, ProtectsSharedResourceWithMutexAndMutexLocker) {
  std::vector<std::unique_ptr<Thread>> workers;
  LockRunner<Mutex, MutexLockLocker<Mutex, MutexLock>> lock_runner;
  StartThreads(workers, &lock_runner);
  lock_runner.SetExpectedThreadCount(kNumThreads);
  EXPECT_TRUE(lock_runner.Run());
  EXPECT_EQ(0, lock_runner.shared_value());
}

// GlobalMutex (constructed with absl::kConstInit) driven through the raw
// Lock()/Unlock() adapter must provide mutual exclusion.
TEST(MutexTest, ProtectsSharedResourceWithGlobalMutexAndRawMutexLocker) {
  std::vector<std::unique_ptr<Thread>> workers;
  LockRunner<GlobalMutex, RawMutexLocker<GlobalMutex>> lock_runner(
      absl::kConstInit);
  StartThreads(workers, &lock_runner);
  lock_runner.SetExpectedThreadCount(kNumThreads);
  EXPECT_TRUE(lock_runner.Run());
  EXPECT_EQ(0, lock_runner.shared_value());
}

// GlobalMutex used through the scoped GlobalMutexLock type must provide
// mutual exclusion.
TEST(MutexTest, ProtectsSharedResourceWithGlobalMutexAndMutexLocker) {
  std::vector<std::unique_ptr<Thread>> workers;
  LockRunner<GlobalMutex, MutexLockLocker<GlobalMutex, GlobalMutexLock>>
      lock_runner(absl::kConstInit);
  StartThreads(workers, &lock_runner);
  lock_runner.SetExpectedThreadCount(kNumThreads);
  EXPECT_TRUE(lock_runner.Run());
  EXPECT_EQ(0, lock_runner.shared_value());
}

// GlobalMutex is designed for constant-initialized static storage; verify a
// function-local static instance can be locked and unlocked.
TEST(MutexTest, GlobalMutexCanHaveStaticStorageDuration) {
  ABSL_CONST_INIT static GlobalMutex static_mutex(absl::kConstInit);
  static_mutex.Lock();
  static_mutex.Unlock();
}

}  // namespace
}  // namespace webrtc