Mirror of https://github.com/mollyim/webrtc.git (synced 2025-05-12 21:30:45 +01:00)
PhysicalSocketServer: remove heap-based epoll_event handling.
This change deletes the heap-based handling of a dynamic number of epoll events per call to epoll_wait, on the assumption that PSS load is unlikely to be dominated by the epoll syscalls. This simplifies the logic in the code and removes a heap allocation.

Bug: webrtc:11567
Change-Id: I34fbe1fa8bf0a037bf849a4adac1a0a333c9dd86
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/175563
Commit-Queue: Markus Handell <handellm@webrtc.org>
Reviewed-by: Karl Wiberg <kwiberg@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#31358}
Parent: b41316cd4c
Commit: b7c63ab83a
2 changed files with 10 additions and 22 deletions
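Before the raw diff, here is a rough, standalone sketch of the pattern the change adopts: a fixed-capacity std::array handed straight to epoll_wait(), so the wait path performs no heap allocation or resizing. The EpollWaiter class, the kEventCapacity constant and the Watch()/WaitOnce() methods are invented for illustration and are not part of WebRTC; only the buffer-handling idea mirrors the commit.

// Standalone sketch (not WebRTC code): a fixed-size event buffer for epoll.
#include <sys/epoll.h>
#include <unistd.h>

#include <array>
#include <cerrno>
#include <cstddef>
#include <cstdio>

namespace {

// Mirrors the idea of kNumEpollEvents: a tuning choice, not a hard limit.
constexpr std::size_t kEventCapacity = 128;

class EpollWaiter {
 public:
  EpollWaiter() : epoll_fd_(epoll_create1(EPOLL_CLOEXEC)) {}
  ~EpollWaiter() {
    if (epoll_fd_ != -1)
      close(epoll_fd_);
  }

  // Registers a descriptor for level-triggered readability notifications.
  bool Watch(int fd) {
    epoll_event ev{};
    ev.events = EPOLLIN;
    ev.data.fd = fd;
    return epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, fd, &ev) == 0;
  }

  // Waits once and reports ready descriptors. Returns false on a real error.
  bool WaitOnce(int timeout_ms) {
    // The whole buffer lives inside the object; epoll_wait() fills at most
    // kEventCapacity entries per call, so nothing is ever allocated here.
    int n = epoll_wait(epoll_fd_, events_.data(),
                       static_cast<int>(events_.size()), timeout_ms);
    if (n < 0)
      return errno == EINTR;  // Interrupted by a signal; treat as benign.
    for (int i = 0; i < n; ++i)
      std::printf("fd %d is readable\n", events_[i].data.fd);
    return true;
  }

 private:
  const int epoll_fd_;
  std::array<epoll_event, kEventCapacity> events_;  // No heap allocation.
};

}  // namespace

int main() {
  EpollWaiter waiter;
  waiter.Watch(STDIN_FILENO);  // Example: watch stdin for readability.
  waiter.WaitOnce(1000);       // Wait up to one second.
  return 0;
}

Capping the buffer does not lose readiness information: with level-triggered epoll (the default), any ready descriptors that do not fit in one call are simply reported again on the next epoll_wait(), so a fixed 128-entry array can replace the previously growable vector at the cost of, at most, a few extra syscalls under very heavy load.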
rtc_base/physical_socket_server.cc
@@ -1365,12 +1365,6 @@ bool PhysicalSocketServer::WaitSelect(int cmsWait, bool process_io) {
 
 #if defined(WEBRTC_USE_EPOLL)
 
-// Initial number of events to process with one call to "epoll_wait".
-static const size_t kInitialEpollEvents = 128;
-
-// Maximum number of events to process with one call to "epoll_wait".
-static const size_t kMaxEpollEvents = 8192;
-
 void PhysicalSocketServer::AddEpoll(Dispatcher* pdispatcher) {
   RTC_DCHECK(epoll_fd_ != INVALID_SOCKET);
   int fd = pdispatcher->GetDescriptor();
@@ -1437,20 +1431,13 @@ bool PhysicalSocketServer::WaitEpoll(int cmsWait) {
     tvStop = TimeAfter(cmsWait);
   }
 
-  if (epoll_events_.empty()) {
-    // The initial space to receive events is created only if epoll is used.
-    epoll_events_.resize(kInitialEpollEvents);
-  }
-
   fWait_ = true;
-
   while (fWait_) {
     // Wait then call handlers as appropriate
     // < 0 means error
     // 0 means timeout
     // > 0 means count of descriptors ready
-    int n = epoll_wait(epoll_fd_, &epoll_events_[0],
-                       static_cast<int>(epoll_events_.size()),
+    int n = epoll_wait(epoll_fd_, epoll_events_.data(), epoll_events_.size(),
                        static_cast<int>(tvWait));
     if (n < 0) {
       if (errno != EINTR) {
@@ -1483,13 +1470,6 @@ bool PhysicalSocketServer::WaitEpoll(int cmsWait) {
       }
     }
 
-    if (static_cast<size_t>(n) == epoll_events_.size() &&
-        epoll_events_.size() < kMaxEpollEvents) {
-      // We used the complete space to receive events, increase size for future
-      // iterations.
-      epoll_events_.resize(std::max(epoll_events_.size() * 2, kMaxEpollEvents));
-    }
-
     if (cmsWait != kForever) {
       tvWait = TimeDiff(tvStop, TimeMillis());
       if (tvWait < 0) {
rtc_base/physical_socket_server.h
@@ -16,6 +16,7 @@
 #define WEBRTC_USE_EPOLL 1
 #endif
 
+#include <array>
 #include <memory>
 #include <set>
 #include <vector>
@@ -81,6 +82,9 @@ class RTC_EXPORT PhysicalSocketServer : public SocketServer {
   void Update(Dispatcher* dispatcher);
 
  private:
+  // The number of events to process with one call to "epoll_wait".
+  static constexpr size_t kNumEpollEvents = 128;
+
   typedef std::set<Dispatcher*> DispatcherSet;
 
   void AddRemovePendingDispatchers() RTC_EXCLUSIVE_LOCKS_REQUIRED(crit_);
@@ -95,8 +99,12 @@ class RTC_EXPORT PhysicalSocketServer : public SocketServer {
   bool WaitEpoll(int cms);
   bool WaitPoll(int cms, Dispatcher* dispatcher);
 
+  // This array is accessed in isolation by a thread calling into Wait().
+  // It's useless to use a SequenceChecker to guard it because a socket
+  // server can outlive the thread it's bound to, forcing the Wait call
+  // to have to reset the sequence checker on Wait calls.
+  std::array<epoll_event, kNumEpollEvents> epoll_events_;
   const int epoll_fd_ = INVALID_SOCKET;
-  std::vector<struct epoll_event> epoll_events_;
 #endif  // WEBRTC_USE_EPOLL
   DispatcherSet dispatchers_ RTC_GUARDED_BY(crit_);
   DispatcherSet pending_add_dispatchers_ RTC_GUARDED_BY(crit_);