Use backticks not vertical bars to denote variables in comments for /sdk

Bug: webrtc:12338
Change-Id: Ifaad29ccb63b0f2f3aeefb77dae061ebc7f87e6c
Reviewed-on: https://webrtc-review.googlesource.com/c/src/+/227024
Reviewed-by: Harald Alvestrand <hta@webrtc.org>
Commit-Queue: Artem Titov <titovartem@webrtc.org>
Cr-Commit-Position: refs/heads/master@{#34561}
Artem Titov 2021-07-27 12:23:39 +02:00 committed by WebRTC LUCI CQ
parent f0671921a1
commit d7ac581045
87 changed files with 235 additions and 235 deletions


@@ -158,7 +158,7 @@ public class Camera1Enumerator implements CameraEnumerator {
 return ranges;
 }
-// Returns the camera index for camera with name |deviceName|, or throws IllegalArgumentException
+// Returns the camera index for camera with name `deviceName`, or throws IllegalArgumentException
 // if no such camera can be found.
 static int getCameraIndex(String deviceName) {
 Logging.d(TAG, "getCameraIndex: " + deviceName);


@@ -152,24 +152,24 @@ public class CameraEnumerationAndroid {
 }
 }
-// Prefer a fps range with an upper bound close to |framerate|. Also prefer a fps range with a low
+// Prefer a fps range with an upper bound close to `framerate`. Also prefer a fps range with a low
 // lower bound, to allow the framerate to fluctuate based on lightning conditions.
 public static CaptureFormat.FramerateRange getClosestSupportedFramerateRange(
 List<CaptureFormat.FramerateRange> supportedFramerates, final int requestedFps) {
 return Collections.min(
 supportedFramerates, new ClosestComparator<CaptureFormat.FramerateRange>() {
-// Progressive penalty if the upper bound is further away than |MAX_FPS_DIFF_THRESHOLD|
+// Progressive penalty if the upper bound is further away than `MAX_FPS_DIFF_THRESHOLD`
 // from requested.
 private static final int MAX_FPS_DIFF_THRESHOLD = 5000;
 private static final int MAX_FPS_LOW_DIFF_WEIGHT = 1;
 private static final int MAX_FPS_HIGH_DIFF_WEIGHT = 3;
-// Progressive penalty if the lower bound is bigger than |MIN_FPS_THRESHOLD|.
+// Progressive penalty if the lower bound is bigger than `MIN_FPS_THRESHOLD`.
 private static final int MIN_FPS_THRESHOLD = 8000;
 private static final int MIN_FPS_LOW_VALUE_WEIGHT = 1;
 private static final int MIN_FPS_HIGH_VALUE_WEIGHT = 4;
-// Use one weight for small |value| less than |threshold|, and another weight above.
+// Use one weight for small `value` less than `threshold`, and another weight above.
 private int progressivePenalty(int value, int threshold, int lowWeight, int highWeight) {
 return (value < threshold) ? value * lowWeight
 : threshold * lowWeight + (value - threshold) * highWeight;
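
A standalone sketch, not part of the commit, of how the weights above interact: progressivePenalty() is copied from the hunk, while the way the upper-bound and lower-bound penalties are summed mirrors the surrounding comparator's intent and is an assumption here (class and method names are invented for the illustration).

```java
// Illustration only: scoring two candidate fps ranges for a requested 30 fps.
public final class FramerateRangePenaltyDemo {
  private static int progressivePenalty(int value, int threshold, int lowWeight, int highWeight) {
    return (value < threshold) ? value * lowWeight
                               : threshold * lowWeight + (value - threshold) * highWeight;
  }

  // Framerate bounds are in units of fps * 1000, as in CameraEnumerationAndroid.
  private static int score(int minFps, int maxFps, int requestedFps) {
    // Penalize the upper bound for being far from the request, and the lower bound for being high.
    return progressivePenalty(Math.abs(requestedFps * 1000 - maxFps), 5000, 1, 3)
        + progressivePenalty(minFps, 8000, 1, 4);
  }

  public static void main(String[] args) {
    System.out.println(score(7000, 30000, 30));   // [7, 30] fps: 0 + 7000 = 7000
    System.out.println(score(24000, 24000, 30));  // [24, 24] fps: 8000 + 72000 = 80000
  }
}
```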


@@ -48,7 +48,7 @@ public interface CameraVideoCapturer extends VideoCapturer {
 * The callback may be called on an arbitrary thread.
 */
 public interface CameraSwitchHandler {
-// Invoked on success. |isFrontCamera| is true if the new camera is front facing.
+// Invoked on success. `isFrontCamera` is true if the new camera is front facing.
 void onCameraSwitchDone(boolean isFrontCamera);
 // Invoked on failure, e.g. camera is stopped or only one camera available.
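
A hedged usage sketch for the CameraSwitchHandler interface shown above. The success callback matches the hunk; the error callback name onCameraSwitchError(String) and the helper class are assumptions.

```java
import android.util.Log;
import org.webrtc.CameraVideoCapturer;

final class CameraSwitchDemo {
  // Assumes `capturer` is an initialized, running CameraVideoCapturer.
  static void switchAndLog(CameraVideoCapturer capturer) {
    capturer.switchCamera(new CameraVideoCapturer.CameraSwitchHandler() {
      @Override
      public void onCameraSwitchDone(boolean isFrontCamera) {
        Log.d("CameraSwitchDemo", "Now using " + (isFrontCamera ? "front" : "back") + " camera");
      }

      @Override
      public void onCameraSwitchError(String errorDescription) {
        Log.w("CameraSwitchDemo", "Switch failed: " + errorDescription);
      }
    });
  }
}
```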


@@ -63,7 +63,7 @@ public class DataChannel {
 public final ByteBuffer data;
 /**
-* Indicates whether |data| contains UTF-8 text or "binary data"
+* Indicates whether `data` contains UTF-8 text or "binary data"
 * (i.e. anything else).
 */
 public final boolean binary;
@@ -110,7 +110,7 @@ public class DataChannel {
 this.nativeDataChannel = nativeDataChannel;
 }
-/** Register |observer|, replacing any previously-registered observer. */
+/** Register `observer`, replacing any previously-registered observer. */
 public void registerObserver(Observer observer) {
 checkDataChannelExists();
 if (nativeObserver != 0) {
@@ -157,7 +157,7 @@ public class DataChannel {
 nativeClose();
 }
-/** Send |data| to the remote peer; return success. */
+/** Send `data` to the remote peer; return success. */
 public boolean send(Buffer buffer) {
 checkDataChannelExists();
 // TODO(fischman): this could be cleverer about avoiding copies if the
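
A hedged sketch of the `binary` flag and send() documented above: sending a UTF-8 string as a text message over an already-open data channel (the helper class and method names are invented for the example).

```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.webrtc.DataChannel;

final class DataChannelSendDemo {
  // Assumes `dataChannel` is an open org.webrtc.DataChannel.
  static boolean sendText(DataChannel dataChannel, String text) {
    ByteBuffer data = ByteBuffer.wrap(text.getBytes(StandardCharsets.UTF_8));
    // binary = false marks the payload as UTF-8 text rather than "binary data".
    return dataChannel.send(new DataChannel.Buffer(data, /* binary= */ false));
  }
}
```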


@@ -146,8 +146,8 @@ public interface EglBase {
 }
 /**
-* Create a new context with the specified config attributes, sharing data with |sharedContext|.
-* If |sharedContext| is null, a root context is created. This function will try to create an EGL
+* Create a new context with the specified config attributes, sharing data with `sharedContext`.
+* If `sharedContext` is null, a root context is created. This function will try to create an EGL
 * 1.4 context if possible, and an EGL 1.0 context otherwise.
 */
 public static EglBase create(@Nullable Context sharedContext, int[] configAttributes) {
@@ -171,7 +171,7 @@ public interface EglBase {
 }
 /**
-* Helper function for creating a plain context, sharing data with |sharedContext|. This function
+* Helper function for creating a plain context, sharing data with `sharedContext`. This function
 * will try to create an EGL 1.4 context if possible, and an EGL 1.0 context otherwise.
 */
 public static EglBase create(Context sharedContext) {
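
A short sketch of the create() overloads documented above: a root context (null sharedContext) plus a second context sharing its resources. CONFIG_PLAIN, getEglBaseContext() and release() appear elsewhere in this commit; treat the exact combination as illustrative rather than a recommended setup.

```java
import org.webrtc.EglBase;

final class EglContextDemo {
  static void createContexts() {
    // Root context: no sharing (null sharedContext).
    EglBase rootEgl = EglBase.create(/* sharedContext= */ null, EglBase.CONFIG_PLAIN);
    // Second context sharing textures/buffers with the root context.
    EglBase sharedEgl = EglBase.create(rootEgl.getEglBaseContext(), EglBase.CONFIG_PLAIN);
    sharedEgl.release();
    rootEgl.release();
  }
}
```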


@@ -111,8 +111,8 @@ public class EglRenderer implements VideoSink {
 protected final String name;
-// |renderThreadHandler| is a handler for communicating with |renderThread|, and is synchronized
-// on |handlerLock|.
+// `renderThreadHandler` is a handler for communicating with `renderThread`, and is synchronized
+// on `handlerLock`.
 private final Object handlerLock = new Object();
 @Nullable private Handler renderThreadHandler;
@@ -136,11 +136,11 @@ public class EglRenderer implements VideoSink {
 private boolean usePresentationTimeStamp;
 private final Matrix drawMatrix = new Matrix();
-// Pending frame to render. Serves as a queue with size 1. Synchronized on |frameLock|.
+// Pending frame to render. Serves as a queue with size 1. Synchronized on `frameLock`.
 private final Object frameLock = new Object();
 @Nullable private VideoFrame pendingFrame;
-// These variables are synchronized on |layoutLock|.
+// These variables are synchronized on `layoutLock`.
 private final Object layoutLock = new Object();
 private float layoutAspectRatio;
 // If true, mirrors the video stream horizontally.
@@ -148,7 +148,7 @@ public class EglRenderer implements VideoSink {
 // If true, mirrors the video stream vertically.
 private boolean mirrorVertically;
-// These variables are synchronized on |statisticsLock|.
+// These variables are synchronized on `statisticsLock`.
 private final Object statisticsLock = new Object();
 // Total number of video frames received in renderFrame() call.
 private int framesReceived;
@@ -198,9 +198,9 @@ public class EglRenderer implements VideoSink {
 }
 /**
-* Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+* Initialize this class, sharing resources with `sharedContext`. The custom `drawer` will be used
 * for drawing frames on the EGLSurface. This class is responsible for calling release() on
-* |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+* `drawer`. It is allowed to call init() to reinitialize the renderer after a previous
 * init()/release() cycle. If usePresentationTimeStamp is true, eglPresentationTimeANDROID will be
 * set with the frame timestamps, which specifies desired presentation time and might be useful
 * for e.g. syncing audio and video.
@@ -592,10 +592,10 @@ public class EglRenderer implements VideoSink {
 }
 /**
-* Renders and releases |pendingFrame|.
+* Renders and releases `pendingFrame`.
 */
 private void renderFrameOnRenderThread() {
-// Fetch and render |pendingFrame|.
+// Fetch and render `pendingFrame`.
 final VideoFrame frame;
 synchronized (frameLock) {
 if (pendingFrame == null) {


@@ -78,16 +78,16 @@ public class GlShader {
 }
 /**
-* Enable and upload a vertex array for attribute |label|. The vertex data is specified in
-* |buffer| with |dimension| number of components per vertex.
+* Enable and upload a vertex array for attribute `label`. The vertex data is specified in
+* `buffer` with `dimension` number of components per vertex.
 */
 public void setVertexAttribArray(String label, int dimension, FloatBuffer buffer) {
 setVertexAttribArray(label, dimension, 0 /* stride */, buffer);
 }
 /**
-* Enable and upload a vertex array for attribute |label|. The vertex data is specified in
-* |buffer| with |dimension| number of components per vertex and specified |stride|.
+* Enable and upload a vertex array for attribute `label`. The vertex data is specified in
+* `buffer` with `dimension` number of components per vertex and specified `stride`.
 */
 public void setVertexAttribArray(String label, int dimension, int stride, FloatBuffer buffer) {
 if (program == -1) {


@@ -18,12 +18,12 @@ import java.util.Map;
 // Rtc histograms can be queried through the API, getAndReset().
 // The returned map holds the name of a histogram and its samples.
 //
-// Example of |map| with one histogram:
-// |name|: "WebRTC.Video.InputFramesPerSecond"
-// |min|: 1
-// |max|: 100
-// |bucketCount|: 50
-// |samples|: [30]:1
+// Example of `map` with one histogram:
+// `name`: "WebRTC.Video.InputFramesPerSecond"
+// `min`: 1
+// `max`: 100
+// `bucketCount`: 50
+// `samples`: [30]:1
 //
 // Most histograms are not updated frequently (e.g. most video metrics are an
 // average over the call and recorded when a stream is removed).
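
A hedged sketch of reading the histogram map described above via getAndReset(). The field names (map, min, max, bucketCount, samples) follow the org.webrtc.Metrics API as I understand it and should be treated as assumptions.

```java
import java.util.Map;
import org.webrtc.Metrics;

final class MetricsDump {
  static void dump() {
    Metrics metrics = Metrics.getAndReset();
    for (Map.Entry<String, Metrics.HistogramInfo> entry : metrics.map.entrySet()) {
      Metrics.HistogramInfo info = entry.getValue();
      // e.g. "WebRTC.Video.InputFramesPerSecond min=1 max=100 buckets=50 samples={30=1}"
      System.out.println(entry.getKey() + " min=" + info.min + " max=" + info.max
          + " buckets=" + info.bucketCount + " samples=" + info.samples);
    }
  }
}
```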


@@ -98,9 +98,9 @@ public interface NetworkChangeDetector {
 /**
 * Called when network preference change for a (list of) connection type(s). (e.g WIFI) is
-* |NOT_PREFERRED| or |NEUTRAL|.
+* `NOT_PREFERRED` or `NEUTRAL`.
 *
-* <p>note: |types| is a list of ConnectionTypes, so that all cellular types can be modified in
+* <p>note: `types` is a list of ConnectionTypes, so that all cellular types can be modified in
 * one call.
 */
 public void onNetworkPreference(List<ConnectionType> types, @NetworkPreference int preference);


@@ -172,7 +172,7 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver implements Netwo
 }
 /**
-* Returns connection type and status information about |network|.
+* Returns connection type and status information about `network`.
 * Only callable on Lollipop and newer releases.
 */
 @SuppressLint("NewApi")
@@ -186,9 +186,9 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver implements Netwo
 return new NetworkState(false, -1, -1, -1, -1);
 }
 // The general logic of handling a VPN in this method is as follows. getNetworkInfo will
-// return the info of the network with the same id as in |network| when it is registered via
-// ConnectivityManager.registerNetworkAgent in Android. |networkInfo| may or may not indicate
-// the type TYPE_VPN if |network| is a VPN. To reliably detect the VPN interface, we need to
+// return the info of the network with the same id as in `network` when it is registered via
+// ConnectivityManager.registerNetworkAgent in Android. `networkInfo` may or may not indicate
+// the type TYPE_VPN if `network` is a VPN. To reliably detect the VPN interface, we need to
 // query the network capability as below in the case when networkInfo.getType() is not
 // TYPE_VPN. On the other hand when networkInfo.getType() is TYPE_VPN, the only solution so
 // far to obtain the underlying network information is to query the active network interface.
@@ -198,7 +198,7 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver implements Netwo
 // getActiveNetworkInfo may thus give the wrong interface information, and one should note
 // that getActiveNetworkInfo would return the default network interface if the VPN does not
 // specify its underlying networks in the implementation. Therefore, we need further compare
-// |network| to the active network. If they are not the same network, we will have to fall
+// `network` to the active network. If they are not the same network, we will have to fall
 // back to report an unknown network.
 if (networkInfo.getType() != ConnectivityManager.TYPE_VPN) {
@@ -209,15 +209,15 @@ public class NetworkMonitorAutoDetect extends BroadcastReceiver implements Netwo
 || !networkCapabilities.hasTransport(NetworkCapabilities.TRANSPORT_VPN)) {
 return getNetworkState(networkInfo);
 }
-// When |network| is in fact a VPN after querying its capability but |networkInfo| is not of
-// type TYPE_VPN, |networkInfo| contains the info for the underlying network, and we return
+// When `network` is in fact a VPN after querying its capability but `networkInfo` is not of
+// type TYPE_VPN, `networkInfo` contains the info for the underlying network, and we return
 // a NetworkState constructed from it.
 return new NetworkState(networkInfo.isConnected(), ConnectivityManager.TYPE_VPN, -1,
 networkInfo.getType(), networkInfo.getSubtype());
 }
-// When |networkInfo| is of type TYPE_VPN, which implies |network| is a VPN, we return the
-// NetworkState of the active network via getActiveNetworkInfo(), if |network| is the active
+// When `networkInfo` is of type TYPE_VPN, which implies `network` is a VPN, we return the
+// NetworkState of the active network via getActiveNetworkInfo(), if `network` is the active
 // network that supports the VPN. Otherwise, NetworkState of an unknown network with type -1
 // will be returned.
 //


@@ -169,9 +169,9 @@ public class PeerConnection {
 public final String password;
 public final TlsCertPolicy tlsCertPolicy;
-// If the URIs in |urls| only contain IP addresses, this field can be used
+// If the URIs in `urls` only contain IP addresses, this field can be used
 // to indicate the hostname, which may be necessary for TLS (using the SNI
-// extension). If |urls| itself contains the hostname, this isn't
+// extension). If `urls` itself contains the hostname, this isn't
 // necessary.
 public final String hostname;
@@ -1106,7 +1106,7 @@ public class PeerConnection {
 * transceiver will cause future calls to CreateOffer to add a media description
 * for the corresponding transceiver.
 *
-* <p>The initial value of |mid| in the returned transceiver is null. Setting a
+* <p>The initial value of `mid` in the returned transceiver is null. Setting a
 * new session description may change it to a non-null value.
 *
 * <p>https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-addtransceiver
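
A hedged sketch of the `hostname` field documented in the first hunk above: an IceServer whose URI holds a literal IP address, with the TLS name supplied separately for SNI. The builder methods (setUsername, setPassword, setHostname, createIceServer) are assumed to match org.webrtc.PeerConnection.IceServer; the URI and credentials are placeholders.

```java
import org.webrtc.PeerConnection;

final class IceServerHostnameDemo {
  static PeerConnection.IceServer buildTurnsServer() {
    // URI uses a literal IP, so the TLS/SNI name is provided via setHostname().
    return PeerConnection.IceServer.builder("turns:203.0.113.10:5349")
        .setUsername("user")             // placeholder credentials
        .setPassword("secret")
        .setHostname("turn.example.com") // name presented for TLS (SNI)
        .createIceServer();
  }
}
```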


@@ -133,7 +133,7 @@ public class PeerConnectionFactory {
 public static class Options {
 // Keep in sync with webrtc/rtc_base/network.h!
 //
-// These bit fields are defined for |networkIgnoreMask| below.
+// These bit fields are defined for `networkIgnoreMask` below.
 static final int ADAPTER_TYPE_UNKNOWN = 0;
 static final int ADAPTER_TYPE_ETHERNET = 1 << 0;
 static final int ADAPTER_TYPE_WIFI = 1 << 1;


@@ -14,8 +14,8 @@ import java.util.Map;
 /**
 * Java version of webrtc::RTCStats. Represents an RTCStats object, as
-* described in https://w3c.github.io/webrtc-stats/. The |id|, |timestampUs|
-* and |type| accessors have the same meaning for this class as for the
+* described in https://w3c.github.io/webrtc-stats/. The `id`, `timestampUs`
+* and `type` accessors have the same meaning for this class as for the
 * RTCStats dictionary. Each RTCStatsReport produced by getStats contains
 * multiple RTCStats objects; one for each underlying object (codec, stream,
 * transport, etc.) that was inspected to produce the stats.


@@ -123,9 +123,9 @@ public class RendererCommon {
 // clipped.
 // SCALE_ASPECT_BALANCED - Compromise between FIT and FILL. Video frame will fill as much as
 // possible of the view while maintaining aspect ratio, under the constraint that at least
-// |BALANCED_VISIBLE_FRACTION| of the frame content will be shown.
+// `BALANCED_VISIBLE_FRACTION` of the frame content will be shown.
 public static enum ScalingType { SCALE_ASPECT_FIT, SCALE_ASPECT_FILL, SCALE_ASPECT_BALANCED }
-// The minimum fraction of the frame content that will be shown for |SCALE_ASPECT_BALANCED|.
+// The minimum fraction of the frame content that will be shown for `SCALE_ASPECT_BALANCED`.
 // This limits excessive cropping when adjusting display size.
 private static float BALANCED_VISIBLE_FRACTION = 0.5625f;
@@ -209,7 +209,7 @@ public class RendererCommon {
 }
 /**
-* Move |matrix| transformation origin to (0.5, 0.5). This is the origin for texture coordinates
+* Move `matrix` transformation origin to (0.5, 0.5). This is the origin for texture coordinates
 * that are in the range 0 to 1.
 */
 private static void adjustOrigin(float[] matrix) {


@@ -39,7 +39,7 @@ public class RtpSender {
 *
 * @param takeOwnership If true, the RtpSender takes ownership of the track
 * from the caller, and will auto-dispose of it when no
-* longer needed. |takeOwnership| should only be used if
+* longer needed. `takeOwnership` should only be used if
 * the caller owns the track; it is not appropriate when
 * the track is owned by, for example, another RtpSender
 * or a MediaStream.


@@ -42,9 +42,9 @@ public class SurfaceEglRenderer extends EglRenderer implements SurfaceHolder.Cal
 }
 /**
-* Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+* Initialize this class, sharing resources with `sharedContext`. The custom `drawer` will be used
 * for drawing frames on the EGLSurface. This class is responsible for calling release() on
-* |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+* `drawer`. It is allowed to call init() to reinitialize the renderer after a previous
 * init()/release() cycle.
 */
 public void init(final EglBase.Context sharedContext,
@@ -125,7 +125,7 @@ public class SurfaceEglRenderer extends EglRenderer implements SurfaceHolder.Cal
 logD("surfaceChanged: format: " + format + " size: " + width + "x" + height);
 }
-// Update frame dimensions and report any changes to |rendererEvents|.
+// Update frame dimensions and report any changes to `rendererEvents`.
 private void updateFrameDimensionsAndReportEvents(VideoFrame frame) {
 synchronized (layoutLock) {
 if (isRenderingPaused) {


@@ -48,7 +48,7 @@ public class SurfaceTextureHelper {
 private static final String TAG = "SurfaceTextureHelper";
 /**
-* Construct a new SurfaceTextureHelper sharing OpenGL resources with |sharedContext|. A dedicated
+* Construct a new SurfaceTextureHelper sharing OpenGL resources with `sharedContext`. A dedicated
 * thread and handler is created for handling the SurfaceTexture. May return null if EGL fails to
 * initialize a pixel buffer surface and make it current. If alignTimestamps is true, the frame
 * timestamps will be aligned to rtc::TimeNanos(). If frame timestamps are aligned to
@@ -66,7 +66,7 @@ public class SurfaceTextureHelper {
 // The onFrameAvailable() callback will be executed on the SurfaceTexture ctor thread. See:
 // http://grepcode.com/file/repository.grepcode.com/java/ext/com.google.android/android/5.1.1_r1/android/graphics/SurfaceTexture.java#195.
 // Therefore, in order to control the callback thread on API lvl < 21, the SurfaceTextureHelper
-// is constructed on the |handler| thread.
+// is constructed on the `handler` thread.
 return ThreadUtils.invokeAtFrontUninterruptibly(handler, new Callable<SurfaceTextureHelper>() {
 @Nullable
 @Override
@@ -147,7 +147,7 @@ public class SurfaceTextureHelper {
 @Nullable private final TimestampAligner timestampAligner;
 private final FrameRefMonitor frameRefMonitor;
-// These variables are only accessed from the |handler| thread.
+// These variables are only accessed from the `handler` thread.
 @Nullable private VideoSink listener;
 // The possible states of this class.
 private boolean hasPendingTexture;
@@ -156,7 +156,7 @@ public class SurfaceTextureHelper {
 private int frameRotation;
 private int textureWidth;
 private int textureHeight;
-// |pendingListener| is set in setListener() and the runnable is posted to the handler thread.
+// `pendingListener` is set in setListener() and the runnable is posted to the handler thread.
 // setListener() is not allowed to be called again before stopListening(), so this is thread safe.
 @Nullable private VideoSink pendingListener;
 final Runnable setListenerRunnable = new Runnable() {
@@ -223,7 +223,7 @@ public class SurfaceTextureHelper {
 }
 /**
-* Start to stream textures to the given |listener|. If you need to change listener, you need to
+* Start to stream textures to the given `listener`. If you need to change listener, you need to
 * call stopListening() first.
 */
 public void startListening(final VideoSink listener) {
@@ -331,7 +331,7 @@ public class SurfaceTextureHelper {
 }
 /**
-* Posts to the correct thread to convert |textureBuffer| to I420.
+* Posts to the correct thread to convert `textureBuffer` to I420.
 *
 * @deprecated Use toI420() instead.
 */


@@ -64,7 +64,7 @@ public class SurfaceViewRenderer extends SurfaceView
 }
 /**
-* Initialize this class, sharing resources with |sharedContext|. It is allowed to call init() to
+* Initialize this class, sharing resources with `sharedContext`. It is allowed to call init() to
 * reinitialize the renderer after a previous init()/release() cycle.
 */
 public void init(EglBase.Context sharedContext, RendererCommon.RendererEvents rendererEvents) {
@@ -72,9 +72,9 @@ public class SurfaceViewRenderer extends SurfaceView
 }
 /**
-* Initialize this class, sharing resources with |sharedContext|. The custom |drawer| will be used
+* Initialize this class, sharing resources with `sharedContext`. The custom `drawer` will be used
 * for drawing frames on the EGLSurface. This class is responsible for calling release() on
-* |drawer|. It is allowed to call init() to reinitialize the renderer after a previous
+* `drawer`. It is allowed to call init() to reinitialize the renderer after a previous
 * init()/release() cycle.
 */
 public void init(final EglBase.Context sharedContext,


@@ -31,7 +31,7 @@ public class TimestampAligner {
 /**
 * Translates camera timestamps to the same timescale as is used by rtc::TimeNanos().
-* |cameraTimeNs| is assumed to be accurate, but with an unknown epoch and clock drift. Returns
+* `cameraTimeNs` is assumed to be accurate, but with an unknown epoch and clock drift. Returns
 * the translated timestamp.
 */
 public long translateTimestamp(long cameraTimeNs) {
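
A small hedged sketch of translateTimestamp() as documented above, mapping camera timestamps into the rtc::TimeNanos() timescale; the no-argument constructor and dispose() call are assumed from the surrounding class.

```java
import org.webrtc.TimestampAligner;

final class TimestampAlignerDemo {
  private final TimestampAligner aligner = new TimestampAligner();

  // `cameraTimeNs` comes from the camera clock (unknown epoch/drift); the result is
  // in the same timescale as rtc::TimeNanos() and can be used as a frame timestamp.
  long toRtcTimeNs(long cameraTimeNs) {
    return aligner.translateTimestamp(cameraTimeNs);
  }

  void release() {
    aligner.dispose();
  }
}
```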


@@ -238,7 +238,7 @@ public interface VideoEncoder {
 public interface Callback {
 /**
-* Old encoders assume that the byte buffer held by |frame| is not accessed after the call to
+* Old encoders assume that the byte buffer held by `frame` is not accessed after the call to
 * this method returns. If the pipeline downstream needs to hold on to the buffer, it then has
 * to make its own copy. We want to move to a model where no copying is needed, and instead use
 * retain()/release() to signal to the encoder when it is safe to reuse the buffer.


@@ -60,8 +60,8 @@ public class VideoFrame implements RefCounted {
 @Override @CalledByNative("Buffer") void release();
 /**
-* Crops a region defined by |cropx|, |cropY|, |cropWidth| and |cropHeight|. Scales it to size
-* |scaleWidth| x |scaleHeight|.
+* Crops a region defined by `cropx`, `cropY`, `cropWidth` and `cropHeight`. Scales it to size
+* `scaleWidth` x `scaleHeight`.
 */
 @CalledByNative("Buffer")
 Buffer cropAndScale(
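
A hedged sketch of the cropAndScale() contract above: center-cropping an assumed 1280x720 buffer to 4:3 and scaling the result to 640x480. The six-int parameter order follows the crop/scale arguments named in the comment.

```java
import org.webrtc.VideoFrame;

final class CropAndScaleDemo {
  // Center-crop a 1280x720 buffer to 4:3 (960x720), then scale it down to 640x480.
  static VideoFrame.Buffer toVga(VideoFrame.Buffer buffer) {
    // cropX = (1280 - 960) / 2 = 160, cropY = 0.
    return buffer.cropAndScale(160, 0, 960, 720, 640, 480);
  }
}
```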


@@ -61,7 +61,7 @@ public class VideoFrameDrawer {
 @Nullable private int[] yuvTextures;
 /**
-* Upload |planes| into OpenGL textures, taking stride into consideration.
+* Upload `planes` into OpenGL textures, taking stride into consideration.
 *
 * @return Array of three texture indices corresponding to Y-, U-, and V-plane respectively.
 */
@@ -145,8 +145,8 @@ public class VideoFrameDrawer {
 private int renderWidth;
 private int renderHeight;
-// Calculate the frame size after |renderMatrix| is applied. Stores the output in member variables
-// |renderWidth| and |renderHeight| to avoid allocations since this function is called for every
+// Calculate the frame size after `renderMatrix` is applied. Stores the output in member variables
+// `renderWidth` and `renderHeight` to avoid allocations since this function is called for every
 // frame.
 private void calculateTransformedRenderSize(
 int frameWidth, int frameHeight, @Nullable Matrix renderMatrix) {
@@ -155,7 +155,7 @@ public class VideoFrameDrawer {
 renderHeight = frameHeight;
 return;
 }
-// Transform the texture coordinates (in the range [0, 1]) according to |renderMatrix|.
+// Transform the texture coordinates (in the range [0, 1]) according to `renderMatrix`.
 renderMatrix.mapPoints(dstPoints, srcPoints);
 // Multiply with the width and height to get the positions in terms of pixels.


@@ -153,7 +153,7 @@ public final class YuvConverter {
 // +----+----+
 //
 // In memory, we use the same stride for all of Y, U and V. The
-// U data starts at offset |height| * |stride| from the Y data,
+// U data starts at offset `height` * `stride` from the Y data,
 // and the V data starts at at offset |stride/2| from the U
 // data, with rows of U and V data alternating.
 //
@@ -161,12 +161,12 @@
 // a single byte per pixel (EGL10.EGL_COLOR_BUFFER_TYPE,
 // EGL10.EGL_LUMINANCE_BUFFER,), but that seems to be
 // unsupported by devices. So do the following hack: Allocate an
-// RGBA buffer, of width |stride|/4. To render each of these
+// RGBA buffer, of width `stride`/4. To render each of these
 // large pixels, sample the texture at 4 different x coordinates
 // and store the results in the four components.
 //
 // Since the V data needs to start on a boundary of such a
-// larger pixel, it is not sufficient that |stride| is even, it
+// larger pixel, it is not sufficient that `stride` is even, it
 // has to be a multiple of 8 pixels.
 final int frameWidth = preparedBuffer.getWidth();
 final int frameHeight = preparedBuffer.getHeight();
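
A worked example of the offsets described in the comments above, for an assumed 640x480 frame with stride 640 (a multiple of 8); the numbers only illustrate the layout the comment describes.

```java
public final class YuvLayoutDemo {
  public static void main(String[] args) {
    final int height = 480;   // frame is 640x480
    final int stride = 640;   // same stride for Y, U and V; must be a multiple of 8
    final int uOffset = height * stride;      // U starts height * stride bytes after Y: 307200
    final int vOffset = uOffset + stride / 2; // V starts stride/2 bytes after the U data: 307520
    final int rgbaWidth = stride / 4;         // width of the RGBA staging buffer: 160
    System.out.println("uOffset=" + uOffset + " vOffset=" + vOffset + " rgbaWidth=" + rgbaWidth);
  }
}
```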


@@ -541,7 +541,7 @@ class CameraVideoCapturerTestFixtures {
 capturerInstance.capturer.stopCapture();
 capturerInstance.observer.releaseFrame();
-// We can't change |capturer| at this point, but we should not crash.
+// We can't change `capturer` at this point, but we should not crash.
 capturerInstance.capturer.switchCamera(null /* switchEventsHandler */);
 capturerInstance.capturer.changeCaptureFormat(DEFAULT_WIDTH, DEFAULT_HEIGHT, DEFAULT_FPS);


@@ -145,7 +145,7 @@ public class PeerConnectionTest {
 // TODO(fischman) MOAR test ideas:
 // - Test that PC.removeStream() works; requires a second
 // createOffer/createAnswer dance.
-// - audit each place that uses |constraints| for specifying non-trivial
+// - audit each place that uses `constraints` for specifying non-trivial
 // constraints (and ensure they're honored).
 // - test error cases
 // - ensure reasonable coverage of jni code is achieved. Coverage is


@@ -123,8 +123,8 @@ public class SurfaceTextureHelperTest {
 surfaceTextureHelper.startListening(listener);
 surfaceTextureHelper.setTextureSize(width, height);
-// Create resources for stubbing an OES texture producer. |eglOesBase| has the SurfaceTexture in
-// |surfaceTextureHelper| as the target EGLSurface.
+// Create resources for stubbing an OES texture producer. `eglOesBase` has the SurfaceTexture in
+// `surfaceTextureHelper` as the target EGLSurface.
 final EglBase eglOesBase = EglBase.create(eglBase.getEglBaseContext(), EglBase.CONFIG_PLAIN);
 eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
 assertEquals(eglOesBase.surfaceWidth(), width);
@@ -191,8 +191,8 @@ public class SurfaceTextureHelperTest {
 surfaceTextureHelper.startListening(listener);
 surfaceTextureHelper.setTextureSize(width, height);
-// Create resources for stubbing an OES texture producer. |eglOesBase| has the SurfaceTexture in
-// |surfaceTextureHelper| as the target EGLSurface.
+// Create resources for stubbing an OES texture producer. `eglOesBase` has the SurfaceTexture in
+// `surfaceTextureHelper` as the target EGLSurface.
 final EglBase eglOesBase = EglBase.create(eglBase.getEglBaseContext(), EglBase.CONFIG_PLAIN);
 eglOesBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
 assertEquals(eglOesBase.surfaceWidth(), width);
@@ -410,7 +410,7 @@ public class SurfaceTextureHelperTest {
 eglBase.swapBuffers();
 listener1.waitForTextureBuffer().release();
-// Stop listening - |listener1| should not receive any textures after this.
+// Stop listening - `listener1` should not receive any textures after this.
 surfaceTextureHelper.stopListening();
 // Connect different listener.
@@ -423,7 +423,7 @@ public class SurfaceTextureHelperTest {
 GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
 eglBase.swapBuffers();
-// Check that |listener2| received the frame, and not |listener1|.
+// Check that `listener2` received the frame, and not `listener1`.
 listener2.waitForTextureBuffer().release();
 listener1.assertNoFrameIsDelivered(/* waitPeriodMs= */ 1);
@@ -446,8 +446,8 @@ public class SurfaceTextureHelperTest {
 surfaceTextureHelper.startListening(listener);
 surfaceTextureHelper.setTextureSize(width, height);
-// Create resources for stubbing an OES texture producer. |eglBase| has the SurfaceTexture in
-// |surfaceTextureHelper| as the target EGLSurface.
+// Create resources for stubbing an OES texture producer. `eglBase` has the SurfaceTexture in
+// `surfaceTextureHelper` as the target EGLSurface.
 eglBase.createSurface(surfaceTextureHelper.getSurfaceTexture());
 assertEquals(eglBase.surfaceWidth(), width);


@@ -153,7 +153,7 @@ public class VideoFrameBufferTest {
 }
 /**
-* Create an RGB texture buffer available in |eglContext| with the same pixel content as the given
+* Create an RGB texture buffer available in `eglContext` with the same pixel content as the given
 * I420 buffer.
 */
 public static VideoFrame.TextureBuffer createRgbTextureBuffer(
@@ -191,7 +191,7 @@ public class VideoFrameBufferTest {
 }
 /**
-* Create an OES texture buffer available in |eglContext| with the same pixel content as the given
+* Create an OES texture buffer available in `eglContext` with the same pixel content as the given
 * I420 buffer.
 */
 public static VideoFrame.TextureBuffer createOesTextureBuffer(


@@ -18,7 +18,7 @@
 #include "sdk/android/native_api/jni/java_types.h"
 #include "sdk/android/native_api/jni/scoped_java_ref.h"
-// Abort the process if |jni| has a Java exception pending. This macros uses the
+// Abort the process if `jni` has a Java exception pending. This macros uses the
 // comma operator to execute ExceptionDescribe and ExceptionClear ignoring their
 // return values and sending "" to the error stream.
 #define CHECK_EXCEPTION(jni) \


@@ -30,7 +30,7 @@
 #include "rtc_base/checks.h"
 #include "sdk/android/native_api/jni/scoped_java_ref.h"
-// Abort the process if |jni| has a Java exception pending.
+// Abort the process if `jni` has a Java exception pending.
 // This macros uses the comma operator to execute ExceptionDescribe
 // and ExceptionClear ignoring their return values and sending ""
 // to the error stream.
@@ -110,7 +110,7 @@ class Iterable {
 RTC_DISALLOW_COPY_AND_ASSIGN(Iterable);
 };
-// Returns true if |obj| == null in Java.
+// Returns true if `obj` == null in Java.
 bool IsNull(JNIEnv* jni, const JavaRef<jobject>& obj);
 // Returns the name of a Java enum.
@@ -319,7 +319,7 @@ ScopedJavaLocalRef<jobject> NativeToJavaStringMap(JNIEnv* env,
 return builder.GetJavaMap();
 }
-// Return a |jlong| that will correctly convert back to |ptr|. This is needed
+// Return a `jlong` that will correctly convert back to `ptr`. This is needed
 // because the alternative (of silently passing a 32-bit pointer to a vararg
 // function expecting a 64-bit param) picks up garbage in the high 32 bits.
 jlong NativeToJavaPointer(void* ptr);


@@ -74,7 +74,7 @@ class JavaRef : public JavaRef<jobject> {
 template <typename T>
 class JavaParamRef : public JavaRef<T> {
 public:
-// Assumes that |obj| is a parameter passed to a JNI method from Java.
+// Assumes that `obj` is a parameter passed to a JNI method from Java.
 // Does not assume ownership as parameters should not be deleted.
 explicit JavaParamRef(T obj) : JavaRef<T>(obj) {}
 JavaParamRef(JNIEnv*, T obj) : JavaRef<T>(obj) {}
@@ -112,7 +112,7 @@ class ScopedJavaLocalRef : public JavaRef<T> {
 Reset(other.obj(), OwnershipPolicy::RETAIN);
 }
-// Assumes that |obj| is a reference to a Java object and takes
+// Assumes that `obj` is a reference to a Java object and takes
 // ownership of this reference. This should preferably not be used
 // outside of JNI helper functions.
 ScopedJavaLocalRef(JNIEnv* env, T obj) : JavaRef<T>(obj), env_(env) {}


@@ -20,7 +20,7 @@
 namespace webrtc {
-// Creates java PeerConnectionFactory with specified |pcf|.
+// Creates java PeerConnectionFactory with specified `pcf`.
 jobject NativeToJavaPeerConnectionFactory(
 JNIEnv* jni,
 rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> pcf,


@@ -99,7 +99,7 @@ ABSL_CONST_INIT GlobalMutex g_signal_handler_lock(absl::kConstInit);
 SignalHandlerOutputState* volatile g_signal_handler_output_state;
 // This function is called iteratively for each stack trace element and stores
-// the element in the array from |unwind_output_state|.
+// the element in the array from `unwind_output_state`.
 _Unwind_Reason_Code UnwindBacktrace(struct _Unwind_Context* unwind_context,
 void* unwind_output_state) {
 SignalHandlerOutputState* const output_state =
@@ -136,7 +136,7 @@ void SignalHandler(int signum, siginfo_t* info, void* ptr) {
 // Temporarily change the signal handler to a function that records a raw stack
 // trace and interrupt the given tid. This function will block until the output
-// thread stack trace has been stored in |params|. The return value is an error
+// thread stack trace has been stored in `params`. The return value is an error
 // string on failure and null on success.
 const char* CaptureRawStacktrace(int pid,
 int tid,
@@ -206,8 +206,8 @@ std::vector<StackTraceElement> FormatStackTrace(
 std::vector<StackTraceElement> GetStackTrace(int tid) {
 // Only a thread itself can unwind its stack, so we will interrupt the given
 // tid with a custom signal handler in order to unwind its stack. The stack
-// will be recorded to |params| through the use of the global pointer
-// |g_signal_handler_param|.
+// will be recorded to `params` through the use of the global pointer
+// `g_signal_handler_param`.
 SignalHandlerOutputState params;
 const char* error_string = CaptureRawStacktrace(getpid(), tid, &params);


@@ -65,7 +65,7 @@ static const int kFilePlayTimeInSec = 5;
 static const size_t kBitsPerSample = 16;
 static const size_t kBytesPerSample = kBitsPerSample / 8;
 // Run the full-duplex test during this time (unit is in seconds).
-// Note that first |kNumIgnoreFirstCallbacks| are ignored.
+// Note that first `kNumIgnoreFirstCallbacks` are ignored.
 static const int kFullDuplexTimeInSec = 5;
 // Wait for the callback sequence to stabilize by ignoring this amount of the
 // initial callbacks (avoids initial FIFO access).
@@ -124,7 +124,7 @@ class FileAudioStream : public AudioStreamInterface {
 void Write(const void* source, size_t num_frames) override {}
 // Read samples from file stored in memory (at construction) and copy
-// |num_frames| (<=> 10ms) to the |destination| byte buffer.
+// `num_frames` (<=> 10ms) to the `destination` byte buffer.
 void Read(void* destination, size_t num_frames) override {
 memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
 num_frames * sizeof(int16_t));
@@ -168,7 +168,7 @@ class FifoAudioStream : public AudioStreamInterface {
 ~FifoAudioStream() { Flush(); }
-// Allocate new memory, copy |num_frames| samples from |source| into memory
+// Allocate new memory, copy `num_frames` samples from `source` into memory
 // and add pointer to the memory location to end of the list.
 // Increases the size of the FIFO by one element.
 void Write(const void* source, size_t num_frames) override {
@@ -189,8 +189,8 @@
 total_written_elements_ += size;
 }
-// Read pointer to data buffer from front of list, copy |num_frames| of stored
-// data into |destination| and delete the utilized memory allocation.
+// Read pointer to data buffer from front of list, copy `num_frames` of stored
+// data into `destination` and delete the utilized memory allocation.
 // Decreases the size of the FIFO by one element.
 void Read(void* destination, size_t num_frames) override {
 ASSERT_EQ(num_frames, frames_per_buffer_);
@@ -248,7 +248,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
 rec_count_(0),
 pulse_time_(0) {}
-// Insert periodic impulses in first two samples of |destination|.
+// Insert periodic impulses in first two samples of `destination`.
 void Read(void* destination, size_t num_frames) override {
 ASSERT_EQ(num_frames, frames_per_buffer_);
 if (play_count_ == 0) {
@@ -269,14 +269,14 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
 }
 }
-// Detect received impulses in |source|, derive time between transmission and
+// Detect received impulses in `source`, derive time between transmission and
 // detection and add the calculated delay to list of latencies.
 void Write(const void* source, size_t num_frames) override {
 ASSERT_EQ(num_frames, frames_per_buffer_);
 rec_count_++;
 if (pulse_time_ == 0) {
 // Avoid detection of new impulse response until a new impulse has
-// been transmitted (sets |pulse_time_| to value larger than zero).
+// been transmitted (sets `pulse_time_` to value larger than zero).
 return;
 }
 const int16_t* ptr16 = static_cast<const int16_t*>(source);
@@ -295,7 +295,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
 // Total latency is the difference between transmit time and detection
 // tome plus the extra delay within the buffer in which we detected the
 // received impulse. It is transmitted at sample 0 but can be received
-// at sample N where N > 0. The term |extra_delay| accounts for N and it
+// at sample N where N > 0. The term `extra_delay` accounts for N and it
 // is a value between 0 and 10ms.
 latencies_.push_back(now_time - pulse_time_ + extra_delay);
 pulse_time_ = 0;


@@ -514,7 +514,7 @@ class AndroidVideoDecoder implements VideoDecoder, VideoSink {
 throw new AssertionError("Stride is not divisible by two: " + stride);
 }
-// Note that the case with odd |sliceHeight| is handled in a special way.
+// Note that the case with odd `sliceHeight` is handled in a special way.
 // The chroma height contained in the payload is rounded down instead of
 // up, making it one row less than what we expect in WebRTC. Therefore, we
 // have to duplicate the last chroma rows for this case. Also, the offset


@@ -133,7 +133,7 @@ class Camera1Session implements CameraSession {
 private static CaptureFormat findClosestCaptureFormat(
 android.hardware.Camera.Parameters parameters, int width, int height, int framerate) {
-// Find closest supported format for |width| x |height| @ |framerate|.
+// Find closest supported format for `width` x `height` @ `framerate`.
 final List<CaptureFormat.FramerateRange> supportedFramerates =
 Camera1Enumerator.convertFramerates(parameters.getSupportedPreviewFpsRange());
 Logging.d(TAG, "Available fps ranges: " + supportedFramerates);


@@ -69,7 +69,7 @@ class EglBase14Impl implements EglBase14 {
 }
 // Create a new context with the specified config type, sharing data with sharedContext.
-// |sharedContext| may be null.
+// `sharedContext` may be null.
 public EglBase14Impl(EGLContext sharedContext, int[] configAttributes) {
 eglDisplay = getEglDisplay();
 eglConfig = getEglConfig(eglDisplay, configAttributes);


@ -22,7 +22,7 @@ import org.webrtc.Logging;
// This class wraps control of three different platform effects. Supported // This class wraps control of three different platform effects. Supported
// effects are: AcousticEchoCanceler (AEC) and NoiseSuppressor (NS). // effects are: AcousticEchoCanceler (AEC) and NoiseSuppressor (NS).
// Calling enable() will activate all effects that are // Calling enable() will activate all effects that are
// supported by the device if the corresponding |shouldEnableXXX| member is set. // supported by the device if the corresponding `shouldEnableXXX` member is set.
class WebRtcAudioEffects { class WebRtcAudioEffects {
private static final boolean DEBUG = false; private static final boolean DEBUG = false;
@ -71,7 +71,7 @@ class WebRtcAudioEffects {
} }
// Call this method to enable or disable the platform AEC. It modifies // Call this method to enable or disable the platform AEC. It modifies
// |shouldEnableAec| which is used in enable() where the actual state // `shouldEnableAec` which is used in enable() where the actual state
// of the AEC effect is modified. Returns true if HW AEC is supported and // of the AEC effect is modified. Returns true if HW AEC is supported and
// false otherwise. // false otherwise.
public boolean setAEC(boolean enable) { public boolean setAEC(boolean enable) {
@ -90,7 +90,7 @@ class WebRtcAudioEffects {
} }
// Call this method to enable or disable the platform NS. It modifies // Call this method to enable or disable the platform NS. It modifies
// |shouldEnableNs| which is used in enable() where the actual state // `shouldEnableNs` which is used in enable() where the actual state
// of the NS effect is modified. Returns true if HW NS is supported and // of the NS effect is modified. Returns true if HW NS is supported and
// false otherwise. // false otherwise.
public boolean setNS(boolean enable) { public boolean setNS(boolean enable) {
@ -180,7 +180,7 @@ class WebRtcAudioEffects {
} }
} }
// Returns true for effect types in |type| that are of "VoIP" types: // Returns true for effect types in `type` that are of "VoIP" types:
// Acoustic Echo Canceler (AEC) or Automatic Gain Control (AGC) or // Acoustic Echo Canceler (AEC) or Automatic Gain Control (AGC) or
// Noise Suppressor (NS). Note that an extra check for support is needed // Noise Suppressor (NS). Note that an extra check for support is needed
// in each comparison since some devices include effects in the // in each comparison since some devices include effects in the
@ -217,7 +217,7 @@ class WebRtcAudioEffects {
} }
// Returns true if an effect of the specified type is available. Functionally // Returns true if an effect of the specified type is available. Functionally
// equivalent to (NoiseSuppressor|AutomaticGainControl|...).isAvailable(), but // equivalent to (NoiseSuppressor`AutomaticGainControl`...).isAvailable(), but
// faster as it avoids the expensive OS call to enumerate effects. // faster as it avoids the expensive OS call to enumerate effects.
private static boolean isEffectTypeAvailable(UUID effectType, UUID blockListedUuid) { private static boolean isEffectTypeAvailable(UUID effectType, UUID blockListedUuid) {
Descriptor[] effects = getAvailableEffects(); Descriptor[] effects = getAvailableEffects();
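The effects wrapper records the desired AEC/NS state in `shouldEnableAec` / `shouldEnableNs` and only touches the platform effects later, in enable(). A C++ sketch of that deferred-flag pattern, with the platform calls replaced by hypothetical stubs:

```cpp
#include <iostream>

// Hypothetical stand-ins for the platform effect hooks.
bool PlatformAecSupported() { return true; }
bool PlatformNsSupported() { return true; }
void PlatformSetAecEnabled(bool on) { std::cout << "AEC: " << on << "\n"; }
void PlatformSetNsEnabled(bool on) { std::cout << "NS: " << on << "\n"; }

class AudioEffects {
 public:
  // Record the desired state; the effect itself is only modified in Enable().
  bool SetAec(bool enable) {
    if (!PlatformAecSupported()) return false;
    should_enable_aec_ = enable;
    return true;
  }
  bool SetNs(bool enable) {
    if (!PlatformNsSupported()) return false;
    should_enable_ns_ = enable;
    return true;
  }
  // Apply the recorded flags to the platform effects in one place.
  void Enable() {
    PlatformSetAecEnabled(should_enable_aec_);
    PlatformSetNsEnabled(should_enable_ns_);
  }

 private:
  bool should_enable_aec_ = false;
  bool should_enable_ns_ = false;
};
```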

View file

@ -237,7 +237,7 @@ class WebRtcAudioRecord {
// Returns true if verifyAudioConfig() succeeds. This value is set after a specific delay when // Returns true if verifyAudioConfig() succeeds. This value is set after a specific delay when
// startRecording() has been called. Hence, it should preferably be called in combination with // startRecording() has been called. Hence, it should preferably be called in combination with
// stopRecording() to ensure that it has been set properly. |isAudioConfigVerified| is // stopRecording() to ensure that it has been set properly. `isAudioConfigVerified` is
// enabled in WebRtcAudioRecord to ensure that the returned value is valid. // enabled in WebRtcAudioRecord to ensure that the returned value is valid.
@CalledByNative @CalledByNative
boolean isAudioSourceMatchingRecordingSession() { boolean isAudioSourceMatchingRecordingSession() {
@ -491,7 +491,7 @@ class WebRtcAudioRecord {
long nativeAudioRecordJni, ByteBuffer byteBuffer); long nativeAudioRecordJni, ByteBuffer byteBuffer);
private native void nativeDataIsRecorded(long nativeAudioRecordJni, int bytes); private native void nativeDataIsRecorded(long nativeAudioRecordJni, int bytes);
// Sets all recorded samples to zero if |mute| is true, i.e., ensures that // Sets all recorded samples to zero if `mute` is true, i.e., ensures that
// the microphone is muted. // the microphone is muted.
public void setMicrophoneMute(boolean mute) { public void setMicrophoneMute(boolean mute) {
Logging.w(TAG, "setMicrophoneMute(" + mute + ")"); Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
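setMicrophoneMute() forces silence by replacing the recorded samples with zeros before they are delivered. A minimal C++ sketch of the same idea:

```cpp
#include <atomic>
#include <cstdint>
#include <cstring>
#include <vector>

std::atomic<bool> microphone_mute{false};

// Zeroes the captured buffer when the mute flag is set, so consumers receive
// silence instead of the microphone signal.
void MaybeMuteRecordedAudio(std::vector<int16_t>* samples) {
  if (microphone_mute.load(std::memory_order_relaxed)) {
    std::memset(samples->data(), 0, samples->size() * sizeof(int16_t));
  }
}
```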

View file

@ -76,7 +76,7 @@ class WebRtcAudioTrack {
private @Nullable AudioTrackThread audioThread; private @Nullable AudioTrackThread audioThread;
private final VolumeLogger volumeLogger; private final VolumeLogger volumeLogger;
// Samples to be played are replaced by zeros if |speakerMute| is set to true. // Samples to be played are replaced by zeros if `speakerMute` is set to true.
// Can be used to ensure that the speaker is fully muted. // Can be used to ensure that the speaker is fully muted.
private volatile boolean speakerMute; private volatile boolean speakerMute;
private byte[] emptyBytes; private byte[] emptyBytes;
@ -218,9 +218,9 @@ class WebRtcAudioTrack {
Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes); Logging.d(TAG, "minBufferSizeInBytes: " + minBufferSizeInBytes);
// For the streaming mode, data must be written to the audio sink in // For the streaming mode, data must be written to the audio sink in
// chunks of size (given by byteBuffer.capacity()) less than or equal // chunks of size (given by byteBuffer.capacity()) less than or equal
// to the total buffer size |minBufferSizeInBytes|. But, we have seen // to the total buffer size `minBufferSizeInBytes`. But, we have seen
// reports of "getMinBufferSize(): error querying hardware". Hence, it // reports of "getMinBufferSize(): error querying hardware". Hence, it
// can happen that |minBufferSizeInBytes| contains an invalid value. // can happen that `minBufferSizeInBytes` contains an invalid value.
if (minBufferSizeInBytes < byteBuffer.capacity()) { if (minBufferSizeInBytes < byteBuffer.capacity()) {
reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value."); reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
return -1; return -1;
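The comment above notes that each write chunk must fit within the reported minimum buffer size, and that getMinBufferSize() can occasionally return an invalid value which has to be rejected. A small C++ sketch of that sanity check:

```cpp
#include <cstddef>
#include <iostream>

// Rejects a reported minimum buffer size that is smaller than the chunk we
// plan to write per callback; such a value cannot describe a real buffer.
bool IsValidMinBufferSize(size_t min_buffer_size_bytes, size_t chunk_bytes) {
  if (min_buffer_size_bytes < chunk_bytes) {
    std::cerr << "Invalid minimum buffer size: " << min_buffer_size_bytes
              << " < chunk size " << chunk_bytes << "\n";
    return false;
  }
  return true;
}
```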
@ -559,7 +559,7 @@ class WebRtcAudioTrack {
long nativeAudioTrackJni, ByteBuffer byteBuffer); long nativeAudioTrackJni, ByteBuffer byteBuffer);
private static native void nativeGetPlayoutData(long nativeAudioTrackJni, int bytes); private static native void nativeGetPlayoutData(long nativeAudioTrackJni, int bytes);
// Sets all samples to be played out to zero if |mute| is true, i.e., // Sets all samples to be played out to zero if `mute` is true, i.e.,
// ensures that the speaker is muted. // ensures that the speaker is muted.
public void setSpeakerMute(boolean mute) { public void setSpeakerMute(boolean mute) {
Logging.w(TAG, "setSpeakerMute(" + mute + ")"); Logging.w(TAG, "setSpeakerMute(" + mute + ")");

View file

@ -31,14 +31,14 @@ static ScopedJavaLocalRef<jobject> JNI_Metrics_GetAndReset(JNIEnv* jni) {
std::map<std::string, std::unique_ptr<metrics::SampleInfo>> histograms; std::map<std::string, std::unique_ptr<metrics::SampleInfo>> histograms;
metrics::GetAndReset(&histograms); metrics::GetAndReset(&histograms);
for (const auto& kv : histograms) { for (const auto& kv : histograms) {
// Create and add samples to |HistogramInfo|. // Create and add samples to `HistogramInfo`.
ScopedJavaLocalRef<jobject> j_info = Java_HistogramInfo_Constructor( ScopedJavaLocalRef<jobject> j_info = Java_HistogramInfo_Constructor(
jni, kv.second->min, kv.second->max, jni, kv.second->min, kv.second->max,
static_cast<int>(kv.second->bucket_count)); static_cast<int>(kv.second->bucket_count));
for (const auto& sample : kv.second->samples) { for (const auto& sample : kv.second->samples) {
Java_HistogramInfo_addSample(jni, j_info, sample.first, sample.second); Java_HistogramInfo_addSample(jni, j_info, sample.first, sample.second);
} }
// Add |HistogramInfo| to |Metrics|. // Add `HistogramInfo` to `Metrics`.
ScopedJavaLocalRef<jstring> j_name = NativeToJavaString(jni, kv.first); ScopedJavaLocalRef<jstring> j_name = NativeToJavaString(jni, kv.first);
Java_Metrics_add(jni, j_metrics, j_name, j_info); Java_Metrics_add(jni, j_metrics, j_name, j_info);
} }

View file

@ -376,7 +376,7 @@ rtc::NetworkBindingResult AndroidNetworkMonitor::BindSocketToNetwork(
rv = lollipopSetNetworkForSocket(*network_handle, socket_fd); rv = lollipopSetNetworkForSocket(*network_handle, socket_fd);
} }
// If |network| has since disconnected, |rv| will be ENONET. Surface this as // If `network` has since disconnected, `rv` will be ENONET. Surface this as
// ERR_NETWORK_CHANGED, rather than MapSystemError(ENONET) which gives back // ERR_NETWORK_CHANGED, rather than MapSystemError(ENONET) which gives back
// the less descriptive ERR_FAILED. // the less descriptive ERR_FAILED.
if (rv == 0) { if (rv == 0) {
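Binding a socket can race with the network disconnecting; the comment explains that ENONET is then surfaced as a dedicated network-changed error rather than a generic failure. A hedged C++ sketch of that mapping (the bind call itself is out of scope here):

```cpp
#include <cerrno>

enum class BindResult { kSuccess, kNetworkChanged, kFailure };

// Maps the raw bind result: 0 means success, ENONET means the network went
// away between lookup and bind (report it as a network change), anything else
// is a plain failure. ENONET is Linux/Android-specific.
BindResult MapBindResult(int rv) {
  if (rv == 0) return BindResult::kSuccess;
  if (rv == ENONET) return BindResult::kNetworkChanged;
  return BindResult::kFailure;
}
```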

View file

@ -76,7 +76,7 @@ class AndroidNetworkMonitor : public rtc::NetworkMonitorInterface {
void Start() override; void Start() override;
void Stop() override; void Stop() override;
// Does |this| NetworkMonitorInterface implement BindSocketToNetwork? // Does `this` NetworkMonitorInterface implement BindSocketToNetwork?
// Only Android returns true. // Only Android returns true.
virtual bool SupportsBindSocketToNetwork() const override { return true; } virtual bool SupportsBindSocketToNetwork() const override { return true; }

View file

@ -200,7 +200,7 @@ aaudio_data_callback_result_t AAudioPlayer::OnDataCallback(void* audio_data,
} }
// Read audio data from the WebRTC source using the FineAudioBuffer object // Read audio data from the WebRTC source using the FineAudioBuffer object
// and write that data into |audio_data| to be played out by AAudio. // and write that data into `audio_data` to be played out by AAudio.
// Prime output with zeros during a short initial phase to avoid distortion. // Prime output with zeros during a short initial phase to avoid distortion.
// TODO(henrika): do more work to figure out if the initial forced silence // TODO(henrika): do more work to figure out if the initial forced silence
// period is really needed. // period is really needed.
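The playout callback primes the output with zeros for a short initial phase and only then starts pulling real audio from the WebRTC source. A C++ sketch of that gating, with hypothetical names and an illustrative priming length:

```cpp
#include <cstdint>
#include <cstring>

constexpr int kPrimingCallbacks = 10;  // Illustrative length of the silence phase.
int callbacks_seen = 0;

// Placeholder for the real source pull (FineAudioBuffer in the SDK).
void PullAudioFromSource(int16_t* dst, int num_samples) {
  std::memset(dst, 0, num_samples * sizeof(int16_t));
}

// Fills the output buffer: zeros during priming, real audio afterwards.
void OnPlayoutData(int16_t* audio_data, int num_samples) {
  if (callbacks_seen++ < kPrimingCallbacks) {
    std::memset(audio_data, 0, num_samples * sizeof(int16_t));
    return;
  }
  PullAudioFromSource(audio_data, num_samples);
}
```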

View file

@ -81,8 +81,8 @@ class AAudioPlayer final : public AudioOutput,
protected: protected:
// AAudioObserverInterface implementation. // AAudioObserverInterface implementation.
// For an output stream, this function should render and write |num_frames| // For an output stream, this function should render and write `num_frames`
// of data in the stream's current data format to the |audio_data| buffer. // of data in the stream's current data format to the `audio_data` buffer.
// Called on a real-time thread owned by AAudio. // Called on a real-time thread owned by AAudio.
aaudio_data_callback_result_t OnDataCallback(void* audio_data, aaudio_data_callback_result_t OnDataCallback(void* audio_data,
int32_t num_frames) override; int32_t num_frames) override;

View file

@ -157,7 +157,7 @@ void AAudioRecorder::OnErrorCallback(aaudio_result_t error) {
} }
} }
// Read and process |num_frames| of data from the |audio_data| buffer. // Read and process `num_frames` of data from the `audio_data` buffer.
// TODO(henrika): possibly add trace here to be included in systrace. // TODO(henrika): possibly add trace here to be included in systrace.
// See https://developer.android.com/studio/profile/systrace-commandline.html. // See https://developer.android.com/studio/profile/systrace-commandline.html.
aaudio_data_callback_result_t AAudioRecorder::OnDataCallback( aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
@ -191,7 +191,7 @@ aaudio_data_callback_result_t AAudioRecorder::OnDataCallback(
RTC_DLOG(INFO) << "input latency: " << latency_millis_ RTC_DLOG(INFO) << "input latency: " << latency_millis_
<< ", num_frames: " << num_frames; << ", num_frames: " << num_frames;
} }
// Copy recorded audio in |audio_data| to the WebRTC sink using the // Copy recorded audio in `audio_data` to the WebRTC sink using the
// FineAudioBuffer object. // FineAudioBuffer object.
fine_audio_buffer_->DeliverRecordedData( fine_audio_buffer_->DeliverRecordedData(
rtc::MakeArrayView(static_cast<const int16_t*>(audio_data), rtc::MakeArrayView(static_cast<const int16_t*>(audio_data),

View file

@ -72,8 +72,8 @@ class AAudioRecorder : public AudioInput,
protected: protected:
// AAudioObserverInterface implementation. // AAudioObserverInterface implementation.
// For an input stream, this function should read |num_frames| of recorded // For an input stream, this function should read `num_frames` of recorded
// data, in the stream's current data format, from the |audio_data| buffer. // data, in the stream's current data format, from the `audio_data` buffer.
// Called on a real-time thread owned by AAudio. // Called on a real-time thread owned by AAudio.
aaudio_data_callback_result_t OnDataCallback(void* audio_data, aaudio_data_callback_result_t OnDataCallback(void* audio_data,
int32_t num_frames) override; int32_t num_frames) override;

View file

@ -253,8 +253,8 @@ void AudioRecordJni::DataIsRecorded(JNIEnv* env,
audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_, audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
frames_per_buffer_); frames_per_buffer_);
// We provide one (combined) fixed delay estimate for the APM and use the // We provide one (combined) fixed delay estimate for the APM and use the
// |playDelayMs| parameter only. Components like the AEC only see the sum // `playDelayMs` parameter only. Components like the AEC only see the sum
// of |playDelayMs| and |recDelayMs|, hence the distribution does not matter. // of `playDelayMs` and `recDelayMs`, hence the distribution does not matter.
audio_device_buffer_->SetVQEData(total_delay_ms_, 0); audio_device_buffer_->SetVQEData(total_delay_ms_, 0);
if (audio_device_buffer_->DeliverRecordedData() == -1) { if (audio_device_buffer_->DeliverRecordedData() == -1) {
RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed"; RTC_LOG(INFO) << "AudioDeviceBuffer::DeliverRecordedData failed";

View file

@ -74,8 +74,8 @@ class AudioRecordJni : public AudioInput {
int32_t EnableBuiltInNS(bool enable) override; int32_t EnableBuiltInNS(bool enable) override;
// Called from the Java side so we can cache the address of the Java-managed // Called from the Java side so we can cache the address of the Java-managed
// |byte_buffer| in |direct_buffer_address_|. The size of the buffer // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
// is also stored in |direct_buffer_capacity_in_bytes_|. // is also stored in `direct_buffer_capacity_in_bytes_`.
// This method will be called by the WebRtcAudioRecord constructor, i.e., // This method will be called by the WebRtcAudioRecord constructor, i.e.,
// on the same thread that this object is created on. // on the same thread that this object is created on.
void CacheDirectBufferAddress(JNIEnv* env, void CacheDirectBufferAddress(JNIEnv* env,
@ -83,8 +83,8 @@ class AudioRecordJni : public AudioInput {
const JavaParamRef<jobject>& byte_buffer); const JavaParamRef<jobject>& byte_buffer);
// Called periodically by the Java based WebRtcAudioRecord object when // Called periodically by the Java based WebRtcAudioRecord object when
// recording has started. Each call indicates that there are |length| new // recording has started. Each call indicates that there are `length` new
// bytes recorded in the memory area |direct_buffer_address_| and it is // bytes recorded in the memory area `direct_buffer_address_` and it is
// now time to send these to the consumer. // now time to send these to the consumer.
// This method is called on a high-priority thread from Java. The name of // This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioRecordThread'. // the thread is 'AudioRecordThread'.
@ -111,10 +111,10 @@ class AudioRecordJni : public AudioInput {
// possible values. See audio_common.h for details. // possible values. See audio_common.h for details.
const int total_delay_ms_; const int total_delay_ms_;
// Cached copy of address to direct audio buffer owned by |j_audio_record_|. // Cached copy of address to direct audio buffer owned by `j_audio_record_`.
void* direct_buffer_address_; void* direct_buffer_address_;
// Number of bytes in the direct audio buffer owned by |j_audio_record_|. // Number of bytes in the direct audio buffer owned by `j_audio_record_`.
size_t direct_buffer_capacity_in_bytes_; size_t direct_buffer_capacity_in_bytes_;
// Number of audio frames per audio buffer. Each audio frame corresponds to // Number of audio frames per audio buffer. Each audio frame corresponds to
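CacheDirectBufferAddress() stores the address and capacity of the Java-allocated direct ByteBuffer so later recording callbacks can read bytes without extra JNI calls. A sketch using the standard JNI direct-buffer accessors, hedged as illustrative rather than the SDK's exact code:

```cpp
#include <jni.h>

#include <cstddef>

struct CachedAudioBuffer {
  void* address = nullptr;
  size_t capacity_bytes = 0;
};

// Caches the direct buffer's address and capacity once; afterwards, when Java
// reports that `length` new bytes were recorded, the native side reads them
// straight from `address` with no further JNI round trips.
void CacheDirectBufferAddress(JNIEnv* env,
                              jobject byte_buffer,
                              CachedAudioBuffer* cache) {
  cache->address = env->GetDirectBufferAddress(byte_buffer);
  cache->capacity_bytes =
      static_cast<size_t>(env->GetDirectBufferCapacity(byte_buffer));
}
```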

View file

@ -71,14 +71,14 @@ class AudioTrackJni : public AudioOutput {
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;
// Called from the Java side so we can cache the address of the Java-managed // Called from the Java side so we can cache the address of the Java-managed
// |byte_buffer| in |direct_buffer_address_|. The size of the buffer // `byte_buffer` in `direct_buffer_address_`. The size of the buffer
// is also stored in |direct_buffer_capacity_in_bytes_|. // is also stored in `direct_buffer_capacity_in_bytes_`.
// Called on the same thread as the creating thread. // Called on the same thread as the creating thread.
void CacheDirectBufferAddress(JNIEnv* env, void CacheDirectBufferAddress(JNIEnv* env,
const JavaParamRef<jobject>& byte_buffer); const JavaParamRef<jobject>& byte_buffer);
// Called periodically by the Java based WebRtcAudioTrack object when // Called periodically by the Java based WebRtcAudioTrack object when
// playout has started. Each call indicates that |length| new bytes should // playout has started. Each call indicates that `length` new bytes should
// be written to the memory area |direct_buffer_address_| for playout. // be written to the memory area `direct_buffer_address_` for playout.
// This method is called on a high-priority thread from Java. The name of // This method is called on a high-priority thread from Java. The name of
// the thread is 'AudioTrackThread'. // the thread is 'AudioTrackThread'.
void GetPlayoutData(JNIEnv* env, size_t length); void GetPlayoutData(JNIEnv* env, size_t length);
@ -99,10 +99,10 @@ class AudioTrackJni : public AudioOutput {
// AudioManager. // AudioManager.
const AudioParameters audio_parameters_; const AudioParameters audio_parameters_;
// Cached copy of address to direct audio buffer owned by |j_audio_track_|. // Cached copy of address to direct audio buffer owned by `j_audio_track_`.
void* direct_buffer_address_; void* direct_buffer_address_;
// Number of bytes in the direct audio buffer owned by |j_audio_track_|. // Number of bytes in the direct audio buffer owned by `j_audio_track_`.
size_t direct_buffer_capacity_in_bytes_; size_t direct_buffer_capacity_in_bytes_;
// Number of audio frames per audio buffer. Each audio frame corresponds to // Number of audio frames per audio buffer. Each audio frame corresponds to

View file

@ -95,7 +95,7 @@ class OpenSLESPlayer : public AudioOutput {
// Reads audio data in PCM format using the AudioDeviceBuffer. // Reads audio data in PCM format using the AudioDeviceBuffer.
// Can be called both on the main thread (during Start()) and from the // Can be called both on the main thread (during Start()) and from the
// internal audio thread while output streaming is active. // internal audio thread while output streaming is active.
// If the |silence| flag is set, the audio is filled with zeros instead of // If the `silence` flag is set, the audio is filled with zeros instead of
// asking the WebRTC layer for real audio data. This procedure is also known // asking the WebRTC layer for real audio data. This procedure is also known
// as audio priming. // as audio priming.
void EnqueuePlayoutData(bool silence); void EnqueuePlayoutData(bool silence);
@ -106,7 +106,7 @@ class OpenSLESPlayer : public AudioOutput {
// Obtains the SL Engine Interface from the existing global Engine object. // Obtains the SL Engine Interface from the existing global Engine object.
// The interface exposes creation methods of all the OpenSL ES object types. // The interface exposes creation methods of all the OpenSL ES object types.
// This method defines the |engine_| member variable. // This method defines the `engine_` member variable.
bool ObtainEngineInterface(); bool ObtainEngineInterface();
// Creates/destroys the output mix object. // Creates/destroys the output mix object.

View file

@ -88,7 +88,7 @@ class OpenSLESRecorder : public AudioInput {
private: private:
// Obtains the SL Engine Interface from the existing global Engine object. // Obtains the SL Engine Interface from the existing global Engine object.
// The interface exposes creation methods of all the OpenSL ES object types. // The interface exposes creation methods of all the OpenSL ES object types.
// This method defines the |engine_| member variable. // This method defines the `engine_` member variable.
bool ObtainEngineInterface(); bool ObtainEngineInterface();
// Creates/destroys the audio recorder and the simple-buffer queue object. // Creates/destroys the audio recorder and the simple-buffer queue object.
@ -109,7 +109,7 @@ class OpenSLESRecorder : public AudioInput {
// Wraps calls to SLAndroidSimpleBufferQueueState::Enqueue() and it can be // Wraps calls to SLAndroidSimpleBufferQueueState::Enqueue() and it can be
// called both on the main thread (but before recording has started) and from // called both on the main thread (but before recording has started) and from
// the internal audio thread while input streaming is active. It uses // the internal audio thread while input streaming is active. It uses
// |simple_buffer_queue_| but no lock is needed since the initial calls from // `simple_buffer_queue_` but no lock is needed since the initial calls from
// the main thread and the native callback thread are mutually exclusive. // the main thread and the native callback thread are mutually exclusive.
bool EnqueueAudioBuffer(); bool EnqueueAudioBuffer();

View file

@ -15,7 +15,7 @@
namespace webrtc { namespace webrtc {
// If |atomic_class_id| is set, it'll return immediately. Otherwise, it will look // If `atomic_class_id` is set, it'll return immediately. Otherwise, it will look
// up the class and store it. If there's a race, we take care to only store one // up the class and store it. If there's a race, we take care to only store one
// global reference (and the duplicated effort will happen only once). // global reference (and the duplicated effort will happen only once).
jclass LazyGetClass(JNIEnv* env, jclass LazyGetClass(JNIEnv* env,
@ -29,18 +29,18 @@ jclass LazyGetClass(JNIEnv* env,
jclass cas_result = nullptr; jclass cas_result = nullptr;
if (std::atomic_compare_exchange_strong(atomic_class_id, &cas_result, if (std::atomic_compare_exchange_strong(atomic_class_id, &cas_result,
clazz.obj())) { clazz.obj())) {
// We successfully stored |clazz| in |atomic_class_id|, so we are // We successfully stored `clazz` in `atomic_class_id`, so we are
// intentionally leaking the global ref since it's now stored there. // intentionally leaking the global ref since it's now stored there.
return clazz.Release(); return clazz.Release();
} else { } else {
// Some other thread came before us and stored a global pointer in // Some other thread came before us and stored a global pointer in
// |atomic_class_id|. Release our global ref and return the ref from the // `atomic_class_id`. Release our global ref and return the ref from the
// other thread. // other thread.
return cas_result; return cas_result;
} }
} }
// If |atomic_method_id| is set, it'll return immediately. Otherwise, it will look // If `atomic_method_id` is set, it'll return immediately. Otherwise, it will look
// up the method id and store it. If there's a race, it's ok since the values // up the method id and store it. If there's a race, it's ok since the values
// are the same (and the duplicated effort will happen only once). // are the same (and the duplicated effort will happen only once).
template <MethodID::Type type> template <MethodID::Type type>
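LazyGetClass() caches a global class reference in an atomic: the thread that wins the compare-exchange intentionally "leaks" its ref (it now lives in the atomic), while a loser releases its copy and uses the stored one. A generic C++ sketch of the same pattern with std::atomic, using a plain heap object instead of a JNI global ref:

```cpp
#include <atomic>
#include <string>

// Stand-in for creating the expensive shared resource (a global JNI class
// reference in the real code).
std::string* CreateResource() { return new std::string("resource"); }

// Returns the cached pointer, creating it on first use. If two threads race,
// exactly one pointer is stored; the loser deletes its copy and returns the
// winner's value, so only one object is ever kept alive (intentionally, for
// the program's lifetime).
std::string* LazyGet(std::atomic<std::string*>* cache) {
  std::string* existing = cache->load(std::memory_order_acquire);
  if (existing) return existing;
  std::string* created = CreateResource();
  std::string* expected = nullptr;
  if (cache->compare_exchange_strong(expected, created,
                                     std::memory_order_acq_rel)) {
    return created;  // We won the race; the cached value is ours.
  }
  delete created;   // Another thread won; discard ours and use theirs.
  return expected;  // compare_exchange_strong wrote the stored value here.
}
```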

View file

@ -44,11 +44,11 @@
namespace webrtc { namespace webrtc {
// This function will initialize |atomic_class_id| to contain a global ref to // This function will initialize `atomic_class_id` to contain a global ref to
// the given class, and will return that ref on subsequent calls. The caller is // the given class, and will return that ref on subsequent calls. The caller is
// responsible to zero-initialize |atomic_class_id|. It's fine to // responsible to zero-initialize `atomic_class_id`. It's fine to
// simultaneously call this on multiple threads referencing the same // simultaneously call this on multiple threads referencing the same
// |atomic_method_id|. // `atomic_method_id`.
jclass LazyGetClass(JNIEnv* env, jclass LazyGetClass(JNIEnv* env,
const char* class_name, const char* class_name,
std::atomic<jclass>* atomic_class_id); std::atomic<jclass>* atomic_class_id);
@ -61,11 +61,11 @@ class MethodID {
TYPE_INSTANCE, TYPE_INSTANCE,
}; };
// This function will initialize |atomic_method_id| to contain a ref to // This function will initialize `atomic_method_id` to contain a ref to
// the given method, and will return that ref on subsequent calls. The caller // the given method, and will return that ref on subsequent calls. The caller
// is responsible to zero-initialize |atomic_method_id|. It's fine to // is responsible to zero-initialize `atomic_method_id`. It's fine to
// simultaneously call this on multiple threads referencing the same // simultaneously call this on multiple threads referencing the same
// |atomic_method_id|. // `atomic_method_id`.
template <Type type> template <Type type>
static jmethodID LazyGet(JNIEnv* env, static jmethodID LazyGet(JNIEnv* env,
jclass clazz, jclass clazz,
@ -151,7 +151,7 @@ struct BASE_EXPORT JniJavaCallContextChecked {
const char* jni_signature, const char* jni_signature,
std::atomic<jmethodID>* atomic_method_id) { std::atomic<jmethodID>* atomic_method_id) {
base.Init<type>(env, clazz, method_name, jni_signature, atomic_method_id); base.Init<type>(env, clazz, method_name, jni_signature, atomic_method_id);
// Reset |pc| to correct caller. // Reset `pc` to correct caller.
base.pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0)); base.pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
} }

View file

@ -27,7 +27,7 @@ static JavaVM* g_jvm = nullptr;
static pthread_once_t g_jni_ptr_once = PTHREAD_ONCE_INIT; static pthread_once_t g_jni_ptr_once = PTHREAD_ONCE_INIT;
// Key for per-thread JNIEnv* data. Non-NULL in threads attached to |g_jvm| by // Key for per-thread JNIEnv* data. Non-NULL in threads attached to `g_jvm` by
// AttachCurrentThreadIfNeeded(), NULL in unattached threads and threads that // AttachCurrentThreadIfNeeded(), NULL in unattached threads and threads that
// were attached by the JVM because of a Java->native call. // were attached by the JVM because of a Java->native call.
static pthread_key_t g_jni_ptr; static pthread_key_t g_jni_ptr;
@ -48,7 +48,7 @@ JNIEnv* GetEnv() {
} }
static void ThreadDestructor(void* prev_jni_ptr) { static void ThreadDestructor(void* prev_jni_ptr) {
// This function only runs on threads where |g_jni_ptr| is non-NULL, meaning // This function only runs on threads where `g_jni_ptr` is non-NULL, meaning
// we were responsible for originally attaching the thread, so are responsible // we were responsible for originally attaching the thread, so are responsible
// for detaching it now. However, because some JVM implementations (notably // for detaching it now. However, because some JVM implementations (notably
// Oracle's http://goo.gl/eHApYT) also use the pthread_key_create mechanism, // Oracle's http://goo.gl/eHApYT) also use the pthread_key_create mechanism,
@ -102,7 +102,7 @@ static std::string GetThreadName() {
return std::string(name); return std::string(name);
} }
// Return a |JNIEnv*| usable on this thread. Attaches to |g_jvm| if necessary. // Return a |JNIEnv*| usable on this thread. Attaches to `g_jvm` if necessary.
JNIEnv* AttachCurrentThreadIfNeeded() { JNIEnv* AttachCurrentThreadIfNeeded() {
JNIEnv* jni = GetEnv(); JNIEnv* jni = GetEnv();
if (jni) if (jni)
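AttachCurrentThreadIfNeeded() keeps a per-thread pointer in a pthread key whose destructor detaches only the threads this code attached itself. A hedged sketch of the thread-local-with-destructor pattern; the VM attach/detach calls are replaced by hypothetical stubs:

```cpp
#include <pthread.h>

static pthread_key_t g_env_key;
static pthread_once_t g_env_key_once = PTHREAD_ONCE_INIT;

// Hypothetical stand-ins for attaching/detaching the current thread to a VM.
void* AttachThisThread() { return new int(0); }
void DetachThisThread(void* env) { delete static_cast<int*>(env); }

// Runs only on threads where the key was set, i.e. threads we attached, so we
// are also the ones responsible for detaching them on exit.
void ThreadDestructor(void* env) {
  if (env) DetachThisThread(env);
}

void CreateEnvKey() {
  pthread_key_create(&g_env_key, &ThreadDestructor);
}

// Returns the per-thread "env", attaching the calling thread on first use.
void* AttachCurrentThreadIfNeeded() {
  pthread_once(&g_env_key_once, &CreateEnvKey);
  void* env = pthread_getspecific(g_env_key);
  if (!env) {
    env = AttachThisThread();
    pthread_setspecific(g_env_key, env);
  }
  return env;
}
```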

View file

@ -23,7 +23,7 @@ JNIEnv* GetEnv();
JavaVM* GetJVM(); JavaVM* GetJVM();
// Return a |JNIEnv*| usable on this thread. Attaches to |g_jvm| if necessary. // Return a |JNIEnv*| usable on this thread. Attaches to `g_jvm` if necessary.
JNIEnv* AttachCurrentThreadIfNeeded(); JNIEnv* AttachCurrentThreadIfNeeded();
} // namespace jni } // namespace jni

View file

@ -49,7 +49,7 @@ JavaMediaStream::JavaMediaStream(
observer_->SignalVideoTrackAdded.connect( observer_->SignalVideoTrackAdded.connect(
this, &JavaMediaStream::OnVideoTrackAddedToStream); this, &JavaMediaStream::OnVideoTrackAddedToStream);
// |j_media_stream| holds one reference. Corresponding Release() is in // `j_media_stream` holds one reference. Corresponding Release() is in
// MediaStream_free, triggered by MediaStream.dispose(). // MediaStream_free, triggered by MediaStream.dispose().
media_stream.release(); media_stream.release();
} }

View file

@ -499,7 +499,7 @@ static ScopedJavaLocalRef<jobject> JNI_PeerConnection_GetLocalDescription(
const JavaParamRef<jobject>& j_pc) { const JavaParamRef<jobject>& j_pc) {
PeerConnectionInterface* pc = ExtractNativePC(jni, j_pc); PeerConnectionInterface* pc = ExtractNativePC(jni, j_pc);
// It's only safe to operate on SessionDescriptionInterface on the // It's only safe to operate on SessionDescriptionInterface on the
// signaling thread, but |jni| may only be used on the current thread, so we // signaling thread, but `jni` may only be used on the current thread, so we
// must do this odd dance. // must do this odd dance.
std::string sdp; std::string sdp;
std::string type; std::string type;
@ -518,7 +518,7 @@ static ScopedJavaLocalRef<jobject> JNI_PeerConnection_GetRemoteDescription(
const JavaParamRef<jobject>& j_pc) { const JavaParamRef<jobject>& j_pc) {
PeerConnectionInterface* pc = ExtractNativePC(jni, j_pc); PeerConnectionInterface* pc = ExtractNativePC(jni, j_pc);
// It's only safe to operate on SessionDescriptionInterface on the // It's only safe to operate on SessionDescriptionInterface on the
// signaling thread, but |jni| may only be used on the current thread, so we // signaling thread, but `jni` may only be used on the current thread, so we
// must do this odd dance. // must do this odd dance.
std::string sdp; std::string sdp;
std::string type; std::string type;
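The binding works around a thread-affinity mismatch: the description may only be touched on the signaling thread, while `jni` is only valid on the calling thread, so the SDP is copied into plain strings first and converted to Java objects afterwards. A hedged C++ sketch of that hand-off, with a hypothetical run-on-signaling-thread helper:

```cpp
#include <functional>
#include <string>

struct SdpSnapshot {
  std::string type;
  std::string sdp;
};

// Hypothetical helper that runs `task` synchronously on the signaling thread.
void RunOnSignalingThread(const std::function<void()>& task) { task(); }

// Copies the description into plain strings while on the signaling thread;
// the caller can then build Java objects with its own JNIEnv.
SdpSnapshot SnapshotDescription(
    const std::function<SdpSnapshot()>& read_on_signaling_thread) {
  SdpSnapshot copy;
  RunOnSignalingThread([&] { copy = read_on_signaling_thread(); });
  return copy;
}
```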

View file

@ -242,9 +242,9 @@ static void JNI_PeerConnectionFactory_ShutdownInternalTracer(JNIEnv* jni) {
} }
// Following parameters are optional: // Following parameters are optional:
// |audio_device_module|, |jencoder_factory|, |jdecoder_factory|, // `audio_device_module`, `jencoder_factory`, `jdecoder_factory`,
// |audio_processor|, |fec_controller_factory|, // `audio_processor`, `fec_controller_factory`,
// |network_state_predictor_factory|, |neteq_factory|. // `network_state_predictor_factory`, `neteq_factory`.
ScopedJavaLocalRef<jobject> CreatePeerConnectionFactoryForJava( ScopedJavaLocalRef<jobject> CreatePeerConnectionFactoryForJava(
JNIEnv* jni, JNIEnv* jni,
const JavaParamRef<jobject>& jcontext, const JavaParamRef<jobject>& jcontext,

View file

@ -18,7 +18,7 @@
namespace webrtc { namespace webrtc {
namespace jni { namespace jni {
// Creates a Java PeerConnectionFactory with the specified |pcf|. // Creates a Java PeerConnectionFactory with the specified `pcf`.
jobject NativeToJavaPeerConnectionFactory( jobject NativeToJavaPeerConnectionFactory(
JNIEnv* jni, JNIEnv* jni,
rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> pcf, rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> pcf,

View file

@ -23,7 +23,7 @@ ScopedJavaLocalRef<jobject> NativeToJavaRtpReceiver(
JNIEnv* env, JNIEnv* env,
rtc::scoped_refptr<RtpReceiverInterface> receiver); rtc::scoped_refptr<RtpReceiverInterface> receiver);
// Takes ownership of the passed |j_receiver| and stores it as a global // Takes ownership of the passed `j_receiver` and stores it as a global
// reference. Will call dispose() in the dtor. // reference. Will call dispose() in the dtor.
class JavaRtpReceiverGlobalOwner { class JavaRtpReceiverGlobalOwner {
public: public:

View file

@ -27,7 +27,7 @@ ScopedJavaLocalRef<jobject> NativeToJavaRtpTransceiver(
JNIEnv* env, JNIEnv* env,
rtc::scoped_refptr<RtpTransceiverInterface> transceiver); rtc::scoped_refptr<RtpTransceiverInterface> transceiver);
// This takes ownership of the |j_transceiver| and stores it as a global // This takes ownership of the `j_transceiver` and stores it as a global
// reference. This calls the Java Transceiver's dispose() method with the dtor. // reference. This calls the Java Transceiver's dispose() method with the dtor.
class JavaRtpTransceiverGlobalOwner { class JavaRtpTransceiverGlobalOwner {
public: public:

View file

@ -267,7 +267,7 @@ void VideoEncoderWrapper::OnEncodedFrame(
frame_extra_infos_.pop_front(); frame_extra_infos_.pop_front();
} }
// This is a bit subtle. The |frame| variable from the lambda capture is // This is a bit subtle. The `frame` variable from the lambda capture is
// const. Which implies that (i) we need to make a copy to be able to // const. Which implies that (i) we need to make a copy to be able to
// write to the metadata, and (ii) we should avoid using the .data() // write to the metadata, and (ii) we should avoid using the .data()
// method (including implicit conversion to ArrayView) on the non-const // method (including implicit conversion to ArrayView) on the non-const

View file

@ -41,8 +41,8 @@ class AndroidVideoBuffer : public VideoFrameBuffer {
const ScopedJavaGlobalRef<jobject>& video_frame_buffer() const; const ScopedJavaGlobalRef<jobject>& video_frame_buffer() const;
// Crops a region defined by |crop_x|, |crop_y|, |crop_width| and // Crops a region defined by `crop_x`, `crop_y`, `crop_width` and
// |crop_height|. Scales it to size |scale_width| x |scale_height|. // `crop_height`. Scales it to size `scale_width` x `scale_height`.
rtc::scoped_refptr<VideoFrameBuffer> CropAndScale(int crop_x, rtc::scoped_refptr<VideoFrameBuffer> CropAndScale(int crop_x,
int crop_y, int crop_y,
int crop_width, int crop_width,

View file

@ -17,8 +17,8 @@ namespace webrtc {
namespace { namespace {
// Find the highest-priority instance of the T-valued constraint named by // Find the highest-priority instance of the T-valued constraint named by
// |key| and return its value as |value|. |constraints| can be null. // `key` and return its value as `value`. `constraints` can be null.
// If |mandatory_constraints| is non-null, it is incremented if the key appears // If `mandatory_constraints` is non-null, it is incremented if the key appears
// among the mandatory constraints. // among the mandatory constraints.
// Returns true if the key was found and has a valid value for type T. // Returns true if the key was found and has a valid value for type T.
// If the key appears multiple times as an optional constraint, appearances // If the key appears multiple times as an optional constraint, appearances
@ -135,8 +135,8 @@ const char MediaConstraints::kRawPacketizationForVideoEnabled[] =
const char MediaConstraints::kNumSimulcastLayers[] = "googNumSimulcastLayers"; const char MediaConstraints::kNumSimulcastLayers[] = "googNumSimulcastLayers";
// Set |value| to the value associated with the first appearance of |key|, or // Set `value` to the value associated with the first appearance of `key`, or
// return false if |key| is not found. // return false if `key` is not found.
bool MediaConstraints::Constraints::FindFirst(const std::string& key, bool MediaConstraints::Constraints::FindFirst(const std::string& key,
std::string* value) const { std::string* value) const {
for (Constraints::const_iterator iter = begin(); iter != end(); ++iter) { for (Constraints::const_iterator iter = begin(); iter != end(); ++iter) {
@ -209,7 +209,7 @@ void CopyConstraintsIntoAudioOptions(const MediaConstraints* constraints,
ConstraintToOptional<std::string>( ConstraintToOptional<std::string>(
constraints, MediaConstraints::kAudioNetworkAdaptorConfig, constraints, MediaConstraints::kAudioNetworkAdaptorConfig,
&options->audio_network_adaptor_config); &options->audio_network_adaptor_config);
// When |kAudioNetworkAdaptorConfig| is defined, it both means that audio // When `kAudioNetworkAdaptorConfig` is defined, it both means that audio
// network adaptor is desired, and provides the config string. // network adaptor is desired, and provides the config string.
if (options->audio_network_adaptor_config) { if (options->audio_network_adaptor_config) {
options->audio_network_adaptor = true; options->audio_network_adaptor = true;
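FindFirst() scans the constraint list and returns the value of the first entry whose key matches, ignoring later duplicates. A minimal C++ sketch over a vector of key/value pairs:

```cpp
#include <string>
#include <utility>
#include <vector>

using Constraints = std::vector<std::pair<std::string, std::string>>;

// Sets `*value` to the value of the first constraint named `key`, or returns
// false if no such constraint exists.
bool FindFirst(const Constraints& constraints,
               const std::string& key,
               std::string* value) {
  for (const auto& constraint : constraints) {
    if (constraint.first == key) {
      *value = constraint.second;
      return true;
    }
  }
  return false;
}
```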

View file

@ -20,7 +20,7 @@ RTC_OBJC_EXPORT
- (instancetype)init NS_UNAVAILABLE; - (instancetype)init NS_UNAVAILABLE;
// Sets the volume for the RTCMediaSource. |volume| is a gain value in the range // Sets the volume for the RTCMediaSource. `volume` is a gain value in the range
// [0, 10]. // [0, 10].
// Temporary fix to be able to modify volume of remote audio tracks. // Temporary fix to be able to modify volume of remote audio tracks.
// TODO(kthelgason): Property stays here temporarily until a proper volume-api // TODO(kthelgason): Property stays here temporarily until a proper volume-api

View file

@ -84,7 +84,7 @@ RTC_OBJC_EXPORT
@property(nonatomic, nullable) RTC_OBJC_TYPE(RTCCertificate) * certificate; @property(nonatomic, nullable) RTC_OBJC_TYPE(RTCCertificate) * certificate;
/** Which candidates the ICE agent is allowed to use. The W3C calls it /** Which candidates the ICE agent is allowed to use. The W3C calls it
* |iceTransportPolicy|, while in C++ it is called |type|. */ * `iceTransportPolicy`, while in C++ it is called `type`. */
@property(nonatomic, assign) RTCIceTransportPolicy iceTransportPolicy; @property(nonatomic, assign) RTCIceTransportPolicy iceTransportPolicy;
/** The media-bundling policy to use when gathering ICE candidates. */ /** The media-bundling policy to use when gathering ICE candidates. */
@ -144,7 +144,7 @@ RTC_OBJC_EXPORT
*/ */
@property(nonatomic, assign) BOOL shouldPresumeWritableWhenFullyRelayed; @property(nonatomic, assign) BOOL shouldPresumeWritableWhenFullyRelayed;
/* This flag is only effective when |continualGatheringPolicy| is /* This flag is only effective when `continualGatheringPolicy` is
* RTCContinualGatheringPolicyGatherContinually. * RTCContinualGatheringPolicyGatherContinually.
* *
* If YES, after the ICE transport type is changed such that new types of * If YES, after the ICE transport type is changed such that new types of

View file

@ -21,13 +21,13 @@ RTC_OBJC_EXPORT
/** NSData representation of the underlying buffer. */ /** NSData representation of the underlying buffer. */
@property(nonatomic, readonly) NSData *data; @property(nonatomic, readonly) NSData *data;
/** Indicates whether |data| contains UTF-8 or binary data. */ /** Indicates whether `data` contains UTF-8 or binary data. */
@property(nonatomic, readonly) BOOL isBinary; @property(nonatomic, readonly) BOOL isBinary;
- (instancetype)init NS_UNAVAILABLE; - (instancetype)init NS_UNAVAILABLE;
/** /**
* Initialize an RTCDataBuffer from NSData. |isBinary| indicates whether |data| * Initialize an RTCDataBuffer from NSData. `isBinary` indicates whether `data`
* contains UTF-8 or binary data. * contains UTF-8 or binary data.
*/ */
- (instancetype)initWithData:(NSData *)data isBinary:(BOOL)isBinary; - (instancetype)initWithData:(NSData *)data isBinary:(BOOL)isBinary;
@ -47,7 +47,7 @@ RTC_OBJC_EXPORT
didReceiveMessageWithBuffer:(RTC_OBJC_TYPE(RTCDataBuffer) *)buffer; didReceiveMessageWithBuffer:(RTC_OBJC_TYPE(RTCDataBuffer) *)buffer;
@optional @optional
/** The data channel's |bufferedAmount| changed. */ /** The data channel's `bufferedAmount` changed. */
- (void)dataChannel:(RTC_OBJC_TYPE(RTCDataChannel) *)dataChannel - (void)dataChannel:(RTC_OBJC_TYPE(RTCDataChannel) *)dataChannel
didChangeBufferedAmount:(uint64_t)amount; didChangeBufferedAmount:(uint64_t)amount;
@ -124,7 +124,7 @@ RTC_OBJC_EXPORT
/** Closes the data channel. */ /** Closes the data channel. */
- (void)close; - (void)close;
/** Attempt to send |data| on this data channel's underlying data transport. */ /** Attempt to send `data` on this data channel's underlying data transport. */
- (BOOL)sendData:(RTC_OBJC_TYPE(RTCDataBuffer) *)data; - (BOOL)sendData:(RTC_OBJC_TYPE(RTCDataBuffer) *)data;
@end @end

View file

@ -43,7 +43,7 @@ RTC_OBJC_EXPORT
// kRTCFileLoggerTypeCall. // kRTCFileLoggerTypeCall.
@property(nonatomic, readonly) RTCFileLoggerRotationType rotationType; @property(nonatomic, readonly) RTCFileLoggerRotationType rotationType;
// Disables buffering disk writes. Should be set before |start|. Buffering // Disables buffering disk writes. Should be set before `start`. Buffering
// is enabled by default for performance. // is enabled by default for performance.
@property(nonatomic, assign) BOOL shouldDisableBuffering; @property(nonatomic, assign) BOOL shouldDisableBuffering;

View file

@ -37,9 +37,9 @@ RTC_OBJC_EXPORT
@property(nonatomic, readonly) RTCTlsCertPolicy tlsCertPolicy; @property(nonatomic, readonly) RTCTlsCertPolicy tlsCertPolicy;
/** /**
If the URIs in |urls| only contain IP addresses, this field can be used If the URIs in `urls` only contain IP addresses, this field can be used
to indicate the hostname, which may be necessary for TLS (using the SNI to indicate the hostname, which may be necessary for TLS (using the SNI
extension). If |urls| itself contains the hostname, this isn't necessary. extension). If `urls` itself contains the hostname, this isn't necessary.
*/ */
@property(nonatomic, readonly, nullable) NSString *hostname; @property(nonatomic, readonly, nullable) NSString *hostname;

View file

@ -174,7 +174,7 @@ RTC_OBJC_EXPORT
*/ */
@property(nonatomic, weak, nullable) id<RTC_OBJC_TYPE(RTCPeerConnectionDelegate)> delegate; @property(nonatomic, weak, nullable) id<RTC_OBJC_TYPE(RTCPeerConnectionDelegate)> delegate;
/** This property is not available with RTCSdpSemanticsUnifiedPlan. Please use /** This property is not available with RTCSdpSemanticsUnifiedPlan. Please use
* |senders| instead. * `senders` instead.
*/ */
@property(nonatomic, readonly) NSArray<RTC_OBJC_TYPE(RTCMediaStream) *> *localStreams; @property(nonatomic, readonly) NSArray<RTC_OBJC_TYPE(RTCMediaStream) *> *localStreams;
@property(nonatomic, readonly, nullable) RTC_OBJC_TYPE(RTCSessionDescription) * localDescription; @property(nonatomic, readonly, nullable) RTC_OBJC_TYPE(RTCSessionDescription) * localDescription;
@ -207,7 +207,7 @@ RTC_OBJC_EXPORT
- (instancetype)init NS_UNAVAILABLE; - (instancetype)init NS_UNAVAILABLE;
/** Sets the PeerConnection's global configuration to |configuration|. /** Sets the PeerConnection's global configuration to `configuration`.
* Any changes to STUN/TURN servers or ICE candidate policy will affect the * Any changes to STUN/TURN servers or ICE candidate policy will affect the
* next gathering phase, and cause the next call to createOffer to generate * next gathering phase, and cause the next call to createOffer to generate
* new ICE credentials. Note that the BUNDLE and RTCP-multiplexing policies * new ICE credentials. Note that the BUNDLE and RTCP-multiplexing policies
@ -243,7 +243,7 @@ RTC_OBJC_EXPORT
/** Add a new media stream track to be sent on this peer connection, and return /** Add a new media stream track to be sent on this peer connection, and return
* the newly created RTCRtpSender. The RTCRtpSender will be * the newly created RTCRtpSender. The RTCRtpSender will be
* associated with the streams specified in the |streamIds| list. * associated with the streams specified in the `streamIds` list.
* *
* Errors: If an error occurs, returns nil. An error can occur if: * Errors: If an error occurs, returns nil. An error can occur if:
* - A sender already exists for the track. * - A sender already exists for the track.
@ -265,7 +265,7 @@ RTC_OBJC_EXPORT
* transceivers. Adding a transceiver will cause future calls to CreateOffer * transceivers. Adding a transceiver will cause future calls to CreateOffer
* to add a media description for the corresponding transceiver. * to add a media description for the corresponding transceiver.
* *
* The initial value of |mid| in the returned transceiver is nil. Setting a * The initial value of `mid` in the returned transceiver is nil. Setting a
* new session description may change it to a non-nil value. * new session description may change it to a non-nil value.
* *
* https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-addtransceiver * https://w3c.github.io/webrtc-pc/#dom-rtcpeerconnection-addtransceiver
@ -325,7 +325,7 @@ RTC_OBJC_EXPORT
/** Limits the bandwidth allocated for all RTP streams sent by this /** Limits the bandwidth allocated for all RTP streams sent by this
* PeerConnection. Nil parameters will be unchanged. Setting * PeerConnection. Nil parameters will be unchanged. Setting
* |currentBitrateBps| will force the available bitrate estimate to the given * `currentBitrateBps` will force the available bitrate estimate to the given
* value. Returns YES if the parameters were successfully updated. * value. Returns YES if the parameters were successfully updated.
*/ */
- (BOOL)setBweMinBitrateBps:(nullable NSNumber *)minBitrateBps - (BOOL)setBweMinBitrateBps:(nullable NSNumber *)minBitrateBps
@ -365,7 +365,7 @@ typedef void (^RTCStatisticsCompletionHandler)(RTC_OBJC_TYPE(RTCStatisticsReport
@interface RTC_OBJC_TYPE (RTCPeerConnection) @interface RTC_OBJC_TYPE (RTCPeerConnection)
(Stats) (Stats)
/** Gather stats for the given RTCMediaStreamTrack. If |mediaStreamTrack| is nil /** Gather stats for the given RTCMediaStreamTrack. If `mediaStreamTrack` is nil
* statistics are gathered for all tracks. * statistics are gathered for all tracks.
*/ */
- (void)statsForTrack - (void)statsForTrack

View file

@ -22,8 +22,8 @@ NS_ASSUME_NONNULL_BEGIN
*/ */
@property(nonatomic, readonly) int activationCount; @property(nonatomic, readonly) int activationCount;
/** The number of times |beginWebRTCSession| was called without a balanced call /** The number of times `beginWebRTCSession` was called without a balanced call
* to |endWebRTCSession|. * to `endWebRTCSession`.
*/ */
@property(nonatomic, readonly) int webRTCSessionCount; @property(nonatomic, readonly) int webRTCSessionCount;
@ -57,16 +57,16 @@ NS_ASSUME_NONNULL_BEGIN
/** Configure the audio session for WebRTC. This call will fail if the session /** Configure the audio session for WebRTC. This call will fail if the session
* is already configured. On other failures, we will attempt to restore the * is already configured. On other failures, we will attempt to restore the
* previously used audio session configuration. * previously used audio session configuration.
* |lockForConfiguration| must be called first. * `lockForConfiguration` must be called first.
* Successful calls to configureWebRTCSession must be matched by calls to * Successful calls to configureWebRTCSession must be matched by calls to
* |unconfigureWebRTCSession|. * `unconfigureWebRTCSession`.
*/ */
- (BOOL)configureWebRTCSession:(NSError **)outError; - (BOOL)configureWebRTCSession:(NSError **)outError;
/** Unconfigures the session for WebRTC. This will attempt to restore the /** Unconfigures the session for WebRTC. This will attempt to restore the
* audio session to the settings used before |configureWebRTCSession| was * audio session to the settings used before `configureWebRTCSession` was
* called. * called.
* |lockForConfiguration| must be called first. * `lockForConfiguration` must be called first.
*/ */
- (BOOL)unconfigureWebRTCSession:(NSError **)outError; - (BOOL)unconfigureWebRTCSession:(NSError **)outError;
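The session object counts balanced begin/end calls so the audio session is configured on the first begin and unconfigured only on the last matching end. A C++ sketch of that counting, with the configuration calls as hypothetical stubs:

```cpp
#include <atomic>
#include <cstdio>

// Hypothetical configuration hooks.
void ConfigureForWebRtc() { std::puts("configure audio session"); }
void UnconfigureForWebRtc() { std::puts("restore previous configuration"); }

std::atomic<int> webrtc_session_count{0};

// The first begin configures the session; nested begins only bump the count.
void BeginWebRtcSession() {
  if (webrtc_session_count.fetch_add(1) == 0) ConfigureForWebRtc();
}

// The last balanced end restores the previous configuration.
void EndWebRtcSession() {
  if (webrtc_session_count.fetch_sub(1) == 1) UnconfigureForWebRtc();
}
```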

View file

@ -209,9 +209,9 @@ RTC_OBJC_EXPORT
/** Relinquishes exclusive access to the audio session. */ /** Relinquishes exclusive access to the audio session. */
- (void)unlockForConfiguration; - (void)unlockForConfiguration;
/** If |active|, activates the audio session if it isn't already active. /** If `active`, activates the audio session if it isn't already active.
* Successful calls must be balanced with a setActive:NO when activation is no * Successful calls must be balanced with a setActive:NO when activation is no
* longer required. If not |active|, deactivates the audio session if one is * longer required. If not `active`, deactivates the audio session if one is
* active and this is the last balanced call. When deactivating, the * active and this is the last balanced call. When deactivating, the
* AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation option is passed to * AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation option is passed to
* AVAudioSession. * AVAudioSession.
@ -219,7 +219,7 @@ RTC_OBJC_EXPORT
- (BOOL)setActive:(BOOL)active error:(NSError **)outError; - (BOOL)setActive:(BOOL)active error:(NSError **)outError;
// The following methods are proxies for the associated methods on // The following methods are proxies for the associated methods on
// AVAudioSession. |lockForConfiguration| must be called before using them // AVAudioSession. `lockForConfiguration` must be called before using them
// otherwise they will fail with kRTCAudioSessionErrorLockRequired. // otherwise they will fail with kRTCAudioSessionErrorLockRequired.
- (BOOL)setCategory:(NSString *)category - (BOOL)setCategory:(NSString *)category
@ -245,13 +245,13 @@ RTC_OBJC_EXPORT
/** Applies the configuration to the current session. Attempts to set all /** Applies the configuration to the current session. Attempts to set all
* properties even if previous ones fail. Only the last error will be * properties even if previous ones fail. Only the last error will be
* returned. * returned.
* |lockForConfiguration| must be called first. * `lockForConfiguration` must be called first.
*/ */
- (BOOL)setConfiguration : (RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration error - (BOOL)setConfiguration : (RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration error
: (NSError **)outError; : (NSError **)outError;
/** Convenience method that calls both setConfiguration and setActive. /** Convenience method that calls both setConfiguration and setActive.
* |lockForConfiguration| must be called first. * `lockForConfiguration` must be called first.
*/ */
- (BOOL)setConfiguration:(RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration - (BOOL)setConfiguration:(RTC_OBJC_TYPE(RTCAudioSessionConfiguration) *)configuration
active:(BOOL)active active:(BOOL)active

View file

@ -23,7 +23,7 @@ class AudioSessionObserver;
- (instancetype)init NS_UNAVAILABLE; - (instancetype)init NS_UNAVAILABLE;
/** |observer| is a raw pointer and should be kept alive /** `observer` is a raw pointer and should be kept alive
* for this object's lifetime. * for this object's lifetime.
*/ */
- (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer NS_DESIGNATED_INITIALIZER; - (instancetype)initWithObserver:(webrtc::AudioSessionObserver *)observer NS_DESIGNATED_INITIALIZER;

View file

@ -165,7 +165,7 @@ typedef NS_ENUM(NSInteger, RTCFileVideoCapturerStatus) {
int64_t presentationDifferenceRound = lroundf(presentationDifference * NSEC_PER_SEC); int64_t presentationDifferenceRound = lroundf(presentationDifference * NSEC_PER_SEC);
__block dispatch_source_t timer = [self createStrictTimer]; __block dispatch_source_t timer = [self createStrictTimer];
// Strict timer that will fire |presentationDifferenceRound| ns from now and never again. // Strict timer that will fire `presentationDifferenceRound` ns from now and never again.
dispatch_source_set_timer(timer, dispatch_source_set_timer(timer,
dispatch_time(DISPATCH_TIME_NOW, presentationDifferenceRound), dispatch_time(DISPATCH_TIME_NOW, presentationDifferenceRound),
DISPATCH_TIME_FOREVER, DISPATCH_TIME_FOREVER,

View file

@ -14,7 +14,7 @@
@interface RTCNetworkMonitor () @interface RTCNetworkMonitor ()
/** |observer| is a raw pointer and should be kept alive /** `observer` is a raw pointer and should be kept alive
* for this object's lifetime. * for this object's lifetime.
*/ */
- (instancetype)initWithObserver:(webrtc::NetworkMonitorObserver *)observer - (instancetype)initWithObserver:(webrtc::NetworkMonitorObserver *)observer

View file

@ -31,11 +31,11 @@
// the method that will trigger the binding of the render // the method that will trigger the binding of the render
// buffer. Because the standard behaviour of -[UIView setNeedsDisplay] // buffer. Because the standard behaviour of -[UIView setNeedsDisplay]
// is disabled for the reasons above, the RTC_OBJC_TYPE(RTCEAGLVideoView) maintains // is disabled for the reasons above, the RTC_OBJC_TYPE(RTCEAGLVideoView) maintains
// its own |isDirty| flag. // its own `isDirty` flag.
@interface RTC_OBJC_TYPE (RTCEAGLVideoView) @interface RTC_OBJC_TYPE (RTCEAGLVideoView)
()<GLKViewDelegate> ()<GLKViewDelegate>
// |videoFrame| is set when we receive a frame from a worker thread and is read // `videoFrame` is set when we receive a frame from a worker thread and is read
// from the display link callback so atomicity is required. // from the display link callback so atomicity is required.
@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) * videoFrame; @property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) * videoFrame;
@property(nonatomic, readonly) GLKView *glkView; @property(nonatomic, readonly) GLKView *glkView;
@ -183,7 +183,7 @@
// redrawn. This occurs on the main thread. // redrawn. This occurs on the main thread.
- (void)glkView:(GLKView *)view drawInRect:(CGRect)rect { - (void)glkView:(GLKView *)view drawInRect:(CGRect)rect {
// The renderer will draw the frame to the framebuffer corresponding to the // The renderer will draw the frame to the framebuffer corresponding to the
// one used by |view|. // one used by `view`.
RTC_OBJC_TYPE(RTCVideoFrame) *frame = self.videoFrame; RTC_OBJC_TYPE(RTCVideoFrame) *frame = self.videoFrame;
if (!frame || frame.timeStampNs == _lastDrawnFrameTimeStampNs) { if (!frame || frame.timeStampNs == _lastDrawnFrameTimeStampNs) {
return; return;

View file

@ -25,7 +25,7 @@
@interface RTC_OBJC_TYPE (RTCNSGLVideoView) @interface RTC_OBJC_TYPE (RTCNSGLVideoView)
() ()
// |videoFrame| is set when we receive a frame from a worker thread and is read // `videoFrame` is set when we receive a frame from a worker thread and is read
// from the display link callback so atomicity is required. // from the display link callback so atomicity is required.
@property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) * @property(atomic, strong) RTC_OBJC_TYPE(RTCVideoFrame) *
videoFrame; videoFrame;

View file

@ -36,7 +36,7 @@ const char kRTCVertexShaderSource[] =
" v_texcoord = texcoord;\n" " v_texcoord = texcoord;\n"
"}\n"; "}\n";
// Compiles a shader of the given |type| with GLSL source |source| and returns // Compiles a shader of the given `type` with GLSL source `source` and returns
// the shader handle or 0 on error. // the shader handle or 0 on error.
GLuint RTCCreateShader(GLenum type, const GLchar *source) { GLuint RTCCreateShader(GLenum type, const GLchar *source) {
GLuint shader = glCreateShader(type); GLuint shader = glCreateShader(type);
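The shader helper compiles GLSL source of the given type and returns 0 on failure. The standard OpenGL ES sequence it relies on looks like this (a sketch, with error logging trimmed):

```cpp
#include <GLES2/gl2.h>

// Compiles `source` as a shader of `type` (GL_VERTEX_SHADER or
// GL_FRAGMENT_SHADER). Returns the shader handle, or 0 on failure.
GLuint CreateShader(GLenum type, const GLchar* source) {
  GLuint shader = glCreateShader(type);
  if (!shader) return 0;
  glShaderSource(shader, 1, &source, nullptr);
  glCompileShader(shader);
  GLint compiled = GL_FALSE;
  glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
  if (compiled != GL_TRUE) {
    glDeleteShader(shader);
    return 0;
  }
  return shader;
}
```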

View file

@ -273,7 +273,7 @@ CFStringRef ExtractProfile(const webrtc::H264ProfileLevelId &profile_level_id) {
} }
// The function returns the max allowed sample rate (pixels per second) that // The function returns the max allowed sample rate (pixels per second) that
// can be processed by a given encoder with |profile_level_id|. // can be processed by a given encoder with `profile_level_id`.
// See https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-H.264-201610-S!!PDF-E&type=items // See https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-H.264-201610-S!!PDF-E&type=items
// for details. // for details.
NSUInteger GetMaxSampleRate(const webrtc::H264ProfileLevelId &profile_level_id) { NSUInteger GetMaxSampleRate(const webrtc::H264ProfileLevelId &profile_level_id) {
@ -723,7 +723,7 @@ NSUInteger GetMaxSampleRate(const webrtc::H264ProfileLevelId &profile_level_id)
if (_compressionSession) { if (_compressionSession) {
SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AverageBitRate, bitrateBps); SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AverageBitRate, bitrateBps);
// With zero |_maxAllowedFrameRate|, we fall back to automatic frame rate detection. // With zero `_maxAllowedFrameRate`, we fall back to automatic frame rate detection.
if (_maxAllowedFrameRate > 0) { if (_maxAllowedFrameRate > 0) {
SetVTSessionProperty( SetVTSessionProperty(
_compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, frameRate); _compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, frameRate);
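A hedged sketch of the rate-configuration logic this hunk touches, written against the public VideoToolbox/CoreFoundation C API rather than the SDK's SetVTSessionProperty helper; the function name is illustrative. The key point matches the comment: a zero frame-rate cap means the ExpectedFrameRate property is simply left unset so the encoder detects the rate itself.

#include <CoreFoundation/CoreFoundation.h>
#include <VideoToolbox/VideoToolbox.h>

void ConfigureRates(VTCompressionSessionRef session,
                    int32_t bitrate_bps,
                    int32_t max_allowed_fps) {
  CFNumberRef bitrate =
      CFNumberCreate(nullptr, kCFNumberSInt32Type, &bitrate_bps);
  VTSessionSetProperty(session, kVTCompressionPropertyKey_AverageBitRate,
                       bitrate);
  CFRelease(bitrate);
  if (max_allowed_fps > 0) {  // zero: let the encoder detect the frame rate
    CFNumberRef fps =
        CFNumberCreate(nullptr, kCFNumberSInt32Type, &max_allowed_fps);
    VTSessionSetProperty(session, kVTCompressionPropertyKey_ExpectedFrameRate,
                         fps);
    CFRelease(fps);
  }
}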

View file

@ -111,7 +111,7 @@ bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
} }
size_t bytes_remaining = block_buffer_size; size_t bytes_remaining = block_buffer_size;
while (bytes_remaining > 0) { while (bytes_remaining > 0) {
// The size type here must match |nalu_header_size|; we expect 4 bytes. // The size type here must match `nalu_header_size`; we expect 4 bytes.
// Read the length of the next packet of data. Must convert from big endian // Read the length of the next packet of data. Must convert from big endian
// to host endian. // to host endian.
RTC_DCHECK_GE(bytes_remaining, (size_t)nalu_header_size); RTC_DCHECK_GE(bytes_remaining, (size_t)nalu_header_size);
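A self-contained sketch of the AVCC-to-Annex-B rewrite this loop performs, assuming 4-byte length prefixes (`nalu_header_size == 4`); the function and variable names are illustrative, not the SDK's.

#include <cstdint>
#include <vector>

bool AvccToAnnexB(const uint8_t* avcc, size_t size, std::vector<uint8_t>* out) {
  static const uint8_t kStartCode[] = {0, 0, 0, 1};
  size_t offset = 0;
  while (offset + 4 <= size) {
    // Length of the next NALU, stored big-endian; convert to host order.
    uint32_t nalu_size = (uint32_t{avcc[offset]} << 24) |
                         (uint32_t{avcc[offset + 1]} << 16) |
                         (uint32_t{avcc[offset + 2]} << 8) |
                         uint32_t{avcc[offset + 3]};
    offset += 4;
    if (nalu_size > size - offset) return false;  // malformed buffer
    // Replace the length prefix with an Annex B start code.
    out->insert(out->end(), kStartCode, kStartCode + 4);
    out->insert(out->end(), avcc + offset, avcc + offset + nalu_size);
    offset += nalu_size;
  }
  return offset == size;
}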

View file

@ -26,7 +26,7 @@ namespace webrtc {
// Converts a sample buffer emitted from the VideoToolbox encoder into a buffer // Converts a sample buffer emitted from the VideoToolbox encoder into a buffer
// suitable for RTP. The sample buffer is in avcc format whereas the rtp buffer // suitable for RTP. The sample buffer is in avcc format whereas the rtp buffer
// needs to be in Annex B format. Data is written directly to |annexb_buffer|. // needs to be in Annex B format. Data is written directly to `annexb_buffer`.
bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer, bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
bool is_keyframe, bool is_keyframe,
rtc::Buffer* annexb_buffer); rtc::Buffer* annexb_buffer);
@ -34,8 +34,8 @@ bool H264CMSampleBufferToAnnexBBuffer(CMSampleBufferRef avcc_sample_buffer,
// Converts a buffer received from RTP into a sample buffer suitable for the // Converts a buffer received from RTP into a sample buffer suitable for the
// VideoToolbox decoder. The RTP buffer is in annex b format whereas the sample // VideoToolbox decoder. The RTP buffer is in annex b format whereas the sample
// buffer is in avcc format. // buffer is in avcc format.
// If |is_keyframe| is true then |video_format| is ignored since the format will // If `is_keyframe` is true then `video_format` is ignored since the format will
// be read from the buffer. Otherwise |video_format| must be provided. // be read from the buffer. Otherwise `video_format` must be provided.
// Caller is responsible for releasing the created sample buffer. // Caller is responsible for releasing the created sample buffer.
bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer, bool H264AnnexBBufferToCMSampleBuffer(const uint8_t* annexb_buffer,
size_t annexb_buffer_size, size_t annexb_buffer_size,

View file

@ -40,9 +40,9 @@ RTC_OBJC_EXPORT
- (BOOL)requiresScalingToWidth:(int)width height:(int)height; - (BOOL)requiresScalingToWidth:(int)width height:(int)height;
- (int)bufferSizeForCroppingAndScalingToWidth:(int)width height:(int)height; - (int)bufferSizeForCroppingAndScalingToWidth:(int)width height:(int)height;
/** The minimum size of the |tmpBuffer| must be the number of bytes returned from the /** The minimum size of the `tmpBuffer` must be the number of bytes returned from the
* bufferSizeForCroppingAndScalingToWidth:height: method. * bufferSizeForCroppingAndScalingToWidth:height: method.
* If that size is 0, the |tmpBuffer| may be nil. * If that size is 0, the `tmpBuffer` may be nil.
*/ */
- (BOOL)cropAndScaleTo:(CVPixelBufferRef)outputPixelBuffer - (BOOL)cropAndScaleTo:(CVPixelBufferRef)outputPixelBuffer
withTempBuffer:(nullable uint8_t *)tmpBuffer; withTempBuffer:(nullable uint8_t *)tmpBuffer;

View file

@ -17,9 +17,9 @@
namespace webrtc { namespace webrtc {
// If |bypass_voice_processing| is true, WebRTC will attempt to disable hardware // If `bypass_voice_processing` is true, WebRTC will attempt to disable hardware
// audio processing on iOS. // audio processing on iOS.
// Warning: Setting |bypass_voice_processing| will have unpredictable // Warning: Setting `bypass_voice_processing` will have unpredictable
// consequences for the audio path in the device. It is not advisable to use in // consequences for the audio path in the device. It is not advisable to use in
// most scenarios. // most scenarios.
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule( rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceModule(

View file

@ -164,7 +164,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
bool IsInterrupted(); bool IsInterrupted();
private: private:
// Called by the relevant AudioSessionObserver methods on |thread_|. // Called by the relevant AudioSessionObserver methods on `thread_`.
void HandleInterruptionBegin(); void HandleInterruptionBegin();
void HandleInterruptionEnd(); void HandleInterruptionEnd();
void HandleValidRouteChange(); void HandleValidRouteChange();
@ -173,7 +173,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
void HandlePlayoutGlitchDetected(); void HandlePlayoutGlitchDetected();
void HandleOutputVolumeChange(); void HandleOutputVolumeChange();
// Uses current |playout_parameters_| and |record_parameters_| to inform the // Uses current `playout_parameters_` and `record_parameters_` to inform the
// audio device buffer (ADB) about our internal audio parameters. // audio device buffer (ADB) about our internal audio parameters.
void UpdateAudioDeviceBuffer(); void UpdateAudioDeviceBuffer();
@ -181,7 +181,7 @@ class AudioDeviceIOS : public AudioDeviceGeneric,
// values may be different once the AVAudioSession has been activated. // values may be different once the AVAudioSession has been activated.
// This method asks for the current hardware parameters and takes actions // This method asks for the current hardware parameters and takes actions
// if they should differ from what we have asked for initially. It also // if they should differ from what we have asked for initially. It also
// defines |playout_parameters_| and |record_parameters_|. // defines `playout_parameters_` and `record_parameters_`.
void SetupAudioBuffersForActiveAudioSession(); void SetupAudioBuffersForActiveAudioSession();
// Creates the audio unit. // Creates the audio unit.

View file

@ -386,7 +386,7 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
// Allocate AudioBuffers to be used as storage for the received audio. // Allocate AudioBuffers to be used as storage for the received audio.
// The AudioBufferList structure works as a placeholder for the // The AudioBufferList structure works as a placeholder for the
// AudioBuffer structure, which holds a pointer to the actual data buffer // AudioBuffer structure, which holds a pointer to the actual data buffer
// in |record_audio_buffer_|. Recorded audio will be rendered into this memory // in `record_audio_buffer_`. Recorded audio will be rendered into this memory
// at each input callback when calling AudioUnitRender(). // at each input callback when calling AudioUnitRender().
AudioBufferList audio_buffer_list; AudioBufferList audio_buffer_list;
audio_buffer_list.mNumberBuffers = 1; audio_buffer_list.mNumberBuffers = 1;
@ -397,7 +397,7 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags
audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data()); audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());
// Obtain the recorded audio samples by initiating a rendering cycle. // Obtain the recorded audio samples by initiating a rendering cycle.
// Since it happens on the input bus, the |io_data| parameter is a reference // Since it happens on the input bus, the `io_data` parameter is a reference
// to the preallocated audio buffer list that the audio unit renders into. // to the preallocated audio buffer list that the audio unit renders into.
// We can make the audio unit provide a buffer instead in io_data, but we // We can make the audio unit provide a buffer instead in io_data, but we
// currently just use our own. // currently just use our own.
@ -467,7 +467,7 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
// Read decoded 16-bit PCM samples from WebRTC (using a size that matches // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
// the native I/O audio unit) and copy the result to the audio buffer in the // the native I/O audio unit) and copy the result to the audio buffer in the
// |io_data| destination. // `io_data` destination.
fine_audio_buffer_->GetPlayoutData( fine_audio_buffer_->GetPlayoutData(
rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames), rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
kFixedPlayoutDelayEstimate); kFixedPlayoutDelayEstimate);
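A hedged sketch of the input-callback pattern described in these comments, assuming an iOS I/O audio unit whose input (microphone) element is bus 1 and mono 16-bit capture; the function and buffer names are illustrative. The AudioBufferList only wraps caller-owned storage, so AudioUnitRender() writes the recorded samples directly into that memory.

#include <AudioToolbox/AudioToolbox.h>
#include <vector>

OSStatus RenderRecordedAudio(AudioUnit audio_unit,
                             AudioUnitRenderActionFlags* flags,
                             const AudioTimeStamp* time_stamp,
                             UInt32 num_frames,
                             std::vector<int16_t>* storage) {
  storage->resize(num_frames);  // mono, 16-bit samples
  AudioBufferList buffer_list;
  buffer_list.mNumberBuffers = 1;
  AudioBuffer* buffer = &buffer_list.mBuffers[0];
  buffer->mNumberChannels = 1;
  buffer->mDataByteSize = static_cast<UInt32>(num_frames * sizeof(int16_t));
  buffer->mData = storage->data();
  // Bus 1 is the input element on the I/O unit; the render cycle fills the
  // wrapped buffer with the recorded audio.
  return AudioUnitRender(audio_unit, flags, time_stamp, /*inBusNumber=*/1,
                         num_frames, &buffer_list);
}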

View file

@ -24,7 +24,7 @@ class NetworkMonitorObserver {
public: public:
// Called when a path update occurs, on network monitor dispatch queue. // Called when a path update occurs, on network monitor dispatch queue.
// //
// |adapter_type_by_name| is a map from interface name (e.g. "pdp_ip0") to // `adapter_type_by_name` is a map from interface name (e.g. "pdp_ip0") to
// adapter type, for all available interfaces on the current path. If an // adapter type, for all available interfaces on the current path. If an
// interface name isn't present it can be assumed to be unavailable. // interface name isn't present it can be assumed to be unavailable.
virtual void OnPathUpdate( virtual void OnPathUpdate(
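An illustrative observer sketch for the contract described above (the enum and class names are hypothetical, not the SDK's): interfaces missing from `adapter_type_by_name` are treated as unavailable.

#include <map>
#include <string>
#include <utility>

enum class AdapterType { kUnknown, kCellular, kWifi, kUnavailable };

class PathObserver {
 public:
  // Called on the network monitor dispatch queue with the full map for the
  // current path.
  void OnPathUpdate(std::map<std::string, AdapterType> adapter_type_by_name) {
    adapter_type_by_name_ = std::move(adapter_type_by_name);
  }
  AdapterType TypeForInterface(const std::string& name) const {
    auto it = adapter_type_by_name_.find(name);
    // Interfaces not present on the current path are assumed unavailable.
    return it == adapter_type_by_name_.end() ? AdapterType::kUnavailable
                                             : it->second;
  }

 private:
  std::map<std::string, AdapterType> adapter_type_by_name_;
};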

View file

@ -118,7 +118,7 @@ static const NSUInteger kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds). // Play out a test file during this time (unit is in seconds).
static const NSUInteger kFilePlayTimeInSec = 15; static const NSUInteger kFilePlayTimeInSec = 15;
// Run the full-duplex test during this time (unit is in seconds). // Run the full-duplex test during this time (unit is in seconds).
// Note that the first |kNumIgnoreFirstCallbacks| callbacks are ignored. // Note that the first `kNumIgnoreFirstCallbacks` callbacks are ignored.
static const NSUInteger kFullDuplexTimeInSec = 10; static const NSUInteger kFullDuplexTimeInSec = 10;
// Wait for the callback sequence to stabilize by ignoring this amount of the // Wait for the callback sequence to stabilize by ignoring this amount of the
// initial callbacks (avoids initial FIFO access). // initial callbacks (avoids initial FIFO access).