/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MOZILLA_TRACKBUFFERSMANAGER_H_
#define MOZILLA_TRACKBUFFERSMANAGER_H_

#include "mozilla/Atomics.h"
#include "mozilla/Maybe.h"
#include "mozilla/Monitor.h"
#include "mozilla/Pair.h"
#include "mozilla/dom/SourceBufferBinding.h"

#include "SourceBufferContentManager.h"
#include "MediaDataDemuxer.h"
#include "MediaSourceDecoder.h"
#include "nsProxyRelease.h"
#include "nsTArray.h"

namespace mozilla {

class ContainerParser;
class MediaByteBuffer;
class MediaRawData;
class MediaSourceDemuxer;
class SourceBufferResource;

namespace dom {
class SourceBufferAttributes;
}

class TrackBuffersManager : public SourceBufferContentManager {
public:
  typedef MozPromise<bool, nsresult, /* IsExclusive = */ true>
    CodedFrameProcessingPromise;
  typedef TrackInfo::TrackType TrackType;
  typedef MediaData::Type MediaType;
  typedef nsTArray<RefPtr<MediaRawData>> TrackBuffer;

  TrackBuffersManager(dom::SourceBufferAttributes* aAttributes,
                      MediaSourceDecoder* aParentDecoder,
                      const nsACString& aType);

  bool AppendData(MediaByteBuffer* aData,
                  media::TimeUnit aTimestampOffset) override;

  RefPtr<AppendPromise> BufferAppend() override;

  void AbortAppendData() override;

  void ResetParserState() override;

  RefPtr<RangeRemovalPromise> RangeRemoval(media::TimeUnit aStart,
                                           media::TimeUnit aEnd) override;

  EvictDataResult EvictData(media::TimeUnit aPlaybackTime,
                            uint32_t aThreshold,
                            media::TimeUnit* aBufferStartTime) override;

  void EvictBefore(media::TimeUnit aTime) override;

  media::TimeIntervals Buffered() override;

  int64_t GetSize() override;

  void Ended() override;

  void Detach() override;

  AppendState GetAppendState() override
  {
    return mAppendState;
  }

  void SetGroupStartTimestamp(const media::TimeUnit& aGroupStartTimestamp) override;
  void RestartGroupStartTimestamp() override;
  media::TimeUnit GroupEndTimestamp() override;

  // Interface for MediaSourceDemuxer
  MediaInfo GetMetadata();
  const TrackBuffer& GetTrackBuffer(TrackInfo::TrackType aTrack);
  const media::TimeIntervals& Buffered(TrackInfo::TrackType);
  media::TimeIntervals SafeBuffered(TrackInfo::TrackType) const;
  bool IsEnded() const
  {
    return mEnded;
  }
  media::TimeUnit Seek(TrackInfo::TrackType aTrack,
                       const media::TimeUnit& aTime,
                       const media::TimeUnit& aFuzz);
  uint32_t SkipToNextRandomAccessPoint(TrackInfo::TrackType aTrack,
                                       const media::TimeUnit& aTimeThreshold,
                                       bool& aFound);
  already_AddRefed<MediaRawData> GetSample(TrackInfo::TrackType aTrack,
                                           const media::TimeUnit& aFuzz,
                                           bool& aError);
  media::TimeUnit GetNextRandomAccessPoint(TrackInfo::TrackType aTrack);

  void AddSizeOfResources(MediaSourceDecoder::ResourceSizes* aSizes);

private:
  // for MediaSourceDemuxer::GetMozDebugReaderData
  friend class MediaSourceDemuxer;

  virtual ~TrackBuffersManager();

  // All following functions run on the taskqueue.
  RefPtr<AppendPromise> InitSegmentParserLoop();
  void ScheduleSegmentParserLoop();
  void SegmentParserLoop();
  void AppendIncomingBuffers();
  void InitializationSegmentReceived();
  void ShutdownDemuxers();
  void CreateDemuxerforMIMEType();
  void ResetDemuxingState();
  void NeedMoreData();
  void RejectAppend(nsresult aRejectValue, const char* aName);
  // Will return a promise that will be resolved once all frames of the current
  // media segment have been processed.
  RefPtr<CodedFrameProcessingPromise> CodedFrameProcessing();
  void CompleteCodedFrameProcessing();
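
  // Illustrative sketch of the append pipeline, for orientation only (the
  // authoritative caller lives in SourceBuffer; the local names below are
  // hypothetical). An append is queued with AppendData() and then serviced
  // asynchronously via BufferAppend():
  //
  //   manager->AppendData(data, timestampOffset);
  //   RefPtr<AppendPromise> p = manager->BufferAppend();
  //   p->Then(AbstractThread::MainThread(), __func__,
  //           [](bool) { /* append finished, refresh buffered ranges */ },
  //           [](nsresult) { /* append errored */ });
  //
  // On the task queue, SegmentParserLoop() consumes the input buffer:
  // init segments are handled by InitializationSegmentReceived(), media
  // segments by CodedFrameProcessing().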
  // Called by ResetParserState.
  void CompleteResetParserState();
  RefPtr<RangeRemovalPromise>
    CodedFrameRemovalWithPromise(media::TimeInterval aInterval);
  bool CodedFrameRemoval(media::TimeInterval aInterval);
  void SetAppendState(AppendState aAppendState);

  bool HasVideo() const
  {
    return mVideoTracks.mNumTracks > 0;
  }
  bool HasAudio() const
  {
    return mAudioTracks.mNumTracks > 0;
  }

  typedef Pair<RefPtr<MediaByteBuffer>, media::TimeUnit> IncomingBuffer;
  void AppendIncomingBuffer(IncomingBuffer aData);
  nsTArray<IncomingBuffer> mIncomingBuffers;

  // The input buffer as per
  // http://w3c.github.io/media-source/index.html#sourcebuffer-input-buffer
  RefPtr<MediaByteBuffer> mInputBuffer;
  // The current append state as per
  // https://w3c.github.io/media-source/#sourcebuffer-append-state
  // Accessed on both the main thread and the task queue.
  Atomic<AppendState> mAppendState;
  // Buffer full flag as per
  // https://w3c.github.io/media-source/#sourcebuffer-buffer-full-flag.
  // Accessed on both the main thread and the task queue.
  // TODO: Unused for now.
  Atomic<bool> mBufferFull;
  bool mFirstInitializationSegmentReceived;
  // Set to true once a new segment is started.
  bool mNewMediaSegmentStarted;
  bool mActiveTrack;
  Maybe<media::TimeUnit> mGroupStartTimestamp;
  media::TimeUnit mGroupEndTimestamp;
  nsCString mType;

  // ContainerParser objects and methods.
  // These are used to parse the incoming input buffer.

  // Recreate the ContainerParser and, if aReuseInitData is true,
  // feed it with the previous init segment found.
  void RecreateParser(bool aReuseInitData);
  nsAutoPtr<ContainerParser> mParser;

  // Demuxer objects and methods.
  void AppendDataToCurrentInputBuffer(MediaByteBuffer* aData);
  RefPtr<MediaByteBuffer> mInitData;
  // Temporary input buffer to handle partial media segment header.
  // We store the current input buffer content into it should we need to
  // reinitialize the demuxer once we have some samples and a discontinuity is
  // detected.
  RefPtr<MediaByteBuffer> mPendingInputBuffer;
  RefPtr<SourceBufferResource> mCurrentInputBuffer;
  RefPtr<MediaDataDemuxer> mInputDemuxer;
  // Length already processed in current media segment.
  uint64_t mProcessedInput;
  Maybe<media::TimeUnit> mLastParsedEndTime;

  void OnDemuxerInitDone(nsresult);
  void OnDemuxerInitFailed(DemuxerFailureReason aFailure);
  void OnDemuxerResetDone(nsresult);
  MozPromiseRequestHolder<MediaDataDemuxer::InitPromise> mDemuxerInitRequest;
  bool mEncrypted;

  void OnDemuxFailed(TrackType aTrack, DemuxerFailureReason aFailure);
  void DoDemuxVideo();
  void OnVideoDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);
  void OnVideoDemuxFailed(DemuxerFailureReason aFailure)
  {
    mVideoTracks.mDemuxRequest.Complete();
    OnDemuxFailed(TrackType::kVideoTrack, aFailure);
  }
  void DoDemuxAudio();
  void OnAudioDemuxCompleted(RefPtr<MediaTrackDemuxer::SamplesHolder> aSamples);
  void OnAudioDemuxFailed(DemuxerFailureReason aFailure)
  {
    mAudioTracks.mDemuxRequest.Complete();
    OnDemuxFailed(TrackType::kAudioTrack, aFailure);
  }

  void DoEvictData(const media::TimeUnit& aPlaybackTime, uint32_t aThreshold);

  struct TrackData {
    TrackData()
      : mNumTracks(0)
      , mNeedRandomAccessPoint(true)
      , mSizeBuffer(0)
    {}
    uint32_t mNumTracks;
    // Definition of variables:
    // https://w3c.github.io/media-source/#track-buffers
    // Last decode timestamp variable that stores the decode timestamp of the
    // last coded frame appended in the current coded frame group.
    // The variable is initially unset to indicate that no coded frames have
    // been appended yet.
    Maybe<media::TimeUnit> mLastDecodeTimestamp;
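    // Illustrative sketch, for orientation only, of how mLastDecodeTimestamp
    // and mLastFrameDuration feed the discontinuity check of the MSE coded
    // frame processing algorithm (`decodeTimestamp` below is a hypothetical
    // local of the caller):
    //
    //   bool discontinuity =
    //     mLastDecodeTimestamp.isSome() &&
    //     (decodeTimestamp < mLastDecodeTimestamp.ref() ||
    //      (decodeTimestamp - mLastDecodeTimestamp.ref()).ToMicroseconds() >
    //        2 * mLastFrameDuration.ref().ToMicroseconds());
    //
    // When a discontinuity is detected, these variables (and
    // mHighestEndTimestamp) are unset and mNeedRandomAccessPoint is set back
    // to true for all track buffers.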
    // Last frame duration variable that stores the coded frame duration of
    // the last coded frame appended in the current coded frame group.
    // The variable is initially unset to indicate that no coded frames have
    // been appended yet.
    Maybe<media::TimeUnit> mLastFrameDuration;
    // Highest end timestamp variable that stores the highest coded frame end
    // timestamp across all coded frames in the current coded frame group that
    // were appended to this track buffer.
    // The variable is initially unset to indicate that no coded frames have
    // been appended yet.
    Maybe<media::TimeUnit> mHighestEndTimestamp;
    // Longest frame duration seen in a coded frame group.
    Maybe<media::TimeUnit> mLongestFrameDuration;
    // Need random access point flag variable that keeps track of whether the
    // track buffer is waiting for a random access point coded frame.
    // The variable is initially set to true to indicate that a random access
    // point coded frame is needed before anything can be added to the track
    // buffer.
    bool mNeedRandomAccessPoint;
    RefPtr<MediaTrackDemuxer> mDemuxer;
    MozPromiseRequestHolder<MediaTrackDemuxer::SamplesPromise> mDemuxRequest;
    // Highest end timestamp of the last media segment demuxed.
    media::TimeUnit mLastParsedEndTime;
    // If set, position where the next contiguous frame will be inserted.
    // If a discontinuity is detected, it will be unset and recalculated upon
    // the next insertion.
    Maybe<uint32_t> mNextInsertionIndex;
    // Samples just demuxed, but not yet parsed.
    TrackBuffer mQueuedSamples;
    // We only manage a single track of each type at this time.
    nsTArray<TrackBuffer> mBuffers;
    // Track buffer ranges variable that represents the presentation time
    // ranges occupied by the coded frames currently stored in the track
    // buffer.
    media::TimeIntervals mBufferedRanges;
    // Sanitized mBufferedRanges with a fuzz of half a sample's duration
    // applied. These buffered ranges are the basis of what is exposed to JS.
    media::TimeIntervals mSanitizedBufferedRanges;
    // Byte size of all samples contained in this track buffer.
    uint32_t mSizeBuffer;
    // TrackInfo of the first metadata received.
    RefPtr<SharedTrackInfo> mInfo;
    // TrackInfo of the last metadata parsed (updated with each init segment).
    RefPtr<SharedTrackInfo> mLastInfo;
    // If set, position of the next sample to be retrieved by GetSample().
    // If the position is equal to the TrackBuffer's length, it indicates that
    // we've reached EOS.
    Maybe<uint32_t> mNextGetSampleIndex;
    // Approximation of the next sample's decode timestamp.
    media::TimeUnit mNextSampleTimecode;
    // Approximation of the next sample's presentation timestamp.
    media::TimeUnit mNextSampleTime;

    void ResetAppendState()
    {
      mLastDecodeTimestamp.reset();
      mLastFrameDuration.reset();
      mHighestEndTimestamp.reset();
      mNeedRandomAccessPoint = true;
      mLongestFrameDuration.reset();
      mNextInsertionIndex.reset();
    }

    void AddSizeOfResources(MediaSourceDecoder::ResourceSizes* aSizes);
  };

  void CheckSequenceDiscontinuity(const media::TimeUnit& aPresentationTime);
  void ProcessFrames(TrackBuffer& aSamples, TrackData& aTrackData);
  bool CheckNextInsertionIndex(TrackData& aTrackData,
                               const media::TimeUnit& aSampleTime);
  void InsertFrames(TrackBuffer& aSamples,
                    const media::TimeIntervals& aIntervals,
                    TrackData& aTrackData);
  // Remove all frames and their dependencies contained in aIntervals.
  // Return the index at which frames were first removed, or 0 if no frames
  // were removed.
  size_t RemoveFrames(const media::TimeIntervals& aIntervals,
                      TrackData& aTrackData,
                      uint32_t aStartIndex);
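  // Illustrative sketch, for orientation only, of how a removal maps onto the
  // helpers above: RangeRemoval() resolves its promise once
  // CodedFrameRemoval() has run on the task queue, and CodedFrameRemoval()
  // calls RemoveFrames() on each track buffer. A hypothetical removal of the
  // first ten seconds of video boils down to something like:
  //
  //   media::TimeIntervals removal;
  //   removal += media::TimeInterval(media::TimeUnit::FromSeconds(0),
  //                                  media::TimeUnit::FromSeconds(10));
  //   RemoveFrames(removal, mVideoTracks, 0);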
  // Find the index of the sample in aTrackBuffer matching aInterval.
  uint32_t FindSampleIndex(const TrackBuffer& aTrackBuffer,
                           const media::TimeInterval& aInterval);
  void UpdateBufferedRanges();
  void RejectProcessing(nsresult aRejectValue, const char* aName);
  void ResolveProcessing(bool aResolveValue, const char* aName);
  MozPromiseRequestHolder<CodedFrameProcessingPromise> mProcessingRequest;
  MozPromiseHolder<CodedFrameProcessingPromise> mProcessingPromise;

  MozPromiseHolder<AppendPromise> mAppendPromise;

  // Trackbuffers definition.
  nsTArray<TrackData*> GetTracksList();
  TrackData& GetTracksData(TrackType aTrack)
  {
    switch (aTrack) {
      case TrackType::kVideoTrack:
        return mVideoTracks;
      case TrackType::kAudioTrack:
      default:
        return mAudioTracks;
    }
  }
  TrackData mVideoTracks;
  TrackData mAudioTracks;

  // TaskQueue methods and objects.
  AbstractThread* GetTaskQueue()
  {
    return mTaskQueue;
  }
  bool OnTaskQueue()
  {
    return !GetTaskQueue() || GetTaskQueue()->IsCurrentThreadIn();
  }
  RefPtr<AbstractThread> mTaskQueue;

  media::TimeInterval mAppendWindow;
  media::TimeUnit mTimestampOffset;
  media::TimeUnit mLastTimestampOffset;
  void RestoreCachedVariables();

  // Strong references to external objects.
  RefPtr<dom::SourceBufferAttributes> mSourceBufferAttributes;
  nsMainThreadPtrHandle<MediaSourceDecoder> mParentDecoder;

  // Set to true if the mediasource state changed to ended.
  Atomic<bool> mEnded;

  // Global size of this source buffer content.
  Atomic<int64_t> mSizeSourceBuffer;
  uint32_t mEvictionThreshold;
  Atomic<bool> mEvictionOccurred;

  // Monitor to protect the following objects accessed across multiple threads.
  // mMonitor is also notified if the value of mAppendRunning becomes false.
  mutable Monitor mMonitor;
  // Set to true while a BufferAppend is running or is pending.
  Atomic<bool> mAppendRunning;
  // Stable audio and video track time ranges.
  media::TimeIntervals mVideoBufferedRanges;
  media::TimeIntervals mAudioBufferedRanges;
  media::TimeUnit mOfficialGroupEndTimestamp;
  // MediaInfo of the first init segment read.
  MediaInfo mInfo;
};

} // namespace mozilla

#endif /* MOZILLA_TRACKBUFFERSMANAGER_H_ */