VIDEO: Add seeking capability to QuickTimeDecoder

svn-id: r55203
Matthew Hoops 2011-01-11 17:27:37 +00:00
parent 3bb5a9fe71
commit 3cb7224c10
2 changed files with 179 additions and 85 deletions
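The commit replaces the RewindableVideoDecoder-based rewind() with a SeekableVideoDecoder API (seekToFrame() and seekToTime()). As a rough usage sketch only (the file name and player loop below are illustrative and not part of this commit, and VideoTimestamp(units, scale) is used as it appears in the diff), a caller could now do:

	Graphics::QuickTimeDecoder decoder;

	if (decoder.loadFile("intro.mov")) {
		// Jump straight to frame 100; the decoder finds the preceding keyframe
		// and decodes forward to the requested frame.
		decoder.seekToFrame(100);

		// Or seek by time: 5000 units at a time scale of 1000 units/second = 5 seconds.
		decoder.seekToTime(Graphics::VideoTimestamp(5000, 1000));

		while (!decoder.endOfVideo()) {
			const Graphics::Surface *frame = decoder.decodeNextFrame();
			// ... blit the frame, then wait decoder.getTimeToNextFrame() milliseconds ...
		}
	}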

@@ -144,20 +144,128 @@ PixelFormat QuickTimeDecoder::getPixelFormat() const {
 	return codec->getPixelFormat();
 }
 
-void QuickTimeDecoder::rewind() {
-	VideoDecoder::reset();
-	_nextFrameStartTime = 0;
-
-	// Restart the audio too
-	stopAudio();
-	if (_audioStreamIndex >= 0) {
-		_curAudioChunk = 0;
-		STSDEntry *entry = &_streams[_audioStreamIndex]->stsdEntries[0];
-		_audStream = Audio::makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);
-	}
-
-	startAudio();
-}
+uint32 QuickTimeDecoder::findKeyFrame(uint32 frame) const {
+	for (int i = _streams[_videoStreamIndex]->keyframe_count - 1; i >= 0; i--)
+		if (_streams[_videoStreamIndex]->keyframes[i] <= frame)
+			return _streams[_videoStreamIndex]->keyframes[i];
+
+	// If none found, we'll assume the requested frame is a key frame
+	return frame;
+}
+
+void QuickTimeDecoder::seekToFrame(uint32 frame) {
+	assert(_videoStreamIndex >= 0);
+	assert(frame < _streams[_videoStreamIndex]->nb_frames);
+
+	// Stop all audio (for now)
+	stopAudio();
+
+	// Track down the keyframe
+	_curFrame = findKeyFrame(frame) - 1;
+	while (_curFrame < (int32)frame - 1)
+		decodeNextFrame();
+
+	// Map out the starting point
+	_nextFrameStartTime = 0;
+	uint32 curFrame = 0;
+
+	for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count && curFrame < frame; i++) {
+		for (int32 j = 0; j < _streams[_videoStreamIndex]->stts_data[i].count && curFrame < frame; j++) {
+			curFrame++;
+			_nextFrameStartTime += _streams[_videoStreamIndex]->stts_data[i].duration;
+		}
+	}
+
+	// Adjust the video starting point
+	_startTime = g_system->getMillis() - _nextFrameStartTime;
+
+	// Adjust the audio starting point
+	if (_audioStreamIndex >= 0) {
+		_audioStartOffset = VideoTimestamp(_nextFrameStartTime, _streams[_videoStreamIndex]->time_scale);
+
+		// Re-create the audio stream
+		STSDEntry *entry = &_streams[_audioStreamIndex]->stsdEntries[0];
+		_audStream = Audio::makeQueuingAudioStream(entry->sampleRate, entry->channels == 2);
+
+		// First, we need to track down what audio sample we need
+		uint32 curTime = 0;
+		uint sample = 0;
+		bool done = false;
+
+		for (int32 i = 0; i < _streams[_audioStreamIndex]->stts_count && !done; i++) {
+			for (int32 j = 0; j < _streams[_audioStreamIndex]->stts_data[i].count; j++) {
+				curTime += _streams[_audioStreamIndex]->stts_data[i].duration;
+
+				if (curTime > Graphics::VideoTimestamp(_nextFrameStartTime, _streams[_videoStreamIndex]->time_scale).getUnitsInScale(_streams[_audioStreamIndex]->time_scale)) {
+					done = true;
+					break;
+				}
+
+				sample++;
+			}
+		}
+
+		// Now to track down what chunk it's in
+		_curAudioChunk = 0;
+		uint32 totalSamples = 0;
+
+		for (uint32 i = 0; i < _streams[_audioStreamIndex]->sample_to_chunk_sz; i++, _curAudioChunk++) {
+			int sampleToChunkIndex = -1;
+
+			for (uint32 j = 0; j < _streams[_audioStreamIndex]->sample_to_chunk_sz; j++)
+				if (i >= _streams[_audioStreamIndex]->sample_to_chunk[j].first)
+					sampleToChunkIndex = j;
+
+			assert(sampleToChunkIndex >= 0);
+
+			totalSamples += _streams[_audioStreamIndex]->sample_to_chunk[sampleToChunkIndex].count;
+
+			if (sample < totalSamples) {
+				totalSamples -= _streams[_audioStreamIndex]->sample_to_chunk[sampleToChunkIndex].count;
+				break;
+			}
+		}
+
+		// Reposition the audio stream
+		readNextAudioChunk();
+
+		if (sample != totalSamples) {
+			// HACK: Skip a certain amount of samples from the stream
+			// (There's got to be a better way to do this!)
+			int16 *tempBuffer = new int16[sample - totalSamples];
+			_audStream->readBuffer(tempBuffer, sample - totalSamples);
+			delete[] tempBuffer;
+			debug(3, "Skipping %d audio samples", sample - totalSamples);
+		}
+
+		// Restart the audio
+		startAudio();
+	}
+}
+
+void QuickTimeDecoder::seekToTime(VideoTimestamp time) {
+	// TODO: Audio-only seeking (or really, have QuickTime sounds)
+	if (_videoStreamIndex < 0)
+		error("Audio-only seeking not supported");
+
+	// Convert to the local time scale
+	uint32 localTime = time.getUnitsInScale(_streams[_videoStreamIndex]->time_scale);
+
+	// Try to find the last frame that should have been decoded
+	uint32 frame = 0;
+	uint32 totalDuration = 0;
+	bool done = false;
+
+	for (int32 i = 0; i < _streams[_videoStreamIndex]->stts_count && !done; i++) {
+		for (int32 j = 0; j < _streams[_videoStreamIndex]->stts_data[i].count; j++) {
+			totalDuration += _streams[_videoStreamIndex]->stts_data[i].duration;
+
+			if (localTime < totalDuration) {
+				done = true;
+				break;
+			}
+
+			frame++;
+		}
+	}
+
+	seekToFrame(frame);
+}
 
 Codec *QuickTimeDecoder::createCodec(uint32 codecTag, byte bitsPerPixel) {
 	if (codecTag == MKID_BE('cvid')) {
@@ -285,9 +393,9 @@ bool QuickTimeDecoder::endOfVideo() const {
 
 uint32 QuickTimeDecoder::getElapsedTime() const {
 	if (_audStream)
-		return g_system->getMixer()->getSoundElapsedTime(_audHandle);
+		return g_system->getMixer()->getSoundElapsedTime(_audHandle) + _audioStartOffset.getUnitsInScale(1000);
 
-	return g_system->getMillis() - _startTime;
+	return VideoDecoder::getElapsedTime();
 }
 
 uint32 QuickTimeDecoder::getTimeToNextFrame() const {
@@ -310,7 +418,6 @@ bool QuickTimeDecoder::loadFile(const Common::String &filename) {
 
 	_foundMOOV = false;
 	_numStreams = 0;
-	_partial = 0;
 	_videoStreamIndex = _audioStreamIndex = -1;
 	_startTime = 0;
@@ -348,7 +455,6 @@ bool QuickTimeDecoder::load(Common::SeekableReadStream *stream) {
 	_fd = stream;
 	_foundMOOV = false;
 	_numStreams = 0;
-	_partial = 0;
 	_videoStreamIndex = _audioStreamIndex = -1;
 	_startTime = 0;
@@ -407,6 +513,8 @@ void QuickTimeDecoder::init() {
 			startAudio();
 		}
+
+		_audioStartOffset = VideoTimestamp(0);
 	}
 
 	// Initialize video, if present
@@ -651,7 +759,6 @@ int QuickTimeDecoder::readTRAK(MOVatom atom) {
 	if (!sc)
 		return -1;
 
-	sc->sample_to_chunk_index = -1;
 	sc->codec_type = CODEC_TYPE_MOV_OTHER;
 	sc->start_time = 0; // XXX: check
 	_streams[_numStreams++] = sc;
@@ -985,10 +1092,10 @@ int QuickTimeDecoder::readSTSC(MOVatom atom) {
 		return -1;
 
 	for (uint32 i = 0; i < st->sample_to_chunk_sz; i++) {
-		st->sample_to_chunk[i].first = _fd->readUint32BE();
+		st->sample_to_chunk[i].first = _fd->readUint32BE() - 1;
 		st->sample_to_chunk[i].count = _fd->readUint32BE();
 		st->sample_to_chunk[i].id = _fd->readUint32BE();
-		//printf ("Sample to Chunk[%d]: First = %d, Count = %d\n", i, st->sample_to_chunk[i].first, st->sample_to_chunk[i].count);
+		//warning("Sample to Chunk[%d]: First = %d, Count = %d", i, st->sample_to_chunk[i].first, st->sample_to_chunk[i].count);
 	}
 
 	return 0;
@@ -1010,7 +1117,7 @@ int QuickTimeDecoder::readSTSS(MOVatom atom) {
 		return -1;
 
 	for (uint32 i = 0; i < st->keyframe_count; i++) {
-		st->keyframes[i] = _fd->readUint32BE();
+		st->keyframes[i] = _fd->readUint32BE() - 1; // Adjust here, the frames are based on 1
 		debug(6, "keyframes[%d] = %d", i, st->keyframes[i]);
 	}
@@ -1107,18 +1214,6 @@ int QuickTimeDecoder::readSTCO(MOVatom atom) {
 		st->chunk_offsets[i] = _fd->readUint32BE() - _beginOffset;
 	}
 
-	for (uint32 i = 0; i < _numStreams; i++) {
-		MOVStreamContext *sc2 = _streams[i];
-
-		if (sc2 && sc2->chunk_offsets) {
-			uint32 first = sc2->chunk_offsets[0];
-			uint32 last = sc2->chunk_offsets[sc2->chunk_count - 1];
-
-			if(first >= st->chunk_offsets[st->chunk_count - 1] || last <= st->chunk_offsets[0])
-				_ni = 1;
-		}
-	}
-
 	return 0;
 }
@@ -1175,7 +1270,7 @@ Common::SeekableReadStream *QuickTimeDecoder::getNextFramePacket(uint32 &descId)
 		int32 sampleToChunkIndex = -1;
 
 		for (uint32 j = 0; j < _streams[_videoStreamIndex]->sample_to_chunk_sz; j++)
-			if (i >= _streams[_videoStreamIndex]->sample_to_chunk[j].first - 1)
+			if (i >= _streams[_videoStreamIndex]->sample_to_chunk[j].first)
 				sampleToChunkIndex = j;
 
 		if (sampleToChunkIndex < 0)
@@ -1273,36 +1368,14 @@ uint32 QuickTimeDecoder::getAudioChunkSampleCount(uint chunk) {
 	uint32 sampleCount = 0;
 
 	for (uint32 j = 0; j < _streams[_audioStreamIndex]->sample_to_chunk_sz; j++)
-		if (chunk >= (_streams[_audioStreamIndex]->sample_to_chunk[j].first - 1))
+		if (chunk >= _streams[_audioStreamIndex]->sample_to_chunk[j].first)
 			sampleCount = _streams[_audioStreamIndex]->sample_to_chunk[j].count;
 
 	return sampleCount;
 }
 
-void QuickTimeDecoder::updateAudioBuffer() {
-	if (!_audStream)
-		return;
-
+void QuickTimeDecoder::readNextAudioChunk() {
 	STSDEntry *entry = &_streams[_audioStreamIndex]->stsdEntries[0];
-
-	// Calculate the amount of chunks we need in memory until the next frame
-	uint32 timeToNextFrame = getTimeToNextFrame();
-	uint32 numberOfChunksNeeded = 0;
-	uint32 timeFilled = 0;
-	uint32 curAudioChunk = _curAudioChunk - _audStream->numQueuedStreams();
-
-	for (; timeFilled < timeToNextFrame && curAudioChunk < _streams[_audioStreamIndex]->chunk_count; numberOfChunksNeeded++, curAudioChunk++) {
-		uint32 sampleCount = getAudioChunkSampleCount(curAudioChunk);
-		assert(sampleCount);
-		timeFilled += sampleCount * 1000 / entry->sampleRate;
-	}
-
-	// Add a couple extra to ensure we don't underrun
-	numberOfChunksNeeded += 3;
-
-	// Keep three streams in buffer so that if/when the first two end, it goes right into the next
-	for (; _audStream->numQueuedStreams() < numberOfChunksNeeded && _curAudioChunk < _streams[_audioStreamIndex]->chunk_count; _curAudioChunk++) {
-		Common::MemoryWriteStreamDynamic *wStream = new Common::MemoryWriteStreamDynamic();
+	Common::MemoryWriteStreamDynamic *wStream = new Common::MemoryWriteStreamDynamic();
 
-		_fd->seek(_streams[_audioStreamIndex]->chunk_offsets[_curAudioChunk]);
+	_fd->seek(_streams[_audioStreamIndex]->chunk_offsets[_curAudioChunk]);
@@ -1337,7 +1410,35 @@ void QuickTimeDecoder::updateAudioBuffer() {
-		// Now queue the buffer
-		_audStream->queueAudioStream(createAudioStream(new Common::MemoryReadStream(wStream->getData(), wStream->size(), DisposeAfterUse::YES)));
-		delete wStream;
-	}
+	// Now queue the buffer
+	_audStream->queueAudioStream(createAudioStream(new Common::MemoryReadStream(wStream->getData(), wStream->size(), DisposeAfterUse::YES)));
+	delete wStream;
+
+	_curAudioChunk++;
+}
+
+void QuickTimeDecoder::updateAudioBuffer() {
+	if (!_audStream)
+		return;
+
+	STSDEntry *entry = &_streams[_audioStreamIndex]->stsdEntries[0];
+
+	// Calculate the amount of chunks we need in memory until the next frame
+	uint32 timeToNextFrame = getTimeToNextFrame();
+	uint32 numberOfChunksNeeded = 0;
+	uint32 timeFilled = 0;
+	uint32 curAudioChunk = _curAudioChunk - _audStream->numQueuedStreams();
+
+	for (; timeFilled < timeToNextFrame && curAudioChunk < _streams[_audioStreamIndex]->chunk_count; numberOfChunksNeeded++, curAudioChunk++) {
+		uint32 sampleCount = getAudioChunkSampleCount(curAudioChunk);
+		assert(sampleCount);
+		timeFilled += sampleCount * 1000 / entry->sampleRate;
+	}
+
+	// Add a couple extra to ensure we don't underrun
+	numberOfChunksNeeded += 3;
+
+	// Keep three streams in buffer so that if/when the first two end, it goes right into the next
+	while (_audStream->numQueuedStreams() < numberOfChunksNeeded && _curAudioChunk < _streams[_audioStreamIndex]->chunk_count)
+		readNextAudioChunk();
 }
 
 QuickTimeDecoder::STSDEntry::STSDEntry() {
@@ -1370,7 +1471,6 @@ QuickTimeDecoder::MOVStreamContext::MOVStreamContext() {
 QuickTimeDecoder::MOVStreamContext::~MOVStreamContext() {
 	delete[] chunk_offsets;
 	delete[] stts_data;
-	delete[] ctts_data;
 	delete[] sample_to_chunk;
 	delete[] sample_sizes;
 	delete[] keyframes;
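The seek path above boils down to: look up the last keyframe at or before the target (findKeyFrame(), using the stss table, now stored 0-based), decode forward to the target frame, then rebuild _nextFrameStartTime by walking the stts (time-to-sample) table. A minimal standalone restatement of that stts walk, using ScummVM's uint32 and a hypothetical plain-array entry type in place of the decoder's MOVstts:

	struct TimeToSampleEntry { uint32 count, duration; }; // hypothetical stand-in for MOVstts

	// Sum the durations of the first `frame` samples, as seekToFrame() does,
	// to recover the frame's start time in the track's own time scale.
	uint32 frameStartTime(const TimeToSampleEntry *stts, int sttsCount, uint32 frame) {
		uint32 startTime = 0, curFrame = 0;
		for (int i = 0; i < sttsCount && curFrame < frame; i++)
			for (uint32 j = 0; j < stts[i].count && curFrame < frame; j++, curFrame++)
				startTime += stts[i].duration;
		return startTime;
	}

The second changed file, the class header, follows: it swaps the base class to SeekableVideoDecoder, declares the new methods, and adds the _audioStartOffset member.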

@@ -52,7 +52,7 @@ namespace Common {
 
 namespace Graphics {
 
-class QuickTimeDecoder : public RewindableVideoDecoder {
+class QuickTimeDecoder : public SeekableVideoDecoder {
 public:
 	QuickTimeDecoder();
 	virtual ~QuickTimeDecoder();
@@ -113,8 +113,9 @@ public:
 	uint32 getTimeToNextFrame() const;
 	PixelFormat getPixelFormat() const;
 
-	// RewindableVideoDecoder API
-	void rewind();
+	// SeekableVideoDecoder API
+	void seekToFrame(uint32 frame);
+	void seekToTime(VideoTimestamp time);
 
 private:
 	// This is the file handle from which data is read from. It can be the actual file handle or a decompressed stream.
@@ -176,17 +177,9 @@ private:
 	uint32 *chunk_offsets;
 	int stts_count;
 	MOVstts *stts_data;
-	int ctts_count;
-	MOVstts *ctts_data;
 	int edit_count; /* number of 'edit' (elst atom) */
 	uint32 sample_to_chunk_sz;
 	MOVstsc *sample_to_chunk;
-	int32 sample_to_chunk_index;
-	int sample_to_time_index;
-	uint32 sample_to_time_sample;
-	uint32 sample_to_time_time;
-	int sample_to_ctime_index;
-	int sample_to_ctime_sample;
 	uint32 sample_size;
 	uint32 sample_count;
 	uint32 *sample_sizes;
@@ -197,7 +190,7 @@ private:
 	uint16 width;
 	uint16 height;
-	int codec_type;
+	CodecType codec_type;
 	uint32 stsdEntryCount;
 	STSDEntry *stsdEntries;
@@ -215,9 +208,7 @@ private:
 	bool _foundMOOV;
 	uint32 _timeScale;
 	uint32 _duration;
-	MOVStreamContext *_partial;
 	uint32 _numStreams;
-	int _ni;
 	Common::Rational _scaleFactorX;
 	Common::Rational _scaleFactorY;
 	MOVStreamContext *_streams[20];
@@ -237,15 +228,18 @@ private:
 	void startAudio();
 	void stopAudio();
 	void updateAudioBuffer();
+	void readNextAudioChunk();
 	uint32 getAudioChunkSampleCount(uint chunk);
 
 	int8 _audioStreamIndex;
 	uint _curAudioChunk;
 	Audio::SoundHandle _audHandle;
+	VideoTimestamp _audioStartOffset;
 
 	Codec *createCodec(uint32 codecTag, byte bitsPerPixel);
 	Codec *findDefaultVideoCodec() const;
 	uint32 _nextFrameStartTime;
 	int8 _videoStreamIndex;
+	uint32 findKeyFrame(uint32 frame) const;
 
 	Surface *_scaledSurface;
 	const Surface *scaleSurface(const Surface *frame);
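Note on the stsc change: readSTSC() now stores each entry's first chunk 0-based, so the lookups in getNextFramePacket(), getAudioChunkSampleCount() and seekToFrame() compare against `first` directly instead of `first - 1`. A standalone sketch of that lookup, with a hypothetical stand-in type, mirroring the scan getAudioChunkSampleCount() performs:

	struct SampleToChunkEntry { uint32 first, count, id; }; // hypothetical stand-in for MOVstsc; `first` is 0-based here

	// Samples held by chunk `chunk`: the applicable entry is the last one whose
	// run starts at or before the chunk.
	uint32 samplesInChunk(const SampleToChunkEntry *stsc, uint32 stscCount, uint32 chunk) {
		uint32 sampleCount = 0;
		for (uint32 j = 0; j < stscCount; j++)
			if (chunk >= stsc[j].first)
				sampleCount = stsc[j].count;
		return sampleCount;
	}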