MpegParser
public class MpegParser extends BasicPullParser
Fields Summary

Type | Name | Description
---|---|---
boolean | saveOutputFlag |
String | AoutName |
String | VoutName |
FileOutputStream | aout |
FileOutputStream | vout |
boolean | throwOutputFlag |
boolean | hideAudioTracks |
boolean | hideVideoTracks |
static final long | NO_PTS_VAL |
private static final float | EPSILON_PTS |
private static final float | EPSILON_NS |
private static final long | PRE_ROLLING_DELTA_NS |
private static final byte | UNKNOWN_TYPE |
private static final byte | AUDIO_TYPE |
private static final byte | VIDEO_TYPE |
private static final byte | SYS11172_TYPE |
private static final int | AUDIO_TRACK_BUF_SIZE |
private static final int | VIDEO_TRACK_BUF_SIZE |
private static final int | PACK_START_CODE |
private static final int | SYSTEM_HEADER_START_CODE |
private static final int | PACKET_START_CODE_24 |
private static final int | END_CODE |
private static final int | MIN_STREAM_CODE |
private static final int | MAX_STREAM_CODE |
private static final int | PRIVATE_STREAM2_CODE |
private static final int | VIDEO_PICTURE_START_CODE |
private static final int | VIDEO_SEQUENCE_HEADER_CODE |
private static final int | VIDEO_GROUP_START_CODE |
private static final int | MAX_AUDIO_STREAMS |
private static final int | MAX_VIDEO_STREAMS |
private static final int | MAX_NUM_STREAMS |
private static final int | MIN_AUDIO_ID |
private static final int | MAX_AUDIO_ID |
private static final int | MIN_VIDEO_ID |
private static final int | MAX_VIDEO_ID |
private static int | MAX_TRACKS_SUPPORTED |
private static ContentDescriptor[] | supportedFormat |
private PullSourceStream | stream |
private TrackList[] | trackList |
private Track[] | tracks |
private Track[] | videoTracks |
private Track[] | audioTracks |
private int | videoCount |
private int | audioCount |
private int | numSupportedTracks |
private int | numTracks |
private int | numPackets |
private int | initTmpBufLen |
private byte[] | initTmpStreamBuf |
private byte | streamType |
private long | streamContentLength |
private SystemHeader | sysHeader |
private boolean | sysHeaderSeen |
boolean | EOMflag |
boolean | parserErrorFlag |
private boolean | durationInitialized |
private boolean | sysPausedFlag |
private boolean | seekableStreamFlag |
private boolean | randomAccessStreamFlag |
private static JMFSecurity | jmfSecurity |
private static boolean | securityPrivelege |
private Method[] | mSecurity |
private Class[] | clSecurity |
private Object[] | argsSecurity |
private long | startLocation |
private Time | durationNs | content duration in NS
private Time | lastSetPositionTime | last seek time
private long | startPTS | first content PTS
long | currentPTS | last encountered content PTS
long | endPTS | end of content PTS
private long | AVstartTimeNs |
private long | AVcurrentTimeNs |
private long | AVlastTimeNs |
private long | lastAudioNs |
private MpegBufferThread | mpThread |
static int[] | bitrates |
static int[] | samplerates |

Methods Summary

| public void | close()
stop();
flushInnerBuffers();
super.close();
if (mpThread != null)
mpThread.kill();
| private long | convBytesToTimeAV(long bytes)
long time;
if (trackList[0] == null)
return 0;
if (streamType == AUDIO_TYPE) {
if (((Audio)(trackList[0].media)).bitRate == 0) {
time = 0L;
} else {
time = (bytes << 3) / ((Audio)(trackList[0].media)).bitRate;
time *= 1000000L; // for nanoseconds
}
} else { /* VIDEO_TYPE */
if (((Video)(trackList[0].media)).bitRate == 0) {
time = 0L;
} else {
time = (bytes << 3) / ((Video)(trackList[0].media)).bitRate;
time *= 1000000000L; // for nanoseconds
}
}
return time;
| private long | convNanosecondsToPTS(long val)
return (val * 9 / 100000L);
| private long | convPTStoNanoseconds(long val)
return (val * 100000 / 9L);
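The two conversions above encode the MPEG system clock: PTS values are counted in 90 kHz ticks, so ticks = ns * 90,000 / 1,000,000,000 = ns * 9 / 100,000, and the inverse multiplies by 100,000 / 9. A minimal standalone sketch of the same round trip (the class and variable names here are illustrative, not part of the parser):
// Illustrative sketch of the 90 kHz PTS <-> nanosecond conversion used above.
public class PtsClockDemo {
    static long nsToPts(long ns)  { return ns * 9 / 100000L; }   // 90,000 ticks per 1e9 ns
    static long ptsToNs(long pts) { return pts * 100000L / 9; }  // inverse mapping

    public static void main(String[] args) {
        long oneSecondNs = 1000000000L;
        System.out.println(nsToPts(oneSecondNs));   // 90000 (one second of PTS ticks)
        System.out.println(ptsToNs(90000L));        // 1000000000 (back to nanoseconds)
    }
}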
| private long | convTimeToBytesAV(long time)
long bytes;
if (streamType == AUDIO_TYPE) {
bytes = (time >> 3) * ((Audio)(trackList[0].media)).bitRate;
bytes /= 1000000L; // because of nanoseconds
} else { /* VIDEO_TYPE */
bytes = (time >> 3) * ((Video)(trackList[0].media)).bitRate;
bytes /= 1000000000L; // because of nanoseconds
}
return bytes;
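convBytesToTimeAV and convTimeToBytesAV map a byte offset to a media time through the track bitrate (kbit/s for audio, bit/s for video). A worked example of the audio branch, assuming a 128 kbit/s track (the numbers are illustrative, not taken from a stream):
// Illustrative arithmetic for the audio branch of convBytesToTimeAV.
long bytes   = 16000L;                  // bytes read so far
long bitRate = 128L;                    // audio bitrate in kbit/s (assumed)
long timeMs  = (bytes << 3) / bitRate;  // 128000 bits / 128 kbit/s = 1000 ms
long timeNs  = timeMs * 1000000L;       // 1,000,000,000 ns = exactly one second of audio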
| private int | detectStreamType(byte[] streamBuf)
int i=0, code, videoCount=0, audioCount=0;
boolean found=false;
/* Copy each byte from the bitstream into a temporary buffer.
 * If the stream is a system stream, continue from where we got to in the
 * stream and don't use the bytes in the temporary buffer.
 * If the stream is raw MPEG audio/video - just copy the temporary
 * buffer into the single track buffer.
 */
if (streamType != UNKNOWN_TYPE) {
return 0;
}
try {
/* try to look for generic codes */
readBytes(stream, streamBuf, 4);
while ((!found) && (i < streamBuf.length-5)) {
code = ((streamBuf[i] & 0xFF) << 24)
| ((streamBuf[i+1] & 0xFF) << 16)
| ((streamBuf[i+2] & 0xFF) << 8)
| (streamBuf[i+3] & 0xFF);
switch (code) {
case PACK_START_CODE :
/* check what happens right after the pack header ends */
/* byte 0010XXX1 */
i++;
readBytes(stream, streamBuf, i+3, 1); /* read the next byte */
if ((streamBuf[i+3] & (byte)0xF1) == (byte)0x21) {
streamType = SYS11172_TYPE;
found = true;
}
continue; /* not a "real" pack code - we already advanced, keep scanning */
case VIDEO_SEQUENCE_HEADER_CODE :
if (i == 0) { /* first code on the bitstream */
streamType = VIDEO_TYPE;
found = true;
}
case VIDEO_PICTURE_START_CODE :
case VIDEO_GROUP_START_CODE :
videoCount++;
break;
default :
/* check if audio frame sync word and legal layer code */
if ( ((code & 0xFFF00000) == 0xFFF00000) &&
((code & 0x00060000) != 0x00000000) &&
isValidMp3Header(code) ) {
audioCount++;
// if (i == 0) { /* first code on the bitstream */
streamType = AUDIO_TYPE;
found = true;
// }
startLocation = i;
}
/* otherwise, do nothing */
break;
}
i++;
readBytes(stream, streamBuf, i+3, 1); /* read the next byte */
}
} catch (IOException e) {
/* apply some very very simple logic */
if (streamType == UNKNOWN_TYPE) {
if (videoCount > 0) {
streamType = VIDEO_TYPE;
}
else if (audioCount > 0) {
streamType = AUDIO_TYPE;
}
}
updateEOMState();
EOMflag = true;
throw e;
}
/* apply some simple logic */
if (streamType == UNKNOWN_TYPE) {
if (videoCount > 4) {
streamType = VIDEO_TYPE;
}
else if (audioCount > 20) {
streamType = AUDIO_TYPE;
}
}
// I think this applies only to Audio
if (seekableStreamFlag && (streamType == AUDIO_TYPE)) {
int duration = -1;
Seekable s = (Seekable) stream;
long currentPos = s.tell();
// s.seek(0);
s.seek(startLocation);
int frameHeader = readInt(stream);
int h_id = (frameHeader>>>19) & 0x03; // MPEG version
int h_layer = (frameHeader>>>17) & 0x03; // Audio Layer
int h_bitrate = (frameHeader>>>12) & 0x0f;
int h_samplerate = (frameHeader>>>10) & 0x03;
int h_padding = (frameHeader>>> 9) & 0x01;
int h_mode = (frameHeader>>> 6) & 0x03; // Channel mode
int bitrate = bitrates[h_id][h_layer][h_bitrate];
// TODO: check if streamContentLength is not unknown/unbounded
// duration = (int)(streamContentLength/(bitrate * 125));
// Look for Xing VBR header
int offset = (((h_id & 1) == 1) ?
((h_mode != 3) ?
(32+4) :
(17+4))
: ((h_mode != 3) ?
(17+4) :
( 9+4)));
s.seek(offset);
String hdr = readString(stream);
if (hdr.equals("Xing")) {
int flags = readInt(stream);
int frames = readInt(stream);
int bytes = readInt(stream);
int samplerate = samplerates[h_id][h_samplerate];
int frameSize = 144000 * bitrate / samplerate + h_padding;
duration = (frameSize * frames) / (bitrate * 125); // Fixed time per frame
if (duration > 0) {
durationInitialized = true;
durationNs = new Time((double) duration);
}
}
s.seek(currentPos);
}
return (i+4);
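When a Xing header is present the stream is VBR, so detectStreamType derives the duration from the frame count instead of dividing the file length by a single bitrate. A hedged worked example of the same arithmetic (all values below are made up for illustration):
// Illustrative sketch of the Xing duration arithmetic in detectStreamType.
int bitrate    = 128;                                     // kbit/s, from the first frame header
int samplerate = 44100;                                   // Hz
int padding    = 0;
int frames     = 8000;                                    // frame count from the Xing header
int frameSize  = 144000 * bitrate / samplerate + padding; // 417 bytes per MPEG-1 Layer III frame
int duration   = (frameSize * frames) / (bitrate * 125);  // total bytes / (bytes per second)
// duration == 208 seconds (~3.5 minutes) for this made-up file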
| private int | extractAudioInfo(byte[] tmpBuf, com.ibm.media.parser.video.MpegParser$TrackList trackInfo, int dataLen, boolean AVOnlyState)
Audio audio = new Audio();
int br, sr, numBytes;
/* tables for standard MPEG-1 audio */
int samplingFrequencyTable[] = {44100, 48000, 32000};
final short bitrateIndexTableL2[] =
{0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384};
/* bitrate table for layer 3:
* {0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320};
*/
/* tables for MPEG-2 audio extension */
/* int samplingFrequencyTable[] = {22050, 24000, 16000}; */
final short bitrateIndexTableL23Ext[] =
{0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160};
/* bitrate table for layer 1 extension:
{0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256};
*/
numBytes = (AVOnlyState) ? dataLen : (readBytes(stream, tmpBuf, dataLen));
for (int i = (int) startLocation ; i < numBytes-3 ; i++) {
// doesn't handle a header start code that is split across an end-of-packet boundary
if ( (tmpBuf[i] == (byte)0xFF) && ( (tmpBuf[i+1] &(byte)0xF0) == (byte)0xF0) ) {
audio.ID = (tmpBuf[i+1] & 0x0008) >> 3;
audio.layer = 4 - ((tmpBuf[i+1] & 0x0006) >> 1);
audio.protection = (tmpBuf[i+1] & 0x0001);
br = (tmpBuf[i+2] & 0x00F0) >> 4;
sr = (tmpBuf[i+2] & 0x000C) >> 2;
{
if ( (sr < 0) || (sr >= samplingFrequencyTable.length) ) {
throw new BadDataException("Non Standard sample rates not supported");
}
}
audio.mode = (tmpBuf[i+3] & 0x00C0) >> 6;
audio.modeExt = (tmpBuf[i+3] & 0x0030) >> 4;
audio.channels = (audio.mode == 3) ? 1 : 2;
audio.copyright = (tmpBuf[i+3] & 0x0008) >> 3;
audio.original = (tmpBuf[i+3] & 0x0004)>> 2;
audio.emphasis = (tmpBuf[i+3] & 0x0003);
audio.valid = (br != 0x000F);
/* calculate sampling frequency and bitrate values */
if (audio.ID == 1) { /* standard MPEG-1 */
audio.sampleRate = samplingFrequencyTable[sr];
if (audio.layer == 3) {
if (br < 2) {
audio.bitRate = bitrateIndexTableL2[br];
} else if (br == 2) {
audio.bitRate = 40;
} else {
audio.bitRate = bitrateIndexTableL2[br-1];
}
} else if (audio.layer == 2) {
audio.bitRate = bitrateIndexTableL2[br];
} else { /* layer 1 */
audio.bitRate = br << 5;
}
} else { /* extension MPEG-2 */
audio.sampleRate = samplingFrequencyTable[sr]>>1;
if ((audio.layer == 3) || (audio.layer == 2)) {
audio.bitRate = bitrateIndexTableL23Ext[br];
} else { /* layer 1 */
if (br < 9) {
audio.bitRate = bitrateIndexTableL2[br];
} else if (br == 9) {
audio.bitRate = 144;
} else if (br == 10) {
audio.bitRate = bitrateIndexTableL2[br-1];
} else if (br == 11) {
audio.bitRate = 176;
} else {
audio.bitRate = bitrateIndexTableL2[br-2];
}
}
}
/* this is a calculation for one decoded frame length, in bytes */
// if (audio.bitRate < 30) {
// decodeFrameLen = 10 * 1024;
// } else {
// br = (audio.layer == 1) ? 48 : 144;
// decodeFrameLen = (int)(8 * (float)(audio.bitRate * 1000 * br) / audio.sampleRate);
// }
/* num of bytes for a duration of 1 second */
trackInfo.readFrameSize = (audio.bitRate * 1000) >> 3;
trackInfo.infoFlag = true;
trackInfo.media = audio;
break;
}
}
//trackInfo.media.toString();
return numBytes;
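extractAudioInfo slices the four header bytes that follow the 0xFFF sync word into ID, layer, bitrate index, sample-rate index and channel mode. A self-contained sketch of the same bit masks over a hand-built header (0xFF 0xFB 0x90 0x44 decodes to MPEG-1, Layer III, 128 kbit/s, 44.1 kHz, joint stereo; it is an illustrative value, not read from any stream):
// Illustrative decoding of a hand-built MPEG audio frame header.
byte[] h = { (byte)0xFF, (byte)0xFB, (byte)0x90, (byte)0x44 };
int id       = (h[1] & 0x08) >> 3;         // 1 = MPEG-1, 0 = MPEG-2 extension
int layer    = 4 - ((h[1] & 0x06) >> 1);   // 3 = Layer III
int brIndex  = (h[2] & 0xF0) >> 4;         // 9 -> 128 kbit/s for MPEG-1 Layer III
int srIndex  = (h[2] & 0x0C) >> 2;         // 0 -> 44100 Hz
int mode     = (h[3] & 0xC0) >> 6;         // 1 = joint stereo (3 would be single channel)
int channels = (mode == 3) ? 1 : 2;        // 2 channels here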
| private int | extractStreamInfo(byte[] tmpBuf, int streamID, int dataLen, boolean AVOnlyState)
byte stype = UNKNOWN_TYPE;
TrackList trackInfo = trackList[streamID];
int numBytes;
/* check if we need to initialize the buffer and the stream structure */
if (trackInfo.trackType == UNKNOWN_TYPE) {
/* update fields */
stype = (AVOnlyState) ? streamType :
((streamID < MIN_VIDEO_ID) ? AUDIO_TYPE : VIDEO_TYPE);
trackInfo.init(stype);
sysHeader.streamFlags[streamID] = true;
trackInfo.startPTS = currentPTS;
}
/* extract specific audio / video info */
if (stype == AUDIO_TYPE) {
numBytes = extractAudioInfo(tmpBuf, trackInfo, dataLen, AVOnlyState);
} else { /* VIDEO_TYPE */
numBytes = extractVideoInfo(tmpBuf, trackInfo, dataLen, AVOnlyState);
}
if (trackInfo.infoFlag == true) {
if (AVOnlyState) {
tracks[0] = new MediaTrack(trackInfo);
} else {
tracks[numTracks] = new MediaTrack(trackInfo);
numTracks++;
}
}
return numBytes;
| private int | extractVideoInfo(byte[] tmpBuf, com.ibm.media.parser.video.MpegParser$TrackList trackInfo, int dataLen, boolean AVOnlyState)
Video video = new Video();
int i, code, numBytes, pr;
float aspectRatioTable[] =
{0.0f, 1.0f, 0.6735f, 0.7031f, 0.7615f, 0.8055f, 0.8437f,
0.8935f, 0.9375f, 0.9815f, 1.0255f, 1.0695f, 1.1250f,
1.1575f, 1.2015f, 1.0f};
float pictureRateTable[] =
{0.0f, 23.976f, 24.0f, 25.0f, 29.97f, 30.0f, 50.0f, 59.94f,
60.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f, -1.0f};
numBytes = (AVOnlyState) ? dataLen : (readBytes(stream, tmpBuf, dataLen));
for (i = 0 ; i < numBytes-10 ; i++) {
// doesn't handle a header start code that is split across an end-of-packet boundary
// not working in Win, because of padding with the sign bit:
// code = data[i] << 24 | data[i+1] << 16 | data[i+2] << 8 | data [i+3];
code = ((tmpBuf[i] << 24) & 0xFF000000) | ((tmpBuf[i+1] << 16) & 0x00FF0000) |
((tmpBuf[i+2] << 8) & 0x0000FF00) | (tmpBuf[i+3] & 0x000000FF);
if (code == VIDEO_SEQUENCE_HEADER_CODE) {
video.width = (tmpBuf[i+4+0] & 0x00FF) << 4;
video.width |= (((int)tmpBuf[i+4+1]) >> 4) & 0x000F;
video.height = (tmpBuf[i+4+1] & 0x000F) << 8;
video.height |= (tmpBuf[i+4+2] & 0x00FF);
pr = (tmpBuf[i+4+3] & 0x00F0) >> 4;
video.pelAspectRatio = aspectRatioTable[pr];
pr = tmpBuf[i+4+3] & 0x000F;
video.pictureRate = pictureRateTable[pr];
pr = ( (tmpBuf[i+4+4] & 0x00FF) << 10 ) |
( (tmpBuf[i+4+5] & 0x00FF) << 2 ) |
( (tmpBuf[i+4+6] & 0x00C0) >> 6);
video.bitRate = pr * 400; // bitrate in units of 400 bps
if ( (video.pelAspectRatio == 0.0) || (video.pictureRate == 0.0) ) {
throw new BadDataException("video header corrupted");
}
if (video.pictureRate < 23.0) {
trackInfo.readFrameSize = 64 * 1024;
} else {
/* readFrameSize should cover 1 second, but is limited to no more
   than half the video track buffer size (arbitrary) */
trackInfo.readFrameSize = (int)(video.bitRate >> 3);
if (trackInfo.readFrameSize > (VIDEO_TRACK_BUF_SIZE>>1)) {
trackInfo.readFrameSize = VIDEO_TRACK_BUF_SIZE>>1 ;
}
}
trackInfo.infoFlag = true;
trackInfo.media = video;
//trackInfo.media.toString();
break;
}
}
return numBytes;
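extractVideoInfo unpacks the 12-bit width and height, the aspect-ratio and picture-rate indices, and the 18-bit bitrate field (in 400 bps units) that follow the sequence header start code. A hedged sketch of the same slicing over hypothetical header bytes, chosen so they decode to 352x240 at 29.97 fps and 1,152,000 bps:
// Illustrative slicing of a hand-built MPEG-1 sequence header payload.
byte[] b = { 0x16, 0x00, (byte)0xF0, 0x14, 0x02, (byte)0xD0, 0x20 };
int width      = ((b[0] & 0xFF) << 4) | ((b[1] >> 4) & 0x0F);   // 352
int height     = ((b[1] & 0x0F) << 8) | (b[2] & 0xFF);          // 240
int aspectIdx  = (b[3] & 0xF0) >> 4;                            // 1 -> 1.0 (square pixels)
int rateIdx    =  b[3] & 0x0F;                                  // 4 -> 29.97 frames/s
int bitRate400 = ((b[4] & 0xFF) << 10) | ((b[5] & 0xFF) << 2) | ((b[6] & 0xC0) >> 6);
int bitRate    = bitRate400 * 400;                              // 2880 * 400 = 1,152,000 bps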
| void | flushInnerBuffers()
TrackList trackInfo;
for (int i = 0; i < numTracks ; i++) {
if (tracks[i] != null) {
trackInfo = ((MediaTrack)tracks[i]).getTrackInfo();
// Release the wait in copyStreamDataToInnerBuffer.
synchronized (trackInfo.bufQ) {
trackInfo.flushFlag = true;
trackInfo.bufQ.notifyAll();
}
trackInfo.flushBuffer();
}
}
| public javax.media.Time | getDuration()
if (durationInitialized) {
return durationNs;
}
else { // try to update the duration
if (EOMflag) {
// updateEOMState() is already called when EOM was detected
durationInitialized = true ;
}
return durationNs;
}
| private long | getLocation()
return getLocation(stream);
| public javax.media.Time | getMediaTime()
Time mtime;
if (streamType == SYS11172_TYPE) {
if (currentPTS == NO_PTS_VAL) {
mtime = new Time(0L);
} else {
mtime = new Time(convPTStoNanoseconds(currentPTS - startPTS));
}
} else { /* Audio / Video only */
/// AVcurrentTimeNs = convBytesToTimeAV(AVtotalBytesRead);
AVcurrentTimeNs = convBytesToTimeAV(getLocation(stream));
mtime = new Time(AVcurrentTimeNs);
}
return mtime;
| public java.lang.String | getName()
Returns a descriptive name for the plug-in. This is a user-readable string.
return "Parser for MPEG-1 file format";
| private int | getStreamID(byte bval)
return ((bval & 0xFF) - 0xC0);
| public javax.media.protocol.ContentDescriptor[] | getSupportedInputContentDescriptors()
return supportedFormat;
| public javax.media.Track[] | getTracks()
/* check if the tracks are already initialized */
if (streamType == SYS11172_TYPE) {
if ((hideAudioTracks) && (videoTracks != null)) {
return videoTracks;
}
if ((hideVideoTracks) && (audioTracks != null)) {
return audioTracks;
}
}
if (tracks != null) {
return tracks;
}
try {
initTmpBufLen = (AUDIO_TRACK_BUF_SIZE < VIDEO_TRACK_BUF_SIZE) ?
AUDIO_TRACK_BUF_SIZE : VIDEO_TRACK_BUF_SIZE;
initTmpStreamBuf = new byte[initTmpBufLen];
/* detect stream type: Audio only / Video only / interleaved (system) */
initTmpBufLen = detectStreamType(initTmpStreamBuf);
/* extract the tracks information */
switch (streamType) {
case AUDIO_TYPE :
case VIDEO_TYPE :
initTrackAudioVideoOnly();
break;
case SYS11172_TYPE :
initTrackSystemStream();
break;
case UNKNOWN_TYPE :
default :
throw new BadHeaderException("Couldn't detect stream type");
}
// System.out.println("Number of tracks: " + numTracks);
initDuration();
if (saveOutputFlag) {
aout = new FileOutputStream(AoutName);
vout = new FileOutputStream(VoutName);
}
/* activate the inner thread for filling the inner buffers */
if (streamType == SYS11172_TYPE) {
if ( /*securityPrivelege && */ (jmfSecurity != null) ) {
String permission = null;
try {
if (jmfSecurity.getName().startsWith("jmf-security")) {
permission = "thread";
jmfSecurity.requestPermission(mSecurity, clSecurity, argsSecurity,
JMFSecurity.THREAD);
mSecurity[0].invoke(clSecurity[0], argsSecurity[0]);
permission = "thread group";
jmfSecurity.requestPermission(mSecurity, clSecurity, argsSecurity,
JMFSecurity.THREAD_GROUP);
mSecurity[0].invoke(clSecurity[0], argsSecurity[0]);
} else if (jmfSecurity.getName().startsWith("internet")) {
PolicyEngine.checkPermission(PermissionID.THREAD);
PolicyEngine.assertPermission(PermissionID.THREAD);
}
} catch (Throwable e) {
if (JMFSecurityManager.DEBUG) {
System.err.println( "Unable to get " + permission +
" privilege " + e);
}
securityPrivelege = false;
// TODO: Do the right thing if permissions cannot be obtained.
// User should be notified via an event
}
}
if ( (jmfSecurity != null) && (jmfSecurity.getName().startsWith("jdk12"))) {
try {
Constructor cons = jdk12CreateThreadAction.cons;
mpThread = (MpegBufferThread) jdk12.doPrivM.invoke(
jdk12.ac,
new Object[] {
cons.newInstance(
new Object[] {
MpegBufferThread.class,
})});
} catch (Exception e) {
System.err.println("MpegParser: Caught Exception " + e);
}
} else {
mpThread = new MpegBufferThread();
}
if (mpThread != null) {
mpThread.setParser(this);
mpThread.start(); // I don't think you need permission for start
}
if (saveOutputFlag || throwOutputFlag) {
try {
Thread.sleep(30000);
} catch (InterruptedException e) {}
}
}
/* return the resulting tracks */
if (streamType == SYS11172_TYPE) {
if (hideAudioTracks) {
return videoTracks;
}
if (hideVideoTracks) {
return audioTracks;
}
}
return tracks;
} catch (BadDataException e) {
parserErrorFlag = true;
throw new BadHeaderException("Bad data");
} catch (BadHeaderException e) {
parserErrorFlag = true;
throw e;
} catch (IOException e) {
updateEOMState();
EOMflag = true;
throw e;
}
| private void | initDuration()
if (streamContentLength != SourceStream.LENGTH_UNKNOWN) {
if (streamType == SYS11172_TYPE) {
if (randomAccessStreamFlag) {
initDurationSystemSeekableRA();
}
} else {
updateDurationAudioVideoOnly();
}
}
| private void | initDurationSystemSeekableRA()
long baseLocation=0L, ltmp;
int saveNumPackets = numPackets;
boolean saveEOMflag = EOMflag;
baseLocation = ((Seekable)stream).tell();
/* look for the base time */
if (startPTS == NO_PTS_VAL) {
EOMflag = false;
((Seekable)stream).seek(0L);
try {
mpegSystemParseBitstream(true, 64*1024L, false, NO_PTS_VAL);
} catch (Exception e) {
}
}
if (startPTS == NO_PTS_VAL) {
startPTS = 0L;
}
/* look for the EOM time */
if (endPTS == NO_PTS_VAL) {
EOMflag = false;
currentPTS = NO_PTS_VAL;
ltmp = streamContentLength - 128*1024;
if (ltmp < 0) {
ltmp = 0;
}
((Seekable)stream).seek(ltmp);
try {
mpegSystemParseBitstream(true, 128*1024L, false, NO_PTS_VAL);
} catch (Exception e) {
}
endPTS = currentPTS;
}
if (endPTS == NO_PTS_VAL) {
endPTS = startPTS;
}
/* calc the duration */
ltmp = endPTS - startPTS;
if (ltmp < 0) { /* wrong values */
ltmp = 0;
parserErrorFlag = true;
}
durationNs = new Time(convPTStoNanoseconds(ltmp));
lastSetPositionTime = new Time(convPTStoNanoseconds(startPTS));
((Seekable)stream).seek(baseLocation);
EOMflag = saveEOMflag;
numPackets = saveNumPackets;
durationInitialized = true ;
| private void | initTrackAudioVideoOnly()
TrackList trackInfo;
int possibleLen, itmp=0;
numTracks = 1;
tracks = new Track[1];
trackList[0] = new TrackList();
/* fill the whole buffer with data */
possibleLen = (streamType == AUDIO_TYPE) ? AUDIO_TRACK_BUF_SIZE :
VIDEO_TRACK_BUF_SIZE;
if (initTmpBufLen < possibleLen) {
if (possibleLen > initTmpStreamBuf.length) { /* enlarge buffer if needed */
byte[] tmpBuf2 = new byte[possibleLen];
System.arraycopy (initTmpStreamBuf, 0, tmpBuf2, 0, initTmpBufLen);
initTmpStreamBuf = tmpBuf2;
}
try {
itmp = readBytes(stream, initTmpStreamBuf, initTmpBufLen, (possibleLen - initTmpBufLen));
} catch (IOException e) {
updateEOMState();
EOMflag = true;
}
initTmpBufLen += itmp;
}
trackInfo = trackList[0];
do { /* look for the track's embedded info */
extractStreamInfo(initTmpStreamBuf, 0, initTmpBufLen, true);
if (trackInfo.infoFlag) {
break; /* the info was found */
}
/* else - read more data, throw the existing data */
try {
itmp = readBytes(stream, initTmpStreamBuf, possibleLen);
} catch (IOException e) {
updateEOMState();
EOMflag = true;
break;
}
initTmpBufLen = itmp;
} while (trackInfo.infoFlag == false) ;
/* it's a real problem if we didn't detect any valid info till now */
if (trackInfo.infoFlag == false) { /* not a legal stream */
numTracks = 0;
tracks = null;
throw new BadHeaderException("Sorry, No tracks found");
}
/* now, if seekable, move to the beginning of the file */
/// if (seekable....) {
((Seekable)stream).seek(0L);
initTmpBufLen = 0;
EOMflag = false;
/// } else {
/* cannot jump to the file beginning, just 'remember' that the data exists in the buffer
* (maybe we lose some, if we had more than one loop)
*/
/// ............
/// }
| private void | initTrackSystemStream()
int i;
tracks = new Track[MAX_TRACKS_SUPPORTED]; /* temporary allocation */
for (i = 0 ; i < tracks.length ; i++) {
tracks[i] = null;
}
for (i = 0 ; i < trackList.length ; i++) {
trackList[i] = null;
}
/* read first chunks of data */
mpegSystemParseBitstream(false, 0L, true, NO_PTS_VAL);
/* it's a real problem if we didn't detect any existing track till now */
if (numTracks == 0) {
throw new BadHeaderException("Sorry, No tracks found");
}
/* now create a correct length array of tracks */
{
Track[] tmpTracks = new Track[numTracks];
for (i = 0 ; i < numTracks ; i++) {
tmpTracks[i] = tracks[i]; /* copy pointer */
}
tracks = tmpTracks;
}
/* reorganize the order of the tracks in the tracks array */
if (hideAudioTracks) {
TrackList trackInfo;
int v;
/* count video tracks first */
for (i = 0 ; i < numTracks ; i++) {
if (tracks[i] != null) {
trackInfo = ((MediaTrack)tracks[i]).getTrackInfo();
if (trackInfo.trackType == VIDEO_TYPE) {
videoCount++;
}
}
}
if (videoCount == 0) { /* no video tracks */
throw new BadHeaderException("Sorry, No video tracks found");
}
videoTracks = new Track[videoCount];
/* copy pointers to video tracks only */
for (i=0, v=0 ; i < numTracks ; i++) {
if (tracks[i] != null) {
trackInfo = ((MediaTrack)tracks[i]).getTrackInfo();
if (trackInfo.trackType == VIDEO_TYPE) {
videoTracks[v++] = tracks[i];
}
}
}
}
if (hideVideoTracks) {
TrackList trackInfo;
int v;
/* count audio tracks first */
for (i = 0 ; i < numTracks ; i++) {
if (tracks[i] != null) {
trackInfo = ((MediaTrack)tracks[i]).getTrackInfo();
if (trackInfo.trackType == AUDIO_TYPE) {
audioCount++;
}
}
}
if (audioCount == 0) { /* no audio tracks */
throw new BadHeaderException("Sorry, No audio tracks found");
}
audioTracks = new Track[audioCount];
/* copy pointers to audio tracks only */
for (i=0, v=0 ; i < numTracks ; i++) {
if (tracks[i] != null) {
trackInfo = ((MediaTrack)tracks[i]).getTrackInfo();
if (trackInfo.trackType == AUDIO_TYPE) {
audioTracks[v++] = tracks[i];
}
}
}
}
| private boolean | isValidMp3Header(int code)
return
(((code >>> 21) & 0x7ff) == 0x7ff && // sync
((code >>> 19) & 0x3) != 1 && // version
((code >>> 17) & 0x3) != 0 && // layer
((code >>> 12) & 0xf) != 0 && // bit rate
((code >>> 12) & 0xf) != 0xf && // bit rate
((code >>> 10) & 0x3) != 0x3 && // sample rate
(code & 0x3) != 0x2); // emphasis
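These tests reject candidate sync words whose version, layer, bitrate, sample-rate or emphasis fields hold reserved values, so that an arbitrary run of 0xFF bytes is not mistaken for MPEG audio. A short usage sketch from inside the class, with illustrative header values:
// Usage sketch only; the two header values are illustrative.
boolean good = isValidMp3Header(0xFFFB9044);  // true:  MPEG-1, Layer III, 128 kbit/s, 44.1 kHz
boolean bad  = isValidMp3Header(0xFFE00000);  // false: layer bits are 00 (reserved)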
| long | mpegSystemParseBitstream(boolean justLooking, long range, boolean justEnough, long newPTS)
byte bval;
byte[] buf1 = new byte[1];
int code = 0;
boolean read4 = true, packFound = false;
long baseLocation = getLocation(stream);
long lastPacketLocation = baseLocation;
long lastLastPacketLocation = baseLocation;
long loc = baseLocation+4;
long lastCurrentPTS = NO_PTS_VAL;
long savePTS = NO_PTS_VAL;
while ((!sysPausedFlag && !EOMflag) || justLooking || justEnough) {
if (justEnough && !needingMore()) {
break; /* stop if we've gotten enough data */
}
if (justLooking) {
if (getLocation(stream) - baseLocation > range) {
break; /* stop if parsed more than range bytes */
}
if (newPTS != NO_PTS_VAL) { /* check if PTS was found */
if (newPTS < startPTS) {
return (-1L); /* should seek before this point */
}
if (newPTS <= currentPTS) {
if (newPTS == currentPTS) {
return lastPacketLocation;
} else { /* < */
currentPTS = lastCurrentPTS;
return lastLastPacketLocation;
}
}
}
}
if (read4) { /* read 4 bytes of code */
code = readInt(stream, true);
} else { /* read only the next byte */
readBytes(stream, buf1, 1);
code = ((code << 8) & 0xFFFFFF00) | (buf1[0] & 0x00FF);
}
switch (code) {
case PACK_START_CODE :
parsePackHeader();
read4 = true;
packFound = true;
break;
case SYSTEM_HEADER_START_CODE :
parseSystemHeader();
read4 = true;
break;
case END_CODE :
EOMflag = true;
/// ???? if ((lastPTS == NO_PTS_VAL) && (newPTS == NO_PTS_VAL)) {
if (endPTS == NO_PTS_VAL) { /// maybe update always if lastPTS wasn't accurate enough....
endPTS = currentPTS;
}
if ((!justLooking) || (newPTS != NO_PTS_VAL)) {
updateEOMState();
}
break;
default :
/* packet start code (it's only 24 bits) or default (error) */
if ( ((code >> 8) == PACKET_START_CODE_24) &&
((!justLooking) || (packFound & justLooking)) ) {
if (justLooking && (newPTS != NO_PTS_VAL)) {
loc = getLocation(stream);
savePTS = currentPTS;
}
bval = (byte)(code & 0x000000FF);
parsePacket(bval, justLooking);
read4 = true;
/* update for setPosition call */
if (justLooking && (newPTS != NO_PTS_VAL)) {
/* it seems there is no special need to initialize
* the 'lastCurrentPTS' and the 'lastLastPacketLocation'
*/
if (savePTS != currentPTS) { /* new PTS here */
lastCurrentPTS = savePTS;
lastLastPacketLocation = lastPacketLocation;
lastPacketLocation = loc - 4;
}
}
break;
} else { /* another code - shouldn't be */
read4 = false;
break;
}
}
}
/* in general, BadDataException & BadHeaderException can be caught here;
 * they may be thrown because of a wrong start code (if read4 was false....)
 */
return ((EOMflag) ? (-2L) : (-1L));
| boolean | needingMore()
TrackList trackInfo;
for (int i = 0; i < numTracks ; i++) {
if (tracks[i] != null) {
trackInfo = ((MediaTrack)tracks[i]).getTrackInfo();
if (trackInfo.bufQ.canRead()) {
return false;
}
}
}
return true;
| private void | parsePackHeader()
byte[] buf1 = new byte[1];
readBytes(stream, buf1, 1);
if ((buf1[0] & (byte)0xF0) != (byte)0x20) { /* check 0010xxxx */
throw new BadDataException("invalid pack header");
}
if ((buf1[0] & (byte)0x01) != (byte)0x01) { /* check marker bit #0 */
throw new BadDataException("illegal marker bit");
}
/* skip mux_rate */
skip(stream, 7);
/* we decided that there is no point in extracting the value of the SCR
 * (System Clock Reference) here, because there are movies in which
* there isn't any match between the SCR time and the PTS time.
* If we would extract it here, the code would be:
* long scr = ((long)(buf1[0] & 0x000E)) << 29;
* scr = ((scr << 31) >> 31); << make it signed num !? >>
* int itmp = readInt(stream, true);
* if ((itmp & 0x00010001) != 0x00010001) { << check 2 marker bits on #16 and #0 >>
* throw new BadDataException("illegal marker bit");
* }
* int itmp2 = (itmp & 0xFFFE0000) >> 2; << bits 29..15 >>
* scr |= ((long) (itmp2 & 0x3fffffff));
* scr |= (long)((itmp & 0x0000FFFE) >> 1); << bits 14..0 >>
* if (startSCR == NO_PTS_VAL) {
* startSCR = scr;
* }
* currentSCR = scr;
* skip(stream, 3);
*/
| private void | parsePacket(byte bval, boolean justLooking)
int streamID, itmp, itmp2;
int packetLen, count=0, dataSize;
int STDBufSize=0;
int STDBufScale=0;
int numWrittenToTmpBuf = 0;
byte[] tmpBuf = null;
byte[] buf1 = new byte[1];
long pts;
TrackList trackInfo;
/* identify the stream ID */
if (((bval & 0x00FF) < MIN_STREAM_CODE) || ((bval & 0x00FF) > MAX_STREAM_CODE)) {
throw new BadDataException("invalid stream(track) number");
}
streamID = getStreamID(bval);
/* read packet length */
packetLen = readShort(stream, true);
buf1[0] = bval;
/* could check here if there are enough bytes in the input */
if ((buf1[0] & 0x00FF) != PRIVATE_STREAM2_CODE) {
/* skip stuffing bytes */
do {
readBytes(stream, buf1, 1);
count++;
} while (buf1[0] == (byte)0xFF);
/* STD buf details (meanwhile do nothing with this info) */
if ((buf1[0] & (byte)0xC0) == (byte)0x40) {
STDBufScale = ((buf1[0] & 0x0020) >> 5);
STDBufSize = (((int)buf1[0]) & 0x001F) << 8;
readBytes(stream, buf1, 1);
STDBufSize |= (int)buf1[0];
readBytes(stream, buf1, 1);
count += 2;
}
/* PTS - presentation time stamp (for now, do not try to attach it to a specific frame header) */
if ((buf1[0] & (byte)0xE0) == (byte)0x20) {
pts = ((long)(buf1[0] & 0x000E)) << 29;
pts = ((pts << 31) >> 31); /* make it a signed num! (and lose the 33rd bit) */
if ((buf1[0] & (byte)0x01) != (byte)0x01) { /* check marker bit #0 */
throw new BadDataException("illegal marker bit");
}
itmp = readInt(stream, true);
count += 4;
if ((itmp & 0x00010001) != 0x00010001) { /* check 2 marker bits on #16 and #0 */
throw new BadDataException("illegal marker bit");
}
// for bits 29..15 - problem if msb==1: pts |= (long)((itmp & 0xFFFE0000) >> 2);
itmp2 = (itmp & 0xFFFE0000) >> 2; /* bits 29..15 */
pts |= ((long) (itmp2 & 0x3fffffff)); /* bits 29..15 */
pts |= (long)((itmp & 0x0000FFFE) >> 1); /* bits 14..0 */
currentPTS = pts;
if (startPTS == NO_PTS_VAL) {
startPTS = currentPTS;
if ((startPTS > 0) && (startPTS <= EPSILON_PTS)) { /* actually zero time */
startPTS = 0L;
}
}
/* if there is a DTS - decoding time stamp - skip it */
if ((buf1[0] & (byte)0xF0) == (byte)0x30) {
skip(stream, 5);
count += 5;
}
} else if (buf1[0] != (byte)0x0F) { /* else - just validate the 8 bit code */
throw new BadDataException("invalid packet");
}
} /* end of not PRIVATE_STREAM2_CODE */
/* handle the packet data */
dataSize = packetLen - count;
if (justLooking) {
skip(stream, dataSize);
return;
}
/* check if it is reserved or private stream */
if ((streamID < 0) || (streamID >= MAX_NUM_STREAMS)) {
skip(stream, dataSize);
} else { /* (regular) audio/video stream */
if (trackList[streamID] == null) { /* in the first packet of the track */
trackList[streamID] = new TrackList();
}
trackInfo = trackList[streamID];
/* if stream not initialized yet, and/or couldn't extract stream info yet */
if (trackInfo.infoFlag == false) {
tmpBuf = new byte[dataSize];
numWrittenToTmpBuf = extractStreamInfo(tmpBuf, streamID, dataSize, false);
}
if (trackInfo.infoFlag == false) { /* no header found in this packet */
trackList[streamID] = null;
if (numWrittenToTmpBuf < dataSize) {
skip(stream, (dataSize-numWrittenToTmpBuf)); /* skip the rest of the packet */
}
} else {
/* update PTS if needed */
if (startPTS == NO_PTS_VAL) {
trackInfo.startPTS = currentPTS;
}
/* now copy data */
trackInfo.copyStreamDataToInnerBuffer(tmpBuf,
numWrittenToTmpBuf,
dataSize - numWrittenToTmpBuf,
currentPTS);
trackInfo.numPackets++;
if (dataSize > trackInfo.maxPacketSize) {
trackInfo.maxPacketSize = dataSize;
}
}
}
numPackets++;
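The PTS carried in a packet header is a 33-bit value spread over five bytes and interleaved with marker bits; parsePacket folds it into a long in three pieces (bits 32..30 from the first byte, then bits 29..15 and 14..0 from the following 32-bit read). A compact sketch of the same reassembly over hand-built values that decode to exactly 90000 ticks, i.e. one second:
// Illustrative reassembly of a 33-bit PTS, mirroring the bit layout handled in parsePacket.
byte first = 0x21;          // 0010 000 1 : '0010' prefix, PTS bits 32..30 = 000, marker bit
int  next4 = 0x0005BF21;    // PTS bits 29..15, marker, PTS bits 14..0, marker
long pts = ((long)(first & 0x0E)) << 29;                  // bits 32..30
pts |= (long)(((next4 & 0xFFFE0000) >> 2) & 0x3fffffff);  // bits 29..15
pts |= (long)((next4 & 0x0000FFFE) >> 1);                 // bits 14..0
// pts == 90000 (one second at the 90 kHz system clock)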
| private void | parseSystemHeader()
byte bval;
byte[] buf1 = new byte[1];
int itmp, size, scale, streamID, i, len;
short stmp;
/* read header length */
len = readShort(stream, true);
if (sysHeaderSeen) { /* not the first system header */
skip(stream, len);
} else { /* first one - parse it */
sysHeader.resetSystemHeader();
sysHeader.headerLen = len;
/* ...could check if there are enough bytes in the input... */
itmp = readInt(stream, true);
len -= 4;
if ((itmp & 0x80000100) != 0x80000100) { // marker bits on #31 and #8
throw new BadHeaderException("illegal marker bits in system header");
}
sysHeader.rateBound = (itmp & 0x7FFFFE00) >> 9;
sysHeader.audioBound = (itmp & 0x000000FC) >> 2;
sysHeader.fixedFlag = (itmp & 0x00000002) >> 1;
sysHeader.CSPSFlag = itmp & 0x00000001;
readBytes(stream, buf1, 1);
bval = buf1[0];
len -= 1;
if ((bval & (byte)0x20) != (byte)0x20) { // check marker bits #5
throw new BadHeaderException("illegal marker bits in system header");
}
sysHeader.audioLockFlag = (bval & 0x0080) >> 7;
sysHeader.videoLockFlag = (bval & 0x0040) >> 6;
sysHeader.videoBound = bval & 0x001F;
readBytes(stream, buf1, 1);
len -= 1;
sysHeader.reserved = buf1[0];
/* read streams STD info */
while (len > 1) {
readBytes(stream, buf1, 1);
bval = buf1[0];
len -= 1;
if ((bval & (byte)0x80) != (byte)0x80) // end of STD info
break;
/* check if STD refers to all audio streams */
if (bval == (byte)0xb8) {
stmp = readShort(stream, true);
len -= 2;
if ((stmp & 0x0000C000) != 0x0000C000) {
throw new BadHeaderException("illegal marker bits in system header");
}
size = stmp & 0x00001FFF; /* in 128 byte units */
sysHeader.allAudioSTDFlag = true;
for (i = MIN_AUDIO_ID ; i <= MAX_AUDIO_ID ; i++) {
/* do not set the stream_flags[i] field, because
* info isn't track specific
*/
sysHeader.STDBufBoundScale[i] = 0;
sysHeader.STDBufSizeBound[i] = size;
}
}
/* check if STD refers to all video streams */
else if (bval == (byte)0xb9) {
stmp = readShort(stream, true);
len -= 2;
if ((stmp & 0x0000C000) != 0x0000C000) {
throw new BadHeaderException("illegal marker bits in system header");
}
size = stmp & 0x00001FFF; /* in 1024 byte units */
sysHeader.allVideoSTDFlag = true;
for (i = MIN_VIDEO_ID ; i <= MAX_VIDEO_ID ; i++) {
/* do not set the stream_flags[i] field, because
* info isn't track specific
*/
sysHeader.STDBufBoundScale[i] = 1;
sysHeader.STDBufSizeBound[i] = size;
}
}
/* STD information of specific stream/track */
else {
if (((bval & 0x00FF) < MIN_STREAM_CODE)
|| ((bval & 0x00FF) > MAX_STREAM_CODE)) {
throw new BadHeaderException("illegal track number in system header");
}
streamID = getStreamID(bval);
if ((streamID >= 0) && (streamID < MAX_NUM_STREAMS)) {
stmp = readShort(stream, true);
len -= 2;
if ((stmp & 0x0000C000) != 0x0000C000) {
throw new BadHeaderException("illegal marker bits in system header");
}
scale = (stmp & 0x00002000) >> 13;
size = stmp & 0x00001FFF; /* in 1024 byte units */
sysHeader.streamFlags[streamID] = true;
sysHeader.STDBufBoundScale[streamID] = scale;
sysHeader.STDBufSizeBound[streamID] = size;
}
}
}
if (len < 0) {
throw new BadHeaderException("illegal system header");
}
if (len > 0) {
skip(stream, len);
}
sysHeaderSeen = true; /////
}
| void | saveInnerBuffersToFiles()
TrackList trackInfo;
for (int i = 0; i < numTracks ; i++) {
if (tracks[i] != null) {
trackInfo = ((MediaTrack)tracks[i]).getTrackInfo();
trackInfo.saveBufToFile();
}
}
| public javax.media.Time | setPosition(javax.media.Time where, int rounding)
Time newTime = null, preWhere;
if ((! durationInitialized) || (durationNs == Duration.DURATION_UNKNOWN)) {
return new Time(0L);
}
preWhere = new Time(where.getNanoseconds() - PRE_ROLLING_DELTA_NS);
long newTimeNs;
if (streamType == SYS11172_TYPE) {
flushInnerBuffers();
long preWherePTS, wherePTS, newPTS;
preWherePTS = convNanosecondsToPTS(preWhere.getNanoseconds());
preWherePTS += startPTS; /* 'convert' to our PTS values */
wherePTS = convNanosecondsToPTS(where.getNanoseconds());
wherePTS += startPTS; /* 'convert' to our PTS values */
newPTS = setPositionSystemSeekableRA(preWherePTS, wherePTS);
/* newPTS is already in the 'outside world' values */
newTimeNs = convPTStoNanoseconds(newPTS);
lastAudioNs = newTimeNs;
} else { /* Audio/Video only */
newTimeNs = setPositionAudioVideoOnly(preWhere.getNanoseconds(),
where.getNanoseconds());
lastAudioNs = newTimeNs;
}
newTime = new Time(newTimeNs);
// To guarantee that the position time is never the same.
if (lastSetPositionTime.getNanoseconds() == newTimeNs)
newTimeNs++;
lastSetPositionTime = new Time(newTimeNs);
EOMflag = false;
parserErrorFlag = false;
// System.out.println("Set position to: "+(float)where.getSeconds()+" --> "+(float)preWhere.getSeconds()+" --> "+(float)newTime.getSeconds());
// System.out.flush();
return newTime;
| private long | setPositionAudioVideoOnly(long where, long origWhere)
long newTime, pos;
if (origWhere <= AVstartTimeNs + EPSILON_NS) {
newTime = AVstartTimeNs;
((Seekable)stream).seek(0L);
/// AVtotalBytesRead = 0L;
} else if (origWhere >= AVlastTimeNs - EPSILON_NS) {
newTime = AVlastTimeNs - AVstartTimeNs;
((Seekable)stream).seek(streamContentLength);
/// AVtotalBytesRead = streamContentLength;
} else {
newTime = where;
pos = convTimeToBytesAV(where);
((Seekable)stream).seek(pos);
/// AVtotalBytesRead = pos;
}
return newTime;
| private long | setPositionSystemSeekableRA(long wherePTS, long origWherePTS)
long newTime = NO_PTS_VAL;
long lres = -1;
long range, step, pos;
long saveStartPTS = startPTS;
boolean saveEOMflag = EOMflag;
boolean zeroPosFlag = false;
if ((endPTS == NO_PTS_VAL) || (startPTS == NO_PTS_VAL)) {
newTime = 0L;
((Seekable)stream).seek(0L);
} else if (origWherePTS <= startPTS + EPSILON_PTS) {
newTime = 0L;
((Seekable)stream).seek(0L);
} else if (origWherePTS >= endPTS - EPSILON_PTS) {
newTime = endPTS - startPTS;
((Seekable)stream).seek(streamContentLength);
} else if (endPTS - startPTS < EPSILON_PTS) {
newTime = 0L;
((Seekable)stream).seek(0L);
} else {
/* try to guess the location */
pos = (long)(streamContentLength *
((wherePTS - startPTS) / ((float)(endPTS - startPTS))));
step = 20 * 1024L; /* arbitrary */
pos -= step;
if (pos < 0) {
pos = 0;
}
range = streamContentLength - pos; // first range is till the end of media
while (true) {
((Seekable)stream).seek(pos);
currentPTS = NO_PTS_VAL;
startPTS = NO_PTS_VAL;
EOMflag = false;
try {
lres = mpegSystemParseBitstream(true, range, false, wherePTS);
} catch (IOException e) {
lres = -2;
saveEOMflag = true;
} catch (Exception e) {
lres = -1;
}
if (lres >= 0) { /* PTS found */
newTime = currentPTS - saveStartPTS;
((Seekable)stream).seek(lres);
break;
} else if (lres == -2) {
newTime = endPTS - saveStartPTS;
((Seekable)stream).seek(streamContentLength);
break;
} else { /* lres == -1 */
pos -= step;
if (pos <= 0) {
if (zeroPosFlag) { /* couldn't find any. decide on 0L */
newTime = 0L;
((Seekable)stream).seek(0L);
break;
}
pos = 0;
zeroPosFlag = true; /* a flag to prevent loop forever */
}
range = 3 * step;
}
} /* end of while() */
startPTS = saveStartPTS;
EOMflag = saveEOMflag; // redundant, actually
}
return newTime;
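setPositionSystemSeekableRA first guesses a byte offset by linear interpolation between startPTS and endPTS, backs up one 20 KB step so the re-parse can lock onto a pack boundary before the target, and keeps stepping backwards (with a widened range) whenever the target PTS is not found. A worked example of the initial guess only, with made-up stream values:
// Illustrative arithmetic for the initial position guess; all values are made up.
long streamContentLength = 10000000L;        // 10 MB system stream
long startPTS = 3600L;                       // first PTS seen in the content
long endPTS   = startPTS + 60L * 90000L;     // one minute of content (90 kHz clock)
long wherePTS = startPTS + 30L * 90000L;     // seek target: 30 seconds in
long step = 20 * 1024L;                      // arbitrary backward slack, as in the method
long pos  = (long)(streamContentLength *
        ((wherePTS - startPTS) / ((float)(endPTS - startPTS))));
pos -= step;                                 // pos == 4979520, i.e. mid-file minus 20 KB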
| public void | setSource(javax.media.protocol.DataSource source)
super.setSource(source);
stream = (PullSourceStream) streams[0];
streamContentLength = stream.getContentLength(); // can be LENGTH_UNKNOWN
seekableStreamFlag = (streams[0] instanceof Seekable);
if (!seekableStreamFlag)
throw new IncompatibleSourceException("Mpeg Stream is not Seekable");
randomAccessStreamFlag = seekableStreamFlag && ((Seekable) streams[0]).isRandomAccess();
| public void | start()
super.start();
sysPausedFlag = false;
if (mpThread != null)
mpThread.start();
| public void | stop()
super.stop();
sysPausedFlag = true;
if (mpThread != null)
mpThread.pause();
// Release any blocking readFrame.
TrackList info;
for (int i = 0; i < numTracks ; i++) {
if (tracks[i] != null && tracks[i].isEnabled()) {
info = ((MediaTrack)tracks[i]).getTrackInfo();
info.releaseReadFrame();
}
}
| void | throwInnerBuffersContents()
TrackList trackInfo;
for (int i = 0; i < numTracks ; i++) {
if (tracks[i] != null) {
trackInfo = ((MediaTrack)tracks[i]).getTrackInfo();
trackInfo.flushBuffer();
}
}
| private void | updateDurationAudioVideoOnly()
if (durationInitialized) // NEW
return;
AVstartTimeNs = 0L;
AVcurrentTimeNs = 0L;
AVlastTimeNs = convBytesToTimeAV(streamContentLength);
durationNs = new Time(AVlastTimeNs - AVstartTimeNs);
durationInitialized = true ;
| void | updateEOMState()
/* there is a problem with using the getLocation(stream) method:
 * in the case of a non-Seekable stream, the BasicPullParser just
 * counts the number of bytes, without accounting for skipping
 * (the handling there is mistaken!!), with no resetting at EOM, etc...
 * That's why the following calculation works properly for
 * non-Seekable streams IF AND ONLY IF there was no scroll (seek) movement
 * on the first pass until the first EOM!!
 * For Seekable streams there is no problem.
 */
if (! durationInitialized) {
if (streamContentLength == SourceStream.LENGTH_UNKNOWN) {
streamContentLength = getLocation(stream);
}
/* for System - both cases of not random-accessible or stream length unknown */
if (streamType == SYS11172_TYPE) {
if (startPTS == NO_PTS_VAL) {
startPTS = 0L;
}
if (endPTS == NO_PTS_VAL) {
endPTS = currentPTS;
}
if (endPTS == NO_PTS_VAL) {
endPTS = startPTS;
}
long ltmp = endPTS - startPTS;
if (ltmp < 0) { /* wrong values */
ltmp = 0;
parserErrorFlag = true;
}
durationNs = new Time(convPTStoNanoseconds(ltmp));
durationInitialized = true ;
} else { /* for Audio/Video only */
updateDurationAudioVideoOnly();
}
/* update the "global" duration */
////???? sendEvent (new DurationUpdateEvent(this, durationNs));
}
| void | updateTrackEOM()
Generate the EOM buffer and add it to the buffer Q.
for (int i = 0 ; i < trackList.length ; i++) {
if (trackList[i] != null)
trackList[i].generateEOM();
}