AudioSink.java
/**
 * Copyright 2013-2023 JogAmp Community. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 * conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 * of conditions and the following disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY JogAmp Community ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JogAmp Community OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * The views and conclusions contained in the software and documentation are those of the
 * authors and should not be interpreted as representing official policies, either expressed
 * or implied, of JogAmp Community.
 */
package com.jogamp.common.av;

import java.nio.ByteBuffer;

import jogamp.common.Debug;

public interface AudioSink {
    public static final boolean DEBUG = Debug.debug("AudioSink");

    /** Default frame duration in milliseconds, i.e. 1 {@link AudioFrame} per {@value} ms. */
    public static final int DefaultFrameDuration = 32;

    /** Initial audio queue size in milliseconds: {@value} ms, i.e. 16 {@link AudioFrame}s of 32 ms each. See {@link #init(AudioFormat, int, int)}. */
    public static final int DefaultQueueSize = 16 * 32; // 512 ms
    /** Audio queue size w/ video in milliseconds: {@value} ms, i.e. 32 {@link AudioFrame}s of 32 ms each. See {@link #init(AudioFormat, int, int)}. */
    public static final int DefaultQueueSizeWithVideo = 32 * 32; // 1024 ms

    /** Default {@link AudioFormat}, [type PCM, sampleRate 44100, sampleSize 16, channelCount 2, signed, fixedP, !planar, littleEndian]. */
    public static final AudioFormat DefaultFormat = new AudioFormat(44100, 16, 2, true /* signed */,
                                                                    true /* fixed point */, false /* planar */, true /* littleEndian */);
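
    // Usage sketch (illustrative, not normative): the same constructor ordering shown above
    // can describe other PCM layouts, e.g. 48 kHz mono 16-bit signed little-endian.
    // The variable name 'mono48k' is hypothetical.
    //
    //     AudioFormat mono48k = new AudioFormat(48000, 16, 1, true /* signed */,
    //                                           true /* fixed point */, false /* planar */, true /* littleEndian */);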

    /**
     * Abstract audio frame containing multiple audio samples per channel, tracking {@link TimeFrameI} pts and size in bytes.
     * <p>
     * One {@link AudioFrame} may contain multiple pairs of samples per channel,
     * i.e. this {@link AudioFrame} does not limit a frame to be one sample per channel.
     * See its application in {@link AudioSink#enqueueData(int, ByteBuffer, int)}.
     * </p>
     * <p>
     * Implementations may assign actual data to queue frames from streaming, see {@link AudioDataFrame}.
     * </p>
     * @see AudioSink#enqueueData(int, ByteBuffer, int)
     */
    public static abstract class AudioFrame extends TimeFrameI {
        protected int byteSize;

        /**
         * Ctor w/ zero duration, {@link #INVALID_PTS} and zero byte size.
         */
        public AudioFrame() {
            this.byteSize = 0;
        }
        /**
         * Create a new instance.
         * @param pts frame pts in milliseconds
         * @param duration frame duration in milliseconds
         * @param byteCount size in bytes
         */
        public AudioFrame(final int pts, final int duration, final int byteCount) {
            super(pts, duration);
            this.byteSize = byteCount;
        }

        /** Get this frame's size in bytes. */
        public final int getByteSize() { return byteSize; }
        /** Set this frame's size in bytes. */
        public final void setByteSize(final int size) { this.byteSize = size; }

        @Override
        public String toString() {
            return "AudioFrame[pts " + pts + " ms, l " + duration + " ms, " + byteSize + " bytes]";
        }
    }
    /**
     * Audio data frame example of {@link AudioFrame} with actual audio data being attached.
     */
    public static class AudioDataFrame extends AudioFrame {
        protected final ByteBuffer data;

        /**
         * Create a new instance.
         * @param pts frame pts in milliseconds
         * @param duration frame duration in milliseconds
         * @param bytes audio data
         * @param byteCount size in bytes
         */
        public AudioDataFrame(final int pts, final int duration, final ByteBuffer bytes, final int byteCount) {
            super(pts, duration, byteCount);
            if( byteCount > bytes.remaining() ) {
                throw new IllegalArgumentException("Given size "+byteCount+" exceeds remaining bytes in "+bytes+". "+this);
            }
            this.data = bytes;
        }

        /** Get this frame's data. */
        public final ByteBuffer getData() { return data; }

        @Override
        public String toString() {
            return "AudioDataFrame[pts " + pts + " ms, l " + duration + " ms, " + byteSize + " bytes, " + data + "]";
        }
    }
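
    // Usage sketch (illustrative, not normative): wrapping one PCM chunk into an AudioDataFrame,
    // deriving its duration from the format attributes referenced elsewhere in this file
    // (sampleRate, sampleSize, channelCount). The variables 'pcm', 'pts' and 'fmt' are
    // hypothetical caller-side values.
    //
    //     final int bytesPerSample = fmt.sampleSize / 8;                            // e.g. 16 bit -> 2 bytes
    //     final int byteCount      = pcm.remaining();
    //     final int sampleCount    = byteCount / ( bytesPerSample * fmt.channelCount );
    //     final int duration       = (int) ( 1000f * sampleCount / fmt.sampleRate );    // in ms
    //     final AudioFrame frame   = new AudioDataFrame(pts, duration, pcm, byteCount);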

    /**
     * Makes the audio context current on the calling thread, if the implementation utilizes context locking.
     * <p>
     * If the implementation doesn't utilize context locking, this method always returns true.
     * </p>
     * <p>
     * Recursive calls to {@link #makeCurrent(boolean)} and hence {@link #release(boolean)} are supported.
     * </p>
     * <p>
     * At any point in time one context can only be current on one thread,
     * and one thread can only have one context current.
     * </p>
     * @param throwException if true, throws ALException if the context is null, the current thread holds another context or the context failed to be made current natively
     * @return true if the current thread holds no other context and the context was successfully made current, otherwise false
     * @see #release(boolean)
     */
    public boolean makeCurrent(final boolean throwException);

    /**
     * Releases control of this audio context from the current thread, if the implementation utilizes context locking.
     * <p>
     * If the implementation doesn't utilize context locking, this method always returns true.
     * </p>
     * <p>
     * Recursive calls to {@link #makeCurrent(boolean)} and hence {@link #release(boolean)} are supported.
     * </p>
     * @param throwException if true, throws ALException if the context has not been previously made current on the current thread
     *                       or the native release failed.
     * @return true if the context has previously been made current on the current thread and was successfully released, otherwise false
     * @see #makeCurrent(boolean)
     */
    public boolean release(final boolean throwException);
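
    // Usage sketch (illustrative, not normative): guarding a critical section with the
    // context-locking contract above, assuming an AudioSink implementation 'sink' that
    // actually performs context locking (e.g. an OpenAL based sink).
    //
    //     if( sink.makeCurrent(false /* throwException */) ) {
    //         try {
    //             // ... operate on the sink while its context is current ...
    //         } finally {
    //             sink.release(false /* throwException */);
    //         }
    //     }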

    /**
     * Returns the <code>available state</code> of this instance.
     * <p>
     * The <code>available state</code> is affected by this instance's
     * overall availability, i.e. after instantiation,
     * as well as by {@link #destroy()}.
     * </p>
     */
    public boolean isAvailable();

    /** Returns the playback speed. */
    public float getPlaySpeed();

    /**
     * Sets the playback speed.
     * <p>
     * To simplify testing, the play speed is <i>normalized</i>, i.e.
     * <ul>
     *   <li><code>1.0f</code>: if <code>Math.abs(1.0f - rate) &lt; 0.01f</code></li>
     * </ul>
     * </p>
     * @return true if successful, otherwise false, e.g. due to an unsupported value range of the implementation.
     */
    public boolean setPlaySpeed(float s);

    /** Returns the volume. */
    public float getVolume();

    /**
     * Sets the volume [0f..1f].
     * <p>
     * To simplify testing, the volume is <i>normalized</i>, i.e.
     * <ul>
     *   <li><code>0.0f</code>: if <code>Math.abs(v) &lt; 0.01f</code></li>
     *   <li><code>1.0f</code>: if <code>Math.abs(1.0f - v) &lt; 0.01f</code></li>
     * </ul>
     * </p>
     * @return true if successful, otherwise false, e.g. due to an unsupported value range of the implementation.
     */
    public boolean setVolume(float v);
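
    // Usage sketch (illustrative, not normative): due to the normalization described above,
    // values within 0.01 of the boundaries snap to them ('sink' is a hypothetical implementation).
    //
    //     sink.setPlaySpeed(1.004f);   // treated as 1.0f, i.e. normal speed
    //     sink.setVolume(0.009f);      // treated as 0.0f, i.e. muted
    //     sink.setVolume(0.995f);      // treated as 1.0f, i.e. full volume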

    /**
     * Returns the number of sources the used device is capable of mixing.
     * <p>
     * This device attribute is only formally exposed and not used,
     * since an audio sink only utilizes one source.
     * </p>
     * <p>
     * May return <code>-1</code> if undefined.
     * </p>
     * @return the number of mixable sources, or <code>-1</code> if undefined
     */
    public int getSourceCount();

    /**
     * Returns the default (minimum) latency in seconds.
     * <p>
     * Latency might be the reciprocal of the mixer refresh rate [Hz], e.g. a 50 Hz refresh rate yields a 20 ms minimum latency.
     * </p>
     * <p>
     * May return 20 ms for a 50 Hz refresh rate if undefined.
     * </p>
     */
    public float getDefaultLatency();
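
    // Worked example (illustrative): with a mixer refresh rate of 50 Hz, the minimum latency
    // is its reciprocal, 1 / 50 Hz = 0.020 s, matching the 20 ms noted above; getDefaultLatency()
    // would then return 0.020f.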

    /**
     * Returns the native {@link AudioFormat} of this sink.
     * <p>
     * The native format is guaranteed to be supported
     * and shall reflect this sink's most native format,
     * i.e. best performance w/o data conversion.
     * </p>
     * <p>
     * The native format is not impacted by {@link #setChannelLimit(int)}.
     * </p>
     * <p>
     * May return {@link AudioSink#DefaultFormat} if undefined.
     * </p>
     * @see #init(AudioFormat, int, int)
     */
    public AudioFormat getNativeFormat();

    /**
     * Returns the preferred {@link AudioFormat} of this sink.
     * <p>
     * The preferred format is a subset of {@link #getNativeFormat()},
     * impacted by {@link #setChannelLimit(int)}.
     * </p>
     * <p>
     * Known {@link AudioFormat} attributes considered by implementations:
     * <ul>
     *   <li>ALAudioSink: {@link AudioFormat#sampleRate}</li>
     *   <li>ALAudioSink: {@link AudioFormat#channelCount}</li>
     * </ul>
     * </p>
     * @see #getNativeFormat()
     * @see #init(AudioFormat, int, int)
     * @see #setChannelLimit(int)
     * @see #isSupported(AudioFormat)
     */
    public AudioFormat getPreferredFormat();

    /**
     * Limits the maximum number of supported audio channels by user request.
     * <p>
     * Must be set before {@link #getPreferredFormat()}, {@link #isSupported(AudioFormat)} and naturally {@link #init(AudioFormat, int, int)}.
     * </p>
     * <p>
     * May be utilized to enforce 1 channel (mono) downsampling
     * in combination with JOAL/OpenAL to experience spatial 3D position effects.
     * </p>
     * @param cc maximum number of supported audio channels, clipped to [1..{@link #getNativeFormat()}.{@link AudioFormat#channelCount channelCount}]
     * @see #getNativeFormat()
     * @see #getPreferredFormat()
     * @see #isSupported(AudioFormat)
     * @see #init(AudioFormat, int, int)
     */
    public void setChannelLimit(final int cc);
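
    // Usage sketch (illustrative, not normative): enforcing mono output before format negotiation,
    // e.g. so a JOAL/OpenAL backed sink can apply spatial 3D positioning ('sink' is a hypothetical
    // implementation).
    //
    //     sink.setChannelLimit(1);                                   // before getPreferredFormat()/init()
    //     final AudioFormat preferred = sink.getPreferredFormat();   // now limited to 1 channel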

    /**
     * Returns true if the given format is supported by the sink, otherwise false.
     * <p>
     * The {@link #getPreferredFormat()} is used to validate compatibility with the given format.
     * </p>
     * @see #init(AudioFormat, int, int)
     * @see #getPreferredFormat()
     */
    public boolean isSupported(AudioFormat format);

    /**
     * Initializes the sink.
     * <p>
     * Implementations must match the given <code>requestedFormat</code> {@link AudioFormat}.
     * </p>
     * <p>
     * The caller shall validate <code>requestedFormat</code> via {@link #isSupported(AudioFormat)}
     * beforehand and try to find a suitable supported one.
     * {@link #getPreferredFormat()} may help.
     * </p>
     * @param requestedFormat the requested {@link AudioFormat}
     * @param frameDurationHint average {@link AudioFrame} duration hint in milliseconds.
     *                          May assist to adjust latency of the backend, as currently used for JOAL's ALAudioSink.
     *                          A value below 30 ms or {@link #DefaultFrameDuration} may increase the audio processing load.
     *                          Assumed as {@link #DefaultFrameDuration}, if <code>frameDurationHint &lt; 1 ms</code>.
     * @param queueSize queue size in milliseconds, see {@link #DefaultQueueSize}
     * @return true if successful, otherwise false
     * @see #enqueueData(int, ByteBuffer, int)
     * @see #getAvgFrameDuration()
     */
    public boolean init(AudioFormat requestedFormat, int frameDurationHint, int queueSize);
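
    // Usage sketch (illustrative, not normative): negotiating a format and initializing the sink
    // for audio/video playback ('sink' and 'streamFormat' are hypothetical caller-side values).
    //
    //     final AudioFormat fmt = sink.isSupported(streamFormat)
    //                             ? streamFormat : sink.getPreferredFormat();
    //     if( !sink.init(fmt, AudioSink.DefaultFrameDuration, AudioSink.DefaultQueueSizeWithVideo) ) {
    //         // fall back, e.g. to AudioSink.DefaultFormat or a null sink
    //     }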

    /**
     * Returns the {@link AudioFormat} as chosen by {@link #init(AudioFormat, int, int)},
     * i.e. it shall match the <i>requestedFormat</i>.
     */
    public AudioFormat getChosenFormat();

    /**
     * Returns the (minimum) latency in seconds of this sink as set by {@link #init(AudioFormat, int, int)}, see {@link #getDefaultLatency()}.
     * <p>
     * Latency might be the reciprocal of the mixer refresh rate [Hz], e.g. a 50 Hz refresh rate yields a 20 ms minimum latency.
     * </p>
     * @see #init(AudioFormat, int, int)
     */
    public float getLatency();

    /**
     * Returns true, if {@link #play()} has been requested <i>and</i> the sink is still playing,
     * otherwise false.
     */
    public boolean isPlaying();

    /**
     * Plays buffers queued via {@link #enqueueData(int, ByteBuffer, int)} from the current internal position.
     * If no buffers are queued yet or the queue runs empty, playback continues once buffers are enqueued later on.
     * @see #enqueueData(int, ByteBuffer, int)
     * @see #pause()
     */
    public void play();

    /**
     * Pauses playing buffers while keeping the enqueued data, including its internal position.
     * @see #play()
     * @see #flush()
     * @see #enqueueData(int, ByteBuffer, int)
     */
    public void pause();

    /**
     * Flushes all queued buffers, implies {@link #pause()}.
     * <p>
     * {@link #init(AudioFormat, int, int)} must be called first.
     * </p>
     * @see #play()
     * @see #pause()
     * @see #enqueueData(int, ByteBuffer, int)
     * @see #init(AudioFormat, int, int)
     */
    public void flush();
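
    // Usage sketch (illustrative, not normative): typical playback lifecycle of an initialized
    // sink ('sink' is a hypothetical implementation).
    //
    //     sink.play();        // start, or arm playback until data is enqueued
    //     // ... enqueueData(..) while streaming ...
    //     sink.pause();       // keep queued data and its internal position
    //     sink.play();        // resume from the kept position
    //     sink.flush();       // drop all queued data, implies pause()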

    /** Destroys this instance, i.e. closes all streams and devices allocated. */
    public void destroy();

    /**
     * Returns the number of allocated buffers as requested by
     * {@link #init(AudioFormat, int, int)}.
     * @see #init(AudioFormat, int, int)
     */
    public int getFrameCount();

    /**
     * Returns the total number of frames enqueued since {@link #init(AudioFormat, int, int)}.
     * @see #init(AudioFormat, int, int)
     */
    public int getEnqueuedFrameCount();

    /**
     * Returns the current number of frames queued for playing.
     * <p>
     * {@link #init(AudioFormat, int, int)} must be called first.
     * </p>
     * @see #init(AudioFormat, int, int)
     */
    public int getQueuedFrameCount();

    /**
     * Returns the current number of bytes queued for playing.
     * <p>
     * {@link #init(AudioFormat, int, int)} must be called first.
     * </p>
     * @see #init(AudioFormat, int, int)
     */
    public int getQueuedByteCount();

    /**
     * Returns the currently queued frame time in seconds for playing.
     * <p>
     * {@link #init(AudioFormat, int, int)} must be called first.
     * </p>
     * @see #init(AudioFormat, int, int)
     */
    public float getQueuedDuration();

    /**
     * Returns the average frame duration last assessed at {@link #enqueueData(int, ByteBuffer, int)} when the queue was full.
     * <pre>
     *   avgFrameDuration = {@link #getQueuedDuration()} / {@link #getQueuedFrameCount()}
     * </pre>
     */
    public float getAvgFrameDuration();
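
    // Usage sketch (illustrative, not normative): simple back-pressure while feeding the sink,
    // keeping roughly half of the configured queue filled; getAvgFrameDuration() reports
    // queuedDuration / queuedFrameCount as documented above ('sink' is hypothetical).
    //
    //     final float targetSec = AudioSink.DefaultQueueSizeWithVideo / 1000f / 2f;   // 0.512 s
    //     while( sink.getQueuedDuration() > targetSec ) {
    //         Thread.sleep( AudioSink.DefaultFrameDuration );   // wait ~one 32 ms frame; InterruptedException handling omitted
    //     }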

    /**
     * Returns the audio presentation timestamp ({@link PTS}).
     * <p>
     * In case the implementation updates the audio buffer passively, consider using {@link #updateQueue()}.
     * </p>
     * @see #updateQueue()
     * @see #enqueueData(int, ByteBuffer, int)
     */
    public PTS getPTS();

    /**
     * Returns the last buffered audio presentation timestamp (PTS) in milliseconds.
     * @see #getPTS()
     */
    public int getLastBufferedPTS();
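
    // Usage sketch (illustrative, not normative): estimating how much audio is buffered ahead of
    // the current playback position in milliseconds; 'currentAudioPTS' is a hypothetical millisecond
    // value, e.g. derived from getPTS() (see the PTS class for its accessors).
    //
    //     final int bufferedMillis = sink.getLastBufferedPTS() - currentAudioPTS;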

    /**
     * Returns the current number of frames in the sink available for writing.
     * <p>
     * {@link #init(AudioFormat, int, int)} must be called first.
     * </p>
     * @see #init(AudioFormat, int, int)
     */
    public int getFreeFrameCount();

    /**
     * Enqueues <code>byteCount</code> bytes as a new {@link AudioFrame} to this sink.
     * <p>
     * The data must comply with the chosen {@link AudioFormat} as set via {@link #init(AudioFormat, int, int)}.
     * </p>
     * <p>
     * {@link #init(AudioFormat, int, int)} must be called first.
     * </p>
     * @param pts presentation time stamp in milliseconds for the newly enqueued {@link AudioFrame}
     * @param bytes audio data for the newly enqueued {@link AudioFrame}
     * @param byteCount number of bytes to enqueue from <code>bytes</code>
     * @return the enqueued internal {@link AudioFrame}
     * @see #init(AudioFormat, int, int)
     */
    public AudioFrame enqueueData(int pts, ByteBuffer bytes, int byteCount);

    /**
     * Updates the queue beyond {@link #enqueueData(int, ByteBuffer, int)}, including the audio PTS.
     * <p>
     * Useful in case the implementation only updates the buffer passively via {@link #enqueueData(int, ByteBuffer, int) enqueueing data},
     * i.e. when new data is added to the queue, and not on an event basis.
     * </p>
     * @return the updated {@link PTS} instance
     * @see #getPTS()
     * @see #enqueueData(int, ByteBuffer, int)
     */
    public PTS updateQueue();
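
    // Usage sketch (illustrative, not normative): a simple streaming feed loop combining the calls
    // above; 'sink', 'demux' and its hasMore()/nextChunk()/pts() accessors are hypothetical.
    //
    //     sink.play();
    //     while( demux.hasMore() ) {
    //         if( sink.getFreeFrameCount() > 0 ) {
    //             final ByteBuffer chunk = demux.nextChunk();
    //             sink.enqueueData(demux.pts(), chunk, chunk.remaining());
    //         } else {
    //             sink.updateQueue();   // let a passive implementation advance its PTS
    //         }
    //     }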
}