1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound and ASIO) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2011 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.0.10
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-specific mutex wrappers used by RtApi for stream locking.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  // Windows builds use critical sections.
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API for Unix-like systems.
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  // No supported API compiled in (dummy build): the macros must still
  // expand to valid expressions, so use harmless dummy definitions.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
  #define MUTEX_LOCK(A)       abs(*A) // dummy definitions
  #define MUTEX_UNLOCK(A)     abs(*A) // dummy definitions
#endif
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_OSS__)
\r
91 apis.push_back( LINUX_OSS );
\r
93 #if defined(__WINDOWS_ASIO__)
\r
94 apis.push_back( WINDOWS_ASIO );
\r
96 #if defined(__WINDOWS_DS__)
\r
97 apis.push_back( WINDOWS_DS );
\r
99 #if defined(__MACOSX_CORE__)
\r
100 apis.push_back( MACOSX_CORE );
\r
102 #if defined(__RTAUDIO_DUMMY__)
\r
103 apis.push_back( RTAUDIO_DUMMY );
\r
107 void RtAudio :: openRtApi( RtAudio::Api api )
\r
109 #if defined(__UNIX_JACK__)
\r
110 if ( api == UNIX_JACK )
\r
111 rtapi_ = new RtApiJack();
\r
113 #if defined(__LINUX_ALSA__)
\r
114 if ( api == LINUX_ALSA )
\r
115 rtapi_ = new RtApiAlsa();
\r
117 #if defined(__LINUX_OSS__)
\r
118 if ( api == LINUX_OSS )
\r
119 rtapi_ = new RtApiOss();
\r
121 #if defined(__WINDOWS_ASIO__)
\r
122 if ( api == WINDOWS_ASIO )
\r
123 rtapi_ = new RtApiAsio();
\r
125 #if defined(__WINDOWS_DS__)
\r
126 if ( api == WINDOWS_DS )
\r
127 rtapi_ = new RtApiDs();
\r
129 #if defined(__MACOSX_CORE__)
\r
130 if ( api == MACOSX_CORE )
\r
131 rtapi_ = new RtApiCore();
\r
133 #if defined(__RTAUDIO_DUMMY__)
\r
134 if ( api == RTAUDIO_DUMMY )
\r
135 rtapi_ = new RtApiDummy();
\r
139 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
143 if ( api != UNSPECIFIED ) {
\r
144 // Attempt to open the specified API.
\r
146 if ( rtapi_ ) return;
\r
148 // No compiled support for specified API value. Issue a debug
\r
149 // warning and continue as if no API was specified.
\r
150 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
153 // Iterate through the compiled APIs and return as soon as we find
\r
154 // one with at least one device or we reach the end of the list.
\r
155 std::vector< RtAudio::Api > apis;
\r
156 getCompiledApi( apis );
\r
157 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
158 openRtApi( apis[i] );
\r
159 if ( rtapi_->getDeviceCount() ) break;
\r
162 if ( rtapi_ ) return;
\r
164 // It should not be possible to get here because the preprocessor
\r
165 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
166 // API-specific definitions are passed to the compiler. But just in
\r
167 // case something weird happens, we'll print out an error message.
\r
168 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
171 RtAudio :: ~RtAudio() throw()
\r
176 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
177 RtAudio::StreamParameters *inputParameters,
\r
178 RtAudioFormat format, unsigned int sampleRate,
\r
179 unsigned int *bufferFrames,
\r
180 RtAudioCallback callback, void *userData,
\r
181 RtAudio::StreamOptions *options )
\r
183 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
184 sampleRate, bufferFrames, callback,
\r
185 userData, options );
\r
188 // *************************************************** //
\r
190 // Public RtApi definitions (see end of file for
\r
191 // private or protected utility functions).
\r
193 // *************************************************** //
\r
197 stream_.state = STREAM_CLOSED;
\r
198 stream_.mode = UNINITIALIZED;
\r
199 stream_.apiHandle = 0;
\r
200 stream_.userBuffer[0] = 0;
\r
201 stream_.userBuffer[1] = 0;
\r
202 MUTEX_INITIALIZE( &stream_.mutex );
\r
203 showWarnings_ = true;
\r
208 MUTEX_DESTROY( &stream_.mutex );
\r
211 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
212 RtAudio::StreamParameters *iParams,
\r
213 RtAudioFormat format, unsigned int sampleRate,
\r
214 unsigned int *bufferFrames,
\r
215 RtAudioCallback callback, void *userData,
\r
216 RtAudio::StreamOptions *options )
\r
218 if ( stream_.state != STREAM_CLOSED ) {
\r
219 errorText_ = "RtApi::openStream: a stream is already open!";
\r
220 error( RtError::INVALID_USE );
\r
223 if ( oParams && oParams->nChannels < 1 ) {
\r
224 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
225 error( RtError::INVALID_USE );
\r
228 if ( iParams && iParams->nChannels < 1 ) {
\r
229 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
230 error( RtError::INVALID_USE );
\r
233 if ( oParams == NULL && iParams == NULL ) {
\r
234 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
235 error( RtError::INVALID_USE );
\r
238 if ( formatBytes(format) == 0 ) {
\r
239 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
240 error( RtError::INVALID_USE );
\r
243 unsigned int nDevices = getDeviceCount();
\r
244 unsigned int oChannels = 0;
\r
246 oChannels = oParams->nChannels;
\r
247 if ( oParams->deviceId >= nDevices ) {
\r
248 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
249 error( RtError::INVALID_USE );
\r
253 unsigned int iChannels = 0;
\r
255 iChannels = iParams->nChannels;
\r
256 if ( iParams->deviceId >= nDevices ) {
\r
257 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
258 error( RtError::INVALID_USE );
\r
265 if ( oChannels > 0 ) {
\r
267 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
268 sampleRate, format, bufferFrames, options );
\r
269 if ( result == false ) error( RtError::SYSTEM_ERROR );
\r
272 if ( iChannels > 0 ) {
\r
274 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
275 sampleRate, format, bufferFrames, options );
\r
276 if ( result == false ) {
\r
277 if ( oChannels > 0 ) closeStream();
\r
278 error( RtError::SYSTEM_ERROR );
\r
282 stream_.callbackInfo.callback = (void *) callback;
\r
283 stream_.callbackInfo.userData = userData;
\r
285 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
286 stream_.state = STREAM_STOPPED;
\r
289 unsigned int RtApi :: getDefaultInputDevice( void )
\r
291 // Should be implemented in subclasses if possible.
\r
295 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
297 // Should be implemented in subclasses if possible.
\r
301 void RtApi :: closeStream( void )
\r
303 // MUST be implemented in subclasses!
\r
307 bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
308 unsigned int firstChannel, unsigned int sampleRate,
\r
309 RtAudioFormat format, unsigned int *bufferSize,
\r
310 RtAudio::StreamOptions *options )
\r
312 // MUST be implemented in subclasses!
\r
316 void RtApi :: tickStreamTime( void )
\r
318 // Subclasses that do not provide their own implementation of
\r
319 // getStreamTime should call this function once per buffer I/O to
\r
320 // provide basic stream time support.
\r
322 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
324 #if defined( HAVE_GETTIMEOFDAY )
\r
325 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
329 long RtApi :: getStreamLatency( void )
\r
333 long totalLatency = 0;
\r
334 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
335 totalLatency = stream_.latency[0];
\r
336 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
337 totalLatency += stream_.latency[1];
\r
339 return totalLatency;
\r
342 double RtApi :: getStreamTime( void )
\r
346 #if defined( HAVE_GETTIMEOFDAY )
\r
347 // Return a very accurate estimate of the stream time by
\r
348 // adding in the elapsed time since the last tick.
\r
349 struct timeval then;
\r
350 struct timeval now;
\r
352 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
353 return stream_.streamTime;
\r
355 gettimeofday( &now, NULL );
\r
356 then = stream_.lastTickTimestamp;
\r
357 return stream_.streamTime +
\r
358 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
359 (then.tv_sec + 0.000001 * then.tv_usec));
\r
361 return stream_.streamTime;
\r
365 unsigned int RtApi :: getStreamSampleRate( void )
\r
369 return stream_.sampleRate;
\r
373 // *************************************************** //
\r
375 // OS/API-specific methods.
\r
377 // *************************************************** //
\r
379 #if defined(__MACOSX_CORE__)
\r
381 // The OS X CoreAudio API is designed to use a separate callback
\r
382 // procedure for each of its audio devices. A single RtAudio duplex
\r
383 // stream using two different devices is supported here, though it
\r
384 // cannot be guaranteed to always behave correctly because we cannot
\r
385 // synchronize these two callbacks.
\r
387 // A property listener is installed for over/underrun information.
\r
388 // However, no functionality is currently provided to allow property
\r
389 // listeners to trigger user handlers because it is unclear what could
\r
390 // be done if a critical stream parameter (buffer size, sample rate,
\r
391 // device disconnect) notification arrived. The listeners entail
\r
392 // quite a bit of extra code and most likely, a user program wouldn't
\r
393 // be prepared for the result anyway. However, we do provide a flag
\r
394 // to the client callback function to inform of an over/underrun.
\r
396 // A structure to hold various information related to the CoreAudio API
\r
398 struct CoreHandle {
\r
399 AudioDeviceID id[2]; // device ids
\r
400 AudioDeviceIOProcID procId[2];
\r
401 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
402 UInt32 nStreams[2]; // number of streams to use
\r
404 char *deviceBuffer;
\r
405 pthread_cond_t condition;
\r
406 int drainCounter; // Tracks callback counts when draining
\r
407 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
410 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
413 RtApiCore:: RtApiCore()
\r
415 // This is a largely undocumented but absolutely necessary
\r
416 // requirement starting with OS-X 10.6. If not called, queries and
\r
417 // updates to various audio device properties are not handled
\r
419 CFRunLoopRef theRunLoop = NULL;
\r
420 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
421 kAudioObjectPropertyScopeGlobal,
\r
422 kAudioObjectPropertyElementMaster };
\r
423 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
424 if ( result != noErr ) {
\r
425 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
426 error( RtError::WARNING );
\r
430 RtApiCore :: ~RtApiCore()
\r
432 // The subclass destructor gets called before the base class
\r
433 // destructor, so close an existing stream before deallocating
\r
434 // apiDeviceId memory.
\r
435 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
438 unsigned int RtApiCore :: getDeviceCount( void )
\r
440 // Find out how many audio devices there are, if any.
\r
442 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
443 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
444 if ( result != noErr ) {
\r
445 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
446 error( RtError::WARNING );
\r
450 return dataSize / sizeof( AudioDeviceID );
\r
453 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
455 unsigned int nDevices = getDeviceCount();
\r
456 if ( nDevices <= 1 ) return 0;
\r
459 UInt32 dataSize = sizeof( AudioDeviceID );
\r
460 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
461 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
462 if ( result != noErr ) {
\r
463 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
464 error( RtError::WARNING );
\r
468 dataSize *= nDevices;
\r
469 AudioDeviceID deviceList[ nDevices ];
\r
470 property.mSelector = kAudioHardwarePropertyDevices;
\r
471 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
472 if ( result != noErr ) {
\r
473 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
474 error( RtError::WARNING );
\r
478 for ( unsigned int i=0; i<nDevices; i++ )
\r
479 if ( id == deviceList[i] ) return i;
\r
481 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
482 error( RtError::WARNING );
\r
486 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
488 unsigned int nDevices = getDeviceCount();
\r
489 if ( nDevices <= 1 ) return 0;
\r
492 UInt32 dataSize = sizeof( AudioDeviceID );
\r
493 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
494 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
495 if ( result != noErr ) {
\r
496 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
497 error( RtError::WARNING );
\r
501 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
502 AudioDeviceID deviceList[ nDevices ];
\r
503 property.mSelector = kAudioHardwarePropertyDevices;
\r
504 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
505 if ( result != noErr ) {
\r
506 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
507 error( RtError::WARNING );
\r
511 for ( unsigned int i=0; i<nDevices; i++ )
\r
512 if ( id == deviceList[i] ) return i;
\r
514 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
515 error( RtError::WARNING );
\r
519 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
521 RtAudio::DeviceInfo info;
\r
522 info.probed = false;
\r
525 unsigned int nDevices = getDeviceCount();
\r
526 if ( nDevices == 0 ) {
\r
527 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
528 error( RtError::INVALID_USE );
\r
531 if ( device >= nDevices ) {
\r
532 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
533 error( RtError::INVALID_USE );
\r
536 AudioDeviceID deviceList[ nDevices ];
\r
537 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
538 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
539 kAudioObjectPropertyScopeGlobal,
\r
540 kAudioObjectPropertyElementMaster };
\r
541 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
542 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
545 error( RtError::WARNING );
\r
549 AudioDeviceID id = deviceList[ device ];
\r
551 // Get the device name.
\r
553 CFStringRef cfname;
\r
554 dataSize = sizeof( CFStringRef );
\r
555 property.mSelector = kAudioObjectPropertyManufacturer;
\r
556 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
557 if ( result != noErr ) {
\r
558 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
559 errorText_ = errorStream_.str();
\r
560 error( RtError::WARNING );
\r
564 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
565 int length = CFStringGetLength(cfname);
\r
566 char *mname = (char *)malloc(length * 3 + 1);
\r
567 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
568 info.name.append( (const char *)mname, strlen(mname) );
\r
569 info.name.append( ": " );
\r
570 CFRelease( cfname );
\r
573 property.mSelector = kAudioObjectPropertyName;
\r
574 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
575 if ( result != noErr ) {
\r
576 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
577 errorText_ = errorStream_.str();
\r
578 error( RtError::WARNING );
\r
582 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
583 length = CFStringGetLength(cfname);
\r
584 char *name = (char *)malloc(length * 3 + 1);
\r
585 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
586 info.name.append( (const char *)name, strlen(name) );
\r
587 CFRelease( cfname );
\r
590 // Get the output stream "configuration".
\r
591 AudioBufferList *bufferList = nil;
\r
592 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
593 property.mScope = kAudioDevicePropertyScopeOutput;
\r
594 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
596 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
597 if ( result != noErr || dataSize == 0 ) {
\r
598 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
599 errorText_ = errorStream_.str();
\r
600 error( RtError::WARNING );
\r
604 // Allocate the AudioBufferList.
\r
605 bufferList = (AudioBufferList *) malloc( dataSize );
\r
606 if ( bufferList == NULL ) {
\r
607 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
608 error( RtError::WARNING );
\r
612 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
613 if ( result != noErr || dataSize == 0 ) {
\r
614 free( bufferList );
\r
615 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
616 errorText_ = errorStream_.str();
\r
617 error( RtError::WARNING );
\r
621 // Get output channel information.
\r
622 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
623 for ( i=0; i<nStreams; i++ )
\r
624 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
625 free( bufferList );
\r
627 // Get the input stream "configuration".
\r
628 property.mScope = kAudioDevicePropertyScopeInput;
\r
629 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
630 if ( result != noErr || dataSize == 0 ) {
\r
631 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtError::WARNING );
\r
637 // Allocate the AudioBufferList.
\r
638 bufferList = (AudioBufferList *) malloc( dataSize );
\r
639 if ( bufferList == NULL ) {
\r
640 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
641 error( RtError::WARNING );
\r
645 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
646 if (result != noErr || dataSize == 0) {
\r
647 free( bufferList );
\r
648 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
649 errorText_ = errorStream_.str();
\r
650 error( RtError::WARNING );
\r
654 // Get input channel information.
\r
655 nStreams = bufferList->mNumberBuffers;
\r
656 for ( i=0; i<nStreams; i++ )
\r
657 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
658 free( bufferList );
\r
660 // If device opens for both playback and capture, we determine the channels.
\r
661 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
662 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
664 // Probe the device sample rates.
\r
665 bool isInput = false;
\r
666 if ( info.outputChannels == 0 ) isInput = true;
\r
668 // Determine the supported sample rates.
\r
669 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
670 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
671 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
672 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
673 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
674 errorText_ = errorStream_.str();
\r
675 error( RtError::WARNING );
\r
679 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
680 AudioValueRange rangeList[ nRanges ];
\r
681 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
682 if ( result != kAudioHardwareNoError ) {
\r
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
684 errorText_ = errorStream_.str();
\r
685 error( RtError::WARNING );
\r
689 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
690 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
691 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
692 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
695 info.sampleRates.clear();
\r
696 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
697 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
698 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
701 if ( info.sampleRates.size() == 0 ) {
\r
702 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
703 errorText_ = errorStream_.str();
\r
704 error( RtError::WARNING );
\r
708 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
709 // Thus, any other "physical" formats supported by the device are of
\r
710 // no interest to the client.
\r
711 info.nativeFormats = RTAUDIO_FLOAT32;
\r
713 if ( info.outputChannels > 0 )
\r
714 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
715 if ( info.inputChannels > 0 )
\r
716 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
718 info.probed = true;
\r
722 OSStatus callbackHandler( AudioDeviceID inDevice,
\r
723 const AudioTimeStamp* inNow,
\r
724 const AudioBufferList* inInputData,
\r
725 const AudioTimeStamp* inInputTime,
\r
726 AudioBufferList* outOutputData,
\r
727 const AudioTimeStamp* inOutputTime,
\r
728 void* infoPointer )
\r
730 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
732 RtApiCore *object = (RtApiCore *) info->object;
\r
733 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
734 return kAudioHardwareUnspecifiedError;
\r
736 return kAudioHardwareNoError;
\r
739 OSStatus xrunListener( AudioObjectID inDevice,
\r
741 const AudioObjectPropertyAddress properties[],
\r
742 void* handlePointer )
\r
744 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
745 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
746 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
747 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
748 handle->xrun[1] = true;
\r
750 handle->xrun[0] = true;
\r
754 return kAudioHardwareNoError;
\r
757 OSStatus rateListener( AudioObjectID inDevice,
\r
759 const AudioObjectPropertyAddress properties[],
\r
760 void* ratePointer )
\r
763 Float64 *rate = (Float64 *) ratePointer;
\r
764 UInt32 dataSize = sizeof( Float64 );
\r
765 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
766 kAudioObjectPropertyScopeGlobal,
\r
767 kAudioObjectPropertyElementMaster };
\r
768 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
769 return kAudioHardwareNoError;
\r
772 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
773 unsigned int firstChannel, unsigned int sampleRate,
\r
774 RtAudioFormat format, unsigned int *bufferSize,
\r
775 RtAudio::StreamOptions *options )
\r
778 unsigned int nDevices = getDeviceCount();
\r
779 if ( nDevices == 0 ) {
\r
780 // This should not happen because a check is made before this function is called.
\r
781 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
785 if ( device >= nDevices ) {
\r
786 // This should not happen because a check is made before this function is called.
\r
787 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
791 AudioDeviceID deviceList[ nDevices ];
\r
792 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
793 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
794 kAudioObjectPropertyScopeGlobal,
\r
795 kAudioObjectPropertyElementMaster };
\r
796 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
797 0, NULL, &dataSize, (void *) &deviceList );
\r
798 if ( result != noErr ) {
\r
799 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
803 AudioDeviceID id = deviceList[ device ];
\r
805 // Setup for stream mode.
\r
806 bool isInput = false;
\r
807 if ( mode == INPUT ) {
\r
809 property.mScope = kAudioDevicePropertyScopeInput;
\r
812 property.mScope = kAudioDevicePropertyScopeOutput;
\r
814 // Get the stream "configuration".
\r
815 AudioBufferList *bufferList = nil;
\r
817 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
818 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
819 if ( result != noErr || dataSize == 0 ) {
\r
820 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
821 errorText_ = errorStream_.str();
\r
825 // Allocate the AudioBufferList.
\r
826 bufferList = (AudioBufferList *) malloc( dataSize );
\r
827 if ( bufferList == NULL ) {
\r
828 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
832 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
833 if (result != noErr || dataSize == 0) {
\r
834 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
835 errorText_ = errorStream_.str();
\r
839 // Search for one or more streams that contain the desired number of
\r
840 // channels. CoreAudio devices can have an arbitrary number of
\r
841 // streams and each stream can have an arbitrary number of channels.
\r
842 // For each stream, a single buffer of interleaved samples is
\r
843 // provided. RtAudio prefers the use of one stream of interleaved
\r
844 // data or multiple consecutive single-channel streams. However, we
\r
845 // now support multiple consecutive multi-channel streams of
\r
846 // interleaved data as well.
\r
847 UInt32 iStream, offsetCounter = firstChannel;
\r
848 UInt32 nStreams = bufferList->mNumberBuffers;
\r
849 bool monoMode = false;
\r
850 bool foundStream = false;
\r
852 // First check that the device supports the requested number of
\r
854 UInt32 deviceChannels = 0;
\r
855 for ( iStream=0; iStream<nStreams; iStream++ )
\r
856 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
858 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
859 free( bufferList );
\r
860 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
861 errorText_ = errorStream_.str();
\r
865 // Look for a single stream meeting our needs.
\r
866 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
867 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
868 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
869 if ( streamChannels >= channels + offsetCounter ) {
\r
870 firstStream = iStream;
\r
871 channelOffset = offsetCounter;
\r
872 foundStream = true;
\r
875 if ( streamChannels > offsetCounter ) break;
\r
876 offsetCounter -= streamChannels;
\r
879 // If we didn't find a single stream above, then we should be able
\r
880 // to meet the channel specification with multiple streams.
\r
881 if ( foundStream == false ) {
\r
883 offsetCounter = firstChannel;
\r
884 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
885 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
886 if ( streamChannels > offsetCounter ) break;
\r
887 offsetCounter -= streamChannels;
\r
890 firstStream = iStream;
\r
891 channelOffset = offsetCounter;
\r
892 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
894 if ( streamChannels > 1 ) monoMode = false;
\r
895 while ( channelCounter > 0 ) {
\r
896 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
897 if ( streamChannels > 1 ) monoMode = false;
\r
898 channelCounter -= streamChannels;
\r
903 free( bufferList );
\r
905 // Determine the buffer size.
\r
906 AudioValueRange bufferRange;
\r
907 dataSize = sizeof( AudioValueRange );
\r
908 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
909 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
911 if ( result != noErr ) {
\r
912 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
913 errorText_ = errorStream_.str();
\r
917 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
918 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
919 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
921 // Set the buffer size. For multiple streams, I'm assuming we only
\r
922 // need to make this setting for the master channel.
\r
923 UInt32 theSize = (UInt32) *bufferSize;
\r
924 dataSize = sizeof( UInt32 );
\r
925 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
926 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
928 if ( result != noErr ) {
\r
929 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
930 errorText_ = errorStream_.str();
\r
934 // If attempting to setup a duplex stream, the bufferSize parameter
\r
935 // MUST be the same in both directions!
\r
936 *bufferSize = theSize;
\r
937 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
938 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
939 errorText_ = errorStream_.str();
\r
943 stream_.bufferSize = *bufferSize;
\r
944 stream_.nBuffers = 1;
\r
946 // Try to set "hog" mode ... it's not clear to me this is working.
\r
947 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
949 dataSize = sizeof( hog_pid );
\r
950 property.mSelector = kAudioDevicePropertyHogMode;
\r
951 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
952 if ( result != noErr ) {
\r
953 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
954 errorText_ = errorStream_.str();
\r
958 if ( hog_pid != getpid() ) {
\r
959 hog_pid = getpid();
\r
960 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
961 if ( result != noErr ) {
\r
962 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
963 errorText_ = errorStream_.str();
\r
969 // Check and if necessary, change the sample rate for the device.
\r
970 Float64 nominalRate;
\r
971 dataSize = sizeof( Float64 );
\r
972 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
973 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
975 if ( result != noErr ) {
\r
976 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
977 errorText_ = errorStream_.str();
\r
981 // Only change the sample rate if off by more than 1 Hz.
\r
982 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
984 // Set a property listener for the sample rate change
\r
985 Float64 reportedRate = 0.0;
\r
986 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
987 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
988 if ( result != noErr ) {
\r
989 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
990 errorText_ = errorStream_.str();
\r
994 nominalRate = (Float64) sampleRate;
\r
995 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
997 if ( result != noErr ) {
\r
998 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
999 errorText_ = errorStream_.str();
\r
1003 // Now wait until the reported nominal rate is what we just set.
\r
1004 UInt32 microCounter = 0;
\r
1005 while ( reportedRate != nominalRate ) {
\r
1006 microCounter += 5000;
\r
1007 if ( microCounter > 5000000 ) break;
\r
1011 // Remove the property listener.
\r
1012 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1014 if ( microCounter > 5000000 ) {
\r
1015 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1016 errorText_ = errorStream_.str();
\r
1021 // Now set the stream format for all streams. Also, check the
\r
1022 // physical format of the device and change that if necessary.
\r
1023 AudioStreamBasicDescription description;
\r
1024 dataSize = sizeof( AudioStreamBasicDescription );
\r
1025 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1029 errorText_ = errorStream_.str();
\r
1033 // Set the sample rate and data format id. However, only make the
\r
1034 // change if the sample rate is not within 1.0 of the desired
\r
1035 // rate and the format is not linear pcm.
\r
1036 bool updateFormat = false;
\r
1037 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1038 description.mSampleRate = (Float64) sampleRate;
\r
1039 updateFormat = true;
\r
1042 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1043 description.mFormatID = kAudioFormatLinearPCM;
\r
1044 updateFormat = true;
\r
1047 if ( updateFormat ) {
\r
1048 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1051 errorText_ = errorStream_.str();
\r
1056 // Now check the physical format.
\r
1057 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1058 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1059 if ( result != noErr ) {
\r
1060 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1061 errorText_ = errorStream_.str();
\r
1065 //std::cout << "Current physical stream format:" << std::endl;
\r
1066 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1067 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1068 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1069 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1071 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1072 description.mFormatID = kAudioFormatLinearPCM;
\r
1073 //description.mSampleRate = (Float64) sampleRate;
\r
1074 AudioStreamBasicDescription testDescription = description;
\r
1075 UInt32 formatFlags;
\r
1077 // We'll try higher bit rates first and then work our way down.
\r
1078 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1079 formatFlags = description.mFormatFlags | kLinearPCMFormatFlagIsFloat & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1080 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1081 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1082 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1083 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1084 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1085 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1086 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1087 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1088 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1089 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1090 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1092 bool setPhysicalFormat = false;
\r
1093 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1094 testDescription = description;
\r
1095 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1096 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1097 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1098 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1100 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1101 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1102 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1103 if ( result == noErr ) {
\r
1104 setPhysicalFormat = true;
\r
1105 //std::cout << "Updated physical stream format:" << std::endl;
\r
1106 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1107 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1108 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1109 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1114 if ( !setPhysicalFormat ) {
\r
1115 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1116 errorText_ = errorStream_.str();
\r
1119 } // done setting virtual/physical formats.
\r
1121 // Get the stream / device latency.
\r
1123 dataSize = sizeof( UInt32 );
\r
1124 property.mSelector = kAudioDevicePropertyLatency;
\r
1125 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1126 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1127 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1129 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1130 errorText_ = errorStream_.str();
\r
1131 error( RtError::WARNING );
\r
1135 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1136 // always be presented in native-endian format, so we should never
\r
1137 // need to byte swap.
\r
1138 stream_.doByteSwap[mode] = false;
\r
1140 // From the CoreAudio documentation, PCM data must be supplied as
\r
1142 stream_.userFormat = format;
\r
1143 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1145 if ( streamCount == 1 )
\r
1146 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1147 else // multiple streams
\r
1148 stream_.nDeviceChannels[mode] = channels;
\r
1149 stream_.nUserChannels[mode] = channels;
\r
1150 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1151 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1152 else stream_.userInterleaved = true;
\r
1153 stream_.deviceInterleaved[mode] = true;
\r
1154 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1156 // Set flags for buffer conversion.
\r
1157 stream_.doConvertBuffer[mode] = false;
\r
1158 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1159 stream_.doConvertBuffer[mode] = true;
\r
1160 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1161 stream_.doConvertBuffer[mode] = true;
\r
1162 if ( streamCount == 1 ) {
\r
1163 if ( stream_.nUserChannels[mode] > 1 &&
\r
1164 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1165 stream_.doConvertBuffer[mode] = true;
\r
1167 else if ( monoMode && stream_.userInterleaved )
\r
1168 stream_.doConvertBuffer[mode] = true;
\r
1170 // Allocate our CoreHandle structure for the stream.
\r
1171 CoreHandle *handle = 0;
\r
1172 if ( stream_.apiHandle == 0 ) {
\r
1174 handle = new CoreHandle;
\r
1176 catch ( std::bad_alloc& ) {
\r
1177 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1181 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1182 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1185 stream_.apiHandle = (void *) handle;
\r
1188 handle = (CoreHandle *) stream_.apiHandle;
\r
1189 handle->iStream[mode] = firstStream;
\r
1190 handle->nStreams[mode] = streamCount;
\r
1191 handle->id[mode] = id;
\r
1193 // Allocate necessary internal buffers.
\r
1194 unsigned long bufferBytes;
\r
1195 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1196 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1197 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1198 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1199 if ( stream_.userBuffer[mode] == NULL ) {
\r
1200 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1204 // If possible, we will make use of the CoreAudio stream buffers as
\r
1205 // "device buffers". However, we can't do this if using multiple
\r
1207 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1209 bool makeBuffer = true;
\r
1210 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1211 if ( mode == INPUT ) {
\r
1212 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1213 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1214 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1218 if ( makeBuffer ) {
\r
1219 bufferBytes *= *bufferSize;
\r
1220 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1221 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1222 if ( stream_.deviceBuffer == NULL ) {
\r
1223 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1229 stream_.sampleRate = sampleRate;
\r
1230 stream_.device[mode] = device;
\r
1231 stream_.state = STREAM_STOPPED;
\r
1232 stream_.callbackInfo.object = (void *) this;
\r
1234 // Setup the buffer conversion information structure.
\r
1235 if ( stream_.doConvertBuffer[mode] ) {
\r
1236 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1237 else setConvertInfo( mode, channelOffset );
\r
1240 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1241 // Only one callback procedure per device.
\r
1242 stream_.mode = DUPLEX;
\r
1244 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1245 if ( result != noErr ) {
\r
1246 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1247 errorText_ = errorStream_.str();
\r
1250 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1251 stream_.mode = DUPLEX;
\r
1253 stream_.mode = mode;
\r
1256 // Setup the device property listener for over/underload.
\r
1257 property.mSelector = kAudioDeviceProcessorOverload;
\r
1258 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1264 pthread_cond_destroy( &handle->condition );
\r
1266 stream_.apiHandle = 0;
\r
1269 for ( int i=0; i<2; i++ ) {
\r
1270 if ( stream_.userBuffer[i] ) {
\r
1271 free( stream_.userBuffer[i] );
\r
1272 stream_.userBuffer[i] = 0;
\r
1276 if ( stream_.deviceBuffer ) {
\r
1277 free( stream_.deviceBuffer );
\r
1278 stream_.deviceBuffer = 0;
\r
1284 void RtApiCore :: closeStream( void )
\r
1286 if ( stream_.state == STREAM_CLOSED ) {
\r
1287 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1288 error( RtError::WARNING );
\r
1292 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1293 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1294 if ( stream_.state == STREAM_RUNNING )
\r
1295 AudioDeviceStop( handle->id[0], handle->procId[0] );
\r
1296 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1299 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1300 if ( stream_.state == STREAM_RUNNING )
\r
1301 AudioDeviceStop( handle->id[1], handle->procId[1] );
\r
1302 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1305 for ( int i=0; i<2; i++ ) {
\r
1306 if ( stream_.userBuffer[i] ) {
\r
1307 free( stream_.userBuffer[i] );
\r
1308 stream_.userBuffer[i] = 0;
\r
1312 if ( stream_.deviceBuffer ) {
\r
1313 free( stream_.deviceBuffer );
\r
1314 stream_.deviceBuffer = 0;
\r
1317 // Destroy pthread condition variable.
\r
1318 pthread_cond_destroy( &handle->condition );
\r
1320 stream_.apiHandle = 0;
\r
1322 stream_.mode = UNINITIALIZED;
\r
1323 stream_.state = STREAM_CLOSED;
\r
1326 void RtApiCore :: startStream( void )
\r
1329 if ( stream_.state == STREAM_RUNNING ) {
\r
1330 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1331 error( RtError::WARNING );
\r
1335 MUTEX_LOCK( &stream_.mutex );
\r
1337 OSStatus result = noErr;
\r
1338 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1339 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1341 result = AudioDeviceStart( handle->id[0], handle->procId[0] );
\r
1342 if ( result != noErr ) {
\r
1343 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1344 errorText_ = errorStream_.str();
\r
1349 if ( stream_.mode == INPUT ||
\r
1350 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1352 result = AudioDeviceStart( handle->id[1], handle->procId[1] );
\r
1353 if ( result != noErr ) {
\r
1354 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1355 errorText_ = errorStream_.str();
\r
1360 handle->drainCounter = 0;
\r
1361 handle->internalDrain = false;
\r
1362 stream_.state = STREAM_RUNNING;
\r
1365 MUTEX_UNLOCK( &stream_.mutex );
\r
1367 if ( result == noErr ) return;
\r
1368 error( RtError::SYSTEM_ERROR );
\r
1371 void RtApiCore :: stopStream( void )
\r
1374 if ( stream_.state == STREAM_STOPPED ) {
\r
1375 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1376 error( RtError::WARNING );
\r
1380 MUTEX_LOCK( &stream_.mutex );
\r
1382 if ( stream_.state == STREAM_STOPPED ) {
\r
1383 MUTEX_UNLOCK( &stream_.mutex );
\r
1387 OSStatus result = noErr;
\r
1388 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1389 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1391 if ( handle->drainCounter == 0 ) {
\r
1392 handle->drainCounter = 2;
\r
1393 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1396 MUTEX_UNLOCK( &stream_.mutex );
\r
1397 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1398 MUTEX_LOCK( &stream_.mutex );
\r
1399 if ( result != noErr ) {
\r
1400 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1401 errorText_ = errorStream_.str();
\r
1406 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1408 MUTEX_UNLOCK( &stream_.mutex );
\r
1409 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1410 MUTEX_LOCK( &stream_.mutex );
\r
1411 if ( result != noErr ) {
\r
1412 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1413 errorText_ = errorStream_.str();
\r
1418 stream_.state = STREAM_STOPPED;
\r
1421 MUTEX_UNLOCK( &stream_.mutex );
\r
1423 if ( result == noErr ) return;
\r
1424 error( RtError::SYSTEM_ERROR );
\r
1427 void RtApiCore :: abortStream( void )
\r
1430 if ( stream_.state == STREAM_STOPPED ) {
\r
1431 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1432 error( RtError::WARNING );
\r
1436 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1437 handle->drainCounter = 2;
\r
1442 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1443 const AudioBufferList *inBufferList,
\r
1444 const AudioBufferList *outBufferList )
\r
1446 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
1447 if ( stream_.state == STREAM_CLOSED ) {
\r
1448 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1449 error( RtError::WARNING );
\r
1453 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1454 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1456 // Check if we were draining the stream and signal is finished.
\r
1457 if ( handle->drainCounter > 3 ) {
\r
1458 if ( handle->internalDrain == true )
\r
1460 else // external call to stopStream()
\r
1461 pthread_cond_signal( &handle->condition );
\r
1465 MUTEX_LOCK( &stream_.mutex );
\r
1467 // The state might change while waiting on a mutex.
\r
1468 if ( stream_.state == STREAM_STOPPED ) {
\r
1469 MUTEX_UNLOCK( &stream_.mutex );
\r
1473 AudioDeviceID outputDevice = handle->id[0];
\r
1475 // Invoke user callback to get fresh output data UNLESS we are
\r
1476 // draining stream or duplex mode AND the input/output devices are
\r
1477 // different AND this function is called for the input device.
\r
1478 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1479 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1480 double streamTime = getStreamTime();
\r
1481 RtAudioStreamStatus status = 0;
\r
1482 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1483 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1484 handle->xrun[0] = false;
\r
1486 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1487 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1488 handle->xrun[1] = false;
\r
1491 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1492 stream_.bufferSize, streamTime, status, info->userData );
\r
1493 if ( handle->drainCounter == 2 ) {
\r
1494 MUTEX_UNLOCK( &stream_.mutex );
\r
1498 else if ( handle->drainCounter == 1 )
\r
1499 handle->internalDrain = true;
\r
1502 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1504 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1506 if ( handle->nStreams[0] == 1 ) {
\r
1507 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1509 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1511 else { // fill multiple streams with zeros
\r
1512 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1513 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1515 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1519 else if ( handle->nStreams[0] == 1 ) {
\r
1520 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1521 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1522 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1524 else { // copy from user buffer
\r
1525 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1526 stream_.userBuffer[0],
\r
1527 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1530 else { // fill multiple streams
\r
1531 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1532 if ( stream_.doConvertBuffer[0] ) {
\r
1533 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1534 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1537 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1538 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1539 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1540 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1541 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1544 else { // fill multiple multi-channel streams with interleaved data
\r
1545 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1546 Float32 *out, *in;
\r
1548 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1549 UInt32 inChannels = stream_.nUserChannels[0];
\r
1550 if ( stream_.doConvertBuffer[0] ) {
\r
1551 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1552 inChannels = stream_.nDeviceChannels[0];
\r
1555 if ( inInterleaved ) inOffset = 1;
\r
1556 else inOffset = stream_.bufferSize;
\r
1558 channelsLeft = inChannels;
\r
1559 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1561 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1562 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1565 // Account for possible channel offset in first stream
\r
1566 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1567 streamChannels -= stream_.channelOffset[0];
\r
1568 outJump = stream_.channelOffset[0];
\r
1572 // Account for possible unfilled channels at end of the last stream
\r
1573 if ( streamChannels > channelsLeft ) {
\r
1574 outJump = streamChannels - channelsLeft;
\r
1575 streamChannels = channelsLeft;
\r
1578 // Determine input buffer offsets and skips
\r
1579 if ( inInterleaved ) {
\r
1580 inJump = inChannels;
\r
1581 in += inChannels - channelsLeft;
\r
1585 in += (inChannels - channelsLeft) * inOffset;
\r
1588 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1589 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1590 *out++ = in[j*inOffset];
\r
1595 channelsLeft -= streamChannels;
\r
1600 if ( handle->drainCounter ) {
\r
1601 handle->drainCounter++;
\r
1606 AudioDeviceID inputDevice;
\r
1607 inputDevice = handle->id[1];
\r
1608 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1610 if ( handle->nStreams[1] == 1 ) {
\r
1611 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1612 convertBuffer( stream_.userBuffer[1],
\r
1613 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1614 stream_.convertInfo[1] );
\r
1616 else { // copy to user buffer
\r
1617 memcpy( stream_.userBuffer[1],
\r
1618 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1619 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1622 else { // read from multiple streams
\r
1623 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1624 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1626 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1627 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1628 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1629 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1630 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1633 else { // read from multiple multi-channel streams
\r
1634 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1635 Float32 *out, *in;
\r
1637 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1638 UInt32 outChannels = stream_.nUserChannels[1];
\r
1639 if ( stream_.doConvertBuffer[1] ) {
\r
1640 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1641 outChannels = stream_.nDeviceChannels[1];
\r
1644 if ( outInterleaved ) outOffset = 1;
\r
1645 else outOffset = stream_.bufferSize;
\r
1647 channelsLeft = outChannels;
\r
1648 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1650 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1651 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1654 // Account for possible channel offset in first stream
\r
1655 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1656 streamChannels -= stream_.channelOffset[1];
\r
1657 inJump = stream_.channelOffset[1];
\r
1661 // Account for possible unread channels at end of the last stream
\r
1662 if ( streamChannels > channelsLeft ) {
\r
1663 inJump = streamChannels - channelsLeft;
\r
1664 streamChannels = channelsLeft;
\r
1667 // Determine output buffer offsets and skips
\r
1668 if ( outInterleaved ) {
\r
1669 outJump = outChannels;
\r
1670 out += outChannels - channelsLeft;
\r
1674 out += (outChannels - channelsLeft) * outOffset;
\r
1677 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1678 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1679 out[j*outOffset] = *in++;
\r
1684 channelsLeft -= streamChannels;
\r
1688 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1689 convertBuffer( stream_.userBuffer[1],
\r
1690 stream_.deviceBuffer,
\r
1691 stream_.convertInfo[1] );
\r
1697 MUTEX_UNLOCK( &stream_.mutex );
\r
1699 RtApi::tickStreamTime();
\r
1703 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1707 case kAudioHardwareNotRunningError:
\r
1708 return "kAudioHardwareNotRunningError";
\r
1710 case kAudioHardwareUnspecifiedError:
\r
1711 return "kAudioHardwareUnspecifiedError";
\r
1713 case kAudioHardwareUnknownPropertyError:
\r
1714 return "kAudioHardwareUnknownPropertyError";
\r
1716 case kAudioHardwareBadPropertySizeError:
\r
1717 return "kAudioHardwareBadPropertySizeError";
\r
1719 case kAudioHardwareIllegalOperationError:
\r
1720 return "kAudioHardwareIllegalOperationError";
\r
1722 case kAudioHardwareBadObjectError:
\r
1723 return "kAudioHardwareBadObjectError";
\r
1725 case kAudioHardwareBadDeviceError:
\r
1726 return "kAudioHardwareBadDeviceError";
\r
1728 case kAudioHardwareBadStreamError:
\r
1729 return "kAudioHardwareBadStreamError";
\r
1731 case kAudioHardwareUnsupportedOperationError:
\r
1732 return "kAudioHardwareUnsupportedOperationError";
\r
1734 case kAudioDeviceUnsupportedFormatError:
\r
1735 return "kAudioDeviceUnsupportedFormatError";
\r
1737 case kAudioDevicePermissionsError:
\r
1738 return "kAudioDevicePermissionsError";
\r
1741 return "CoreAudio unknown error";
\r
1745 //******************** End of __MACOSX_CORE__ *********************//
\r
1748 #if defined(__UNIX_JACK__)
\r
1750 // JACK is a low-latency audio server, originally written for the
\r
1751 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1752 // connect a number of different applications to an audio device, as
\r
1753 // well as allowing them to share audio between themselves.
\r
1755 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1756 // have ports connected to the server. The JACK server is typically
\r
1757 // started in a terminal as follows:
\r
1759 // .jackd -d alsa -d hw:0
\r
1761 // or through an interface program such as qjackctl. Many of the
\r
1762 // parameters normally set for a stream are fixed by the JACK server
\r
1763 // and can be specified when the JACK server is started. In
\r
1766 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1768 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1769 // frames, and number of buffers = 4. Once the server is running, it
\r
1770 // is not possible to override these values. If the values are not
\r
1771 // specified in the command-line, the JACK server uses default values.
\r
1773 // The JACK server does not have to be running when an instance of
\r
1774 // RtApiJack is created, though the function getDeviceCount() will
\r
1775 // report 0 devices found until JACK has been started. When no
\r
1776 // devices are available (i.e., the JACK server is not running), a
\r
1777 // stream cannot be opened.
\r
1779 #include <jack/jack.h>
\r
1780 #include <unistd.h>
\r
1783 // A structure to hold various information related to the Jack API
\r
1784 // implementation.
\r
1785 struct JackHandle {
\r
1786 jack_client_t *client;
\r
1787 jack_port_t **ports[2];
\r
1788 std::string deviceName[2];
\r
1790 pthread_cond_t condition;
\r
1791 int drainCounter; // Tracks callback counts when draining
\r
1792 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1795 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1798 ThreadHandle threadId;
\r
1799 void jackSilentError( const char * ) {};
\r
1801 RtApiJack :: RtApiJack()
\r
1803 // Nothing to do here.
\r
1804 #if !defined(__RTAUDIO_DEBUG__)
\r
1805 // Turn off Jack's internal error reporting.
\r
1806 jack_set_error_function( &jackSilentError );
\r
1810 RtApiJack :: ~RtApiJack()
\r
1812 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1815 unsigned int RtApiJack :: getDeviceCount( void )
\r
1817 // See if we can become a jack client.
\r
1818 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1819 jack_status_t *status = NULL;
\r
1820 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1821 if ( client == 0 ) return 0;
\r
1823 const char **ports;
\r
1824 std::string port, previousPort;
\r
1825 unsigned int nChannels = 0, nDevices = 0;
\r
1826 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1828 // Parse the port names up to the first colon (:).
\r
1829 size_t iColon = 0;
\r
1831 port = (char *) ports[ nChannels ];
\r
1832 iColon = port.find(":");
\r
1833 if ( iColon != std::string::npos ) {
\r
1834 port = port.substr( 0, iColon + 1 );
\r
1835 if ( port != previousPort ) {
\r
1837 previousPort = port;
\r
1840 } while ( ports[++nChannels] );
\r
1844 jack_client_close( client );
\r
1848 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1850 RtAudio::DeviceInfo info;
\r
1851 info.probed = false;
\r
1853 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1854 jack_status_t *status = NULL;
\r
1855 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1856 if ( client == 0 ) {
\r
1857 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1858 error( RtError::WARNING );
\r
1862 const char **ports;
\r
1863 std::string port, previousPort;
\r
1864 unsigned int nPorts = 0, nDevices = 0;
\r
1865 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1867 // Parse the port names up to the first colon (:).
\r
1868 size_t iColon = 0;
\r
1870 port = (char *) ports[ nPorts ];
\r
1871 iColon = port.find(":");
\r
1872 if ( iColon != std::string::npos ) {
\r
1873 port = port.substr( 0, iColon );
\r
1874 if ( port != previousPort ) {
\r
1875 if ( nDevices == device ) info.name = port;
\r
1877 previousPort = port;
\r
1880 } while ( ports[++nPorts] );
\r
1884 if ( device >= nDevices ) {
\r
1885 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1886 error( RtError::INVALID_USE );
\r
1889 // Get the current jack server sample rate.
\r
1890 info.sampleRates.clear();
\r
1891 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1893 // Count the available ports containing the client name as device
\r
1894 // channels. Jack "input ports" equal RtAudio output channels.
\r
1895 unsigned int nChannels = 0;
\r
1896 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1898 while ( ports[ nChannels ] ) nChannels++;
\r
1900 info.outputChannels = nChannels;
\r
1903 // Jack "output ports" equal RtAudio input channels.
\r
1905 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1907 while ( ports[ nChannels ] ) nChannels++;
\r
1909 info.inputChannels = nChannels;
\r
1912 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1913 jack_client_close(client);
\r
1914 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1915 error( RtError::WARNING );
\r
1919 // If device opens for both playback and capture, we determine the channels.
\r
1920 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1921 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1923 // Jack always uses 32-bit floats.
\r
1924 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1926 // Jack doesn't provide default devices so we'll use the first available one.
\r
1927 if ( device == 0 && info.outputChannels > 0 )
\r
1928 info.isDefaultOutput = true;
\r
1929 if ( device == 0 && info.inputChannels > 0 )
\r
1930 info.isDefaultInput = true;
\r
1932 jack_client_close(client);
\r
1933 info.probed = true;
\r
1937 int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1939 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1941 RtApiJack *object = (RtApiJack *) info->object;
\r
1942 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1947 // This function will be called by a spawned thread when the Jack
\r
1948 // server signals that it is shutting down. It is necessary to handle
\r
1949 // it this way because the jackShutdown() function must return before
\r
1950 // the jack_deactivate() function (in closeStream()) will return.
\r
1951 extern "C" void *jackCloseStream( void *ptr )
\r
1953 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1954 RtApiJack *object = (RtApiJack *) info->object;
\r
1956 object->closeStream();
\r
1958 pthread_exit( NULL );
\r
1960 void jackShutdown( void *infoPointer )
\r
1962 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1963 RtApiJack *object = (RtApiJack *) info->object;
\r
1965 // Check current stream state. If stopped, then we'll assume this
\r
1966 // was called as a result of a call to RtApiJack::stopStream (the
\r
1967 // deactivation of a client handle causes this function to be called).
\r
1968 // If not, we'll assume the Jack server is shutting down or some
\r
1969 // other problem occurred and we should close the stream.
\r
1970 if ( object->isStreamRunning() == false ) return;
\r
1972 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
1973 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
1976 int jackXrun( void *infoPointer )
\r
1978 JackHandle *handle = (JackHandle *) infoPointer;
\r
1980 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
1981 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
1986 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
1987 unsigned int firstChannel, unsigned int sampleRate,
\r
1988 RtAudioFormat format, unsigned int *bufferSize,
\r
1989 RtAudio::StreamOptions *options )
\r
1991 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
1993 // Look for jack server and try to become a client (only do once per stream).
\r
1994 jack_client_t *client = 0;
\r
1995 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
1996 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1997 jack_status_t *status = NULL;
\r
1998 if ( options && !options->streamName.empty() )
\r
1999 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2001 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2002 if ( client == 0 ) {
\r
2003 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2004 error( RtError::WARNING );
\r
2009 // The handle must have been created on an earlier pass.
\r
2010 client = handle->client;
\r
2013 const char **ports;
\r
2014 std::string port, previousPort, deviceName;
\r
2015 unsigned int nPorts = 0, nDevices = 0;
\r
2016 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2018 // Parse the port names up to the first colon (:).
\r
2019 size_t iColon = 0;
\r
2021 port = (char *) ports[ nPorts ];
\r
2022 iColon = port.find(":");
\r
2023 if ( iColon != std::string::npos ) {
\r
2024 port = port.substr( 0, iColon );
\r
2025 if ( port != previousPort ) {
\r
2026 if ( nDevices == device ) deviceName = port;
\r
2028 previousPort = port;
\r
2031 } while ( ports[++nPorts] );
\r
2035 if ( device >= nDevices ) {
\r
2036 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2040 // Count the available ports containing the client name as device
\r
2041 // channels. Jack "input ports" equal RtAudio output channels.
\r
2042 unsigned int nChannels = 0;
\r
2043 unsigned long flag = JackPortIsInput;
\r
2044 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2045 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2047 while ( ports[ nChannels ] ) nChannels++;
\r
2051 // Compare the jack ports for specified client to the requested number of channels.
\r
2052 if ( nChannels < (channels + firstChannel) ) {
\r
2053 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2054 errorText_ = errorStream_.str();
\r
2058 // Check the jack server sample rate.
\r
2059 unsigned int jackRate = jack_get_sample_rate( client );
\r
2060 if ( sampleRate != jackRate ) {
\r
2061 jack_client_close( client );
\r
2062 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2063 errorText_ = errorStream_.str();
\r
2066 stream_.sampleRate = jackRate;
\r
2068 // Get the latency of the JACK port.
\r
2069 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2070 if ( ports[ firstChannel ] )
\r
2071 stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2074 // The jack server always uses 32-bit floating-point data.
\r
2075 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2076 stream_.userFormat = format;
\r
2078 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2079 else stream_.userInterleaved = true;
\r
2081 // Jack always uses non-interleaved buffers.
\r
2082 stream_.deviceInterleaved[mode] = false;
\r
2084 // Jack always provides host byte-ordered data.
\r
2085 stream_.doByteSwap[mode] = false;
\r
2087 // Get the buffer size. The buffer size and number of buffers
\r
2088 // (periods) is set when the jack server is started.
\r
2089 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2090 *bufferSize = stream_.bufferSize;
\r
2092 stream_.nDeviceChannels[mode] = channels;
\r
2093 stream_.nUserChannels[mode] = channels;
\r
2095 // Set flags for buffer conversion.
\r
2096 stream_.doConvertBuffer[mode] = false;
\r
2097 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2098 stream_.doConvertBuffer[mode] = true;
\r
2099 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2100 stream_.nUserChannels[mode] > 1 )
\r
2101 stream_.doConvertBuffer[mode] = true;
\r
2103 // Allocate our JackHandle structure for the stream.
\r
2104 if ( handle == 0 ) {
\r
2106 handle = new JackHandle;
\r
2108 catch ( std::bad_alloc& ) {
\r
2109 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2113 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2114 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2117 stream_.apiHandle = (void *) handle;
\r
2118 handle->client = client;
\r
2120 handle->deviceName[mode] = deviceName;
\r
2122 // Allocate necessary internal buffers.
\r
2123 unsigned long bufferBytes;
\r
2124 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2125 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2126 if ( stream_.userBuffer[mode] == NULL ) {
\r
2127 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2131 if ( stream_.doConvertBuffer[mode] ) {
\r
2133 bool makeBuffer = true;
\r
2134 if ( mode == OUTPUT )
\r
2135 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2136 else { // mode == INPUT
\r
2137 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2138 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2139 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2140 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2144 if ( makeBuffer ) {
\r
2145 bufferBytes *= *bufferSize;
\r
2146 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2147 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2148 if ( stream_.deviceBuffer == NULL ) {
\r
2149 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2155 // Allocate memory for the Jack ports (channels) identifiers.
\r
2156 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2157 if ( handle->ports[mode] == NULL ) {
\r
2158 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2162 stream_.device[mode] = device;
\r
2163 stream_.channelOffset[mode] = firstChannel;
\r
2164 stream_.state = STREAM_STOPPED;
\r
2165 stream_.callbackInfo.object = (void *) this;
\r
2167 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2168 // We had already set up the stream for output.
\r
2169 stream_.mode = DUPLEX;
\r
2171 stream_.mode = mode;
\r
2172 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2173 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2174 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2177 // Register our ports.
\r
2179 if ( mode == OUTPUT ) {
\r
2180 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2181 snprintf( label, 64, "outport %d", i );
\r
2182 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2183 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2187 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2188 snprintf( label, 64, "inport %d", i );
\r
2189 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2190 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2194 // Setup the buffer conversion information structure. We don't use
\r
2195 // buffers to do channel offsets, so we override that parameter
\r
2197 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2203 pthread_cond_destroy( &handle->condition );
\r
2204 jack_client_close( handle->client );
\r
2206 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2207 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2210 stream_.apiHandle = 0;
\r
2213 for ( int i=0; i<2; i++ ) {
\r
2214 if ( stream_.userBuffer[i] ) {
\r
2215 free( stream_.userBuffer[i] );
\r
2216 stream_.userBuffer[i] = 0;
\r
2220 if ( stream_.deviceBuffer ) {
\r
2221 free( stream_.deviceBuffer );
\r
2222 stream_.deviceBuffer = 0;
\r
2228 void RtApiJack :: closeStream( void )
\r
2230 if ( stream_.state == STREAM_CLOSED ) {
\r
2231 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2232 error( RtError::WARNING );
\r
2236 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2239 if ( stream_.state == STREAM_RUNNING )
\r
2240 jack_deactivate( handle->client );
\r
2242 jack_client_close( handle->client );
\r
2246 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2247 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2248 pthread_cond_destroy( &handle->condition );
\r
2250 stream_.apiHandle = 0;
\r
2253 for ( int i=0; i<2; i++ ) {
\r
2254 if ( stream_.userBuffer[i] ) {
\r
2255 free( stream_.userBuffer[i] );
\r
2256 stream_.userBuffer[i] = 0;
\r
2260 if ( stream_.deviceBuffer ) {
\r
2261 free( stream_.deviceBuffer );
\r
2262 stream_.deviceBuffer = 0;
\r
2265 stream_.mode = UNINITIALIZED;
\r
2266 stream_.state = STREAM_CLOSED;
\r
2269 void RtApiJack :: startStream( void )
\r
2272 if ( stream_.state == STREAM_RUNNING ) {
\r
2273 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2274 error( RtError::WARNING );
\r
2278 MUTEX_LOCK(&stream_.mutex);
\r
2280 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2281 int result = jack_activate( handle->client );
\r
2283 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2287 const char **ports;
\r
2289 // Get the list of available ports.
\r
2290 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2292 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2293 if ( ports == NULL) {
\r
2294 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2298 // Now make the port connections. Since RtAudio wasn't designed to
\r
2299 // allow the user to select particular channels of a device, we'll
\r
2300 // just open the first "nChannels" ports with offset.
\r
2301 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2303 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2304 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2307 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2314 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2316 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2317 if ( ports == NULL) {
\r
2318 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2322 // Now make the port connections. See note above.
\r
2323 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2325 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2326 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2329 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2336 handle->drainCounter = 0;
\r
2337 handle->internalDrain = false;
\r
2338 stream_.state = STREAM_RUNNING;
\r
2341 MUTEX_UNLOCK(&stream_.mutex);
\r
2343 if ( result == 0 ) return;
\r
2344 error( RtError::SYSTEM_ERROR );
\r
2347 void RtApiJack :: stopStream( void )
\r
2350 if ( stream_.state == STREAM_STOPPED ) {
\r
2351 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2352 error( RtError::WARNING );
\r
2356 MUTEX_LOCK( &stream_.mutex );
\r
2358 if ( stream_.state == STREAM_STOPPED ) {
\r
2359 MUTEX_UNLOCK( &stream_.mutex );
\r
2363 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2364 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2366 if ( handle->drainCounter == 0 ) {
\r
2367 handle->drainCounter = 2;
\r
2368 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2372 jack_deactivate( handle->client );
\r
2373 stream_.state = STREAM_STOPPED;
\r
2375 MUTEX_UNLOCK( &stream_.mutex );
\r
2378 void RtApiJack :: abortStream( void )
\r
2381 if ( stream_.state == STREAM_STOPPED ) {
\r
2382 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2383 error( RtError::WARNING );
\r
2387 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2388 handle->drainCounter = 2;
\r
2393 // This function will be called by a spawned thread when the user
\r
2394 // callback function signals that the stream should be stopped or
\r
2395 // aborted. It is necessary to handle it this way because the
\r
2396 // callbackEvent() function must return before the jack_deactivate()
\r
2397 // function will return.
\r
2398 extern "C" void *jackStopStream( void *ptr )
\r
2400 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2401 RtApiJack *object = (RtApiJack *) info->object;
\r
2403 object->stopStream();
\r
2405 pthread_exit( NULL );
\r
2408 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2410 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
2411 if ( stream_.state == STREAM_CLOSED ) {
\r
2412 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2413 error( RtError::WARNING );
\r
2416 if ( stream_.bufferSize != nframes ) {
\r
2417 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2418 error( RtError::WARNING );
\r
2422 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2423 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2425 // Check if we were draining the stream and signal is finished.
\r
2426 if ( handle->drainCounter > 3 ) {
\r
2427 if ( handle->internalDrain == true )
\r
2428 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2430 pthread_cond_signal( &handle->condition );
\r
2434 MUTEX_LOCK( &stream_.mutex );
\r
2436 // The state might change while waiting on a mutex.
\r
2437 if ( stream_.state == STREAM_STOPPED ) {
\r
2438 MUTEX_UNLOCK( &stream_.mutex );
\r
2442 // Invoke user callback first, to get fresh output data.
\r
2443 if ( handle->drainCounter == 0 ) {
\r
2444 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2445 double streamTime = getStreamTime();
\r
2446 RtAudioStreamStatus status = 0;
\r
2447 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2448 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2449 handle->xrun[0] = false;
\r
2451 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2452 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2453 handle->xrun[1] = false;
\r
2455 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2456 stream_.bufferSize, streamTime, status, info->userData );
\r
2457 if ( handle->drainCounter == 2 ) {
\r
2458 MUTEX_UNLOCK( &stream_.mutex );
\r
2460 pthread_create( &id, NULL, jackStopStream, info );
\r
2463 else if ( handle->drainCounter == 1 )
\r
2464 handle->internalDrain = true;
\r
2467 jack_default_audio_sample_t *jackbuffer;
\r
2468 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2471 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2473 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2474 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2475 memset( jackbuffer, 0, bufferBytes );
\r
2479 else if ( stream_.doConvertBuffer[0] ) {
\r
2481 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2483 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2484 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2485 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2488 else { // no buffer conversion
\r
2489 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2490 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2491 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2495 if ( handle->drainCounter ) {
\r
2496 handle->drainCounter++;
\r
2501 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2503 if ( stream_.doConvertBuffer[1] ) {
\r
2504 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2505 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2506 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2508 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2510 else { // no buffer conversion
\r
2511 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2512 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2513 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2519 MUTEX_UNLOCK(&stream_.mutex);
\r
2521 RtApi::tickStreamTime();
\r
2524 //******************** End of __UNIX_JACK__ *********************//
\r
2527 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2529 // The ASIO API is designed around a callback scheme, so this
\r
2530 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2531 // Jack. The primary constraint with ASIO is that it only allows
\r
2532 // access to a single driver at a time. Thus, it is not possible to
\r
2533 // have more than one simultaneous RtAudio stream.
\r
2535 // This implementation also requires a number of external ASIO files
\r
2536 // and a few global variables. The ASIO callback scheme does not
\r
2537 // allow for the passing of user data, so we must create a global
\r
2538 // pointer to our callbackInfo structure.
\r
2540 // On unix systems, we make use of a pthread condition variable.
\r
2541 // Since there is no equivalent in Windows, I hacked something based
\r
2542 // on information found in
\r
2543 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2545 #include "asiosys.h"
\r
2547 #include "iasiothiscallresolver.h"
\r
2548 #include "asiodrivers.h"
\r
2551 AsioDrivers drivers;
\r
2552 ASIOCallbacks asioCallbacks;
\r
2553 ASIODriverInfo driverInfo;
\r
2554 CallbackInfo *asioCallbackInfo;
\r
2557 struct AsioHandle {
\r
2558 int drainCounter; // Tracks callback counts when draining
\r
2559 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2560 ASIOBufferInfo *bufferInfos;
\r
2564 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2567 // Function declarations (definitions at end of section)
\r
2568 static const char* getAsioErrorString( ASIOError result );
\r
2569 void sampleRateChanged( ASIOSampleRate sRate );
\r
2570 long asioMessages( long selector, long value, void* message, double* opt );
\r
2572 RtApiAsio :: RtApiAsio()
\r
2574 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2575 // CoInitialize beforehand, but it must be for appartment threading
\r
2576 // (in which case, CoInitilialize will return S_FALSE here).
\r
2577 coInitialized_ = false;
\r
2578 HRESULT hr = CoInitialize( NULL );
\r
2579 if ( FAILED(hr) ) {
\r
2580 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2581 error( RtError::WARNING );
\r
2583 coInitialized_ = true;
\r
2585 drivers.removeCurrentDriver();
\r
2586 driverInfo.asioVersion = 2;
\r
2588 // See note in DirectSound implementation about GetDesktopWindow().
\r
2589 driverInfo.sysRef = GetForegroundWindow();
\r
2592 RtApiAsio :: ~RtApiAsio()
\r
2594 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2595 if ( coInitialized_ ) CoUninitialize();
\r
2598 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2600 return (unsigned int) drivers.asioGetNumDev();
\r
2603 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2605 RtAudio::DeviceInfo info;
\r
2606 info.probed = false;
\r
2609 unsigned int nDevices = getDeviceCount();
\r
2610 if ( nDevices == 0 ) {
\r
2611 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2612 error( RtError::INVALID_USE );
\r
2615 if ( device >= nDevices ) {
\r
2616 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2617 error( RtError::INVALID_USE );
\r
2620 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2621 if ( stream_.state != STREAM_CLOSED ) {
\r
2622 if ( device >= devices_.size() ) {
\r
2623 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2624 error( RtError::WARNING );
\r
2627 return devices_[ device ];
\r
2630 char driverName[32];
\r
2631 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2632 if ( result != ASE_OK ) {
\r
2633 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2634 errorText_ = errorStream_.str();
\r
2635 error( RtError::WARNING );
\r
2639 info.name = driverName;
\r
2641 if ( !drivers.loadDriver( driverName ) ) {
\r
2642 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2643 errorText_ = errorStream_.str();
\r
2644 error( RtError::WARNING );
\r
2648 result = ASIOInit( &driverInfo );
\r
2649 if ( result != ASE_OK ) {
\r
2650 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2651 errorText_ = errorStream_.str();
\r
2652 error( RtError::WARNING );
\r
2656 // Determine the device channel information.
\r
2657 long inputChannels, outputChannels;
\r
2658 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2659 if ( result != ASE_OK ) {
\r
2660 drivers.removeCurrentDriver();
\r
2661 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2662 errorText_ = errorStream_.str();
\r
2663 error( RtError::WARNING );
\r
2667 info.outputChannels = outputChannels;
\r
2668 info.inputChannels = inputChannels;
\r
2669 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2670 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2672 // Determine the supported sample rates.
\r
2673 info.sampleRates.clear();
\r
2674 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2675 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2676 if ( result == ASE_OK )
\r
2677 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2680 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2681 ASIOChannelInfo channelInfo;
\r
2682 channelInfo.channel = 0;
\r
2683 channelInfo.isInput = true;
\r
2684 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2685 result = ASIOGetChannelInfo( &channelInfo );
\r
2686 if ( result != ASE_OK ) {
\r
2687 drivers.removeCurrentDriver();
\r
2688 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2689 errorText_ = errorStream_.str();
\r
2690 error( RtError::WARNING );
\r
2694 info.nativeFormats = 0;
\r
2695 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2696 info.nativeFormats |= RTAUDIO_SINT16;
\r
2697 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2698 info.nativeFormats |= RTAUDIO_SINT32;
\r
2699 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2700 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2701 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2702 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2704 if ( info.outputChannels > 0 )
\r
2705 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2706 if ( info.inputChannels > 0 )
\r
2707 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2709 info.probed = true;
\r
2710 drivers.removeCurrentDriver();
\r
2714 void bufferSwitch( long index, ASIOBool processNow )
\r
2716 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2717 object->callbackEvent( index );
\r
2720 void RtApiAsio :: saveDeviceInfo( void )
\r
2724 unsigned int nDevices = getDeviceCount();
\r
2725 devices_.resize( nDevices );
\r
2726 for ( unsigned int i=0; i<nDevices; i++ )
\r
2727 devices_[i] = getDeviceInfo( i );
\r
// Open and configure one direction (OUTPUT or INPUT) of an ASIO stream:
// load the driver, validate channel count / sample rate, determine the
// native data format, negotiate the buffer size, create the ASIO
// buffers, and allocate RtAudio's user/device conversion buffers.
// NOTE(review): this text is a lossy extraction of the original file.
// The stray leading integers are original line numbers, and several
// physical lines were dropped wherever the numbering jumps — notably
// the "return FAILURE;" statements and closing braces of each error
// block, and the "error:" cleanup label near the end. Restore them
// from the canonical source before compiling.
2730 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2731 unsigned int firstChannel, unsigned int sampleRate,
2732 RtAudioFormat format, unsigned int *bufferSize,
2733 RtAudio::StreamOptions *options )
2735 // For ASIO, a duplex stream MUST use the same driver.
2736 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
2737 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2741 char driverName[32];
2742 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2743 if ( result != ASE_OK ) {
2744 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2745 errorText_ = errorStream_.str();
2749 // Only load the driver once for duplex stream.
2750 if ( mode != INPUT || stream_.mode != OUTPUT ) {
2751 // The getDeviceInfo() function will not work when a stream is open
2752 // because ASIO does not allow multiple devices to run at the same
2753 // time. Thus, we'll probe the system before opening a stream and
2754 // save the results for use by getDeviceInfo().
2755 this->saveDeviceInfo();
2757 if ( !drivers.loadDriver( driverName ) ) {
2758 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2759 errorText_ = errorStream_.str();
2763 result = ASIOInit( &driverInfo );
2764 if ( result != ASE_OK ) {
2765 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2766 errorText_ = errorStream_.str();
2771 // Check the device channel count.
2772 long inputChannels, outputChannels;
2773 result = ASIOGetChannels( &inputChannels, &outputChannels );
2774 if ( result != ASE_OK ) {
2775 drivers.removeCurrentDriver();
2776 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2777 errorText_ = errorStream_.str();
// Requested channels plus the channel offset must fit within what the
// driver reports for the requested direction.
2781 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2782 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2783 drivers.removeCurrentDriver();
2784 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2785 errorText_ = errorStream_.str();
2788 stream_.nDeviceChannels[mode] = channels;
2789 stream_.nUserChannels[mode] = channels;
2790 stream_.channelOffset[mode] = firstChannel;
2792 // Verify the sample rate is supported.
2793 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2794 if ( result != ASE_OK ) {
2795 drivers.removeCurrentDriver();
2796 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2797 errorText_ = errorStream_.str();
2801 // Get the current sample rate
2802 ASIOSampleRate currentRate;
// NOTE(review): "¤tRate" below is mojibake for "&currentRate" — the
// sequence "&curren" was decoded as the HTML entity for the '¤' sign.
// The call must pass the address of currentRate.
2803 result = ASIOGetSampleRate( ¤tRate );
2804 if ( result != ASE_OK ) {
2805 drivers.removeCurrentDriver();
2806 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2807 errorText_ = errorStream_.str();
2811 // Set the sample rate only if necessary
2812 if ( currentRate != sampleRate ) {
2813 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2814 if ( result != ASE_OK ) {
2815 drivers.removeCurrentDriver();
2816 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2817 errorText_ = errorStream_.str();
2822 // Determine the driver data type.
2823 ASIOChannelInfo channelInfo;
2824 channelInfo.channel = 0;
2825 if ( mode == OUTPUT ) channelInfo.isInput = false;
2826 else channelInfo.isInput = true;
2827 result = ASIOGetChannelInfo( &channelInfo );
2828 if ( result != ASE_OK ) {
2829 drivers.removeCurrentDriver();
2830 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2831 errorText_ = errorStream_.str();
2835 // Assuming WINDOWS host is always little-endian.
2836 stream_.doByteSwap[mode] = false;
2837 stream_.userFormat = format;
2838 stream_.deviceFormat[mode] = 0;
// Map the ASIO sample type to an RtAudio format; the MSB (big-endian)
// variants additionally require a byte swap on this little-endian host.
2839 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
2840 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
2841 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
2843 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
2844 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
2845 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
2847 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
2848 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2849 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
2851 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
2852 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
2853 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
2856 if ( stream_.deviceFormat[mode] == 0 ) {
2857 drivers.removeCurrentDriver();
2858 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
2859 errorText_ = errorStream_.str();
2863 // Set the buffer size. For a duplex stream, this will end up
2864 // setting the buffer size based on the input constraints, which
2866 long minSize, maxSize, preferSize, granularity;
2867 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
2868 if ( result != ASE_OK ) {
2869 drivers.removeCurrentDriver();
2870 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
2871 errorText_ = errorStream_.str();
// Clamp the requested buffer size into [minSize, maxSize]; a
// granularity of -1 means the driver only accepts powers of two.
2875 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
2876 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
2877 else if ( granularity == -1 ) {
2878 // Make sure bufferSize is a power of two.
2879 int log2_of_min_size = 0;
2880 int log2_of_max_size = 0;
2882 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
2883 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
2884 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
// Pick the power of two in [2^log2_min, 2^log2_max] closest to the
// requested size.
2887 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
2888 int min_delta_num = log2_of_min_size;
2890 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
2891 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
2892 if (current_delta < min_delta) {
2893 min_delta = current_delta;
2894 min_delta_num = i;
2898 *bufferSize = ( (unsigned int)1 << min_delta_num );
2899 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
2900 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
2902 else if ( granularity != 0 ) {
2903 // Set to an even multiple of granularity, rounding up.
2904 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
// A duplex stream must use the same buffer size for both directions.
2907 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
2908 drivers.removeCurrentDriver();
2909 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
2913 stream_.bufferSize = *bufferSize;
2914 stream_.nBuffers = 2;
2916 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2917 else stream_.userInterleaved = true;
2919 // ASIO always uses non-interleaved buffers.
2920 stream_.deviceInterleaved[mode] = false;
2922 // Allocate, if necessary, our AsioHandle structure for the stream.
2923 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2924 if ( handle == 0 ) {
2926 handle = new AsioHandle;
2928 catch ( std::bad_alloc& ) {
2929 //if ( handle == NULL ) {
2930 drivers.removeCurrentDriver();
2931 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
2934 handle->bufferInfos = 0;
2936 // Create a manual-reset event.
2937 handle->condition = CreateEvent( NULL, // no security
2938 TRUE, // manual-reset
2939 FALSE, // non-signaled initially
2940 NULL ); // unnamed
2941 stream_.apiHandle = (void *) handle;
2944 // Create the ASIO internal buffers. Since RtAudio sets up input
2945 // and output separately, we'll have to dispose of previously
2946 // created output buffers for a duplex stream.
2947 long inputLatency, outputLatency;
2948 if ( mode == INPUT && stream_.mode == OUTPUT ) {
2949 ASIODisposeBuffers();
2950 if ( handle->bufferInfos ) free( handle->bufferInfos );
2953 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
// buffersAllocated gates whether the error-cleanup path below must call
// ASIODisposeBuffers().
2954 bool buffersAllocated = false;
2955 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
2956 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
2957 if ( handle->bufferInfos == NULL ) {
2958 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
2959 errorText_ = errorStream_.str();
// Output channels occupy the first nDeviceChannels[0] bufferInfo slots,
// input channels the remaining nDeviceChannels[1].
2963 ASIOBufferInfo *infos;
2964 infos = handle->bufferInfos;
2965 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
2966 infos->isInput = ASIOFalse;
2967 infos->channelNum = i + stream_.channelOffset[0];
2968 infos->buffers[0] = infos->buffers[1] = 0;
2970 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
2971 infos->isInput = ASIOTrue;
2972 infos->channelNum = i + stream_.channelOffset[1];
2973 infos->buffers[0] = infos->buffers[1] = 0;
2976 // Set up the ASIO callback structure and create the ASIO data buffers.
2977 asioCallbacks.bufferSwitch = &bufferSwitch;
2978 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
2979 asioCallbacks.asioMessage = &asioMessages;
2980 asioCallbacks.bufferSwitchTimeInfo = NULL;
2981 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
2982 if ( result != ASE_OK ) {
2983 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
2984 errorText_ = errorStream_.str();
2987 buffersAllocated = true;
2989 // Set flags for buffer conversion.
2990 stream_.doConvertBuffer[mode] = false;
2991 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2992 stream_.doConvertBuffer[mode] = true;
2993 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2994 stream_.nUserChannels[mode] > 1 )
2995 stream_.doConvertBuffer[mode] = true;
2997 // Allocate necessary internal buffers
2998 unsigned long bufferBytes;
2999 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3000 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3001 if ( stream_.userBuffer[mode] == NULL ) {
3002 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3006 if ( stream_.doConvertBuffer[mode] ) {
// For a duplex stream the existing output-side device buffer can be
// reused if it is already at least as large as the input side needs.
3008 bool makeBuffer = true;
3009 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3010 if ( mode == INPUT ) {
3011 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
3012 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3013 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3017 if ( makeBuffer ) {
3018 bufferBytes *= *bufferSize;
3019 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3020 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3021 if ( stream_.deviceBuffer == NULL ) {
3022 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3028 stream_.sampleRate = sampleRate;
3029 stream_.device[mode] = device;
3030 stream_.state = STREAM_STOPPED;
3031 asioCallbackInfo = &stream_.callbackInfo;
3032 stream_.callbackInfo.object = (void *) this;
3033 if ( stream_.mode == OUTPUT && mode == INPUT )
3034 // We had already set up an output stream.
3035 stream_.mode = DUPLEX;
3037 stream_.mode = mode;
3039 // Determine device latencies
3040 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3041 if ( result != ASE_OK ) {
3042 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3043 errorText_ = errorStream_.str();
3044 error( RtError::WARNING); // warn but don't fail
3047 stream_.latency[0] = outputLatency;
3048 stream_.latency[1] = inputLatency;
3051 // Setup the buffer conversion information structure. We don't use
3052 // buffers to do channel offsets, so we override that parameter
3054 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
// NOTE(review): the lines below are the shared error-cleanup tail; the
// "error:" label, the "return SUCCESS;" before it, and the final
// "return FAILURE;" were dropped by the extractor.
3059 if ( buffersAllocated )
3060 ASIODisposeBuffers();
3061 drivers.removeCurrentDriver();
3064 CloseHandle( handle->condition );
3065 if ( handle->bufferInfos )
3066 free( handle->bufferInfos );
3068 stream_.apiHandle = 0;
3071 for ( int i=0; i<2; i++ ) {
3072 if ( stream_.userBuffer[i] ) {
3073 free( stream_.userBuffer[i] );
3074 stream_.userBuffer[i] = 0;
3078 if ( stream_.deviceBuffer ) {
3079 free( stream_.deviceBuffer );
3080 stream_.deviceBuffer = 0;
\r
// Close the open stream: stop it if running, dispose the ASIO buffers,
// unload the driver, and release the AsioHandle plus all user/device
// buffers. NOTE(review): lossy extraction — the stray leading integers
// are original line numbers and several lines (closing braces, the
// early return after the warning, the handle null-check) were dropped.
3086 void RtApiAsio :: closeStream()
3088 if ( stream_.state == STREAM_CLOSED ) {
3089 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3090 error( RtError::WARNING );
3094 if ( stream_.state == STREAM_RUNNING ) {
3095 stream_.state = STREAM_STOPPED;
// Buffers are disposed and the driver unloaded regardless of whether a
// stop was needed above.
3098 ASIODisposeBuffers();
3099 drivers.removeCurrentDriver();
3101 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3103 CloseHandle( handle->condition );
3104 if ( handle->bufferInfos )
3105 free( handle->bufferInfos );
3107 stream_.apiHandle = 0;
3110 for ( int i=0; i<2; i++ ) {
3111 if ( stream_.userBuffer[i] ) {
3112 free( stream_.userBuffer[i] );
3113 stream_.userBuffer[i] = 0;
3117 if ( stream_.deviceBuffer ) {
3118 free( stream_.deviceBuffer );
3119 stream_.deviceBuffer = 0;
// Mark the stream object reusable.
3122 stream_.mode = UNINITIALIZED;
3123 stream_.state = STREAM_CLOSED;
\r
// File-scope flag set when callbackEvent() spawns the asioStopStream
// thread; makes subsequent callbacks return immediately (see
// callbackEvent) and is cleared by startStream().
3126 bool stopThreadCalled = false;
\r
// Start the open stream via ASIOStart(), resetting the drain state and
// the manual-reset condition event. Raises SYSTEM_ERROR if the driver
// refused to start. NOTE(review): lossy extraction — closing braces and
// the early return after the warning were dropped (numbering gaps).
3128 void RtApiAsio :: startStream()
3131 if ( stream_.state == STREAM_RUNNING ) {
3132 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3133 error( RtError::WARNING );
3137 //MUTEX_LOCK( &stream_.mutex );
3139 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3140 ASIOError result = ASIOStart();
3141 if ( result != ASE_OK ) {
3142 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3143 errorText_ = errorStream_.str();
// Reset drain bookkeeping so the first callbacks run normally.
3147 handle->drainCounter = 0;
3148 handle->internalDrain = false;
3149 ResetEvent( handle->condition );
3150 stream_.state = STREAM_RUNNING;
3154 //MUTEX_UNLOCK( &stream_.mutex );
// Allow callbackEvent() to process again after a prior stop-thread run.
3156 stopThreadCalled = false;
3158 if ( result == ASE_OK ) return;
3159 error( RtError::SYSTEM_ERROR );
\r
// Stop the stream. For output/duplex streams started from user code
// (drainCounter == 0), block on the condition event until the callback
// signals that pending output has been played, then call ASIOStop().
// NOTE(review): lossy extraction — closing braces and early returns
// were dropped where the embedded numbering jumps.
3162 void RtApiAsio :: stopStream()
3165 if ( stream_.state == STREAM_STOPPED ) {
3166 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3167 error( RtError::WARNING );
3172 MUTEX_LOCK( &stream_.mutex );
3174 if ( stream_.state == STREAM_STOPPED ) {
3175 MUTEX_UNLOCK( &stream_.mutex );
3180 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3181 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// drainCounter == 0 means the stop came from user code (not from the
// callback's return value); arm the drain and wait for the callback to
// signal completion.
3182 if ( handle->drainCounter == 0 ) {
3183 handle->drainCounter = 2;
3184 // MUTEX_UNLOCK( &stream_.mutex );
3185 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3186 //ResetEvent( handle->condition );
3187 // MUTEX_LOCK( &stream_.mutex );
3191 stream_.state = STREAM_STOPPED;
3193 ASIOError result = ASIOStop();
3194 if ( result != ASE_OK ) {
3195 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3196 errorText_ = errorStream_.str();
3199 // MUTEX_UNLOCK( &stream_.mutex );
3201 if ( result == ASE_OK ) return;
3202 error( RtError::SYSTEM_ERROR );
\r
// Abort the stream. As the retained comments below explain, aborting
// is deliberately identical to stopping for ASIO. NOTE(review): lossy
// extraction — the closing brace, the early return after the warning,
// and the trailing stopStream() call were dropped (numbering gaps).
3205 void RtApiAsio :: abortStream()
3208 if ( stream_.state == STREAM_STOPPED ) {
3209 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3210 error( RtError::WARNING );
3214 // The following lines were commented-out because some behavior was
3215 // noted where the device buffers need to be zeroed to avoid
3216 // continuing sound, even when the device buffers are completely
3217 // disposed. So now, calling abort is the same as calling stop.
3218 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3219 // handle->drainCounter = 2;
\r
3223 // This function will be called by a spawned thread when the user
3224 // callback function signals that the stream should be stopped or
3225 // aborted. It is necessary to handle it this way because the
3226 // callbackEvent() function must return before the ASIOStop()
3227 // function will return.
// Thread entry point (Win32 _beginthreadex signature). NOTE(review):
// lossy extraction — braces and the trailing return were dropped.
3228 extern "C" unsigned __stdcall asioStopStream( void *ptr )
3230 CallbackInfo *info = (CallbackInfo *) ptr;
3231 RtApiAsio *object = (RtApiAsio *) info->object;
3233 object->stopStream();
3235 _endthreadex( 0 );
\r
// Per-buffer processing driven by the ASIO bufferSwitch callback:
// invoke the user callback, convert/byte-swap between user and device
// formats, copy data to/from the non-interleaved ASIO half-buffer
// selected by bufferIndex, and manage stop/drain signaling.
// NOTE(review): lossy extraction — the stray leading integers are
// original line numbers; closing braces, some early returns, and the
// final "return SUCCESS;" were dropped where the numbering jumps.
3239 bool RtApiAsio :: callbackEvent( long bufferIndex )
3241 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
3242 if ( stopThreadCalled ) return SUCCESS;
3243 if ( stream_.state == STREAM_CLOSED ) {
3244 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3245 error( RtError::WARNING );
3249 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3250 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3252 // Check if we were draining the stream and signal if finished.
3253 if ( handle->drainCounter > 3 ) {
// internalDrain == false: a blocked stopStream() is waiting on the
// event; otherwise the stop must be issued from a separate thread
// because ASIOStop() cannot complete until this callback returns.
3254 if ( handle->internalDrain == false )
3255 SetEvent( handle->condition );
3256 else { // spawn a thread to stop the stream
3257 unsigned threadId;
3258 stopThreadCalled = true;
3259 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3260 &stream_.callbackInfo, 0, &threadId );
3265 /*MUTEX_LOCK( &stream_.mutex );
3267 // The state might change while waiting on a mutex.
3268 if ( stream_.state == STREAM_STOPPED ) goto unlock; */
3270 // Invoke user callback to get fresh output data UNLESS we are
3271 // draining stream.
3272 if ( handle->drainCounter == 0 ) {
3273 RtAudioCallback callback = (RtAudioCallback) info->callback;
3274 double streamTime = getStreamTime();
3275 RtAudioStreamStatus status = 0;
3276 if ( stream_.mode != INPUT && asioXRun == true ) {
3277 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3280 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3281 status |= RTAUDIO_INPUT_OVERFLOW;
// The user callback's return value arms the drain: 1 = drain then stop
// internally, 2 = abort immediately.
3284 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3285 stream_.bufferSize, streamTime, status, info->userData );
3286 if ( handle->drainCounter == 2 ) {
3287 // MUTEX_UNLOCK( &stream_.mutex );
3289 unsigned threadId;
3290 stopThreadCalled = true;
3291 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3292 &stream_.callbackInfo, 0, &threadId );
3295 else if ( handle->drainCounter == 1 )
3296 handle->internalDrain = true;
3299 unsigned int nChannels, bufferBytes, i, j;
3300 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3301 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3303 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3305 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3307 for ( i=0, j=0; i<nChannels; i++ ) {
3308 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3309 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3313 else if ( stream_.doConvertBuffer[0] ) {
// Convert/byte-swap into the interleaved-by-channel device buffer, then
// scatter each channel into its ASIO output buffer slot.
3315 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3316 if ( stream_.doByteSwap[0] )
3317 byteSwapBuffer( stream_.deviceBuffer,
3318 stream_.bufferSize * stream_.nDeviceChannels[0],
3319 stream_.deviceFormat[0] );
3321 for ( i=0, j=0; i<nChannels; i++ ) {
3322 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3323 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3324 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3330 if ( stream_.doByteSwap[0] )
3331 byteSwapBuffer( stream_.userBuffer[0],
3332 stream_.bufferSize * stream_.nUserChannels[0],
3333 stream_.userFormat );
3335 for ( i=0, j=0; i<nChannels; i++ ) {
3336 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3337 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3338 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
// While draining, count callbacks so drainCounter eventually exceeds 3
// and the signaling block above fires.
3343 if ( handle->drainCounter ) {
3344 handle->drainCounter++;
3349 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3351 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3353 if (stream_.doConvertBuffer[1]) {
3355 // Always interleave ASIO input data.
3356 for ( i=0, j=0; i<nChannels; i++ ) {
3357 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3358 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3359 handle->bufferInfos[i].buffers[bufferIndex],
3363 if ( stream_.doByteSwap[1] )
3364 byteSwapBuffer( stream_.deviceBuffer,
3365 stream_.bufferSize * stream_.nDeviceChannels[1],
3366 stream_.deviceFormat[1] );
3367 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3371 for ( i=0, j=0; i<nChannels; i++ ) {
3372 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3373 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3374 handle->bufferInfos[i].buffers[bufferIndex],
3379 if ( stream_.doByteSwap[1] )
3380 byteSwapBuffer( stream_.userBuffer[1],
3381 stream_.bufferSize * stream_.nUserChannels[1],
3382 stream_.userFormat );
3387 // The following call was suggested by Malte Clasen. While the API
3388 // documentation indicates it should not be required, some device
3389 // drivers apparently do not function correctly without it.
3390 ASIOOutputReady();
3392 // MUTEX_UNLOCK( &stream_.mutex );
3394 RtApi::tickStreamTime();
\r
// ASIO sampleRateDidChange callback: stop the stream (reporting any
// RtError to stderr) and notify the user via stderr. NOTE(review):
// lossy extraction — the opening/closing braces and the "try {" line
// around stopStream() were dropped (numbering gaps).
3398 void sampleRateChanged( ASIOSampleRate sRate )
3400 // The ASIO documentation says that this usually only happens during
3401 // external sync. Audio processing is not stopped by the driver,
3402 // actual sample rate might not have even changed, maybe only the
3403 // sample rate status of an AES/EBU or S/PDIF digital input at the
3406 RtApi *object = (RtApi *) asioCallbackInfo->object;
3408 object->stopStream();
3410 catch ( RtError &exception ) {
3411 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3415 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
// ASIO asioMessage callback: answers driver queries about supported
// selectors and reacts to reset/resync/latency notifications.
// NOTE(review): lossy extraction — the "long ret = 0;" declaration,
// the per-case "ret = 1L;"/"break;" lines, the closing brace, and the
// final "return ret;" were dropped where the numbering jumps.
3418 long asioMessages( long selector, long value, void* message, double* opt )
3422 switch( selector ) {
3423 case kAsioSelectorSupported:
3424 if ( value == kAsioResetRequest
3425 || value == kAsioEngineVersion
3426 || value == kAsioResyncRequest
3427 || value == kAsioLatenciesChanged
3428 // The following three were added for ASIO 2.0, you don't
3429 // necessarily have to support them.
3430 || value == kAsioSupportsTimeInfo
3431 || value == kAsioSupportsTimeCode
3432 || value == kAsioSupportsInputMonitor)
3435 case kAsioResetRequest:
3436 // Defer the task and perform the reset of the driver during the
3437 // next "safe" situation. You cannot reset the driver right now,
3438 // as this code is called from the driver. Reset the driver is
3439 // done by completely destruct is. I.e. ASIOStop(),
3440 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3442 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3445 case kAsioResyncRequest:
3446 // This informs the application that the driver encountered some
3447 // non-fatal data loss. It is used for synchronization purposes
3448 // of different media. Added mainly to work around the Win16Mutex
3449 // problems in Windows 95/98 with the Windows Multimedia system,
3450 // which could lose data because the Mutex was held too long by
3451 // another thread. However a driver can issue it in other
3452 // situations, too.
3453 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3457 case kAsioLatenciesChanged:
3458 // This will inform the host application that the drivers were
3459 // latencies changed. Beware, it this does not mean that the
3460 // buffer sizes have changed! You might need to update internal
3462 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3465 case kAsioEngineVersion:
3466 // Return the supported ASIO version of the host application. If
3467 // a host application does not implement this selector, ASIO 1.0
3468 // is assumed by the driver.
3471 case kAsioSupportsTimeInfo:
3472 // Informs the driver whether the
3473 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3474 // For compatibility with ASIO 1.0 drivers the host application
3475 // should always support the "old" bufferSwitch method, too.
3478 case kAsioSupportsTimeCode:
3479 // Informs the driver whether application is interested in time
3480 // code info. If an application does not need to know about time
3481 // code, the driver has less work to do.
\r
3488 static const char* getAsioErrorString( ASIOError result )
\r
3493 const char*message;
\r
3496 static Messages m[] =
\r
3498 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3499 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3500 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3501 { ASE_InvalidMode, "Invalid mode." },
\r
3502 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3503 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3504 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3507 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3508 if ( m[i].value == result ) return m[i].message;
\r
3510 return "Unknown error.";
\r
3512 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3516 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3518 // Modified by Robin Davies, October 2005
\r
3519 // - Improvements to DirectX pointer chasing.
\r
3520 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3521 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3522 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3523 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3525 #include <dsound.h>
\r
3526 #include <assert.h>
\r
3527 #include <algorithm>
\r
3529 #if defined(__MINGW32__)
\r
3530 // missing from latest mingw winapi
\r
3531 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3532 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3533 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3534 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3541 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3543 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3544 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3547 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3549 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3550 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3551 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3552 return pointer >= earlierPointer && pointer < laterPointer;
\r
3555 // A structure to hold various information related to the DirectSound
\r
3556 // API implementation.
\r
// Fragment of the DsHandle structure (per-stream DirectSound state).
// NOTE(review): lossy extraction — the "struct DsHandle {" header, the
// member declarations for id[2]/buffer[2]/xrun[2] (initialized by the
// constructor initializer below), and the closing "};" were dropped
// where the embedded numbering jumps.
3558 unsigned int drainCounter; // Tracks callback counts when draining
3559 bool internalDrain; // Indicates if stop is initiated from callback or not.
3563 UINT bufferPointer[2];
3564 DWORD dsBufferSize[2];
3565 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Default constructor: zero/false-initialize all per-direction state
// (index 0 = output/playback, index 1 = input/capture).
3569 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3572 // Declarations for utility functions, callbacks, and structures
3573 // specific to the DirectSound implementation.
// Enumeration callback passed to DirectSound(Capture)Enumerate; the
// lpContext argument carries a bool* telling it input vs. output.
// NOTE(review): lossy extraction — the module/driver-name parameter
// line of this declaration was dropped (numbering gap 3575→3577).
3574 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
3575 LPCTSTR description,
3577 LPVOID lpContext );
3579 static const char* getErrorString( int code );
3581 extern "C" unsigned __stdcall callbackHandler( void *ptr );
// Constructor-initializer fragment of the DsDevice struct; the struct
// header and its member declarations (found, validId[2], id[2], name)
// were dropped by the extractor (numbering gap 3581→3590).
3590 : found(false) { validId[0] = false; validId[1] = false; }
// Global cache of enumerated DirectSound devices, maintained by
// getDeviceCount() / deviceQueryCallback.
3593 std::vector< DsDevice > dsDevices;
\r
// Constructor: initialize COM for this thread; a failed CoInitialize is
// tolerated (coInitialized_ stays false so the destructor skips the
// balancing CoUninitialize). NOTE(review): braces dropped by the
// extractor (numbering gaps).
3595 RtApiDs :: RtApiDs()
3597 // Dsound will run both-threaded. If CoInitialize fails, then just
3598 // accept whatever the mainline chose for a threading model.
3599 coInitialized_ = false;
3600 HRESULT hr = CoInitialize( NULL );
3601 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: balance the constructor's CoInitialize and close any
// stream still open. NOTE(review): braces dropped by the extractor.
3604 RtApiDs :: ~RtApiDs()
3606 if ( coInitialized_ ) CoUninitialize(); // balanced call.
3607 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3610 // The DirectSound default output is always the first device.
// NOTE(review): the bodies of these two getters (presumably
// "{ return 0; }" each, per the comments) were dropped by the
// extractor (numbering gaps 3611→3616 and 3618→3623).
3611 unsigned int RtApiDs :: getDefaultOutputDevice( void )
3616 // The DirectSound default input is always the first input device,
3617 // which is the first capture device enumerated.
3618 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3623 unsigned int RtApiDs :: getDeviceCount( void )
\r
3625 // Set query flag for previously found devices to false, so that we
\r
3626 // can check for any devices that have disappeared.
\r
3627 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3628 dsDevices[i].found = false;
\r
3630 // Query DirectSound devices.
\r
3631 bool isInput = false;
\r
3632 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3633 if ( FAILED( result ) ) {
\r
3634 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3635 errorText_ = errorStream_.str();
\r
3636 error( RtError::WARNING );
\r
3639 // Query DirectSoundCapture devices.
\r
3641 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3642 if ( FAILED( result ) ) {
\r
3643 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3644 errorText_ = errorStream_.str();
\r
3645 error( RtError::WARNING );
\r
3648 // Clean out any devices that may have disappeared.
\r
3649 std::vector< int > indices;
\r
3650 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3651 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3652 unsigned int nErased = 0;
\r
3653 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3654 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3656 return dsDevices.size();
\r
// Probe a device's capabilities: output/input channel counts, supported
// sample rates and native formats, default-device flags, and name.
// Returns an RtAudio::DeviceInfo with probed == true on success.
// NOTE(review): this extraction is missing several original lines —
// the getDeviceCount() refresh call, local declarations ("HRESULT result;",
// "DSCAPS outCaps;", "DSCCAPS inCaps;", "bool found;"), the "probeInput:"
// label targeted by the goto below, intermediate "return info;" statements,
// and many closing braces.  Comments annotate the code as shown; confirm
// against the canonical RtAudio source.
3659 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )

3661 RtAudio::DeviceInfo info;

3662 info.probed = false;

// Lazily (re)enumerate if no devices are known yet.
3664 if ( dsDevices.size() == 0 ) {

3665 // Force a query of all devices

3667 if ( dsDevices.size() == 0 ) {

3668 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";

3669 error( RtError::INVALID_USE );

3673 if ( device >= dsDevices.size() ) {

3674 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";

3675 error( RtError::INVALID_USE );

// No output GUID for this device: skip straight to the capture probe.
3679 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;

3681 LPDIRECTSOUND output;

3683 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

3684 if ( FAILED( result ) ) {

3685 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

3686 errorText_ = errorStream_.str();

3687 error( RtError::WARNING );

3691 outCaps.dwSize = sizeof( outCaps );

3692 result = output->GetCaps( &outCaps );

3693 if ( FAILED( result ) ) {

3694 output->Release();

3695 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";

3696 errorText_ = errorStream_.str();

3697 error( RtError::WARNING );

3701 // Get output channel information.

// DirectSound caps only report stereo-vs-mono for the primary buffer.
3702 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;

3704 // Get sample rate information.

3705 info.sampleRates.clear();

// Keep every candidate rate that falls inside the secondary-buffer range.
3706 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {

3707 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&

3708 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )

3709 info.sampleRates.push_back( SAMPLE_RATES[k] );

3712 // Get format information.

3713 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;

3714 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;

3716 output->Release();

3718 if ( getDefaultOutputDevice() == device )

3719 info.isDefaultOutput = true;

// Output-only device: finalize and return (the "return info;" line is
// missing from this extraction).
3721 if ( dsDevices[ device ].validId[1] == false ) {

3722 info.name = dsDevices[ device ].name;

3723 info.probed = true;

// --- capture-side probe (the "probeInput:" label is not visible here) ---
3729 LPDIRECTSOUNDCAPTURE input;

3730 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

3731 if ( FAILED( result ) ) {

3732 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

3733 errorText_ = errorStream_.str();

3734 error( RtError::WARNING );

3739 inCaps.dwSize = sizeof( inCaps );

3740 result = input->GetCaps( &inCaps );

3741 if ( FAILED( result ) ) {

3743 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";

3744 errorText_ = errorStream_.str();

3745 error( RtError::WARNING );

3749 // Get input channel information.

3750 info.inputChannels = inCaps.dwChannels;

3752 // Get sample rate and format information.

// Capture caps encode rate/format support as WAVE_FORMAT_* bit flags,
// split by stereo (xS..) vs mono (xM..) and 16- vs 8-bit.
3753 std::vector<unsigned int> rates;

3754 if ( inCaps.dwChannels >= 2 ) {

3755 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3756 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3757 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3758 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;

3759 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3760 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3761 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;

3762 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;

// Prefer 16-bit rates when 16-bit capture is available at all.
3764 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

3765 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );

3766 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );

3767 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );

3768 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );

3770 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

3771 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );

3772 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );

3773 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );

3774 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );

3777 else if ( inCaps.dwChannels == 1 ) {

3778 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3779 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3780 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3781 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;

3782 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3783 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3784 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3785 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;

3787 if ( info.nativeFormats & RTAUDIO_SINT16 ) {

3788 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );

3789 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );

3790 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );

3791 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );

3793 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {

3794 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );

3795 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );

3796 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );

3797 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );

3800 else info.inputChannels = 0; // technically, this would be an error

3804 if ( info.inputChannels == 0 ) return info;

3806 // Copy the supported rates to the info structure but avoid duplication.

3808 for ( unsigned int i=0; i<rates.size(); i++ ) {

3810 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {

3811 if ( rates[i] == info.sampleRates[j] ) {

3816 if ( found == false ) info.sampleRates.push_back( rates[i] );

3818 std::sort( info.sampleRates.begin(), info.sampleRates.end() );

3820 // If device opens for both playback and capture, we determine the channels.

3821 if ( info.outputChannels > 0 && info.inputChannels > 0 )

3822 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

// Device 0 is the default capture device (first enumerated).
3824 if ( device == 0 ) info.isDefaultInput = true;

3826 // Copy name and return.

3827 info.name = dsDevices[ device ].name;

3828 info.probed = true;
\r
// Open one direction (OUTPUT or INPUT) of a stream on the given device:
// validate the request, create the DirectSound playback or capture buffer,
// size the device buffer from the user buffer size and buffer count, set
// up format-conversion state and the shared DsHandle, and on first open
// launch the callback thread.  Returns true on success.
// NOTE(review): this extraction is missing many original lines — closing
// braces, the "return FAILURE;" after each error path, local declarations
// ("int nBuffers = 0;", "HRESULT result;", "DSCAPS outCaps;",
// "DSCCAPS inCaps;", "DSBCAPS dsbcaps;", "LPVOID audioPtr;",
// "DWORD dataLen;"), the "return SUCCESS;" and the "error:" label that
// precedes the cleanup section near the end.  Comments annotate the code
// as shown; confirm against the canonical RtAudio source.
3832 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

3833 unsigned int firstChannel, unsigned int sampleRate,

3834 RtAudioFormat format, unsigned int *bufferSize,

3835 RtAudio::StreamOptions *options )

3837 if ( channels + firstChannel > 2 ) {

3838 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

3842 unsigned int nDevices = dsDevices.size();

3843 if ( nDevices == 0 ) {

3844 // This should not happen because a check is made before this function is called.

3845 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

3849 if ( device >= nDevices ) {

3850 // This should not happen because a check is made before this function is called.

3851 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

// Verify that the device supports the requested direction.
3855 if ( mode == OUTPUT ) {

3856 if ( dsDevices[ device ].validId[0] == false ) {

3857 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

3858 errorText_ = errorStream_.str();

3862 else { // mode == INPUT

3863 if ( dsDevices[ device ].validId[1] == false ) {

3864 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

3865 errorText_ = errorStream_.str();

3870 // According to a note in PortAudio, using GetDesktopWindow()

3871 // instead of GetForegroundWindow() is supposed to avoid problems

3872 // that occur when the application's window is not the foreground

3873 // window. Also, if the application window closes before the

3874 // DirectSound buffer, DirectSound can crash. In the past, I had

3875 // problems when using GetDesktopWindow() but it seems fine now

3876 // (January 2010). I'll leave it commented here.

3877 // HWND hWnd = GetForegroundWindow();

3878 HWND hWnd = GetDesktopWindow();

3880 // Check the numberOfBuffers parameter and limit the lowest value to

3881 // two. This is a judgement call and a value of two is probably too

3882 // low for capture, but it should work for playback.

3884 if ( options ) nBuffers = options->numberOfBuffers;

3885 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

3886 if ( nBuffers < 2 ) nBuffers = 3;

3888 // Check the lower range of the user-specified buffer size and set

3889 // (arbitrarily) to a lower bound of 32.

3890 if ( *bufferSize < 32 ) *bufferSize = 32;

3892 // Create the wave format structure. The data format setting will

3893 // be determined later.

3894 WAVEFORMATEX waveFormat;

3895 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

3896 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

3897 waveFormat.nChannels = channels + firstChannel;

3898 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

3900 // Determine the device buffer size. By default, we'll use the value

3901 // defined above (32K), but we will grow it to make allowances for

3902 // very large software buffer sizes.

// NOTE(review): stray duplicated ';' at the end of the next line (harmless).
3903 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;;

3904 DWORD dsPointerLeadTime = 0;

// Opaque handles stashed into the DsHandle at the end:
// ohandle = the device object, bhandle = its (capture) buffer.
3906 void *ohandle = 0, *bhandle = 0;

3908 if ( mode == OUTPUT ) {

3910 LPDIRECTSOUND output;

3911 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

3912 if ( FAILED( result ) ) {

3913 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

3914 errorText_ = errorStream_.str();

3919 outCaps.dwSize = sizeof( outCaps );

3920 result = output->GetCaps( &outCaps );

3921 if ( FAILED( result ) ) {

3922 output->Release();

3923 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

3924 errorText_ = errorStream_.str();

3928 // Check channel information.

3929 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

3930 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

3931 errorText_ = errorStream_.str();

3935 // Check format information. Use 16-bit format unless not

3936 // supported or user requests 8-bit.

3937 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

3938 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

3939 waveFormat.wBitsPerSample = 16;

3940 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

3943 waveFormat.wBitsPerSample = 8;

3944 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

3946 stream_.userFormat = format;

3948 // Update wave format structure and buffer information.

3949 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

3950 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

// Lead time in bytes: how far ahead of the DS safe-write pointer we stay.
3951 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

3953 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

3954 while ( dsPointerLeadTime * 2U > dsBufferSize )

3955 dsBufferSize *= 2;

3957 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

3958 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

3959 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

3960 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

3961 if ( FAILED( result ) ) {

3962 output->Release();

3963 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

3964 errorText_ = errorStream_.str();

3968 // Even though we will write to the secondary buffer, we need to

3969 // access the primary buffer to set the correct output format

3970 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

3971 // buffer description.

3972 DSBUFFERDESC bufferDescription;

3973 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

3974 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

3975 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

3977 // Obtain the primary buffer

3978 LPDIRECTSOUNDBUFFER buffer;

3979 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

3980 if ( FAILED( result ) ) {

3981 output->Release();

3982 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

3983 errorText_ = errorStream_.str();

3987 // Set the primary DS buffer sound format.

3988 result = buffer->SetFormat( &waveFormat );

3989 if ( FAILED( result ) ) {

3990 output->Release();

3991 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

3992 errorText_ = errorStream_.str();

3996 // Setup the secondary DS buffer description.

3997 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

3998 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

3999 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

4000 DSBCAPS_GLOBALFOCUS |

4001 DSBCAPS_GETCURRENTPOSITION2 |

4002 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

4003 bufferDescription.dwBufferBytes = dsBufferSize;

4004 bufferDescription.lpwfxFormat = &waveFormat;

4006 // Try to create the secondary DS buffer. If that doesn't work,

4007 // try to use software mixing. Otherwise, there's a problem.

4008 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4009 if ( FAILED( result ) ) {

// Hardware mixing failed; retry identically but with software mixing.
4010 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

4011 DSBCAPS_GLOBALFOCUS |

4012 DSBCAPS_GETCURRENTPOSITION2 |

4013 DSBCAPS_LOCSOFTWARE ); // Force software mixing

4014 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

4015 if ( FAILED( result ) ) {

4016 output->Release();

4017 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

4018 errorText_ = errorStream_.str();

4023 // Get the buffer size ... might be different from what we specified.

4025 dsbcaps.dwSize = sizeof( DSBCAPS );

4026 result = buffer->GetCaps( &dsbcaps );

4027 if ( FAILED( result ) ) {

4028 output->Release();

4029 buffer->Release();

4030 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

4031 errorText_ = errorStream_.str();

4035 dsBufferSize = dsbcaps.dwBufferBytes;

4037 // Lock the DS buffer

4040 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

4041 if ( FAILED( result ) ) {

4042 output->Release();

4043 buffer->Release();

4044 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

4045 errorText_ = errorStream_.str();

4049 // Zero the DS buffer

4050 ZeroMemory( audioPtr, dataLen );

4052 // Unlock the DS buffer

4053 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4054 if ( FAILED( result ) ) {

4055 output->Release();

4056 buffer->Release();

4057 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

4058 errorText_ = errorStream_.str();

4062 ohandle = (void *) output;

4063 bhandle = (void *) buffer;

4066 if ( mode == INPUT ) {

4068 LPDIRECTSOUNDCAPTURE input;

4069 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

4070 if ( FAILED( result ) ) {

4071 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

4072 errorText_ = errorStream_.str();

4077 inCaps.dwSize = sizeof( inCaps );

4078 result = input->GetCaps( &inCaps );

4079 if ( FAILED( result ) ) {

4081 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

4082 errorText_ = errorStream_.str();

4086 // Check channel information.

4087 if ( inCaps.dwChannels < channels + firstChannel ) {

4088 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

4092 // Check format information. Use 16-bit format unless user

4093 // requests 8-bit.

4094 DWORD deviceFormats;

4095 if ( channels + firstChannel == 2 ) {

4096 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

4097 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

4098 waveFormat.wBitsPerSample = 8;

4099 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

4101 else { // assume 16-bit is supported

4102 waveFormat.wBitsPerSample = 16;

4103 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

4106 else { // channel == 1

4107 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

4108 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

4109 waveFormat.wBitsPerSample = 8;

4110 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

4112 else { // assume 16-bit is supported

4113 waveFormat.wBitsPerSample = 16;

4114 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

4117 stream_.userFormat = format;

4119 // Update wave format structure and buffer information.

4120 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

4121 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

4122 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

4124 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

4125 while ( dsPointerLeadTime * 2U > dsBufferSize )

4126 dsBufferSize *= 2;

4128 // Setup the secondary DS buffer description.

4129 DSCBUFFERDESC bufferDescription;

4130 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

4131 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

4132 bufferDescription.dwFlags = 0;

4133 bufferDescription.dwReserved = 0;

4134 bufferDescription.dwBufferBytes = dsBufferSize;

4135 bufferDescription.lpwfxFormat = &waveFormat;

4137 // Create the capture buffer.

4138 LPDIRECTSOUNDCAPTUREBUFFER buffer;

4139 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

4140 if ( FAILED( result ) ) {

4142 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

4143 errorText_ = errorStream_.str();

4147 // Get the buffer size ... might be different from what we specified.

4148 DSCBCAPS dscbcaps;

4149 dscbcaps.dwSize = sizeof( DSCBCAPS );

4150 result = buffer->GetCaps( &dscbcaps );

4151 if ( FAILED( result ) ) {

4153 buffer->Release();

4154 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

4155 errorText_ = errorStream_.str();

4159 dsBufferSize = dscbcaps.dwBufferBytes;

4161 // NOTE: We could have a problem here if this is a duplex stream

4162 // and the play and capture hardware buffer sizes are different

4163 // (I'm actually not sure if that is a problem or not).

4164 // Currently, we are not verifying that.

4166 // Lock the capture buffer

4169 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

4170 if ( FAILED( result ) ) {

4172 buffer->Release();

4173 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

4174 errorText_ = errorStream_.str();

4178 // Zero the buffer

4179 ZeroMemory( audioPtr, dataLen );

4181 // Unlock the buffer

4182 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4183 if ( FAILED( result ) ) {

4185 buffer->Release();

4186 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

4187 errorText_ = errorStream_.str();

4191 ohandle = (void *) input;

4192 bhandle = (void *) buffer;

4195 // Set various stream parameters

4196 DsHandle *handle = 0;

4197 stream_.nDeviceChannels[mode] = channels + firstChannel;

4198 stream_.nUserChannels[mode] = channels;

4199 stream_.bufferSize = *bufferSize;

4200 stream_.channelOffset[mode] = firstChannel;

4201 stream_.deviceInterleaved[mode] = true;

4202 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

4203 else stream_.userInterleaved = true;

4205 // Set flag for buffer conversion

// Conversion is needed when channel count, sample format, or interleaving
// differs between the user-facing and device-facing sides.
4206 stream_.doConvertBuffer[mode] = false;

4207 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

4208 stream_.doConvertBuffer[mode] = true;

4209 if (stream_.userFormat != stream_.deviceFormat[mode])

4210 stream_.doConvertBuffer[mode] = true;

4211 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

4212 stream_.nUserChannels[mode] > 1 )

4213 stream_.doConvertBuffer[mode] = true;

4215 // Allocate necessary internal buffers

4216 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

4217 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

4218 if ( stream_.userBuffer[mode] == NULL ) {

4219 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

4223 if ( stream_.doConvertBuffer[mode] ) {

4225 bool makeBuffer = true;

4226 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex streams, reuse the output-side device buffer when it is
// already large enough for the input side.
4227 if ( mode == INPUT ) {

4228 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

4229 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

4230 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

4234 if ( makeBuffer ) {

4235 bufferBytes *= *bufferSize;

4236 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

4237 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

4238 if ( stream_.deviceBuffer == NULL ) {

4239 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

4245 // Allocate our DsHandle structures for the stream.

4246 if ( stream_.apiHandle == 0 ) {

4248 handle = new DsHandle;

4250 catch ( std::bad_alloc& ) {

// NOTE(review): message says "AsioHandle" but this allocates a DsHandle —
// copy/paste slip in the original; left as-is here (doc-only edit).
4251 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

4255 // Create a manual-reset event.

4256 handle->condition = CreateEvent( NULL, // no security

4257 TRUE, // manual-reset

4258 FALSE, // non-signaled initially

4259 NULL ); // unnamed

4260 stream_.apiHandle = (void *) handle;

4263 handle = (DsHandle *) stream_.apiHandle;

4264 handle->id[mode] = ohandle;

4265 handle->buffer[mode] = bhandle;

4266 handle->dsBufferSize[mode] = dsBufferSize;

4267 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

4269 stream_.device[mode] = device;

4270 stream_.state = STREAM_STOPPED;

4271 if ( stream_.mode == OUTPUT && mode == INPUT )

4272 // We had already set up an output stream.

4273 stream_.mode = DUPLEX;

4275 stream_.mode = mode;

4276 stream_.nBuffers = nBuffers;

4277 stream_.sampleRate = sampleRate;

4279 // Setup the buffer conversion information structure.

4280 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

4282 // Setup the callback thread.

4283 if ( stream_.callbackInfo.isRunning == false ) {

4284 unsigned threadId;

4285 stream_.callbackInfo.isRunning = true;

4286 stream_.callbackInfo.object = (void *) this;

4287 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

4288 &stream_.callbackInfo, 0, &threadId );

4289 if ( stream_.callbackInfo.thread == 0 ) {

4290 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

4294 // Boost DS thread priority

4295 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// Error cleanup (reached via the "error:" label not visible in this
// extraction): release any DirectSound objects and free all buffers
// allocated above, then fall through to the missing "return FAILURE;".
4301 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

4302 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

4303 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4304 if ( buffer ) buffer->Release();

4305 object->Release();

4307 if ( handle->buffer[1] ) {

4308 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

4309 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4310 if ( buffer ) buffer->Release();

4311 object->Release();

4313 CloseHandle( handle->condition );

4315 stream_.apiHandle = 0;

4318 for ( int i=0; i<2; i++ ) {

4319 if ( stream_.userBuffer[i] ) {

4320 free( stream_.userBuffer[i] );

4321 stream_.userBuffer[i] = 0;

4325 if ( stream_.deviceBuffer ) {

4326 free( stream_.deviceBuffer );

4327 stream_.deviceBuffer = 0;
\r
// Close an open stream: stop the callback thread, release the DirectSound
// playback/capture objects and their buffers, free internal user/device
// buffers, and reset the stream to the UNINITIALIZED/CLOSED state.
// NOTE(review): several original lines (closing braces, an early return
// after the warning, buffer->Stop() calls before the Releases) are missing
// from this extraction; confirm against the canonical RtAudio source.
4333 void RtApiDs :: closeStream()

4335 if ( stream_.state == STREAM_CLOSED ) {

4336 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

4337 error( RtError::WARNING );

4341 // Stop the callback thread.

// Clearing isRunning tells callbackHandler's loop to exit; we then block
// until the thread terminates and reclaim its handle.
4342 stream_.callbackInfo.isRunning = false;

4343 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

4344 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

4346 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4348 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

4349 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

4350 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4353 buffer->Release();

4355 object->Release();

4357 if ( handle->buffer[1] ) {

4358 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

4359 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4362 buffer->Release();

4364 object->Release();

4366 CloseHandle( handle->condition );

4368 stream_.apiHandle = 0;

// Free the per-direction user buffers and the shared device buffer.
4371 for ( int i=0; i<2; i++ ) {

4372 if ( stream_.userBuffer[i] ) {

4373 free( stream_.userBuffer[i] );

4374 stream_.userBuffer[i] = 0;

4378 if ( stream_.deviceBuffer ) {

4379 free( stream_.deviceBuffer );

4380 stream_.deviceBuffer = 0;

4383 stream_.mode = UNINITIALIZED;

4384 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: raise timer resolution, start the DirectSound
// playback buffer (looping) and/or the capture buffer, reset the drain
// bookkeeping, and mark the stream RUNNING.  For duplex streams, half a
// second of capture data is discarded while the devices synchronize.
// NOTE(review): closing braces and the "goto unlock;" lines of the error
// paths are missing from this extraction; confirm against the canonical
// RtAudio source.
4387 void RtApiDs :: startStream()

4390 if ( stream_.state == STREAM_RUNNING ) {

4391 errorText_ = "RtApiDs::startStream(): the stream is already running!";

4392 error( RtError::WARNING );

4396 //MUTEX_LOCK( &stream_.mutex );

4398 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4400 // Increase scheduler frequency on lesser windows (a side-effect of

4401 // increasing timer accuracy). On greater windows (Win2K or later),

4402 // this is already in effect.

4403 timeBeginPeriod( 1 );

4405 buffersRolling = false;

4406 duplexPrerollBytes = 0;

4408 if ( stream_.mode == DUPLEX ) {

4409 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

4410 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

4413 HRESULT result = 0;

4414 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

4416 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4417 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

4418 if ( FAILED( result ) ) {

4419 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

4420 errorText_ = errorStream_.str();

4425 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

4427 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4428 result = buffer->Start( DSCBSTART_LOOPING );

4429 if ( FAILED( result ) ) {

4430 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

4431 errorText_ = errorStream_.str();

// Reset drain state and the manual-reset event used by stopStream to
// wait for the output buffer to drain.
4436 handle->drainCounter = 0;

4437 handle->internalDrain = false;

4438 ResetEvent( handle->condition );

4439 stream_.state = STREAM_RUNNING;

4442 // MUTEX_UNLOCK( &stream_.mutex );

4444 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
// Stop a running DirectSound stream. For output/duplex, first lets any
// pending drain complete (blocks on handle->condition when drainCounter==0),
// then stops the DS buffer, zero-fills it so a restart doesn't replay stale
// audio, and rewinds the stream's internal buffer pointer to 0. The capture
// side is stopped and cleared the same way.
// NOTE(review): this text is a lossy extraction — interior lines (closing
// braces, `return;`, the audioPtr/dataLen declarations used by Lock/Unlock,
// and the error(...) calls after each errorText_ assignment) are missing.
// Code lines are left byte-identical below.
4447 void RtApiDs :: stopStream()

// Stopping an already-stopped stream is only a warning, not an error.
4450 if ( stream_.state == STREAM_STOPPED ) {

4451 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

4452 error( RtError::WARNING );

4457 MUTEX_LOCK( &stream_.mutex );

// Re-check under the lock: the callback thread may have stopped the
// stream while we were waiting on the mutex.
4459 if ( stream_.state == STREAM_STOPPED ) {

4460 MUTEX_UNLOCK( &stream_.mutex );

4465 HRESULT result = 0;

4468 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4469 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain in progress: request one (value 2)
// and block until the callback thread signals the drain has finished.
4470 if ( handle->drainCounter == 0 ) {

4471 handle->drainCounter = 2;

4472 // MUTEX_UNLOCK( &stream_.mutex );

4473 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

4474 //ResetEvent( handle->condition );

4475 // MUTEX_LOCK( &stream_.mutex );

4478 stream_.state = STREAM_STOPPED;

4480 // Stop the buffer and clear memory

4481 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4482 result = buffer->Stop();

4483 if ( FAILED( result ) ) {

4484 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

4485 errorText_ = errorStream_.str();

4489 // Lock the buffer and clear it so that if we start to play again,

4490 // we won't have old data playing.

4491 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

4492 if ( FAILED( result ) ) {

4493 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

4494 errorText_ = errorStream_.str();

4498 // Zero the DS buffer

4499 ZeroMemory( audioPtr, dataLen );

4501 // Unlock the DS buffer

4502 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4503 if ( FAILED( result ) ) {

4504 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

4505 errorText_ = errorStream_.str();

4509 // If we start playing again, we must begin at beginning of buffer.

4510 handle->bufferPointer[0] = 0;

// Same stop/clear sequence for the capture buffer (index 1).
4513 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

4514 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4518 stream_.state = STREAM_STOPPED;

4520 result = buffer->Stop();

4521 if ( FAILED( result ) ) {

4522 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

4523 errorText_ = errorStream_.str();

4527 // Lock the buffer and clear it so that if we start to play again,

4528 // we won't have old data playing.

4529 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

4530 if ( FAILED( result ) ) {

4531 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

4532 errorText_ = errorStream_.str();

4536 // Zero the DS buffer

4537 ZeroMemory( audioPtr, dataLen );

4539 // Unlock the DS buffer

4540 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4541 if ( FAILED( result ) ) {

4542 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

4543 errorText_ = errorStream_.str();

4547 // If we start recording again, we must begin at beginning of buffer.

4548 handle->bufferPointer[1] = 0;

// Undo the timeBeginPeriod(1) presumably done at stream start — TODO confirm
// against the (not visible here) startStream implementation.
4552 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

4553 // MUTEX_UNLOCK( &stream_.mutex );

// Any recorded DS failure above is surfaced once, here.
4555 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
// Abort (stop without draining) a running stream: setting drainCounter to 2
// asks the callback thread to stop immediately rather than play out pending
// buffers. Presumably followed by a stopStream() call on a line dropped by
// this extraction — TODO confirm against the canonical source.
4558 void RtApiDs :: abortStream()

4561 if ( stream_.state == STREAM_STOPPED ) {

4562 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

4563 error( RtError::WARNING );

4567 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4568 handle->drainCounter = 2;
\r
// One iteration of the DirectSound streaming loop, run repeatedly by the
// callbackHandler thread: invokes the user callback for fresh output data,
// copies it into the DS output ring buffer (waiting until the write region
// is safe), and/or reads the next block from the DS capture ring buffer,
// handling under/overrun detection, drain, and duplex pointer sync.
// NOTE(review): lossy extraction — interior lines (returns, closing braces,
// the `buffer`/`bufferBytes` declarations, loop headers around the
// wait/retry sections) are missing. Code lines left byte-identical.
4573 void RtApiDs :: callbackEvent()

// While stopped, just idle so the thread doesn't spin.
4575 if ( stream_.state == STREAM_STOPPED ) {

4576 Sleep( 50 ); // sleep 50 milliseconds

4580 if ( stream_.state == STREAM_CLOSED ) {

4581 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";

4582 error( RtError::WARNING );

4586 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;

4587 DsHandle *handle = (DsHandle *) stream_.apiHandle;

4589 // Check if we were draining the stream and signal is finished.

// Drain completes after all nBuffers of queued audio have been emitted;
// signal stopStream() (which is blocked on handle->condition) unless the
// drain was requested internally by the callback's return value.
4590 if ( handle->drainCounter > stream_.nBuffers + 2 ) {

4591 if ( handle->internalDrain == false )

4592 SetEvent( handle->condition );

4599 MUTEX_LOCK( &stream_.mutex );

4601 // The state might change while waiting on a mutex.

4602 if ( stream_.state == STREAM_STOPPED ) {

4603 MUTEX_UNLOCK( &stream_.mutex );

4608 // Invoke user callback to get fresh output data UNLESS we are

4609 // draining stream.

4610 if ( handle->drainCounter == 0 ) {

4611 RtAudioCallback callback = (RtAudioCallback) info->callback;

4612 double streamTime = getStreamTime();

4613 RtAudioStreamStatus status = 0;

// Report (and clear) any xrun flags recorded by the previous iteration.
4614 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {

4615 status |= RTAUDIO_OUTPUT_UNDERFLOW;

4616 handle->xrun[0] = false;

4618 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {

4619 status |= RTAUDIO_INPUT_OVERFLOW;

4620 handle->xrun[1] = false;

// Callback return value: 0 = continue, 1 = drain then stop, 2 = abort now.
4622 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],

4623 stream_.bufferSize, streamTime, status, info->userData );

4624 if ( handle->drainCounter == 2 ) {

4625 // MUTEX_UNLOCK( &stream_.mutex );

4629 else if ( handle->drainCounter == 1 )

4630 handle->internalDrain = true;

4634 DWORD currentWritePointer, safeWritePointer;

4635 DWORD currentReadPointer, safeReadPointer;

4636 UINT nextWritePointer;

4638 LPVOID buffer1 = NULL;

4639 LPVOID buffer2 = NULL;

4640 DWORD bufferSize1 = 0;

4641 DWORD bufferSize2 = 0;

// First iteration only: wait for the DS devices to actually start moving
// their pointers before choosing our initial read/write offsets.
4646 if ( buffersRolling == false ) {

4647 if ( stream_.mode == DUPLEX ) {

4648 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );

4650 // It takes a while for the devices to get rolling. As a result,

4651 // there's no guarantee that the capture and write device pointers

4652 // will move in lockstep. Wait here for both devices to start

4653 // rolling, and then set our buffer pointers accordingly.

4654 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600

4655 // bytes later than the write buffer.

4657 // Stub: a serious risk of having a pre-emptive scheduling round

4658 // take place between the two GetCurrentPosition calls... but I'm

4659 // really not sure how to solve the problem. Temporarily boost to

4660 // Realtime priority, maybe; but I'm not sure what priority the

4661 // DirectSound service threads run at. We *should* be roughly

4662 // within a ms or so of correct.

4664 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4665 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4667 DWORD startSafeWritePointer, startSafeReadPointer;

4669 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );

4670 if ( FAILED( result ) ) {

4671 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4672 errorText_ = errorStream_.str();

4673 error( RtError::SYSTEM_ERROR );

4675 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );

4676 if ( FAILED( result ) ) {

4677 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4678 errorText_ = errorStream_.str();

4679 error( RtError::SYSTEM_ERROR );

// Poll (loop header lost in extraction) until BOTH pointers have moved.
4682 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );

4683 if ( FAILED( result ) ) {

4684 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4685 errorText_ = errorStream_.str();

4686 error( RtError::SYSTEM_ERROR );

4688 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );

4689 if ( FAILED( result ) ) {

4690 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4691 errorText_ = errorStream_.str();

4692 error( RtError::SYSTEM_ERROR );

4694 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;

4698 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );

// Start writing dsPointerLeadTime bytes ahead of the safe write position,
// wrapping within the ring buffer.
4700 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];

4701 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];

4702 handle->bufferPointer[1] = safeReadPointer;

4704 else if ( stream_.mode == OUTPUT ) {

4706 // Set the proper nextWritePosition after initial startup.

4707 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// NOTE(review): "¤tWritePointer" is HTML-entity mojibake — "&curr" was
// collapsed to ¤ ("&curren;"). The argument should read &currentWritePointer
// (declared above at orig line 4634). Same corruption recurs below.
4708 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );

4709 if ( FAILED( result ) ) {

4710 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4711 errorText_ = errorStream_.str();

4712 error( RtError::SYSTEM_ERROR );

4714 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];

4715 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];

4718 buffersRolling = true;

// ----- Playback side: push one buffer of output into the DS ring. -----
4721 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

4723 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4725 if ( handle->drainCounter > 1 ) { // write zeros to the output stream

4726 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];

4727 bufferBytes *= formatBytes( stream_.userFormat );

4728 memset( stream_.userBuffer[0], 0, bufferBytes );

4731 // Setup parameters and do buffer conversion if necessary.

4732 if ( stream_.doConvertBuffer[0] ) {

4733 buffer = stream_.deviceBuffer;

4734 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );

4735 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];

4736 bufferBytes *= formatBytes( stream_.deviceFormat[0] );

4739 buffer = stream_.userBuffer[0];

4740 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];

4741 bufferBytes *= formatBytes( stream_.userFormat );

4744 // No byte swapping necessary in DirectSound implementation.

4746 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is

4747 // unsigned. So, we need to convert our signed 8-bit data here to

4749 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )

4750 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );

4752 DWORD dsBufferSize = handle->dsBufferSize[0];

4753 nextWritePointer = handle->bufferPointer[0];

4755 DWORD endWrite, leadPointer;

4757 // Find out where the read and "safe write" pointers are.

// NOTE(review): mojibake again — should be &currentWritePointer.
4758 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );

4759 if ( FAILED( result ) ) {

4760 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4761 errorText_ = errorStream_.str();

4762 error( RtError::SYSTEM_ERROR );

4765 // We will copy our output buffer into the region between

4766 // safeWritePointer and leadPointer. If leadPointer is not

4767 // beyond the next endWrite position, wait until it is.

4768 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];

4769 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;

4770 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;

4771 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset

4772 endWrite = nextWritePointer + bufferBytes;

4774 // Check whether the entire write region is behind the play pointer.

4775 if ( leadPointer >= endWrite ) break;

4777 // If we are here, then we must wait until the leadPointer advances

4778 // beyond the end of our next write region. We use the

4779 // Sleep() function to suspend operation until that happens.

4780 double millis = ( endWrite - leadPointer ) * 1000.0;

4781 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);

4782 if ( millis < 1.0 ) millis = 1.0;

4783 Sleep( (DWORD) millis );

// If our intended write region overlaps the play..safe-write span, we've
// underrun: flag it and resync just ahead of the safe write position.
4786 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )

4787 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {

4788 // We've strayed into the forbidden zone ... resync the read pointer.

4789 handle->xrun[0] = true;

4790 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;

4791 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;

4792 handle->bufferPointer[0] = nextWritePointer;

4793 endWrite = nextWritePointer + bufferBytes;

4796 // Lock free space in the buffer

// Lock may return two regions (buffer1/buffer2) when the range wraps.
4797 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,

4798 &bufferSize1, &buffer2, &bufferSize2, 0 );

4799 if ( FAILED( result ) ) {

4800 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";

4801 errorText_ = errorStream_.str();

4802 error( RtError::SYSTEM_ERROR );

4805 // Copy our buffer into the DS buffer

4806 CopyMemory( buffer1, buffer, bufferSize1 );

4807 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );

4809 // Update our buffer offset and unlock sound buffer

// NOTE(review): Unlock's return value is not assigned to `result`, so the
// FAILED(result) check below re-tests the earlier Lock result — looks like
// a pre-existing bug; confirm against upstream before changing.
4810 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );

4811 if ( FAILED( result ) ) {

4812 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";

4813 errorText_ = errorStream_.str();

4814 error( RtError::SYSTEM_ERROR );

4816 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;

4817 handle->bufferPointer[0] = nextWritePointer;

// While draining, count emitted buffers toward the nBuffers+2 threshold.
4819 if ( handle->drainCounter ) {

4820 handle->drainCounter++;

// ----- Capture side: pull one buffer of input from the DS ring. -----
4825 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

4827 // Setup parameters.

4828 if ( stream_.doConvertBuffer[1] ) {

4829 buffer = stream_.deviceBuffer;

4830 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];

4831 bufferBytes *= formatBytes( stream_.deviceFormat[1] );

4834 buffer = stream_.userBuffer[1];

4835 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];

4836 bufferBytes *= formatBytes( stream_.userFormat );

4839 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4840 long nextReadPointer = handle->bufferPointer[1];

4841 DWORD dsBufferSize = handle->dsBufferSize[1];

4843 // Find out where the write and "safe read" pointers are.

// NOTE(review): mojibake — should be &currentReadPointer (declared at 4635).
4844 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );

4845 if ( FAILED( result ) ) {

4846 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4847 errorText_ = errorStream_.str();

4848 error( RtError::SYSTEM_ERROR );

4851 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset

4852 DWORD endRead = nextReadPointer + bufferBytes;

4854 // Handling depends on whether we are INPUT or DUPLEX.

4855 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,

4856 // then a wait here will drag the write pointers into the forbidden zone.

4858 // In DUPLEX mode, rather than wait, we will back off the read pointer until

4859 // it's in a safe position. This causes dropouts, but it seems to be the only

4860 // practical way to sync up the read and write pointers reliably, given the

4861 // the very complex relationship between phase and increment of the read and write

4864 // In order to minimize audible dropouts in DUPLEX mode, we will

4865 // provide a pre-roll period of 0.5 seconds in which we return

4866 // zeros from the read buffer while the pointers sync up.

4868 if ( stream_.mode == DUPLEX ) {

4869 if ( safeReadPointer < endRead ) {

4870 if ( duplexPrerollBytes <= 0 ) {

4871 // Pre-roll time over. Be more agressive.

4872 int adjustment = endRead-safeReadPointer;

4874 handle->xrun[1] = true;

4876 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,

4877 // and perform fine adjustments later.

4878 // - small adjustments: back off by twice as much.

4879 if ( adjustment >= 2*bufferBytes )

4880 nextReadPointer = safeReadPointer-2*bufferBytes;

4882 nextReadPointer = safeReadPointer-bufferBytes-adjustment;

4884 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;

4888 // In pre=roll time. Just do it.

4889 nextReadPointer = safeReadPointer - bufferBytes;

4890 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;

4892 endRead = nextReadPointer + bufferBytes;

4895 else { // mode == INPUT

4896 while ( safeReadPointer < endRead ) {

4897 // See comments for playback.

4898 double millis = (endRead - safeReadPointer) * 1000.0;

4899 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);

4900 if ( millis < 1.0 ) millis = 1.0;

4901 Sleep( (DWORD) millis );

4903 // Wake up and find out where we are now.

// NOTE(review): mojibake — should be &currentReadPointer.
4904 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );

4905 if ( FAILED( result ) ) {

4906 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4907 errorText_ = errorStream_.str();

4908 error( RtError::SYSTEM_ERROR );

4911 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset

4915 // Lock free space in the buffer

4916 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,

4917 &bufferSize1, &buffer2, &bufferSize2, 0 );

4918 if ( FAILED( result ) ) {

4919 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";

4920 errorText_ = errorStream_.str();

4921 error( RtError::SYSTEM_ERROR );

// During duplex pre-roll, hand the user zeros instead of captured data.
4924 if ( duplexPrerollBytes <= 0 ) {

4925 // Copy our buffer into the DS buffer

4926 CopyMemory( buffer, buffer1, bufferSize1 );

4927 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );

4930 memset( buffer, 0, bufferSize1 );

4931 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );

4932 duplexPrerollBytes -= bufferSize1 + bufferSize2;

4935 // Update our buffer offset and unlock sound buffer

4936 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;

4937 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );

4938 if ( FAILED( result ) ) {

4939 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";

4940 errorText_ = errorStream_.str();

4941 error( RtError::SYSTEM_ERROR );

4943 handle->bufferPointer[1] = nextReadPointer;

4945 // No byte swapping necessary in DirectSound implementation.

4947 // If necessary, convert 8-bit data from unsigned to signed.

4948 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )

4949 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );

4951 // Do buffer conversion if necessary.

4952 if ( stream_.doConvertBuffer[1] )

4953 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

4957 // MUTEX_UNLOCK( &stream_.mutex );

4959 RtApi::tickStreamTime();
\r
4962 // Definitions for utility functions and callbacks
\r
4963 // specific to the DirectSound implementation.
\r
// Thread entry point for the DirectSound callback thread (created with
// _beginthreadex elsewhere): repeatedly drives callbackEvent() until
// info->isRunning is cleared, then exits the thread.
4965 extern "C" unsigned __stdcall callbackHandler( void *ptr )

4967 CallbackInfo *info = (CallbackInfo *) ptr;

4968 RtApiDs *object = (RtApiDs *) info->object;

// Polled flag owned by the stream-control code; flipping it false ends the loop.
4969 bool* isRunning = &info->isRunning;

4971 while ( *isRunning == true ) {

4972 object->callbackEvent();

4975 _endthreadex( 0 );
\r
4979 #include "tchar.h"
\r
// Convert a Windows TCHAR string to a std::string. In UNICODE builds the
// wide string is transcoded to UTF-8 via WideCharToMultiByte (first call
// sizes the buffer, second fills it); in ANSI builds it's a direct copy.
4981 std::string convertTChar( LPCTSTR name )

4983 #if defined( UNICODE ) || defined( _UNICODE )

4984 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);

4985 std::string s( length, 0 );

4986 length = WideCharToMultiByte(CP_UTF8, 0, name, wcslen(name), &s[0], length, NULL, NULL);

4988 std::string s( name );
\r
// DirectSoundEnumerate/DirectSoundCaptureEnumerate callback: probes each
// enumerated device (capture path when *lpContext is true, playback
// otherwise), and records valid devices in the global dsDevices list,
// merging playback/capture GUIDs for devices that share a name.
// Returning TRUE tells DirectSound to continue enumeration.
4994 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,

4995 LPCTSTR description,

4997 LPVOID lpContext )

4999 bool *isInput = (bool *) lpContext;

5002 bool validDevice = false;

5003 if ( *isInput == true ) {

// Capture probe: device is valid if it reports channels and formats.
5005 LPDIRECTSOUNDCAPTURE object;

5007 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );

5008 if ( hr != DS_OK ) return TRUE;

5010 caps.dwSize = sizeof(caps);

5011 hr = object->GetCaps( &caps );

5012 if ( hr == DS_OK ) {

5013 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )

5014 validDevice = true;

5016 object->Release();

// Playback probe: device is valid if it supports mono or stereo primary buffers.
5020 LPDIRECTSOUND object;

5021 hr = DirectSoundCreate( lpguid, &object, NULL );

5022 if ( hr != DS_OK ) return TRUE;

5024 caps.dwSize = sizeof(caps);

5025 hr = object->GetCaps( &caps );

5026 if ( hr == DS_OK ) {

5027 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )

5028 validDevice = true;

5030 object->Release();

5033 // If good device, then save its name and guid.

5034 std::string name = convertTChar( description );

// Normalize the two localized "primary" aliases to one canonical entry.
5035 if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )

5036 name = "Default Device";

5037 if ( validDevice ) {

// Existing entry with the same name: attach this GUID on the matching
// side — id[1]/validId[1] for capture, id[0]/validId[0] for playback.
5038 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {

5039 if ( dsDevices[i].name == name ) {

5040 dsDevices[i].found = true;

5042 dsDevices[i].id[1] = lpguid;

5043 dsDevices[i].validId[1] = true;

5046 dsDevices[i].id[0] = lpguid;

5047 dsDevices[i].validId[0] = true;

// No matching entry: create a fresh record for this device.
5054 device.name = name;

5055 device.found = true;

5057 device.id[1] = lpguid;

5058 device.validId[1] = true;

5061 device.id[0] = lpguid;

5062 device.validId[0] = true;

5064 dsDevices.push_back( device );
\r
// Map a DirectSound HRESULT error code to a short human-readable string
// for use in the errorStream_ messages above. Unknown codes fall through
// to a generic message. (The `switch ( code )` header line was dropped by
// this extraction.)
5070 static const char* getErrorString( int code )

5074 case DSERR_ALLOCATED:

5075 return "Already allocated";

5077 case DSERR_CONTROLUNAVAIL:

5078 return "Control unavailable";

5080 case DSERR_INVALIDPARAM:

5081 return "Invalid parameter";

5083 case DSERR_INVALIDCALL:

5084 return "Invalid call";

5086 case DSERR_GENERIC:

5087 return "Generic error";

5089 case DSERR_PRIOLEVELNEEDED:

5090 return "Priority level needed";

5092 case DSERR_OUTOFMEMORY:

5093 return "Out of memory";

5095 case DSERR_BADFORMAT:

5096 return "The sample rate or the channel format is not supported";

5098 case DSERR_UNSUPPORTED:

5099 return "Not supported";

5101 case DSERR_NODRIVER:

5102 return "No driver";

5104 case DSERR_ALREADYINITIALIZED:

5105 return "Already initialized";

5107 case DSERR_NOAGGREGATION:

5108 return "No aggregation";

5110 case DSERR_BUFFERLOST:

5111 return "Buffer lost";

5113 case DSERR_OTHERAPPHASPRIO:

5114 return "Another application already has priority";

5116 case DSERR_UNINITIALIZED:

5117 return "Uninitialized";

5120 return "DirectSound unknown error";
\r
5123 //******************** End of __WINDOWS_DS__ *********************//
\r
5127 #if defined(__LINUX_ALSA__)
\r
5129 #include <alsa/asoundlib.h>
\r
5130 #include <unistd.h>
\r
5132 // A structure to hold various information related to the ALSA API
\r
5133 // implementation.
\r
// Per-stream bookkeeping for the ALSA backend: the playback/capture PCM
// handles, whether the two directions are hardware-linked (synchronized),
// and a condition variable used to gate the callback thread.
// NOTE(review): the extraction dropped some members — the constructor's
// init list below references `runnable` and `xrun`, which are not among
// the visible declarations.
5134 struct AlsaHandle {

// handles[0] = playback PCM, handles[1] = capture PCM (matching the
// [0]=output/[1]=input convention used throughout this file).
5135 snd_pcm_t *handles[2];

5136 bool synchronized;

5138 pthread_cond_t runnable_cv;

5142 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5145 extern "C" void *alsaCallbackHandler( void * ptr );
\r
// Default constructor: no ALSA resources are acquired until a stream is opened.
5147 RtApiAlsa :: RtApiAlsa()

5149 // Nothing to do here.
\r
// Destructor: make sure any open stream is torn down before the API object dies.
5152 RtApiAlsa :: ~RtApiAlsa()

5154 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
// Count available ALSA PCM devices by walking every sound card ("hw:N")
// and every PCM subdevice on it via the control interface. Failures to
// open a card are reported as warnings and the card is skipped.
// (Extraction note: the `name` buffer declaration, the nDevices increment
// and the final return are among the dropped lines.)
5157 unsigned int RtApiAlsa :: getDeviceCount( void )

5159 unsigned nDevices = 0;

5160 int result, subdevice, card;

5162 snd_ctl_t *handle;

5164 // Count cards and devices

// snd_card_next(-1 initial) iterates cards; returns card = -1 at the end.
5166 snd_card_next( &card );

5167 while ( card >= 0 ) {

5168 sprintf( name, "hw:%d", card );

5169 result = snd_ctl_open( &handle, name, 0 );

5170 if ( result < 0 ) {

5171 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";

5172 errorText_ = errorStream_.str();

5173 error( RtError::WARNING );

// Inner loop over PCM subdevices: subdevice == -1 signals no more.
5178 result = snd_ctl_pcm_next_device( handle, &subdevice );

5179 if ( result < 0 ) {

5180 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";

5181 errorText_ = errorStream_.str();

5182 error( RtError::WARNING );

5185 if ( subdevice < 0 )

5190 snd_ctl_close( handle );

5191 snd_card_next( &card );
\r
// Probe ALSA device number `device` (in enumeration order) and fill an
// RtAudio::DeviceInfo: output/input/duplex channel counts, supported
// sample rates, native data formats, and a display name. Probe failures
// on one direction fall through (goto) to trying the other direction.
// NOTE(review): lossy extraction — label lines (captureProbe:,
// probeParameters:, probeDeviceInfo presumably), several closing braces
// and the `char name[64]` / `char *cardname` declarations are missing.
// Code lines left byte-identical.
5197 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )

5199 RtAudio::DeviceInfo info;

5200 info.probed = false;

5202 unsigned nDevices = 0;

5203 int result, subdevice, card;

5205 snd_ctl_t *chandle;

5207 // Count cards and devices

// Same card/subdevice walk as getDeviceCount, but stops with `chandle`
// open on the card that owns the requested device index.
5209 snd_card_next( &card );

5210 while ( card >= 0 ) {

5211 sprintf( name, "hw:%d", card );

5212 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );

5213 if ( result < 0 ) {

5214 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";

5215 errorText_ = errorStream_.str();

5216 error( RtError::WARNING );

5221 result = snd_ctl_pcm_next_device( chandle, &subdevice );

5222 if ( result < 0 ) {

5223 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";

5224 errorText_ = errorStream_.str();

5225 error( RtError::WARNING );

5228 if ( subdevice < 0 ) break;

5229 if ( nDevices == device ) {

5230 sprintf( name, "hw:%d,%d", card, subdevice );

5236 snd_ctl_close( chandle );

5237 snd_card_next( &card );

5240 if ( nDevices == 0 ) {

5241 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";

5242 error( RtError::INVALID_USE );

5245 if ( device >= nDevices ) {

5246 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";

5247 error( RtError::INVALID_USE );

5252 // If a stream is already open, we cannot probe the stream devices.

5253 // Thus, use the saved results.

5254 if ( stream_.state != STREAM_CLOSED &&

5255 ( stream_.device[0] == device || stream_.device[1] == device ) ) {

5256 if ( device >= devices_.size() ) {

5257 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";

5258 error( RtError::WARNING );

5261 return devices_[ device ];

5264 int openMode = SND_PCM_ASYNC;

5265 snd_pcm_stream_t stream;

5266 snd_pcm_info_t *pcminfo;

// alloca-style stack allocation — no free needed.
5267 snd_pcm_info_alloca( &pcminfo );

5268 snd_pcm_t *phandle;

5269 snd_pcm_hw_params_t *params;

// NOTE(review): "¶ms" is HTML-entity mojibake — "&para" was collapsed
// to ¶ ("&para;"). The argument should read &params.
5270 snd_pcm_hw_params_alloca( ¶ms );

5272 // First try for playback

5273 stream = SND_PCM_STREAM_PLAYBACK;

5274 snd_pcm_info_set_device( pcminfo, subdevice );

5275 snd_pcm_info_set_subdevice( pcminfo, 0 );

5276 snd_pcm_info_set_stream( pcminfo, stream );

5278 result = snd_ctl_pcm_info( chandle, pcminfo );

5279 if ( result < 0 ) {

5280 // Device probably doesn't support playback.

5281 goto captureProbe;

// Open non-blocking so a busy device doesn't hang the probe.
5284 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );

5285 if ( result < 0 ) {

5286 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";

5287 errorText_ = errorStream_.str();

5288 error( RtError::WARNING );

5289 goto captureProbe;

5292 // The device is open ... fill the parameter structure.

5293 result = snd_pcm_hw_params_any( phandle, params );

5294 if ( result < 0 ) {

5295 snd_pcm_close( phandle );

5296 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";

5297 errorText_ = errorStream_.str();

5298 error( RtError::WARNING );

5299 goto captureProbe;

5302 // Get output channel information.

5303 unsigned int value;

5304 result = snd_pcm_hw_params_get_channels_max( params, &value );

5305 if ( result < 0 ) {

5306 snd_pcm_close( phandle );

5307 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";

5308 errorText_ = errorStream_.str();

5309 error( RtError::WARNING );

5310 goto captureProbe;

5312 info.outputChannels = value;

5313 snd_pcm_close( phandle );

5316 // Now try for capture

5317 stream = SND_PCM_STREAM_CAPTURE;

5318 snd_pcm_info_set_stream( pcminfo, stream );

5320 result = snd_ctl_pcm_info( chandle, pcminfo );

5321 snd_ctl_close( chandle );

5322 if ( result < 0 ) {

5323 // Device probably doesn't support capture.

// If neither direction works there is nothing more to report.
5324 if ( info.outputChannels == 0 ) return info;

5325 goto probeParameters;

5328 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);

5329 if ( result < 0 ) {

5330 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";

5331 errorText_ = errorStream_.str();

5332 error( RtError::WARNING );

5333 if ( info.outputChannels == 0 ) return info;

5334 goto probeParameters;

5337 // The device is open ... fill the parameter structure.

5338 result = snd_pcm_hw_params_any( phandle, params );

5339 if ( result < 0 ) {

5340 snd_pcm_close( phandle );

5341 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";

5342 errorText_ = errorStream_.str();

5343 error( RtError::WARNING );

5344 if ( info.outputChannels == 0 ) return info;

5345 goto probeParameters;

5348 result = snd_pcm_hw_params_get_channels_max( params, &value );

5349 if ( result < 0 ) {

5350 snd_pcm_close( phandle );

5351 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";

5352 errorText_ = errorStream_.str();

5353 error( RtError::WARNING );

5354 if ( info.outputChannels == 0 ) return info;

5355 goto probeParameters;

5357 info.inputChannels = value;

5358 snd_pcm_close( phandle );

5360 // If device opens for both playback and capture, we determine the channels.

5361 if ( info.outputChannels > 0 && info.inputChannels > 0 )

5362 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

5364 // ALSA doesn't provide default devices so we'll use the first available one.

5365 if ( device == 0 && info.outputChannels > 0 )

5366 info.isDefaultOutput = true;

5367 if ( device == 0 && info.inputChannels > 0 )

5368 info.isDefaultInput = true;

5371 // At this point, we just need to figure out the supported data

5372 // formats and sample rates. We'll proceed by opening the device in

5373 // the direction with the maximum number of channels, or playback if

5374 // they are equal. This might limit our sample rate options, but so

5377 if ( info.outputChannels >= info.inputChannels )

5378 stream = SND_PCM_STREAM_PLAYBACK;

5380 stream = SND_PCM_STREAM_CAPTURE;

5381 snd_pcm_info_set_stream( pcminfo, stream );

5383 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);

5384 if ( result < 0 ) {

5385 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";

5386 errorText_ = errorStream_.str();

5387 error( RtError::WARNING );

5391 // The device is open ... fill the parameter structure.

5392 result = snd_pcm_hw_params_any( phandle, params );

5393 if ( result < 0 ) {

5394 snd_pcm_close( phandle );

5395 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";

5396 errorText_ = errorStream_.str();

5397 error( RtError::WARNING );

5401 // Test our discrete set of sample rate values.

5402 info.sampleRates.clear();

5403 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {

5404 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )

5405 info.sampleRates.push_back( SAMPLE_RATES[i] );

5407 if ( info.sampleRates.size() == 0 ) {

5408 snd_pcm_close( phandle );

5409 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";

5410 errorText_ = errorStream_.str();

5411 error( RtError::WARNING );

5415 // Probe the supported data formats ... we don't care about endian-ness just yet

5416 snd_pcm_format_t format;

5417 info.nativeFormats = 0;

5418 format = SND_PCM_FORMAT_S8;

5419 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5420 info.nativeFormats |= RTAUDIO_SINT8;

5421 format = SND_PCM_FORMAT_S16;

5422 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5423 info.nativeFormats |= RTAUDIO_SINT16;

5424 format = SND_PCM_FORMAT_S24;

5425 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5426 info.nativeFormats |= RTAUDIO_SINT24;

5427 format = SND_PCM_FORMAT_S32;

5428 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5429 info.nativeFormats |= RTAUDIO_SINT32;

5430 format = SND_PCM_FORMAT_FLOAT;

5431 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5432 info.nativeFormats |= RTAUDIO_FLOAT32;

5433 format = SND_PCM_FORMAT_FLOAT64;

5434 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )

5435 info.nativeFormats |= RTAUDIO_FLOAT64;

5437 // Check that we have at least one supported format

5438 if ( info.nativeFormats == 0 ) {

5439 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";

5440 errorText_ = errorStream_.str();

5441 error( RtError::WARNING );

5445 // Get the device name

5447 result = snd_card_get_name( card, &cardname );

5448 if ( result >= 0 )

5449 sprintf( name, "hw:%s,%d", cardname, subdevice );

5452 // That's all ... close the device and return

5453 snd_pcm_close( phandle );

5454 info.probed = true;
\r
5458 void RtApiAlsa :: saveDeviceInfo( void )
\r
5462 unsigned int nDevices = getDeviceCount();
\r
5463 devices_.resize( nDevices );
\r
5464 for ( unsigned int i=0; i<nDevices; i++ )
\r
5465 devices_[i] = getDeviceInfo( i );
\r
5468 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5469 unsigned int firstChannel, unsigned int sampleRate,
\r
5470 RtAudioFormat format, unsigned int *bufferSize,
\r
5471 RtAudio::StreamOptions *options )
\r
5474 #if defined(__RTAUDIO_DEBUG__)
\r
5475 snd_output_t *out;
\r
5476 snd_output_stdio_attach(&out, stderr, 0);
\r
5479 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5481 unsigned nDevices = 0;
\r
5482 int result, subdevice, card;
\r
5484 snd_ctl_t *chandle;
\r
5486 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5487 snprintf(name, sizeof(name), "%s", "default");
\r
5488 // snprintf(name, sizeof(name), "%s", "dmix:0,0");
\r
5490 // Count cards and devices
\r
5492 snd_card_next( &card );
\r
5493 while ( card >= 0 ) {
\r
5494 sprintf( name, "hw:%d", card );
\r
5495 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5496 if ( result < 0 ) {
\r
5497 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5498 errorText_ = errorStream_.str();
\r
5503 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5504 if ( result < 0 ) break;
\r
5505 if ( subdevice < 0 ) break;
\r
5506 if ( nDevices == device ) {
\r
5507 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5508 snd_ctl_close( chandle );
\r
5513 snd_ctl_close( chandle );
\r
5514 snd_card_next( &card );
\r
5517 if ( nDevices == 0 ) {
\r
5518 // This should not happen because a check is made before this function is called.
\r
5519 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5523 if ( device >= nDevices ) {
\r
5524 // This should not happen because a check is made before this function is called.
\r
5525 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5532 // The getDeviceInfo() function will not work for a device that is
\r
5533 // already open. Thus, we'll probe the system before opening a
\r
5534 // stream and save the results for use by getDeviceInfo().
\r
5535 // if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5536 // this->saveDeviceInfo();
\r
5538 snd_pcm_stream_t stream;
\r
5539 if ( mode == OUTPUT )
\r
5540 stream = SND_PCM_STREAM_PLAYBACK;
\r
5542 stream = SND_PCM_STREAM_CAPTURE;
\r
5544 snd_pcm_t *phandle;
\r
5545 int openMode = SND_PCM_ASYNC;
\r
5546 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5547 if ( result < 0 ) {
\r
5548 if ( mode == OUTPUT )
\r
5549 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5551 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5552 errorText_ = errorStream_.str();
\r
5556 // Fill the parameter structure.
\r
5557 snd_pcm_hw_params_t *hw_params;
\r
5558 snd_pcm_hw_params_alloca( &hw_params );
\r
5559 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5560 if ( result < 0 ) {
\r
5561 snd_pcm_close( phandle );
\r
5562 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5563 errorText_ = errorStream_.str();
\r
5567 #if defined(__RTAUDIO_DEBUG__)
\r
5568 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5569 snd_pcm_hw_params_dump( hw_params, out );
\r
5572 // Set access ... check user preference.
\r
5573 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5574 stream_.userInterleaved = false;
\r
5575 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5576 if ( result < 0 ) {
\r
5577 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5578 stream_.deviceInterleaved[mode] = true;
\r
5581 stream_.deviceInterleaved[mode] = false;
\r
5584 stream_.userInterleaved = true;
\r
5585 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5586 if ( result < 0 ) {
\r
5587 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5588 stream_.deviceInterleaved[mode] = false;
\r
5591 stream_.deviceInterleaved[mode] = true;
\r
5594 if ( result < 0 ) {
\r
5595 snd_pcm_close( phandle );
\r
5596 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5597 errorText_ = errorStream_.str();
\r
5601 // Determine how to set the device format.
\r
5602 stream_.userFormat = format;
\r
5603 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5605 if ( format == RTAUDIO_SINT8 )
\r
5606 deviceFormat = SND_PCM_FORMAT_S8;
\r
5607 else if ( format == RTAUDIO_SINT16 )
\r
5608 deviceFormat = SND_PCM_FORMAT_S16;
\r
5609 else if ( format == RTAUDIO_SINT24 )
\r
5610 deviceFormat = SND_PCM_FORMAT_S24;
\r
5611 else if ( format == RTAUDIO_SINT32 )
\r
5612 deviceFormat = SND_PCM_FORMAT_S32;
\r
5613 else if ( format == RTAUDIO_FLOAT32 )
\r
5614 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5615 else if ( format == RTAUDIO_FLOAT64 )
\r
5616 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5618 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5619 stream_.deviceFormat[mode] = format;
\r
5623 // The user requested format is not natively supported by the device.
\r
5624 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5625 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5626 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5630 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5631 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5632 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5636 deviceFormat = SND_PCM_FORMAT_S32;
\r
5637 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5638 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5642 deviceFormat = SND_PCM_FORMAT_S24;
\r
5643 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5644 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5648 deviceFormat = SND_PCM_FORMAT_S16;
\r
5649 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5650 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5654 deviceFormat = SND_PCM_FORMAT_S8;
\r
5655 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5656 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5660 // If we get here, no supported format was found.
\r
5661 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5662 errorText_ = errorStream_.str();
\r
5666 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5667 if ( result < 0 ) {
\r
5668 snd_pcm_close( phandle );
\r
5669 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5670 errorText_ = errorStream_.str();
\r
5674 // Determine whether byte-swaping is necessary.
\r
5675 stream_.doByteSwap[mode] = false;
\r
5676 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5677 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5678 if ( result == 0 )
\r
5679 stream_.doByteSwap[mode] = true;
\r
5680 else if (result < 0) {
\r
5681 snd_pcm_close( phandle );
\r
5682 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5683 errorText_ = errorStream_.str();
\r
5688 // Set the sample rate.
\r
5689 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5690 if ( result < 0 ) {
\r
5691 snd_pcm_close( phandle );
\r
5692 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5693 errorText_ = errorStream_.str();
\r
5697 // Determine the number of channels for this device. We support a possible
\r
5698 // minimum device channel number > than the value requested by the user.
\r
5699 stream_.nUserChannels[mode] = channels;
\r
5700 unsigned int value;
\r
5701 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5702 unsigned int deviceChannels = value;
\r
5703 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5704 snd_pcm_close( phandle );
\r
5705 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5706 errorText_ = errorStream_.str();
\r
5710 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5711 if ( result < 0 ) {
\r
5712 snd_pcm_close( phandle );
\r
5713 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5714 errorText_ = errorStream_.str();
\r
5717 deviceChannels = value;
\r
5718 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5719 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5721 // Set the device channels.
\r
5722 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5723 if ( result < 0 ) {
\r
5724 snd_pcm_close( phandle );
\r
5725 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5726 errorText_ = errorStream_.str();
\r
5730 // Set the buffer (or period) size.
\r
5732 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5733 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5734 if ( result < 0 ) {
\r
5735 snd_pcm_close( phandle );
\r
5736 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5737 errorText_ = errorStream_.str();
\r
5740 *bufferSize = periodSize;
\r
5742 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5743 unsigned int periods = 0;
\r
5744 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5745 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5746 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5747 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5748 if ( result < 0 ) {
\r
5749 snd_pcm_close( phandle );
\r
5750 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5751 errorText_ = errorStream_.str();
\r
5755 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5756 // MUST be the same in both directions!
\r
5757 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5758 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5759 errorText_ = errorStream_.str();
\r
5763 stream_.bufferSize = *bufferSize;
\r
5765 // Install the hardware configuration
\r
5766 result = snd_pcm_hw_params( phandle, hw_params );
\r
5767 if ( result < 0 ) {
\r
5768 snd_pcm_close( phandle );
\r
5769 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5770 errorText_ = errorStream_.str();
\r
5774 #if defined(__RTAUDIO_DEBUG__)
\r
5775 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5776 snd_pcm_hw_params_dump( hw_params, out );
\r
5779 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5780 snd_pcm_sw_params_t *sw_params = NULL;
\r
5781 snd_pcm_sw_params_alloca( &sw_params );
\r
5782 snd_pcm_sw_params_current( phandle, sw_params );
\r
5783 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5784 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5785 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5787 // The following two settings were suggested by Theo Veenker
\r
5788 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5789 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5791 // here are two options for a fix
\r
5792 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5793 snd_pcm_uframes_t val;
\r
5794 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5795 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5797 result = snd_pcm_sw_params( phandle, sw_params );
\r
5798 if ( result < 0 ) {
\r
5799 snd_pcm_close( phandle );
\r
5800 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5801 errorText_ = errorStream_.str();
\r
5805 #if defined(__RTAUDIO_DEBUG__)
\r
5806 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5807 snd_pcm_sw_params_dump( sw_params, out );
\r
5810 // Set flags for buffer conversion
\r
5811 stream_.doConvertBuffer[mode] = false;
\r
5812 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5813 stream_.doConvertBuffer[mode] = true;
\r
5814 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5815 stream_.doConvertBuffer[mode] = true;
\r
5816 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5817 stream_.nUserChannels[mode] > 1 )
\r
5818 stream_.doConvertBuffer[mode] = true;
\r
5820 // Allocate the ApiHandle if necessary and then save.
\r
5821 AlsaHandle *apiInfo = 0;
\r
5822 if ( stream_.apiHandle == 0 ) {
\r
5824 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5826 catch ( std::bad_alloc& ) {
\r
5827 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5831 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5832 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5836 stream_.apiHandle = (void *) apiInfo;
\r
5837 apiInfo->handles[0] = 0;
\r
5838 apiInfo->handles[1] = 0;
\r
5841 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5843 apiInfo->handles[mode] = phandle;
\r
5845 // Allocate necessary internal buffers.
\r
5846 unsigned long bufferBytes;
\r
5847 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5848 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5849 if ( stream_.userBuffer[mode] == NULL ) {
\r
5850 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5854 if ( stream_.doConvertBuffer[mode] ) {
\r
5856 bool makeBuffer = true;
\r
5857 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5858 if ( mode == INPUT ) {
\r
5859 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5860 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5861 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5865 if ( makeBuffer ) {
\r
5866 bufferBytes *= *bufferSize;
\r
5867 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5868 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5869 if ( stream_.deviceBuffer == NULL ) {
\r
5870 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5876 stream_.sampleRate = sampleRate;
\r
5877 stream_.nBuffers = periods;
\r
5878 stream_.device[mode] = device;
\r
5879 stream_.state = STREAM_STOPPED;
\r
5881 // Setup the buffer conversion information structure.
\r
5882 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5884 // Setup thread if necessary.
\r
5885 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5886 // We had already set up an output stream.
\r
5887 stream_.mode = DUPLEX;
\r
5888 // Link the streams if possible.
\r
5889 apiInfo->synchronized = false;
\r
5890 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5891 apiInfo->synchronized = true;
\r
5893 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5894 error( RtError::WARNING );
\r
5898 stream_.mode = mode;
\r
5900 // Setup callback thread.
\r
5901 stream_.callbackInfo.object = (void *) this;
\r
5903 // Set the thread attributes for joinable and realtime scheduling
\r
5904 // priority (optional). The higher priority will only take affect
\r
5905 // if the program is run as root or suid. Note, under Linux
\r
5906 // processes with CAP_SYS_NICE privilege, a user can change
\r
5907 // scheduling policy and priority (thus need not be root). See
\r
5908 // POSIX "capabilities".
\r
5909 pthread_attr_t attr;
\r
5910 pthread_attr_init( &attr );
\r
5911 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5912 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5913 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5914 struct sched_param param;
\r
5915 int priority = options->priority;
\r
5916 int min = sched_get_priority_min( SCHED_RR );
\r
5917 int max = sched_get_priority_max( SCHED_RR );
\r
5918 if ( priority < min ) priority = min;
\r
5919 else if ( priority > max ) priority = max;
\r
5920 param.sched_priority = priority;
\r
5921 pthread_attr_setschedparam( &attr, ¶m );
\r
5922 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
5925 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5927 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5930 stream_.callbackInfo.isRunning = true;
\r
5931 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
5932 pthread_attr_destroy( &attr );
\r
5934 stream_.callbackInfo.isRunning = false;
\r
5935 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
5944 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5945 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5946 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5948 stream_.apiHandle = 0;
\r
5951 for ( int i=0; i<2; i++ ) {
\r
5952 if ( stream_.userBuffer[i] ) {
\r
5953 free( stream_.userBuffer[i] );
\r
5954 stream_.userBuffer[i] = 0;
\r
5958 if ( stream_.deviceBuffer ) {
\r
5959 free( stream_.deviceBuffer );
\r
5960 stream_.deviceBuffer = 0;
\r
5966 void RtApiAlsa :: closeStream()
\r
5968 if ( stream_.state == STREAM_CLOSED ) {
\r
5969 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
5970 error( RtError::WARNING );
\r
5974 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5975 stream_.callbackInfo.isRunning = false;
\r
5976 MUTEX_LOCK( &stream_.mutex );
\r
5977 if ( stream_.state == STREAM_STOPPED ) {
\r
5978 apiInfo->runnable = true;
\r
5979 pthread_cond_signal( &apiInfo->runnable_cv );
\r
5981 MUTEX_UNLOCK( &stream_.mutex );
\r
5982 pthread_join( stream_.callbackInfo.thread, NULL );
\r
5984 if ( stream_.state == STREAM_RUNNING ) {
\r
5985 stream_.state = STREAM_STOPPED;
\r
5986 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
5987 snd_pcm_drop( apiInfo->handles[0] );
\r
5988 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
5989 snd_pcm_drop( apiInfo->handles[1] );
\r
5993 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5994 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5995 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5997 stream_.apiHandle = 0;
\r
6000 for ( int i=0; i<2; i++ ) {
\r
6001 if ( stream_.userBuffer[i] ) {
\r
6002 free( stream_.userBuffer[i] );
\r
6003 stream_.userBuffer[i] = 0;
\r
6007 if ( stream_.deviceBuffer ) {
\r
6008 free( stream_.deviceBuffer );
\r
6009 stream_.deviceBuffer = 0;
\r
6012 stream_.mode = UNINITIALIZED;
\r
6013 stream_.state = STREAM_CLOSED;
\r
6016 void RtApiAlsa :: startStream()
\r
6018 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6021 if ( stream_.state == STREAM_RUNNING ) {
\r
6022 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6023 error( RtError::WARNING );
\r
6027 MUTEX_LOCK( &stream_.mutex );
\r
6030 snd_pcm_state_t state;
\r
6031 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6032 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6033 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6034 state = snd_pcm_state( handle[0] );
\r
6035 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6036 result = snd_pcm_prepare( handle[0] );
\r
6037 if ( result < 0 ) {
\r
6038 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6039 errorText_ = errorStream_.str();
\r
6045 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6046 state = snd_pcm_state( handle[1] );
\r
6047 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6048 result = snd_pcm_prepare( handle[1] );
\r
6049 if ( result < 0 ) {
\r
6050 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6051 errorText_ = errorStream_.str();
\r
6057 stream_.state = STREAM_RUNNING;
\r
6060 apiInfo->runnable = true;
\r
6061 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6062 MUTEX_UNLOCK( &stream_.mutex );
\r
6064 if ( result >= 0 ) return;
\r
6065 error( RtError::SYSTEM_ERROR );
\r
6068 void RtApiAlsa :: stopStream()
\r
6071 if ( stream_.state == STREAM_STOPPED ) {
\r
6072 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6073 error( RtError::WARNING );
\r
6077 stream_.state = STREAM_STOPPED;
\r
6078 MUTEX_LOCK( &stream_.mutex );
\r
6080 //if ( stream_.state == STREAM_STOPPED ) {
\r
6081 // MUTEX_UNLOCK( &stream_.mutex );
\r
6086 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6087 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6088 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6089 if ( apiInfo->synchronized )
\r
6090 result = snd_pcm_drop( handle[0] );
\r
6092 result = snd_pcm_drain( handle[0] );
\r
6093 if ( result < 0 ) {
\r
6094 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6095 errorText_ = errorStream_.str();
\r
6100 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6101 result = snd_pcm_drop( handle[1] );
\r
6102 if ( result < 0 ) {
\r
6103 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6104 errorText_ = errorStream_.str();
\r
6110 stream_.state = STREAM_STOPPED;
\r
6111 MUTEX_UNLOCK( &stream_.mutex );
\r
6113 if ( result >= 0 ) return;
\r
6114 error( RtError::SYSTEM_ERROR );
\r
6117 void RtApiAlsa :: abortStream()
\r
6120 if ( stream_.state == STREAM_STOPPED ) {
\r
6121 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6122 error( RtError::WARNING );
\r
6126 stream_.state = STREAM_STOPPED;
\r
6127 MUTEX_LOCK( &stream_.mutex );
\r
6129 //if ( stream_.state == STREAM_STOPPED ) {
\r
6130 // MUTEX_UNLOCK( &stream_.mutex );
\r
6135 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6136 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6137 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6138 result = snd_pcm_drop( handle[0] );
\r
6139 if ( result < 0 ) {
\r
6140 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6141 errorText_ = errorStream_.str();
\r
6146 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6147 result = snd_pcm_drop( handle[1] );
\r
6148 if ( result < 0 ) {
\r
6149 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6150 errorText_ = errorStream_.str();
\r
6156 stream_.state = STREAM_STOPPED;
\r
6157 MUTEX_UNLOCK( &stream_.mutex );
\r
6159 if ( result >= 0 ) return;
\r
6160 error( RtError::SYSTEM_ERROR );
\r
6163 void RtApiAlsa :: callbackEvent()
\r
6165 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6166 if ( stream_.state == STREAM_STOPPED ) {
\r
6167 MUTEX_LOCK( &stream_.mutex );
\r
6168 while ( !apiInfo->runnable )
\r
6169 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6171 if ( stream_.state != STREAM_RUNNING ) {
\r
6172 MUTEX_UNLOCK( &stream_.mutex );
\r
6175 MUTEX_UNLOCK( &stream_.mutex );
\r
6178 if ( stream_.state == STREAM_CLOSED ) {
\r
6179 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6180 error( RtError::WARNING );
\r
6184 int doStopStream = 0;
\r
6185 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6186 double streamTime = getStreamTime();
\r
6187 RtAudioStreamStatus status = 0;
\r
6188 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6189 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6190 apiInfo->xrun[0] = false;
\r
6192 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6193 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6194 apiInfo->xrun[1] = false;
\r
6196 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6197 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6199 if ( doStopStream == 2 ) {
\r
6204 MUTEX_LOCK( &stream_.mutex );
\r
6206 // The state might change while waiting on a mutex.
\r
6207 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6212 snd_pcm_t **handle;
\r
6213 snd_pcm_sframes_t frames;
\r
6214 RtAudioFormat format;
\r
6215 handle = (snd_pcm_t **) apiInfo->handles;
\r
6217 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6219 // Setup parameters.
\r
6220 if ( stream_.doConvertBuffer[1] ) {
\r
6221 buffer = stream_.deviceBuffer;
\r
6222 channels = stream_.nDeviceChannels[1];
\r
6223 format = stream_.deviceFormat[1];
\r
6226 buffer = stream_.userBuffer[1];
\r
6227 channels = stream_.nUserChannels[1];
\r
6228 format = stream_.userFormat;
\r
6231 // Read samples from device in interleaved/non-interleaved format.
\r
6232 if ( stream_.deviceInterleaved[1] )
\r
6233 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6235 void *bufs[channels];
\r
6236 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6237 for ( int i=0; i<channels; i++ )
\r
6238 bufs[i] = (void *) (buffer + (i * offset));
\r
6239 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6242 if ( result < (int) stream_.bufferSize ) {
\r
6243 // Either an error or overrun occured.
\r
6244 if ( result == -EPIPE ) {
\r
6245 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6246 if ( state == SND_PCM_STATE_XRUN ) {
\r
6247 apiInfo->xrun[1] = true;
\r
6248 result = snd_pcm_prepare( handle[1] );
\r
6249 if ( result < 0 ) {
\r
6250 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6251 errorText_ = errorStream_.str();
\r
6255 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6256 errorText_ = errorStream_.str();
\r
6260 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6261 errorText_ = errorStream_.str();
\r
6263 error( RtError::WARNING );
\r
6267 // Do byte swapping if necessary.
\r
6268 if ( stream_.doByteSwap[1] )
\r
6269 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6271 // Do buffer conversion if necessary.
\r
6272 if ( stream_.doConvertBuffer[1] )
\r
6273 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6275 // Check stream latency
\r
6276 result = snd_pcm_delay( handle[1], &frames );
\r
6277 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6282 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6284 // Setup parameters and do buffer conversion if necessary.
\r
6285 if ( stream_.doConvertBuffer[0] ) {
\r
6286 buffer = stream_.deviceBuffer;
\r
6287 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6288 channels = stream_.nDeviceChannels[0];
\r
6289 format = stream_.deviceFormat[0];
\r
6292 buffer = stream_.userBuffer[0];
\r
6293 channels = stream_.nUserChannels[0];
\r
6294 format = stream_.userFormat;
\r
6297 // Do byte swapping if necessary.
\r
6298 if ( stream_.doByteSwap[0] )
\r
6299 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6301 // Write samples to device in interleaved/non-interleaved format.
\r
6302 if ( stream_.deviceInterleaved[0] )
\r
6303 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6305 void *bufs[channels];
\r
6306 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6307 for ( int i=0; i<channels; i++ )
\r
6308 bufs[i] = (void *) (buffer + (i * offset));
\r
6309 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6312 if ( result < (int) stream_.bufferSize ) {
\r
6313 // Either an error or underrun occured.
\r
6314 if ( result == -EPIPE ) {
\r
6315 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6316 if ( state == SND_PCM_STATE_XRUN ) {
\r
6317 apiInfo->xrun[0] = true;
\r
6318 result = snd_pcm_prepare( handle[0] );
\r
6319 if ( result < 0 ) {
\r
6320 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6321 errorText_ = errorStream_.str();
\r
6325 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6326 errorText_ = errorStream_.str();
\r
6330 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6331 errorText_ = errorStream_.str();
\r
6333 error( RtError::WARNING );
\r
6337 // Check stream latency
\r
6338 result = snd_pcm_delay( handle[0], &frames );
\r
6339 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6343 MUTEX_UNLOCK( &stream_.mutex );
\r
6345 RtApi::tickStreamTime();
\r
6346 if ( doStopStream == 1 ) this->stopStream();
\r
6349 extern "C" void *alsaCallbackHandler( void *ptr )
\r
6351 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6352 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6353 bool *isRunning = &info->isRunning;
\r
6355 while ( *isRunning == true ) {
\r
6356 pthread_testcancel();
\r
6357 object->callbackEvent();
\r
6360 pthread_exit( NULL );
\r
6363 //******************** End of __LINUX_ALSA__ *********************//
\r
6367 #if defined(__LINUX_OSS__)
\r
6369 #include <unistd.h>
\r
6370 #include <sys/ioctl.h>
\r
6371 #include <unistd.h>
\r
6372 #include <fcntl.h>
\r
6373 #include "soundcard.h"
\r
6374 #include <errno.h>
\r
6377 extern "C" void *ossCallbackHandler(void * ptr);
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];                // device file descriptors: [0] = playback, [1] = capture
  bool xrun[2];             // [0] = output underrun flag, [1] = input overrun flag
  bool triggered;           // whether the duplex trigger has already been fired
  pthread_cond_t runnable;  // wakes the callback thread out of the STOPPED state

  OssHandle() : triggered( false ) {
    id[0] = 0;
    id[1] = 0;
    xrun[0] = false;
    xrun[1] = false;
  }
};
\r
6391 RtApiOss :: RtApiOss()
\r
6393 // Nothing to do here.
\r
6396 RtApiOss :: ~RtApiOss()
\r
6398 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6401 unsigned int RtApiOss :: getDeviceCount( void )
\r
6403 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6404 if ( mixerfd == -1 ) {
\r
6405 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6406 error( RtError::WARNING );
\r
6410 oss_sysinfo sysinfo;
\r
6411 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6413 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6414 error( RtError::WARNING );
\r
6419 return sysinfo.numaudios;
\r
6422 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6424 RtAudio::DeviceInfo info;
\r
6425 info.probed = false;
\r
6427 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6428 if ( mixerfd == -1 ) {
\r
6429 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6430 error( RtError::WARNING );
\r
6434 oss_sysinfo sysinfo;
\r
6435 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6436 if ( result == -1 ) {
\r
6438 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6439 error( RtError::WARNING );
\r
6443 unsigned nDevices = sysinfo.numaudios;
\r
6444 if ( nDevices == 0 ) {
\r
6446 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6447 error( RtError::INVALID_USE );
\r
6450 if ( device >= nDevices ) {
\r
6452 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6453 error( RtError::INVALID_USE );
\r
6456 oss_audioinfo ainfo;
\r
6457 ainfo.dev = device;
\r
6458 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6460 if ( result == -1 ) {
\r
6461 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6462 errorText_ = errorStream_.str();
\r
6463 error( RtError::WARNING );
\r
6468 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6469 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6470 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6471 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6472 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6475 // Probe data formats ... do for input
\r
6476 unsigned long mask = ainfo.iformats;
\r
6477 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6478 info.nativeFormats |= RTAUDIO_SINT16;
\r
6479 if ( mask & AFMT_S8 )
\r
6480 info.nativeFormats |= RTAUDIO_SINT8;
\r
6481 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6482 info.nativeFormats |= RTAUDIO_SINT32;
\r
6483 if ( mask & AFMT_FLOAT )
\r
6484 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6485 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
6486 info.nativeFormats |= RTAUDIO_SINT24;
\r
6488 // Check that we have at least one supported format
\r
6489 if ( info.nativeFormats == 0 ) {
\r
6490 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6491 errorText_ = errorStream_.str();
\r
6492 error( RtError::WARNING );
\r
6496 // Probe the supported sample rates.
\r
6497 info.sampleRates.clear();
\r
6498 if ( ainfo.nrates ) {
\r
6499 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
6500 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6501 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
6502 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6509 // Check min and max rate values;
\r
6510 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6511 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
6512 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6516 if ( info.sampleRates.size() == 0 ) {
\r
6517 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
6518 errorText_ = errorStream_.str();
\r
6519 error( RtError::WARNING );
\r
6522 info.probed = true;
\r
6523 info.name = ainfo.name;
\r
6530 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6531 unsigned int firstChannel, unsigned int sampleRate,
\r
6532 RtAudioFormat format, unsigned int *bufferSize,
\r
6533 RtAudio::StreamOptions *options )
\r
6535 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6536 if ( mixerfd == -1 ) {
\r
6537 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
6541 oss_sysinfo sysinfo;
\r
6542 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6543 if ( result == -1 ) {
\r
6545 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6549 unsigned nDevices = sysinfo.numaudios;
\r
6550 if ( nDevices == 0 ) {
\r
6551 // This should not happen because a check is made before this function is called.
\r
6553 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
6557 if ( device >= nDevices ) {
\r
6558 // This should not happen because a check is made before this function is called.
\r
6560 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
6564 oss_audioinfo ainfo;
\r
6565 ainfo.dev = device;
\r
6566 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6568 if ( result == -1 ) {
\r
6569 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6570 errorText_ = errorStream_.str();
\r
6574 // Check if device supports input or output
\r
6575 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
6576 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
6577 if ( mode == OUTPUT )
\r
6578 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
6580 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
6581 errorText_ = errorStream_.str();
\r
6586 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6587 if ( mode == OUTPUT )
\r
6588 flags |= O_WRONLY;
\r
6589 else { // mode == INPUT
\r
6590 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6591 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
6592 close( handle->id[0] );
\r
6593 handle->id[0] = 0;
\r
6594 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
6595 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
6596 errorText_ = errorStream_.str();
\r
6599 // Check that the number previously set channels is the same.
\r
6600 if ( stream_.nUserChannels[0] != channels ) {
\r
6601 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
6602 errorText_ = errorStream_.str();
\r
6608 flags |= O_RDONLY;
\r
6611 // Set exclusive access if specified.
\r
6612 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
6614 // Try to open the device.
\r
6616 fd = open( ainfo.devnode, flags, 0 );
\r
6618 if ( errno == EBUSY )
\r
6619 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
6621 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
6622 errorText_ = errorStream_.str();
\r
6626 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
6628 if ( flags | O_RDWR ) {
\r
6629 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
6630 if ( result == -1) {
\r
6631 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
6632 errorText_ = errorStream_.str();
\r
6638 // Check the device channel support.
\r
6639 stream_.nUserChannels[mode] = channels;
\r
6640 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
6642 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
6643 errorText_ = errorStream_.str();
\r
6647 // Set the number of channels.
\r
6648 int deviceChannels = channels + firstChannel;
\r
6649 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
6650 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
6652 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
6653 errorText_ = errorStream_.str();
\r
6656 stream_.nDeviceChannels[mode] = deviceChannels;
\r
6658 // Get the data format mask
\r
6660 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
6661 if ( result == -1 ) {
\r
6663 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
6664 errorText_ = errorStream_.str();
\r
6668 // Determine how to set the device format.
\r
6669 stream_.userFormat = format;
\r
6670 int deviceFormat = -1;
\r
6671 stream_.doByteSwap[mode] = false;
\r
6672 if ( format == RTAUDIO_SINT8 ) {
\r
6673 if ( mask & AFMT_S8 ) {
\r
6674 deviceFormat = AFMT_S8;
\r
6675 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6678 else if ( format == RTAUDIO_SINT16 ) {
\r
6679 if ( mask & AFMT_S16_NE ) {
\r
6680 deviceFormat = AFMT_S16_NE;
\r
6681 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6683 else if ( mask & AFMT_S16_OE ) {
\r
6684 deviceFormat = AFMT_S16_OE;
\r
6685 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6686 stream_.doByteSwap[mode] = true;
\r
6689 else if ( format == RTAUDIO_SINT24 ) {
\r
6690 if ( mask & AFMT_S24_NE ) {
\r
6691 deviceFormat = AFMT_S24_NE;
\r
6692 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6694 else if ( mask & AFMT_S24_OE ) {
\r
6695 deviceFormat = AFMT_S24_OE;
\r
6696 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6697 stream_.doByteSwap[mode] = true;
\r
6700 else if ( format == RTAUDIO_SINT32 ) {
\r
6701 if ( mask & AFMT_S32_NE ) {
\r
6702 deviceFormat = AFMT_S32_NE;
\r
6703 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6705 else if ( mask & AFMT_S32_OE ) {
\r
6706 deviceFormat = AFMT_S32_OE;
\r
6707 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6708 stream_.doByteSwap[mode] = true;
\r
6712 if ( deviceFormat == -1 ) {
\r
6713 // The user requested format is not natively supported by the device.
\r
6714 if ( mask & AFMT_S16_NE ) {
\r
6715 deviceFormat = AFMT_S16_NE;
\r
6716 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6718 else if ( mask & AFMT_S32_NE ) {
\r
6719 deviceFormat = AFMT_S32_NE;
\r
6720 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6722 else if ( mask & AFMT_S24_NE ) {
\r
6723 deviceFormat = AFMT_S24_NE;
\r
6724 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6726 else if ( mask & AFMT_S16_OE ) {
\r
6727 deviceFormat = AFMT_S16_OE;
\r
6728 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6729 stream_.doByteSwap[mode] = true;
\r
6731 else if ( mask & AFMT_S32_OE ) {
\r
6732 deviceFormat = AFMT_S32_OE;
\r
6733 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6734 stream_.doByteSwap[mode] = true;
\r
6736 else if ( mask & AFMT_S24_OE ) {
\r
6737 deviceFormat = AFMT_S24_OE;
\r
6738 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6739 stream_.doByteSwap[mode] = true;
\r
6741 else if ( mask & AFMT_S8) {
\r
6742 deviceFormat = AFMT_S8;
\r
6743 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6747 if ( stream_.deviceFormat[mode] == 0 ) {
\r
6748 // This really shouldn't happen ...
\r
6750 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6751 errorText_ = errorStream_.str();
\r
6755 // Set the data format.
\r
6756 int temp = deviceFormat;
\r
6757 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
6758 if ( result == -1 || deviceFormat != temp ) {
\r
6760 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
6761 errorText_ = errorStream_.str();
\r
6765 // Attempt to set the buffer size. According to OSS, the minimum
\r
6766 // number of buffers is two. The supposed minimum buffer size is 16
\r
6767 // bytes, so that will be our lower bound. The argument to this
\r
6768 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
6769 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
6770 // We'll check the actual value used near the end of the setup
\r
6772 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
6773 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
6775 if ( options ) buffers = options->numberOfBuffers;
\r
6776 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
6777 if ( buffers < 2 ) buffers = 3;
\r
6778 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
6779 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
6780 if ( result == -1 ) {
\r
6782 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
6783 errorText_ = errorStream_.str();
\r
6786 stream_.nBuffers = buffers;
\r
6788 // Save buffer size (in sample frames).
\r
6789 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
6790 stream_.bufferSize = *bufferSize;
\r
6792 // Set the sample rate.
\r
6793 int srate = sampleRate;
\r
6794 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
6795 if ( result == -1 ) {
\r
6797 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
6798 errorText_ = errorStream_.str();
\r
6802 // Verify the sample rate setup worked.
\r
6803 if ( abs( srate - sampleRate ) > 100 ) {
\r
6805 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
6806 errorText_ = errorStream_.str();
\r
6809 stream_.sampleRate = sampleRate;
\r
6811 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6812 // We're doing duplex setup here.
\r
6813 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
6814 stream_.nDeviceChannels[0] = deviceChannels;
\r
6817 // Set interleaving parameters.
\r
6818 stream_.userInterleaved = true;
\r
6819 stream_.deviceInterleaved[mode] = true;
\r
6820 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
6821 stream_.userInterleaved = false;
\r
6823 // Set flags for buffer conversion
\r
6824 stream_.doConvertBuffer[mode] = false;
\r
6825 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
6826 stream_.doConvertBuffer[mode] = true;
\r
6827 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
6828 stream_.doConvertBuffer[mode] = true;
\r
6829 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
6830 stream_.nUserChannels[mode] > 1 )
\r
6831 stream_.doConvertBuffer[mode] = true;
\r
6833 // Allocate the stream handles if necessary and then save.
\r
6834 if ( stream_.apiHandle == 0 ) {
\r
6836 handle = new OssHandle;
\r
6838 catch ( std::bad_alloc& ) {
\r
6839 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
6843 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
6844 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
6848 stream_.apiHandle = (void *) handle;
\r
6851 handle = (OssHandle *) stream_.apiHandle;
\r
6853 handle->id[mode] = fd;
\r
6855 // Allocate necessary internal buffers.
\r
6856 unsigned long bufferBytes;
\r
6857 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6858 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6859 if ( stream_.userBuffer[mode] == NULL ) {
\r
6860 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
6864 if ( stream_.doConvertBuffer[mode] ) {
\r
6866 bool makeBuffer = true;
\r
6867 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6868 if ( mode == INPUT ) {
\r
6869 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6870 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6871 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6875 if ( makeBuffer ) {
\r
6876 bufferBytes *= *bufferSize;
\r
6877 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6878 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6879 if ( stream_.deviceBuffer == NULL ) {
\r
6880 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
6886 stream_.device[mode] = device;
\r
6887 stream_.state = STREAM_STOPPED;
\r
6889 // Setup the buffer conversion information structure.
\r
6890 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6892 // Setup thread if necessary.
\r
6893 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
6894 // We had already set up an output stream.
\r
6895 stream_.mode = DUPLEX;
\r
6896 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
6899 stream_.mode = mode;
\r
6901 // Setup callback thread.
\r
6902 stream_.callbackInfo.object = (void *) this;
\r
6904 // Set the thread attributes for joinable and realtime scheduling
\r
6905 // priority. The higher priority will only take affect if the
\r
6906 // program is run as root or suid.
\r
6907 pthread_attr_t attr;
\r
6908 pthread_attr_init( &attr );
\r
6909 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
6910 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6911 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
6912 struct sched_param param;
\r
6913 int priority = options->priority;
\r
6914 int min = sched_get_priority_min( SCHED_RR );
\r
6915 int max = sched_get_priority_max( SCHED_RR );
\r
6916 if ( priority < min ) priority = min;
\r
6917 else if ( priority > max ) priority = max;
\r
6918 param.sched_priority = priority;
\r
6919 pthread_attr_setschedparam( &attr, ¶m );
\r
6920 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
6923 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6925 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6928 stream_.callbackInfo.isRunning = true;
\r
6929 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
6930 pthread_attr_destroy( &attr );
\r
6932 stream_.callbackInfo.isRunning = false;
\r
6933 errorText_ = "RtApiOss::error creating callback thread!";
\r
6942 pthread_cond_destroy( &handle->runnable );
\r
6943 if ( handle->id[0] ) close( handle->id[0] );
\r
6944 if ( handle->id[1] ) close( handle->id[1] );
\r
6946 stream_.apiHandle = 0;
\r
6949 for ( int i=0; i<2; i++ ) {
\r
6950 if ( stream_.userBuffer[i] ) {
\r
6951 free( stream_.userBuffer[i] );
\r
6952 stream_.userBuffer[i] = 0;
\r
6956 if ( stream_.deviceBuffer ) {
\r
6957 free( stream_.deviceBuffer );
\r
6958 stream_.deviceBuffer = 0;
\r
6964 void RtApiOss :: closeStream()
\r
6966 if ( stream_.state == STREAM_CLOSED ) {
\r
6967 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
6968 error( RtError::WARNING );
\r
6972 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6973 stream_.callbackInfo.isRunning = false;
\r
6974 MUTEX_LOCK( &stream_.mutex );
\r
6975 if ( stream_.state == STREAM_STOPPED )
\r
6976 pthread_cond_signal( &handle->runnable );
\r
6977 MUTEX_UNLOCK( &stream_.mutex );
\r
6978 pthread_join( stream_.callbackInfo.thread, NULL );
\r
6980 if ( stream_.state == STREAM_RUNNING ) {
\r
6981 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6982 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
6984 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
6985 stream_.state = STREAM_STOPPED;
\r
6989 pthread_cond_destroy( &handle->runnable );
\r
6990 if ( handle->id[0] ) close( handle->id[0] );
\r
6991 if ( handle->id[1] ) close( handle->id[1] );
\r
6993 stream_.apiHandle = 0;
\r
6996 for ( int i=0; i<2; i++ ) {
\r
6997 if ( stream_.userBuffer[i] ) {
\r
6998 free( stream_.userBuffer[i] );
\r
6999 stream_.userBuffer[i] = 0;
\r
7003 if ( stream_.deviceBuffer ) {
\r
7004 free( stream_.deviceBuffer );
\r
7005 stream_.deviceBuffer = 0;
\r
7008 stream_.mode = UNINITIALIZED;
\r
7009 stream_.state = STREAM_CLOSED;
\r
7012 void RtApiOss :: startStream()
\r
7015 if ( stream_.state == STREAM_RUNNING ) {
\r
7016 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7017 error( RtError::WARNING );
\r
7021 MUTEX_LOCK( &stream_.mutex );
\r
7023 stream_.state = STREAM_RUNNING;
\r
7025 // No need to do anything else here ... OSS automatically starts
\r
7026 // when fed samples.
\r
7028 MUTEX_UNLOCK( &stream_.mutex );
\r
7030 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7031 pthread_cond_signal( &handle->runnable );
\r
7034 void RtApiOss :: stopStream()
\r
7037 if ( stream_.state == STREAM_STOPPED ) {
\r
7038 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7039 error( RtError::WARNING );
\r
7043 MUTEX_LOCK( &stream_.mutex );
\r
7045 // The state might change while waiting on a mutex.
\r
7046 if ( stream_.state == STREAM_STOPPED ) {
\r
7047 MUTEX_UNLOCK( &stream_.mutex );
\r
7052 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7053 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7055 // Flush the output with zeros a few times.
\r
7058 RtAudioFormat format;
\r
7060 if ( stream_.doConvertBuffer[0] ) {
\r
7061 buffer = stream_.deviceBuffer;
\r
7062 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7063 format = stream_.deviceFormat[0];
\r
7066 buffer = stream_.userBuffer[0];
\r
7067 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7068 format = stream_.userFormat;
\r
7071 memset( buffer, 0, samples * formatBytes(format) );
\r
7072 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7073 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7074 if ( result == -1 ) {
\r
7075 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7076 error( RtError::WARNING );
\r
7080 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7081 if ( result == -1 ) {
\r
7082 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7083 errorText_ = errorStream_.str();
\r
7086 handle->triggered = false;
\r
7089 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7090 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7091 if ( result == -1 ) {
\r
7092 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7093 errorText_ = errorStream_.str();
\r
7099 stream_.state = STREAM_STOPPED;
\r
7100 MUTEX_UNLOCK( &stream_.mutex );
\r
7102 if ( result != -1 ) return;
\r
7103 error( RtError::SYSTEM_ERROR );
\r
7106 void RtApiOss :: abortStream()
\r
7109 if ( stream_.state == STREAM_STOPPED ) {
\r
7110 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7111 error( RtError::WARNING );
\r
7115 MUTEX_LOCK( &stream_.mutex );
\r
7117 // The state might change while waiting on a mutex.
\r
7118 if ( stream_.state == STREAM_STOPPED ) {
\r
7119 MUTEX_UNLOCK( &stream_.mutex );
\r
7124 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7125 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7126 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7127 if ( result == -1 ) {
\r
7128 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7129 errorText_ = errorStream_.str();
\r
7132 handle->triggered = false;
\r
7135 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7136 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7137 if ( result == -1 ) {
\r
7138 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7139 errorText_ = errorStream_.str();
\r
7145 stream_.state = STREAM_STOPPED;
\r
7146 MUTEX_UNLOCK( &stream_.mutex );
\r
7148 if ( result != -1 ) return;
\r
7149 error( RtError::SYSTEM_ERROR );
\r
7152 void RtApiOss :: callbackEvent()
\r
7154 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7155 if ( stream_.state == STREAM_STOPPED ) {
\r
7156 MUTEX_LOCK( &stream_.mutex );
\r
7157 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7158 if ( stream_.state != STREAM_RUNNING ) {
\r
7159 MUTEX_UNLOCK( &stream_.mutex );
\r
7162 MUTEX_UNLOCK( &stream_.mutex );
\r
7165 if ( stream_.state == STREAM_CLOSED ) {
\r
7166 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7167 error( RtError::WARNING );
\r
7171 // Invoke user callback to get fresh output data.
\r
7172 int doStopStream = 0;
\r
7173 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7174 double streamTime = getStreamTime();
\r
7175 RtAudioStreamStatus status = 0;
\r
7176 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7177 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7178 handle->xrun[0] = false;
\r
7180 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7181 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7182 handle->xrun[1] = false;
\r
7184 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7185 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7186 if ( doStopStream == 2 ) {
\r
7187 this->abortStream();
\r
7191 MUTEX_LOCK( &stream_.mutex );
\r
7193 // The state might change while waiting on a mutex.
\r
7194 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7199 RtAudioFormat format;
\r
7201 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7203 // Setup parameters and do buffer conversion if necessary.
\r
7204 if ( stream_.doConvertBuffer[0] ) {
\r
7205 buffer = stream_.deviceBuffer;
\r
7206 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7207 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7208 format = stream_.deviceFormat[0];
\r
7211 buffer = stream_.userBuffer[0];
\r
7212 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7213 format = stream_.userFormat;
\r
7216 // Do byte swapping if necessary.
\r
7217 if ( stream_.doByteSwap[0] )
\r
7218 byteSwapBuffer( buffer, samples, format );
\r
7220 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7222 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7223 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7224 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7225 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7226 handle->triggered = true;
\r
7229 // Write samples to device.
\r
7230 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7232 if ( result == -1 ) {
\r
7233 // We'll assume this is an underrun, though there isn't a
\r
7234 // specific means for determining that.
\r
7235 handle->xrun[0] = true;
\r
7236 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7237 error( RtError::WARNING );
\r
7238 // Continue on to input section.
\r
7242 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7244 // Setup parameters.
\r
7245 if ( stream_.doConvertBuffer[1] ) {
\r
7246 buffer = stream_.deviceBuffer;
\r
7247 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7248 format = stream_.deviceFormat[1];
\r
7251 buffer = stream_.userBuffer[1];
\r
7252 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7253 format = stream_.userFormat;
\r
7256 // Read samples from device.
\r
7257 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7259 if ( result == -1 ) {
\r
7260 // We'll assume this is an overrun, though there isn't a
\r
7261 // specific means for determining that.
\r
7262 handle->xrun[1] = true;
\r
7263 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7264 error( RtError::WARNING );
\r
7268 // Do byte swapping if necessary.
\r
7269 if ( stream_.doByteSwap[1] )
\r
7270 byteSwapBuffer( buffer, samples, format );
\r
7272 // Do buffer conversion if necessary.
\r
7273 if ( stream_.doConvertBuffer[1] )
\r
7274 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7278 MUTEX_UNLOCK( &stream_.mutex );
\r
7280 RtApi::tickStreamTime();
\r
7281 if ( doStopStream == 1 ) this->stopStream();
\r
7284 extern "C" void *ossCallbackHandler( void *ptr )
\r
7286 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7287 RtApiOss *object = (RtApiOss *) info->object;
\r
7288 bool *isRunning = &info->isRunning;
\r
7290 while ( *isRunning == true ) {
\r
7291 pthread_testcancel();
\r
7292 object->callbackEvent();
\r
7295 pthread_exit( NULL );
\r
7298 //******************** End of __LINUX_OSS__ *********************//
\r
7302 // *************************************************** //
\r
7304 // Protected common (OS-independent) RtAudio methods.
\r
7306 // *************************************************** //
\r
7308 // This method can be modified to control the behavior of error
\r
7309 // message printing.
\r
7310 void RtApi :: error( RtError::Type type )
\r
7312 errorStream_.str(""); // clear the ostringstream
\r
7313 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7314 std::cerr << '\n' << errorText_ << "\n\n";
\r
7315 else if ( type != RtError::WARNING )
\r
7316 throw( RtError( errorText_, type ) );
\r
7319 void RtApi :: verifyStream()
\r
7321 if ( stream_.state == STREAM_CLOSED ) {
\r
7322 errorText_ = "RtApi:: a stream is not open!";
\r
7323 error( RtError::INVALID_USE );
\r
7327 void RtApi :: clearStreamInfo()
\r
7329 stream_.mode = UNINITIALIZED;
\r
7330 stream_.state = STREAM_CLOSED;
\r
7331 stream_.sampleRate = 0;
\r
7332 stream_.bufferSize = 0;
\r
7333 stream_.nBuffers = 0;
\r
7334 stream_.userFormat = 0;
\r
7335 stream_.userInterleaved = true;
\r
7336 stream_.streamTime = 0.0;
\r
7337 stream_.apiHandle = 0;
\r
7338 stream_.deviceBuffer = 0;
\r
7339 stream_.callbackInfo.callback = 0;
\r
7340 stream_.callbackInfo.userData = 0;
\r
7341 stream_.callbackInfo.isRunning = false;
\r
7342 for ( int i=0; i<2; i++ ) {
\r
7343 stream_.device[i] = 11111;
\r
7344 stream_.doConvertBuffer[i] = false;
\r
7345 stream_.deviceInterleaved[i] = true;
\r
7346 stream_.doByteSwap[i] = false;
\r
7347 stream_.nUserChannels[i] = 0;
\r
7348 stream_.nDeviceChannels[i] = 0;
\r
7349 stream_.channelOffset[i] = 0;
\r
7350 stream_.deviceFormat[i] = 0;
\r
7351 stream_.latency[i] = 0;
\r
7352 stream_.userBuffer[i] = 0;
\r
7353 stream_.convertInfo[i].channels = 0;
\r
7354 stream_.convertInfo[i].inJump = 0;
\r
7355 stream_.convertInfo[i].outJump = 0;
\r
7356 stream_.convertInfo[i].inFormat = 0;
\r
7357 stream_.convertInfo[i].outFormat = 0;
\r
7358 stream_.convertInfo[i].inOffset.clear();
\r
7359 stream_.convertInfo[i].outOffset.clear();
\r
7363 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7365 if ( format == RTAUDIO_SINT16 )
\r
7367 else if ( format == RTAUDIO_SINT24 || format == RTAUDIO_SINT32 ||
\r
7368 format == RTAUDIO_FLOAT32 )
\r
7370 else if ( format == RTAUDIO_FLOAT64 )
\r
7372 else if ( format == RTAUDIO_SINT8 )
\r
7375 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7376 error( RtError::WARNING );
\r
7381 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7383 if ( mode == INPUT ) { // convert device to user buffer
\r
7384 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7385 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7386 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7387 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7389 else { // convert user to device buffer
\r
7390 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7391 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7392 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7393 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7396 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7397 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7399 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7401 // Set up the interleave/deinterleave offsets.
\r
7402 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7403 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7404 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7405 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7406 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7407 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7408 stream_.convertInfo[mode].inJump = 1;
\r
7412 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7413 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7414 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7415 stream_.convertInfo[mode].outJump = 1;
\r
7419 else { // no (de)interleaving
\r
7420 if ( stream_.userInterleaved ) {
\r
7421 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7422 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7423 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7427 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7428 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7429 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7430 stream_.convertInfo[mode].inJump = 1;
\r
7431 stream_.convertInfo[mode].outJump = 1;
\r
7436 // Add channel offset.
\r
7437 if ( firstChannel > 0 ) {
\r
7438 if ( stream_.deviceInterleaved[mode] ) {
\r
7439 if ( mode == OUTPUT ) {
\r
7440 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7441 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7444 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7445 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7449 if ( mode == OUTPUT ) {
\r
7450 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7451 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7454 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7455 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7461 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
7463 // This function does format conversion, input/output channel compensation, and
\r
7464 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
7465 // the lower three bytes of a 32-bit integer.
\r
7467 // Clear our device buffer when in/out duplex device channels are different
\r
7468 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
7469 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
7470 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
7473 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
7475 Float64 *out = (Float64 *)outBuffer;
\r
7477 if (info.inFormat == RTAUDIO_SINT8) {
\r
7478 signed char *in = (signed char *)inBuffer;
\r
7479 scale = 1.0 / 127.5;
\r
7480 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7481 for (j=0; j<info.channels; j++) {
\r
7482 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7483 out[info.outOffset[j]] += 0.5;
\r
7484 out[info.outOffset[j]] *= scale;
\r
7486 in += info.inJump;
\r
7487 out += info.outJump;
\r
7490 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7491 Int16 *in = (Int16 *)inBuffer;
\r
7492 scale = 1.0 / 32767.5;
\r
7493 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7494 for (j=0; j<info.channels; j++) {
\r
7495 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7496 out[info.outOffset[j]] += 0.5;
\r
7497 out[info.outOffset[j]] *= scale;
\r
7499 in += info.inJump;
\r
7500 out += info.outJump;
\r
7503 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7504 Int32 *in = (Int32 *)inBuffer;
\r
7505 scale = 1.0 / 8388607.5;
\r
7506 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7507 for (j=0; j<info.channels; j++) {
\r
7508 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]] & 0x00ffffff);
\r
7509 out[info.outOffset[j]] += 0.5;
\r
7510 out[info.outOffset[j]] *= scale;
\r
7512 in += info.inJump;
\r
7513 out += info.outJump;
\r
7516 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7517 Int32 *in = (Int32 *)inBuffer;
\r
7518 scale = 1.0 / 2147483647.5;
\r
7519 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7520 for (j=0; j<info.channels; j++) {
\r
7521 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7522 out[info.outOffset[j]] += 0.5;
\r
7523 out[info.outOffset[j]] *= scale;
\r
7525 in += info.inJump;
\r
7526 out += info.outJump;
\r
7529 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7530 Float32 *in = (Float32 *)inBuffer;
\r
7531 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7532 for (j=0; j<info.channels; j++) {
\r
7533 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7535 in += info.inJump;
\r
7536 out += info.outJump;
\r
7539 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7540 // Channel compensation and/or (de)interleaving only.
\r
7541 Float64 *in = (Float64 *)inBuffer;
\r
7542 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7543 for (j=0; j<info.channels; j++) {
\r
7544 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7546 in += info.inJump;
\r
7547 out += info.outJump;
\r
7551 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
7553 Float32 *out = (Float32 *)outBuffer;
\r
7555 if (info.inFormat == RTAUDIO_SINT8) {
\r
7556 signed char *in = (signed char *)inBuffer;
\r
7557 scale = (Float32) ( 1.0 / 127.5 );
\r
7558 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7559 for (j=0; j<info.channels; j++) {
\r
7560 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7561 out[info.outOffset[j]] += 0.5;
\r
7562 out[info.outOffset[j]] *= scale;
\r
7564 in += info.inJump;
\r
7565 out += info.outJump;
\r
7568 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7569 Int16 *in = (Int16 *)inBuffer;
\r
7570 scale = (Float32) ( 1.0 / 32767.5 );
\r
7571 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7572 for (j=0; j<info.channels; j++) {
\r
7573 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7574 out[info.outOffset[j]] += 0.5;
\r
7575 out[info.outOffset[j]] *= scale;
\r
7577 in += info.inJump;
\r
7578 out += info.outJump;
\r
7581 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7582 Int32 *in = (Int32 *)inBuffer;
\r
7583 scale = (Float32) ( 1.0 / 8388607.5 );
\r
7584 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7585 for (j=0; j<info.channels; j++) {
\r
7586 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]] & 0x00ffffff);
\r
7587 out[info.outOffset[j]] += 0.5;
\r
7588 out[info.outOffset[j]] *= scale;
\r
7590 in += info.inJump;
\r
7591 out += info.outJump;
\r
7594 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7595 Int32 *in = (Int32 *)inBuffer;
\r
7596 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
7597 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7598 for (j=0; j<info.channels; j++) {
\r
7599 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7600 out[info.outOffset[j]] += 0.5;
\r
7601 out[info.outOffset[j]] *= scale;
\r
7603 in += info.inJump;
\r
7604 out += info.outJump;
\r
7607 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7608 // Channel compensation and/or (de)interleaving only.
\r
7609 Float32 *in = (Float32 *)inBuffer;
\r
7610 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7611 for (j=0; j<info.channels; j++) {
\r
7612 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7614 in += info.inJump;
\r
7615 out += info.outJump;
\r
7618 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7619 Float64 *in = (Float64 *)inBuffer;
\r
7620 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7621 for (j=0; j<info.channels; j++) {
\r
7622 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7624 in += info.inJump;
\r
7625 out += info.outJump;
\r
7629 else if (info.outFormat == RTAUDIO_SINT32) {
\r
7630 Int32 *out = (Int32 *)outBuffer;
\r
7631 if (info.inFormat == RTAUDIO_SINT8) {
\r
7632 signed char *in = (signed char *)inBuffer;
\r
7633 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7634 for (j=0; j<info.channels; j++) {
\r
7635 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7636 out[info.outOffset[j]] <<= 24;
\r
7638 in += info.inJump;
\r
7639 out += info.outJump;
\r
7642 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7643 Int16 *in = (Int16 *)inBuffer;
\r
7644 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7645 for (j=0; j<info.channels; j++) {
\r
7646 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7647 out[info.outOffset[j]] <<= 16;
\r
7649 in += info.inJump;
\r
7650 out += info.outJump;
\r
7653 else if (info.inFormat == RTAUDIO_SINT24) { // Hmmm ... we could just leave it in the lower 3 bytes
\r
7654 Int32 *in = (Int32 *)inBuffer;
\r
7655 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7656 for (j=0; j<info.channels; j++) {
\r
7657 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7658 out[info.outOffset[j]] <<= 8;
\r
7660 in += info.inJump;
\r
7661 out += info.outJump;
\r
7664 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7665 // Channel compensation and/or (de)interleaving only.
\r
7666 Int32 *in = (Int32 *)inBuffer;
\r
7667 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7668 for (j=0; j<info.channels; j++) {
\r
7669 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7671 in += info.inJump;
\r
7672 out += info.outJump;
\r
7675 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7676 Float32 *in = (Float32 *)inBuffer;
\r
7677 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7678 for (j=0; j<info.channels; j++) {
\r
7679 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7681 in += info.inJump;
\r
7682 out += info.outJump;
\r
7685 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7686 Float64 *in = (Float64 *)inBuffer;
\r
7687 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7688 for (j=0; j<info.channels; j++) {
\r
7689 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7691 in += info.inJump;
\r
7692 out += info.outJump;
\r
7696 else if (info.outFormat == RTAUDIO_SINT24) {
\r
7697 Int32 *out = (Int32 *)outBuffer;
\r
7698 if (info.inFormat == RTAUDIO_SINT8) {
\r
7699 signed char *in = (signed char *)inBuffer;
\r
7700 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7701 for (j=0; j<info.channels; j++) {
\r
7702 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7703 out[info.outOffset[j]] <<= 16;
\r
7705 in += info.inJump;
\r
7706 out += info.outJump;
\r
7709 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7710 Int16 *in = (Int16 *)inBuffer;
\r
7711 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7712 for (j=0; j<info.channels; j++) {
\r
7713 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7714 out[info.outOffset[j]] <<= 8;
\r
7716 in += info.inJump;
\r
7717 out += info.outJump;
\r
7720 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7721 // Channel compensation and/or (de)interleaving only.
\r
7722 Int32 *in = (Int32 *)inBuffer;
\r
7723 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7724 for (j=0; j<info.channels; j++) {
\r
7725 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7727 in += info.inJump;
\r
7728 out += info.outJump;
\r
7731 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7732 Int32 *in = (Int32 *)inBuffer;
\r
7733 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7734 for (j=0; j<info.channels; j++) {
\r
7735 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7736 out[info.outOffset[j]] >>= 8;
\r
7738 in += info.inJump;
\r
7739 out += info.outJump;
\r
7742 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7743 Float32 *in = (Float32 *)inBuffer;
\r
7744 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7745 for (j=0; j<info.channels; j++) {
\r
7746 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7748 in += info.inJump;
\r
7749 out += info.outJump;
\r
7752 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7753 Float64 *in = (Float64 *)inBuffer;
\r
7754 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7755 for (j=0; j<info.channels; j++) {
\r
7756 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7758 in += info.inJump;
\r
7759 out += info.outJump;
\r
7763 else if (info.outFormat == RTAUDIO_SINT16) {
\r
7764 Int16 *out = (Int16 *)outBuffer;
\r
7765 if (info.inFormat == RTAUDIO_SINT8) {
\r
7766 signed char *in = (signed char *)inBuffer;
\r
7767 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7768 for (j=0; j<info.channels; j++) {
\r
7769 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
7770 out[info.outOffset[j]] <<= 8;
\r
7772 in += info.inJump;
\r
7773 out += info.outJump;
\r
7776 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7777 // Channel compensation and/or (de)interleaving only.
\r
7778 Int16 *in = (Int16 *)inBuffer;
\r
7779 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7780 for (j=0; j<info.channels; j++) {
\r
7781 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7783 in += info.inJump;
\r
7784 out += info.outJump;
\r
7787 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7788 Int32 *in = (Int32 *)inBuffer;
\r
7789 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7790 for (j=0; j<info.channels; j++) {
\r
7791 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 8) & 0x0000ffff);
\r
7793 in += info.inJump;
\r
7794 out += info.outJump;
\r
7797 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7798 Int32 *in = (Int32 *)inBuffer;
\r
7799 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7800 for (j=0; j<info.channels; j++) {
\r
7801 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
7803 in += info.inJump;
\r
7804 out += info.outJump;
\r
7807 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7808 Float32 *in = (Float32 *)inBuffer;
\r
7809 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7810 for (j=0; j<info.channels; j++) {
\r
7811 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7813 in += info.inJump;
\r
7814 out += info.outJump;
\r
7817 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7818 Float64 *in = (Float64 *)inBuffer;
\r
7819 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7820 for (j=0; j<info.channels; j++) {
\r
7821 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7823 in += info.inJump;
\r
7824 out += info.outJump;
\r
7828 else if (info.outFormat == RTAUDIO_SINT8) {
\r
7829 signed char *out = (signed char *)outBuffer;
\r
7830 if (info.inFormat == RTAUDIO_SINT8) {
\r
7831 // Channel compensation and/or (de)interleaving only.
\r
7832 signed char *in = (signed char *)inBuffer;
\r
7833 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7834 for (j=0; j<info.channels; j++) {
\r
7835 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7837 in += info.inJump;
\r
7838 out += info.outJump;
\r
7841 if (info.inFormat == RTAUDIO_SINT16) {
\r
7842 Int16 *in = (Int16 *)inBuffer;
\r
7843 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7844 for (j=0; j<info.channels; j++) {
\r
7845 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
7847 in += info.inJump;
\r
7848 out += info.outJump;
\r
7851 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7852 Int32 *in = (Int32 *)inBuffer;
\r
7853 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7854 for (j=0; j<info.channels; j++) {
\r
7855 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 16) & 0x000000ff);
\r
7857 in += info.inJump;
\r
7858 out += info.outJump;
\r
7861 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7862 Int32 *in = (Int32 *)inBuffer;
\r
7863 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7864 for (j=0; j<info.channels; j++) {
\r
7865 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
7867 in += info.inJump;
\r
7868 out += info.outJump;
\r
7871 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7872 Float32 *in = (Float32 *)inBuffer;
\r
7873 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7874 for (j=0; j<info.channels; j++) {
\r
7875 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7877 in += info.inJump;
\r
7878 out += info.outJump;
\r
7881 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7882 Float64 *in = (Float64 *)inBuffer;
\r
7883 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7884 for (j=0; j<info.channels; j++) {
\r
7885 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7887 in += info.inJump;
\r
7888 out += info.outJump;
\r
7894 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
7895 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
7896 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
7898 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
7900 register char val;
\r
7901 register char *ptr;
\r
7904 if ( format == RTAUDIO_SINT16 ) {
\r
7905 for ( unsigned int i=0; i<samples; i++ ) {
\r
7906 // Swap 1st and 2nd bytes.
\r
7908 *(ptr) = *(ptr+1);
\r
7911 // Increment 2 bytes.
\r
7915 else if ( format == RTAUDIO_SINT24 ||
\r
7916 format == RTAUDIO_SINT32 ||
\r
7917 format == RTAUDIO_FLOAT32 ) {
\r
7918 for ( unsigned int i=0; i<samples; i++ ) {
\r
7919 // Swap 1st and 4th bytes.
\r
7921 *(ptr) = *(ptr+3);
\r
7924 // Swap 2nd and 3rd bytes.
\r
7927 *(ptr) = *(ptr+1);
\r
7930 // Increment 3 more bytes.
\r
7934 else if ( format == RTAUDIO_FLOAT64 ) {
\r
7935 for ( unsigned int i=0; i<samples; i++ ) {
\r
7936 // Swap 1st and 8th bytes
\r
7938 *(ptr) = *(ptr+7);
\r
7941 // Swap 2nd and 7th bytes
\r
7944 *(ptr) = *(ptr+5);
\r
7947 // Swap 3rd and 6th bytes
\r
7950 *(ptr) = *(ptr+3);
\r
7953 // Swap 4th and 5th bytes
\r
7956 *(ptr) = *(ptr+1);
\r
7959 // Increment 5 more bytes.
\r
7965 // Indentation settings for Vim and Emacs
\r
7967 // Local Variables:
\r
7968 // c-basic-offset: 2
\r
7969 // indent-tabs-mode: nil
\r
7972 // vim: et sts=2 sw=2
\r