/************************************************************************/
/*! \class RtAudio
    \brief Realtime audio i/o C++ classes.

    RtAudio provides a common API (Application Programming Interface)
    for realtime audio input/output across Linux (native ALSA, Jack,
    and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
    (DirectSound and ASIO) operating systems.

    RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/

    RtAudio: realtime audio i/o C++ classes
    Copyright (c) 2001-2011 Gary P. Scavone

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation files
    (the "Software"), to deal in the Software without restriction,
    including without limitation the rights to use, copy, modify, merge,
    publish, distribute, sublicense, and/or sell copies of the Software,
    and to permit persons to whom the Software is furnished to do so,
    subject to the following conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    Any person wishing to distribute modifications to the Software is
    asked to send the modifications to the original developer so that
    they can be incorporated into the canonical version.  This is,
    however, not a binding provision of this license.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
    ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
    CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
    WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
/************************************************************************/

// RtAudio: Version 4.0.10
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
// Platform-agnostic mutex wrappers: Windows critical sections, POSIX
// pthread mutexes, or no-op dummies when no API backend is compiled in.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A)    DeleteCriticalSection(A)
  #define MUTEX_LOCK(A)       EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A)     LeaveCriticalSection(A)
#elif defined(__LINUX_ALSA__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // pthread API
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A)    pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A)       pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A)     pthread_mutex_unlock(A)
#else
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A)    abs(*A) // dummy definitions
  #define MUTEX_LOCK(A)       abs(*A) // dummy definitions
  #define MUTEX_UNLOCK(A)     abs(*A) // dummy definitions
#endif
\r
72 // *************************************************** //
\r
74 // RtAudio definitions.
\r
76 // *************************************************** //
\r
78 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
82 // The order here will control the order of RtAudio's API search in
\r
84 #if defined(__UNIX_JACK__)
\r
85 apis.push_back( UNIX_JACK );
\r
87 #if defined(__LINUX_ALSA__)
\r
88 apis.push_back( LINUX_ALSA );
\r
90 #if defined(__LINUX_OSS__)
\r
91 apis.push_back( LINUX_OSS );
\r
93 #if defined(__WINDOWS_ASIO__)
\r
94 apis.push_back( WINDOWS_ASIO );
\r
96 #if defined(__WINDOWS_DS__)
\r
97 apis.push_back( WINDOWS_DS );
\r
99 #if defined(__MACOSX_CORE__)
\r
100 apis.push_back( MACOSX_CORE );
\r
102 #if defined(__RTAUDIO_DUMMY__)
\r
103 apis.push_back( RTAUDIO_DUMMY );
\r
107 void RtAudio :: openRtApi( RtAudio::Api api )
\r
109 #if defined(__UNIX_JACK__)
\r
110 if ( api == UNIX_JACK )
\r
111 rtapi_ = new RtApiJack();
\r
113 #if defined(__LINUX_ALSA__)
\r
114 if ( api == LINUX_ALSA )
\r
115 rtapi_ = new RtApiAlsa();
\r
117 #if defined(__LINUX_OSS__)
\r
118 if ( api == LINUX_OSS )
\r
119 rtapi_ = new RtApiOss();
\r
121 #if defined(__WINDOWS_ASIO__)
\r
122 if ( api == WINDOWS_ASIO )
\r
123 rtapi_ = new RtApiAsio();
\r
125 #if defined(__WINDOWS_DS__)
\r
126 if ( api == WINDOWS_DS )
\r
127 rtapi_ = new RtApiDs();
\r
129 #if defined(__MACOSX_CORE__)
\r
130 if ( api == MACOSX_CORE )
\r
131 rtapi_ = new RtApiCore();
\r
133 #if defined(__RTAUDIO_DUMMY__)
\r
134 if ( api == RTAUDIO_DUMMY )
\r
135 rtapi_ = new RtApiDummy();
\r
139 RtAudio :: RtAudio( RtAudio::Api api ) throw()
\r
143 if ( api != UNSPECIFIED ) {
\r
144 // Attempt to open the specified API.
\r
146 if ( rtapi_ ) return;
\r
148 // No compiled support for specified API value. Issue a debug
\r
149 // warning and continue as if no API was specified.
\r
150 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
153 // Iterate through the compiled APIs and return as soon as we find
\r
154 // one with at least one device or we reach the end of the list.
\r
155 std::vector< RtAudio::Api > apis;
\r
156 getCompiledApi( apis );
\r
157 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
158 openRtApi( apis[i] );
\r
159 if ( rtapi_->getDeviceCount() ) break;
\r
162 if ( rtapi_ ) return;
\r
164 // It should not be possible to get here because the preprocessor
\r
165 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
166 // API-specific definitions are passed to the compiler. But just in
\r
167 // case something weird happens, we'll print out an error message.
\r
168 std::cerr << "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
171 RtAudio :: ~RtAudio() throw()
\r
176 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
177 RtAudio::StreamParameters *inputParameters,
\r
178 RtAudioFormat format, unsigned int sampleRate,
\r
179 unsigned int *bufferFrames,
\r
180 RtAudioCallback callback, void *userData,
\r
181 RtAudio::StreamOptions *options )
\r
183 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
184 sampleRate, bufferFrames, callback,
\r
185 userData, options );
\r
188 // *************************************************** //
\r
190 // Public RtApi definitions (see end of file for
\r
191 // private or protected utility functions).
\r
193 // *************************************************** //
\r
197 stream_.state = STREAM_CLOSED;
\r
198 stream_.mode = UNINITIALIZED;
\r
199 stream_.apiHandle = 0;
\r
200 stream_.userBuffer[0] = 0;
\r
201 stream_.userBuffer[1] = 0;
\r
202 MUTEX_INITIALIZE( &stream_.mutex );
\r
203 showWarnings_ = true;
\r
208 MUTEX_DESTROY( &stream_.mutex );
\r
211 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
212 RtAudio::StreamParameters *iParams,
\r
213 RtAudioFormat format, unsigned int sampleRate,
\r
214 unsigned int *bufferFrames,
\r
215 RtAudioCallback callback, void *userData,
\r
216 RtAudio::StreamOptions *options )
\r
218 if ( stream_.state != STREAM_CLOSED ) {
\r
219 errorText_ = "RtApi::openStream: a stream is already open!";
\r
220 error( RtError::INVALID_USE );
\r
223 if ( oParams && oParams->nChannels < 1 ) {
\r
224 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
225 error( RtError::INVALID_USE );
\r
228 if ( iParams && iParams->nChannels < 1 ) {
\r
229 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
230 error( RtError::INVALID_USE );
\r
233 if ( oParams == NULL && iParams == NULL ) {
\r
234 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
235 error( RtError::INVALID_USE );
\r
238 if ( formatBytes(format) == 0 ) {
\r
239 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
240 error( RtError::INVALID_USE );
\r
243 unsigned int nDevices = getDeviceCount();
\r
244 unsigned int oChannels = 0;
\r
246 oChannels = oParams->nChannels;
\r
247 if ( oParams->deviceId >= nDevices ) {
\r
248 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
249 error( RtError::INVALID_USE );
\r
253 unsigned int iChannels = 0;
\r
255 iChannels = iParams->nChannels;
\r
256 if ( iParams->deviceId >= nDevices ) {
\r
257 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
258 error( RtError::INVALID_USE );
\r
265 if ( oChannels > 0 ) {
\r
267 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
268 sampleRate, format, bufferFrames, options, oParams->deviceName );
\r
269 if ( result == false ) error( RtError::SYSTEM_ERROR );
\r
272 if ( iChannels > 0 ) {
\r
274 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
275 sampleRate, format, bufferFrames, options, iParams->deviceName );
\r
276 if ( result == false ) {
\r
277 if ( oChannels > 0 ) closeStream();
\r
278 error( RtError::SYSTEM_ERROR );
\r
282 stream_.callbackInfo.callback = (void *) callback;
\r
283 stream_.callbackInfo.userData = userData;
\r
285 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
286 stream_.state = STREAM_STOPPED;
\r
289 unsigned int RtApi :: getDefaultInputDevice( void )
\r
291 // Should be implemented in subclasses if possible.
\r
295 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
297 // Should be implemented in subclasses if possible.
\r
301 void RtApi :: closeStream( void )
\r
303 // MUST be implemented in subclasses!
\r
307 bool RtApi :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
308 unsigned int firstChannel, unsigned int sampleRate,
\r
309 RtAudioFormat format, unsigned int *bufferSize,
\r
310 RtAudio::StreamOptions *options, const std::string &deviceName )
\r
312 // MUST be implemented in subclasses!
\r
316 void RtApi :: tickStreamTime( void )
\r
318 // Subclasses that do not provide their own implementation of
\r
319 // getStreamTime should call this function once per buffer I/O to
\r
320 // provide basic stream time support.
\r
322 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
324 #if defined( HAVE_GETTIMEOFDAY )
\r
325 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
329 long RtApi :: getStreamLatency( void )
\r
333 long totalLatency = 0;
\r
334 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
335 totalLatency = stream_.latency[0];
\r
336 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
337 totalLatency += stream_.latency[1];
\r
339 return totalLatency;
\r
342 double RtApi :: getStreamTime( void )
\r
346 #if defined( HAVE_GETTIMEOFDAY )
\r
347 // Return a very accurate estimate of the stream time by
\r
348 // adding in the elapsed time since the last tick.
\r
349 struct timeval then;
\r
350 struct timeval now;
\r
352 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
353 return stream_.streamTime;
\r
355 gettimeofday( &now, NULL );
\r
356 then = stream_.lastTickTimestamp;
\r
357 return stream_.streamTime +
\r
358 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
359 (then.tv_sec + 0.000001 * then.tv_usec));
\r
361 return stream_.streamTime;
\r
365 unsigned int RtApi :: getStreamSampleRate( void )
\r
369 return stream_.sampleRate;
\r
373 // *************************************************** //
\r
375 // OS/API-specific methods.
\r
377 // *************************************************** //
\r
379 #if defined(__MACOSX_CORE__)
\r
381 // The OS X CoreAudio API is designed to use a separate callback
\r
382 // procedure for each of its audio devices. A single RtAudio duplex
\r
383 // stream using two different devices is supported here, though it
\r
384 // cannot be guaranteed to always behave correctly because we cannot
\r
385 // synchronize these two callbacks.
\r
387 // A property listener is installed for over/underrun information.
\r
388 // However, no functionality is currently provided to allow property
\r
389 // listeners to trigger user handlers because it is unclear what could
\r
390 // be done if a critical stream parameter (buffer size, sample rate,
\r
391 // device disconnect) notification arrived. The listeners entail
\r
392 // quite a bit of extra code and most likely, a user program wouldn't
\r
393 // be prepared for the result anyway. However, we do provide a flag
\r
394 // to the client callback function to inform of an over/underrun.
\r
396 // A structure to hold various information related to the CoreAudio API
\r
398 struct CoreHandle {
\r
399 AudioDeviceID id[2]; // device ids
\r
400 AudioDeviceIOProcID procId[2];
\r
401 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
402 UInt32 nStreams[2]; // number of streams to use
\r
404 char *deviceBuffer;
\r
405 pthread_cond_t condition;
\r
406 int drainCounter; // Tracks callback counts when draining
\r
407 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
410 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
413 RtApiCore:: RtApiCore()
\r
415 // This is a largely undocumented but absolutely necessary
\r
416 // requirement starting with OS-X 10.6. If not called, queries and
\r
417 // updates to various audio device properties are not handled
\r
419 CFRunLoopRef theRunLoop = NULL;
\r
420 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
421 kAudioObjectPropertyScopeGlobal,
\r
422 kAudioObjectPropertyElementMaster };
\r
423 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
424 if ( result != noErr ) {
\r
425 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
426 error( RtError::WARNING );
\r
430 RtApiCore :: ~RtApiCore()
\r
432 // The subclass destructor gets called before the base class
\r
433 // destructor, so close an existing stream before deallocating
\r
434 // apiDeviceId memory.
\r
435 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
438 unsigned int RtApiCore :: getDeviceCount( void )
\r
440 // Find out how many audio devices there are, if any.
\r
442 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
443 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
444 if ( result != noErr ) {
\r
445 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
446 error( RtError::WARNING );
\r
450 return dataSize / sizeof( AudioDeviceID );
\r
453 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
455 unsigned int nDevices = getDeviceCount();
\r
456 if ( nDevices <= 1 ) return 0;
\r
459 UInt32 dataSize = sizeof( AudioDeviceID );
\r
460 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
461 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
462 if ( result != noErr ) {
\r
463 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
464 error( RtError::WARNING );
\r
468 dataSize *= nDevices;
\r
469 AudioDeviceID deviceList[ nDevices ];
\r
470 property.mSelector = kAudioHardwarePropertyDevices;
\r
471 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
472 if ( result != noErr ) {
\r
473 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
474 error( RtError::WARNING );
\r
478 for ( unsigned int i=0; i<nDevices; i++ )
\r
479 if ( id == deviceList[i] ) return i;
\r
481 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
482 error( RtError::WARNING );
\r
486 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
488 unsigned int nDevices = getDeviceCount();
\r
489 if ( nDevices <= 1 ) return 0;
\r
492 UInt32 dataSize = sizeof( AudioDeviceID );
\r
493 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
494 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
495 if ( result != noErr ) {
\r
496 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
497 error( RtError::WARNING );
\r
501 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
502 AudioDeviceID deviceList[ nDevices ];
\r
503 property.mSelector = kAudioHardwarePropertyDevices;
\r
504 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
505 if ( result != noErr ) {
\r
506 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
507 error( RtError::WARNING );
\r
511 for ( unsigned int i=0; i<nDevices; i++ )
\r
512 if ( id == deviceList[i] ) return i;
\r
514 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
515 error( RtError::WARNING );
\r
519 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
521 RtAudio::DeviceInfo info;
\r
522 info.probed = false;
\r
525 unsigned int nDevices = getDeviceCount();
\r
526 if ( nDevices == 0 ) {
\r
527 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
528 error( RtError::INVALID_USE );
\r
531 if ( device >= nDevices ) {
\r
532 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
533 error( RtError::INVALID_USE );
\r
536 AudioDeviceID deviceList[ nDevices ];
\r
537 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
538 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
539 kAudioObjectPropertyScopeGlobal,
\r
540 kAudioObjectPropertyElementMaster };
\r
541 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
542 0, NULL, &dataSize, (void *) &deviceList );
\r
543 if ( result != noErr ) {
\r
544 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
545 error( RtError::WARNING );
\r
549 AudioDeviceID id = deviceList[ device ];
\r
551 // Get the device name.
\r
553 CFStringRef cfname;
\r
554 dataSize = sizeof( CFStringRef );
\r
555 property.mSelector = kAudioObjectPropertyManufacturer;
\r
556 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
557 if ( result != noErr ) {
\r
558 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
559 errorText_ = errorStream_.str();
\r
560 error( RtError::WARNING );
\r
564 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
565 int length = CFStringGetLength(cfname);
\r
566 char *mname = (char *)malloc(length * 3 + 1);
\r
567 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
568 info.name.append( (const char *)mname, strlen(mname) );
\r
569 info.name.append( ": " );
\r
570 CFRelease( cfname );
\r
573 property.mSelector = kAudioObjectPropertyName;
\r
574 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
575 if ( result != noErr ) {
\r
576 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
577 errorText_ = errorStream_.str();
\r
578 error( RtError::WARNING );
\r
582 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
583 length = CFStringGetLength(cfname);
\r
584 char *name = (char *)malloc(length * 3 + 1);
\r
585 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
586 info.name.append( (const char *)name, strlen(name) );
\r
587 CFRelease( cfname );
\r
590 // Get the output stream "configuration".
\r
591 AudioBufferList *bufferList = nil;
\r
592 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
593 property.mScope = kAudioDevicePropertyScopeOutput;
\r
594 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
596 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
597 if ( result != noErr || dataSize == 0 ) {
\r
598 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
599 errorText_ = errorStream_.str();
\r
600 error( RtError::WARNING );
\r
604 // Allocate the AudioBufferList.
\r
605 bufferList = (AudioBufferList *) malloc( dataSize );
\r
606 if ( bufferList == NULL ) {
\r
607 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
608 error( RtError::WARNING );
\r
612 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
613 if ( result != noErr || dataSize == 0 ) {
\r
614 free( bufferList );
\r
615 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
616 errorText_ = errorStream_.str();
\r
617 error( RtError::WARNING );
\r
621 // Get output channel information.
\r
622 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
623 for ( i=0; i<nStreams; i++ )
\r
624 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
625 free( bufferList );
\r
627 // Get the input stream "configuration".
\r
628 property.mScope = kAudioDevicePropertyScopeInput;
\r
629 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
630 if ( result != noErr || dataSize == 0 ) {
\r
631 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
632 errorText_ = errorStream_.str();
\r
633 error( RtError::WARNING );
\r
637 // Allocate the AudioBufferList.
\r
638 bufferList = (AudioBufferList *) malloc( dataSize );
\r
639 if ( bufferList == NULL ) {
\r
640 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
641 error( RtError::WARNING );
\r
645 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
646 if (result != noErr || dataSize == 0) {
\r
647 free( bufferList );
\r
648 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
649 errorText_ = errorStream_.str();
\r
650 error( RtError::WARNING );
\r
654 // Get input channel information.
\r
655 nStreams = bufferList->mNumberBuffers;
\r
656 for ( i=0; i<nStreams; i++ )
\r
657 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
658 free( bufferList );
\r
660 // If device opens for both playback and capture, we determine the channels.
\r
661 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
662 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
664 // Probe the device sample rates.
\r
665 bool isInput = false;
\r
666 if ( info.outputChannels == 0 ) isInput = true;
\r
668 // Determine the supported sample rates.
\r
669 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
670 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
671 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
672 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
673 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
674 errorText_ = errorStream_.str();
\r
675 error( RtError::WARNING );
\r
679 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
680 AudioValueRange rangeList[ nRanges ];
\r
681 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
682 if ( result != kAudioHardwareNoError ) {
\r
683 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
684 errorText_ = errorStream_.str();
\r
685 error( RtError::WARNING );
\r
689 Float64 minimumRate = 100000000.0, maximumRate = 0.0;
\r
690 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
691 if ( rangeList[i].mMinimum < minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
692 if ( rangeList[i].mMaximum > maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
695 info.sampleRates.clear();
\r
696 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
697 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
698 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
701 if ( info.sampleRates.size() == 0 ) {
\r
702 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
703 errorText_ = errorStream_.str();
\r
704 error( RtError::WARNING );
\r
708 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
709 // Thus, any other "physical" formats supported by the device are of
\r
710 // no interest to the client.
\r
711 info.nativeFormats = RTAUDIO_FLOAT32;
\r
713 if ( info.outputChannels > 0 )
\r
714 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
715 if ( info.inputChannels > 0 )
\r
716 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
718 info.probed = true;
\r
722 OSStatus callbackHandler( AudioDeviceID inDevice,
\r
723 const AudioTimeStamp* inNow,
\r
724 const AudioBufferList* inInputData,
\r
725 const AudioTimeStamp* inInputTime,
\r
726 AudioBufferList* outOutputData,
\r
727 const AudioTimeStamp* inOutputTime,
\r
728 void* infoPointer )
\r
730 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
732 RtApiCore *object = (RtApiCore *) info->object;
\r
733 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
734 return kAudioHardwareUnspecifiedError;
\r
736 return kAudioHardwareNoError;
\r
739 OSStatus xrunListener( AudioObjectID inDevice,
\r
741 const AudioObjectPropertyAddress properties[],
\r
742 void* handlePointer )
\r
744 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
745 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
746 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
747 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
748 handle->xrun[1] = true;
\r
750 handle->xrun[0] = true;
\r
754 return kAudioHardwareNoError;
\r
757 OSStatus rateListener( AudioObjectID inDevice,
\r
759 const AudioObjectPropertyAddress properties[],
\r
760 void* ratePointer )
\r
763 Float64 *rate = (Float64 *) ratePointer;
\r
764 UInt32 dataSize = sizeof( Float64 );
\r
765 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
766 kAudioObjectPropertyScopeGlobal,
\r
767 kAudioObjectPropertyElementMaster };
\r
768 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
769 return kAudioHardwareNoError;
\r
772 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
773 unsigned int firstChannel, unsigned int sampleRate,
\r
774 RtAudioFormat format, unsigned int *bufferSize,
\r
775 RtAudio::StreamOptions *options, const std::string &deviceName )
\r
778 unsigned int nDevices = getDeviceCount();
\r
779 if ( nDevices == 0 ) {
\r
780 // This should not happen because a check is made before this function is called.
\r
781 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
785 if ( device >= nDevices ) {
\r
786 // This should not happen because a check is made before this function is called.
\r
787 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
791 AudioDeviceID deviceList[ nDevices ];
\r
792 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
793 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
794 kAudioObjectPropertyScopeGlobal,
\r
795 kAudioObjectPropertyElementMaster };
\r
796 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
797 0, NULL, &dataSize, (void *) &deviceList );
\r
798 if ( result != noErr ) {
\r
799 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
803 AudioDeviceID id = deviceList[ device ];
\r
805 // Setup for stream mode.
\r
806 bool isInput = false;
\r
807 if ( mode == INPUT ) {
\r
809 property.mScope = kAudioDevicePropertyScopeInput;
\r
812 property.mScope = kAudioDevicePropertyScopeOutput;
\r
814 // Get the stream "configuration".
\r
815 AudioBufferList *bufferList = nil;
\r
817 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
818 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
819 if ( result != noErr || dataSize == 0 ) {
\r
820 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
821 errorText_ = errorStream_.str();
\r
825 // Allocate the AudioBufferList.
\r
826 bufferList = (AudioBufferList *) malloc( dataSize );
\r
827 if ( bufferList == NULL ) {
\r
828 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
832 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
833 if (result != noErr || dataSize == 0) {
\r
834 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
835 errorText_ = errorStream_.str();
\r
839 // Search for one or more streams that contain the desired number of
\r
840 // channels. CoreAudio devices can have an arbitrary number of
\r
841 // streams and each stream can have an arbitrary number of channels.
\r
842 // For each stream, a single buffer of interleaved samples is
\r
843 // provided. RtAudio prefers the use of one stream of interleaved
\r
844 // data or multiple consecutive single-channel streams. However, we
\r
845 // now support multiple consecutive multi-channel streams of
\r
846 // interleaved data as well.
\r
847 UInt32 iStream, offsetCounter = firstChannel;
\r
848 UInt32 nStreams = bufferList->mNumberBuffers;
\r
849 bool monoMode = false;
\r
850 bool foundStream = false;
\r
852 // First check that the device supports the requested number of
\r
854 UInt32 deviceChannels = 0;
\r
855 for ( iStream=0; iStream<nStreams; iStream++ )
\r
856 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
858 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
859 free( bufferList );
\r
860 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
861 errorText_ = errorStream_.str();
\r
865 // Look for a single stream meeting our needs.
\r
866 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
867 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
868 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
869 if ( streamChannels >= channels + offsetCounter ) {
\r
870 firstStream = iStream;
\r
871 channelOffset = offsetCounter;
\r
872 foundStream = true;
\r
875 if ( streamChannels > offsetCounter ) break;
\r
876 offsetCounter -= streamChannels;
\r
879 // If we didn't find a single stream above, then we should be able
\r
880 // to meet the channel specification with multiple streams.
\r
881 if ( foundStream == false ) {
\r
883 offsetCounter = firstChannel;
\r
884 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
885 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
886 if ( streamChannels > offsetCounter ) break;
\r
887 offsetCounter -= streamChannels;
\r
890 firstStream = iStream;
\r
891 channelOffset = offsetCounter;
\r
892 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
894 if ( streamChannels > 1 ) monoMode = false;
\r
895 while ( channelCounter > 0 ) {
\r
896 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
897 if ( streamChannels > 1 ) monoMode = false;
\r
898 channelCounter -= streamChannels;
\r
903 free( bufferList );
\r
905 // Determine the buffer size.
\r
906 AudioValueRange bufferRange;
\r
907 dataSize = sizeof( AudioValueRange );
\r
908 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
909 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
911 if ( result != noErr ) {
\r
912 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
913 errorText_ = errorStream_.str();
\r
917 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
918 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
919 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
921 // Set the buffer size. For multiple streams, I'm assuming we only
\r
922 // need to make this setting for the master channel.
\r
923 UInt32 theSize = (UInt32) *bufferSize;
\r
924 dataSize = sizeof( UInt32 );
\r
925 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
926 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
928 if ( result != noErr ) {
\r
929 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
930 errorText_ = errorStream_.str();
\r
934 // If attempting to setup a duplex stream, the bufferSize parameter
\r
935 // MUST be the same in both directions!
\r
936 *bufferSize = theSize;
\r
937 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
938 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
939 errorText_ = errorStream_.str();
\r
943 stream_.bufferSize = *bufferSize;
\r
944 stream_.nBuffers = 1;
\r
946 // Try to set "hog" mode ... it's not clear to me this is working.
\r
947 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
949 dataSize = sizeof( hog_pid );
\r
950 property.mSelector = kAudioDevicePropertyHogMode;
\r
951 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
952 if ( result != noErr ) {
\r
953 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
954 errorText_ = errorStream_.str();
\r
958 if ( hog_pid != getpid() ) {
\r
959 hog_pid = getpid();
\r
960 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
961 if ( result != noErr ) {
\r
962 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
963 errorText_ = errorStream_.str();
\r
969 // Check and if necessary, change the sample rate for the device.
\r
970 Float64 nominalRate;
\r
971 dataSize = sizeof( Float64 );
\r
972 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
973 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
975 if ( result != noErr ) {
\r
976 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
977 errorText_ = errorStream_.str();
\r
981 // Only change the sample rate if off by more than 1 Hz.
\r
982 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
984 // Set a property listener for the sample rate change
\r
985 Float64 reportedRate = 0.0;
\r
986 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
987 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
988 if ( result != noErr ) {
\r
989 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
990 errorText_ = errorStream_.str();
\r
994 nominalRate = (Float64) sampleRate;
\r
995 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
997 if ( result != noErr ) {
\r
998 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
999 errorText_ = errorStream_.str();
\r
1003 // Now wait until the reported nominal rate is what we just set.
\r
1004 UInt32 microCounter = 0;
\r
1005 while ( reportedRate != nominalRate ) {
\r
1006 microCounter += 5000;
\r
1007 if ( microCounter > 5000000 ) break;
\r
1011 // Remove the property listener.
\r
1012 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1014 if ( microCounter > 5000000 ) {
\r
1015 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1016 errorText_ = errorStream_.str();
\r
1021 // Now set the stream format for all streams. Also, check the
\r
1022 // physical format of the device and change that if necessary.
\r
1023 AudioStreamBasicDescription description;
\r
1024 dataSize = sizeof( AudioStreamBasicDescription );
\r
1025 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1026 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1029 errorText_ = errorStream_.str();
\r
1033 // Set the sample rate and data format id. However, only make the
\r
1034 // change if the sample rate is not within 1.0 of the desired
\r
1035 // rate and the format is not linear pcm.
\r
1036 bool updateFormat = false;
\r
1037 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1038 description.mSampleRate = (Float64) sampleRate;
\r
1039 updateFormat = true;
\r
1042 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1043 description.mFormatID = kAudioFormatLinearPCM;
\r
1044 updateFormat = true;
\r
1047 if ( updateFormat ) {
\r
1048 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1049 if ( result != noErr ) {
\r
1050 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1051 errorText_ = errorStream_.str();
\r
1056 // Now check the physical format.
\r
1057 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1058 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1059 if ( result != noErr ) {
\r
1060 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1061 errorText_ = errorStream_.str();
\r
1065 //std::cout << "Current physical stream format:" << std::endl;
\r
1066 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1067 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1068 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1069 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1071 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1072 description.mFormatID = kAudioFormatLinearPCM;
\r
1073 //description.mSampleRate = (Float64) sampleRate;
\r
1074 AudioStreamBasicDescription testDescription = description;
\r
1075 UInt32 formatFlags;
\r
1077 // We'll try higher bit rates first and then work our way down.
\r
1078 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1079 formatFlags = description.mFormatFlags | kLinearPCMFormatFlagIsFloat & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1080 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1081 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1082 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1083 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1084 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1085 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1086 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1087 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1088 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1089 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1090 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1092 bool setPhysicalFormat = false;
\r
1093 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1094 testDescription = description;
\r
1095 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1096 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1097 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1098 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1100 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1101 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1102 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1103 if ( result == noErr ) {
\r
1104 setPhysicalFormat = true;
\r
1105 //std::cout << "Updated physical stream format:" << std::endl;
\r
1106 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1107 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1108 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1109 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1114 if ( !setPhysicalFormat ) {
\r
1115 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1116 errorText_ = errorStream_.str();
\r
1119 } // done setting virtual/physical formats.
\r
1121 // Get the stream / device latency.
\r
1123 dataSize = sizeof( UInt32 );
\r
1124 property.mSelector = kAudioDevicePropertyLatency;
\r
1125 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1126 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1127 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1129 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1130 errorText_ = errorStream_.str();
\r
1131 error( RtError::WARNING );
\r
1135 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1136 // always be presented in native-endian format, so we should never
\r
1137 // need to byte swap.
\r
1138 stream_.doByteSwap[mode] = false;
\r
1140 // From the CoreAudio documentation, PCM data must be supplied as
\r
1142 stream_.userFormat = format;
\r
1143 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1145 if ( streamCount == 1 )
\r
1146 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1147 else // multiple streams
\r
1148 stream_.nDeviceChannels[mode] = channels;
\r
1149 stream_.nUserChannels[mode] = channels;
\r
1150 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1151 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1152 else stream_.userInterleaved = true;
\r
1153 stream_.deviceInterleaved[mode] = true;
\r
1154 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1156 // Set flags for buffer conversion.
\r
1157 stream_.doConvertBuffer[mode] = false;
\r
1158 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1159 stream_.doConvertBuffer[mode] = true;
\r
1160 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1161 stream_.doConvertBuffer[mode] = true;
\r
1162 if ( streamCount == 1 ) {
\r
1163 if ( stream_.nUserChannels[mode] > 1 &&
\r
1164 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1165 stream_.doConvertBuffer[mode] = true;
\r
1167 else if ( monoMode && stream_.userInterleaved )
\r
1168 stream_.doConvertBuffer[mode] = true;
\r
1170 // Allocate our CoreHandle structure for the stream.
\r
1171 CoreHandle *handle = 0;
\r
1172 if ( stream_.apiHandle == 0 ) {
\r
1174 handle = new CoreHandle;
\r
1176 catch ( std::bad_alloc& ) {
\r
1177 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1181 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1182 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1185 stream_.apiHandle = (void *) handle;
\r
1188 handle = (CoreHandle *) stream_.apiHandle;
\r
1189 handle->iStream[mode] = firstStream;
\r
1190 handle->nStreams[mode] = streamCount;
\r
1191 handle->id[mode] = id;
\r
1193 // Allocate necessary internal buffers.
\r
1194 unsigned long bufferBytes;
\r
1195 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1196 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1197 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1198 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1199 if ( stream_.userBuffer[mode] == NULL ) {
\r
1200 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1204 // If possible, we will make use of the CoreAudio stream buffers as
\r
1205 // "device buffers". However, we can't do this if using multiple
\r
1207 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1209 bool makeBuffer = true;
\r
1210 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1211 if ( mode == INPUT ) {
\r
1212 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1213 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1214 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1218 if ( makeBuffer ) {
\r
1219 bufferBytes *= *bufferSize;
\r
1220 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1221 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1222 if ( stream_.deviceBuffer == NULL ) {
\r
1223 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1229 stream_.sampleRate = sampleRate;
\r
1230 stream_.device[mode] = device;
\r
1231 stream_.state = STREAM_STOPPED;
\r
1232 stream_.callbackInfo.object = (void *) this;
\r
1234 // Setup the buffer conversion information structure.
\r
1235 if ( stream_.doConvertBuffer[mode] ) {
\r
1236 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1237 else setConvertInfo( mode, channelOffset );
\r
1240 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1241 // Only one callback procedure per device.
\r
1242 stream_.mode = DUPLEX;
\r
1244 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1245 if ( result != noErr ) {
\r
1246 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1247 errorText_ = errorStream_.str();
\r
1250 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1251 stream_.mode = DUPLEX;
\r
1253 stream_.mode = mode;
\r
1256 // Setup the device property listener for over/underload.
\r
1257 property.mSelector = kAudioDeviceProcessorOverload;
\r
1258 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1264 pthread_cond_destroy( &handle->condition );
\r
1266 stream_.apiHandle = 0;
\r
1269 for ( int i=0; i<2; i++ ) {
\r
1270 if ( stream_.userBuffer[i] ) {
\r
1271 free( stream_.userBuffer[i] );
\r
1272 stream_.userBuffer[i] = 0;
\r
1276 if ( stream_.deviceBuffer ) {
\r
1277 free( stream_.deviceBuffer );
\r
1278 stream_.deviceBuffer = 0;
\r
1284 void RtApiCore :: closeStream( void )
\r
1286 if ( stream_.state == STREAM_CLOSED ) {
\r
1287 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
\r
1288 error( RtError::WARNING );
\r
1292 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1293 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1294 if ( stream_.state == STREAM_RUNNING )
\r
1295 AudioDeviceStop( handle->id[0], handle->procId[0] );
\r
1296 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
\r
1299 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1300 if ( stream_.state == STREAM_RUNNING )
\r
1301 AudioDeviceStop( handle->id[1], handle->procId[1] );
\r
1302 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
\r
1305 for ( int i=0; i<2; i++ ) {
\r
1306 if ( stream_.userBuffer[i] ) {
\r
1307 free( stream_.userBuffer[i] );
\r
1308 stream_.userBuffer[i] = 0;
\r
1312 if ( stream_.deviceBuffer ) {
\r
1313 free( stream_.deviceBuffer );
\r
1314 stream_.deviceBuffer = 0;
\r
1317 // Destroy pthread condition variable.
\r
1318 pthread_cond_destroy( &handle->condition );
\r
1320 stream_.apiHandle = 0;
\r
1322 stream_.mode = UNINITIALIZED;
\r
1323 stream_.state = STREAM_CLOSED;
\r
1326 void RtApiCore :: startStream( void )
\r
1329 if ( stream_.state == STREAM_RUNNING ) {
\r
1330 errorText_ = "RtApiCore::startStream(): the stream is already running!";
\r
1331 error( RtError::WARNING );
\r
1335 MUTEX_LOCK( &stream_.mutex );
\r
1337 OSStatus result = noErr;
\r
1338 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1339 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1341 result = AudioDeviceStart( handle->id[0], handle->procId[0] );
\r
1342 if ( result != noErr ) {
\r
1343 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
\r
1344 errorText_ = errorStream_.str();
\r
1349 if ( stream_.mode == INPUT ||
\r
1350 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1352 result = AudioDeviceStart( handle->id[1], handle->procId[1] );
\r
1353 if ( result != noErr ) {
\r
1354 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
\r
1355 errorText_ = errorStream_.str();
\r
1360 handle->drainCounter = 0;
\r
1361 handle->internalDrain = false;
\r
1362 stream_.state = STREAM_RUNNING;
\r
1365 MUTEX_UNLOCK( &stream_.mutex );
\r
1367 if ( result == noErr ) return;
\r
1368 error( RtError::SYSTEM_ERROR );
\r
1371 void RtApiCore :: stopStream( void )
\r
1374 if ( stream_.state == STREAM_STOPPED ) {
\r
1375 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
\r
1376 error( RtError::WARNING );
\r
1380 MUTEX_LOCK( &stream_.mutex );
\r
1382 if ( stream_.state == STREAM_STOPPED ) {
\r
1383 MUTEX_UNLOCK( &stream_.mutex );
\r
1387 OSStatus result = noErr;
\r
1388 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1389 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
1391 if ( handle->drainCounter == 0 ) {
\r
1392 handle->drainCounter = 2;
\r
1393 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
1396 MUTEX_UNLOCK( &stream_.mutex );
\r
1397 result = AudioDeviceStop( handle->id[0], callbackHandler );
\r
1398 MUTEX_LOCK( &stream_.mutex );
\r
1399 if ( result != noErr ) {
\r
1400 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
\r
1401 errorText_ = errorStream_.str();
\r
1406 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
\r
1408 MUTEX_UNLOCK( &stream_.mutex );
\r
1409 result = AudioDeviceStop( handle->id[1], callbackHandler );
\r
1410 MUTEX_LOCK( &stream_.mutex );
\r
1411 if ( result != noErr ) {
\r
1412 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
\r
1413 errorText_ = errorStream_.str();
\r
1418 stream_.state = STREAM_STOPPED;
\r
1421 MUTEX_UNLOCK( &stream_.mutex );
\r
1423 if ( result == noErr ) return;
\r
1424 error( RtError::SYSTEM_ERROR );
\r
1427 void RtApiCore :: abortStream( void )
\r
1430 if ( stream_.state == STREAM_STOPPED ) {
\r
1431 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
\r
1432 error( RtError::WARNING );
\r
1436 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1437 handle->drainCounter = 2;
\r
1442 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1443 const AudioBufferList *inBufferList,
\r
1444 const AudioBufferList *outBufferList )
\r
1446 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
1447 if ( stream_.state == STREAM_CLOSED ) {
\r
1448 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1449 error( RtError::WARNING );
\r
1453 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1454 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1456 // Check if we were draining the stream and signal is finished.
\r
1457 if ( handle->drainCounter > 3 ) {
\r
1458 if ( handle->internalDrain == true )
\r
1460 else // external call to stopStream()
\r
1461 pthread_cond_signal( &handle->condition );
\r
1465 MUTEX_LOCK( &stream_.mutex );
\r
1467 // The state might change while waiting on a mutex.
\r
1468 if ( stream_.state == STREAM_STOPPED ) {
\r
1469 MUTEX_UNLOCK( &stream_.mutex );
\r
1473 AudioDeviceID outputDevice = handle->id[0];
\r
1475 // Invoke user callback to get fresh output data UNLESS we are
\r
1476 // draining stream or duplex mode AND the input/output devices are
\r
1477 // different AND this function is called for the input device.
\r
1478 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1479 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1480 double streamTime = getStreamTime();
\r
1481 RtAudioStreamStatus status = 0;
\r
1482 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1483 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1484 handle->xrun[0] = false;
\r
1486 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1487 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1488 handle->xrun[1] = false;
\r
1491 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1492 stream_.bufferSize, streamTime, status, info->userData );
\r
1493 if ( handle->drainCounter == 2 ) {
\r
1494 MUTEX_UNLOCK( &stream_.mutex );
\r
1498 else if ( handle->drainCounter == 1 )
\r
1499 handle->internalDrain = true;
\r
1502 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1504 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1506 if ( handle->nStreams[0] == 1 ) {
\r
1507 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1509 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1511 else { // fill multiple streams with zeros
\r
1512 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1513 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1515 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1519 else if ( handle->nStreams[0] == 1 ) {
\r
1520 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1521 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1522 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1524 else { // copy from user buffer
\r
1525 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1526 stream_.userBuffer[0],
\r
1527 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1530 else { // fill multiple streams
\r
1531 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1532 if ( stream_.doConvertBuffer[0] ) {
\r
1533 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1534 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1537 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1538 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1539 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1540 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1541 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1544 else { // fill multiple multi-channel streams with interleaved data
\r
1545 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1546 Float32 *out, *in;
\r
1548 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1549 UInt32 inChannels = stream_.nUserChannels[0];
\r
1550 if ( stream_.doConvertBuffer[0] ) {
\r
1551 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1552 inChannels = stream_.nDeviceChannels[0];
\r
1555 if ( inInterleaved ) inOffset = 1;
\r
1556 else inOffset = stream_.bufferSize;
\r
1558 channelsLeft = inChannels;
\r
1559 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1561 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1562 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1565 // Account for possible channel offset in first stream
\r
1566 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1567 streamChannels -= stream_.channelOffset[0];
\r
1568 outJump = stream_.channelOffset[0];
\r
1572 // Account for possible unfilled channels at end of the last stream
\r
1573 if ( streamChannels > channelsLeft ) {
\r
1574 outJump = streamChannels - channelsLeft;
\r
1575 streamChannels = channelsLeft;
\r
1578 // Determine input buffer offsets and skips
\r
1579 if ( inInterleaved ) {
\r
1580 inJump = inChannels;
\r
1581 in += inChannels - channelsLeft;
\r
1585 in += (inChannels - channelsLeft) * inOffset;
\r
1588 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1589 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1590 *out++ = in[j*inOffset];
\r
1595 channelsLeft -= streamChannels;
\r
1600 if ( handle->drainCounter ) {
\r
1601 handle->drainCounter++;
\r
1606 AudioDeviceID inputDevice;
\r
1607 inputDevice = handle->id[1];
\r
1608 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1610 if ( handle->nStreams[1] == 1 ) {
\r
1611 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1612 convertBuffer( stream_.userBuffer[1],
\r
1613 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1614 stream_.convertInfo[1] );
\r
1616 else { // copy to user buffer
\r
1617 memcpy( stream_.userBuffer[1],
\r
1618 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1619 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1622 else { // read from multiple streams
\r
1623 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1624 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1626 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1627 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1628 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1629 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1630 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1633 else { // read from multiple multi-channel streams
\r
1634 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1635 Float32 *out, *in;
\r
1637 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1638 UInt32 outChannels = stream_.nUserChannels[1];
\r
1639 if ( stream_.doConvertBuffer[1] ) {
\r
1640 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1641 outChannels = stream_.nDeviceChannels[1];
\r
1644 if ( outInterleaved ) outOffset = 1;
\r
1645 else outOffset = stream_.bufferSize;
\r
1647 channelsLeft = outChannels;
\r
1648 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1650 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1651 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1654 // Account for possible channel offset in first stream
\r
1655 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1656 streamChannels -= stream_.channelOffset[1];
\r
1657 inJump = stream_.channelOffset[1];
\r
1661 // Account for possible unread channels at end of the last stream
\r
1662 if ( streamChannels > channelsLeft ) {
\r
1663 inJump = streamChannels - channelsLeft;
\r
1664 streamChannels = channelsLeft;
\r
1667 // Determine output buffer offsets and skips
\r
1668 if ( outInterleaved ) {
\r
1669 outJump = outChannels;
\r
1670 out += outChannels - channelsLeft;
\r
1674 out += (outChannels - channelsLeft) * outOffset;
\r
1677 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1678 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1679 out[j*outOffset] = *in++;
\r
1684 channelsLeft -= streamChannels;
\r
1688 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1689 convertBuffer( stream_.userBuffer[1],
\r
1690 stream_.deviceBuffer,
\r
1691 stream_.convertInfo[1] );
\r
1697 MUTEX_UNLOCK( &stream_.mutex );
\r
1699 RtApi::tickStreamTime();
\r
1703 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1707 case kAudioHardwareNotRunningError:
\r
1708 return "kAudioHardwareNotRunningError";
\r
1710 case kAudioHardwareUnspecifiedError:
\r
1711 return "kAudioHardwareUnspecifiedError";
\r
1713 case kAudioHardwareUnknownPropertyError:
\r
1714 return "kAudioHardwareUnknownPropertyError";
\r
1716 case kAudioHardwareBadPropertySizeError:
\r
1717 return "kAudioHardwareBadPropertySizeError";
\r
1719 case kAudioHardwareIllegalOperationError:
\r
1720 return "kAudioHardwareIllegalOperationError";
\r
1722 case kAudioHardwareBadObjectError:
\r
1723 return "kAudioHardwareBadObjectError";
\r
1725 case kAudioHardwareBadDeviceError:
\r
1726 return "kAudioHardwareBadDeviceError";
\r
1728 case kAudioHardwareBadStreamError:
\r
1729 return "kAudioHardwareBadStreamError";
\r
1731 case kAudioHardwareUnsupportedOperationError:
\r
1732 return "kAudioHardwareUnsupportedOperationError";
\r
1734 case kAudioDeviceUnsupportedFormatError:
\r
1735 return "kAudioDeviceUnsupportedFormatError";
\r
1737 case kAudioDevicePermissionsError:
\r
1738 return "kAudioDevicePermissionsError";
\r
1741 return "CoreAudio unknown error";
\r
1745 //******************** End of __MACOSX_CORE__ *********************//
\r
1748 #if defined(__UNIX_JACK__)
\r
1750 // JACK is a low-latency audio server, originally written for the
\r
1751 // GNU/Linux operating system and now also ported to OS-X. It can
\r
1752 // connect a number of different applications to an audio device, as
\r
1753 // well as allowing them to share audio between themselves.
\r
1755 // When using JACK with RtAudio, "devices" refer to JACK clients that
\r
1756 // have ports connected to the server. The JACK server is typically
\r
1757 // started in a terminal as follows:
\r
1759 // .jackd -d alsa -d hw:0
\r
1761 // or through an interface program such as qjackctl. Many of the
\r
1762 // parameters normally set for a stream are fixed by the JACK server
\r
1763 // and can be specified when the JACK server is started. In
\r
1766 // .jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
\r
1768 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
\r
1769 // frames, and number of buffers = 4. Once the server is running, it
\r
1770 // is not possible to override these values. If the values are not
\r
1771 // specified in the command-line, the JACK server uses default values.
\r
1773 // The JACK server does not have to be running when an instance of
\r
1774 // RtApiJack is created, though the function getDeviceCount() will
\r
1775 // report 0 devices found until JACK has been started. When no
\r
1776 // devices are available (i.e., the JACK server is not running), a
\r
1777 // stream cannot be opened.
\r
1779 #include <jack/jack.h>
\r
1780 #include <unistd.h>
\r
1783 // A structure to hold various information related to the Jack API
\r
1784 // implementation.
\r
1785 struct JackHandle {
\r
1786 jack_client_t *client;
\r
1787 jack_port_t **ports[2];
\r
1788 std::string deviceName[2];
\r
1790 pthread_cond_t condition;
\r
1791 int drainCounter; // Tracks callback counts when draining
\r
1792 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1795 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
1798 ThreadHandle threadId;
\r
1799 void jackSilentError( const char * ) {};
\r
1801 RtApiJack :: RtApiJack()
\r
1803 // Nothing to do here.
\r
1804 #if !defined(__RTAUDIO_DEBUG__)
\r
1805 // Turn off Jack's internal error reporting.
\r
1806 jack_set_error_function( &jackSilentError );
\r
1810 RtApiJack :: ~RtApiJack()
\r
1812 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1815 unsigned int RtApiJack :: getDeviceCount( void )
\r
1817 // See if we can become a jack client.
\r
1818 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1819 jack_status_t *status = NULL;
\r
1820 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1821 if ( client == 0 ) return 0;
\r
1823 const char **ports;
\r
1824 std::string port, previousPort;
\r
1825 unsigned int nChannels = 0, nDevices = 0;
\r
1826 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1828 // Parse the port names up to the first colon (:).
\r
1829 size_t iColon = 0;
\r
1831 port = (char *) ports[ nChannels ];
\r
1832 iColon = port.find(":");
\r
1833 if ( iColon != std::string::npos ) {
\r
1834 port = port.substr( 0, iColon + 1 );
\r
1835 if ( port != previousPort ) {
\r
1837 previousPort = port;
\r
1840 } while ( ports[++nChannels] );
\r
1844 jack_client_close( client );
\r
1848 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1850 RtAudio::DeviceInfo info;
\r
1851 info.probed = false;
\r
1853 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1854 jack_status_t *status = NULL;
\r
1855 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1856 if ( client == 0 ) {
\r
1857 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1858 error( RtError::WARNING );
\r
1862 const char **ports;
\r
1863 std::string port, previousPort;
\r
1864 unsigned int nPorts = 0, nDevices = 0;
\r
1865 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1867 // Parse the port names up to the first colon (:).
\r
1868 size_t iColon = 0;
\r
1870 port = (char *) ports[ nPorts ];
\r
1871 iColon = port.find(":");
\r
1872 if ( iColon != std::string::npos ) {
\r
1873 port = port.substr( 0, iColon );
\r
1874 if ( port != previousPort ) {
\r
1875 if ( nDevices == device ) info.name = port;
\r
1877 previousPort = port;
\r
1880 } while ( ports[++nPorts] );
\r
1884 if ( device >= nDevices ) {
\r
1885 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1886 error( RtError::INVALID_USE );
\r
1889 // Get the current jack server sample rate.
\r
1890 info.sampleRates.clear();
\r
1891 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
1893 // Count the available ports containing the client name as device
\r
1894 // channels. Jack "input ports" equal RtAudio output channels.
\r
1895 unsigned int nChannels = 0;
\r
1896 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
1898 while ( ports[ nChannels ] ) nChannels++;
\r
1900 info.outputChannels = nChannels;
\r
1903 // Jack "output ports" equal RtAudio input channels.
\r
1905 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
1907 while ( ports[ nChannels ] ) nChannels++;
\r
1909 info.inputChannels = nChannels;
\r
1912 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
1913 jack_client_close(client);
\r
1914 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
1915 error( RtError::WARNING );
\r
1919 // If device opens for both playback and capture, we determine the channels.
\r
1920 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
1921 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
1923 // Jack always uses 32-bit floats.
\r
1924 info.nativeFormats = RTAUDIO_FLOAT32;
\r
1926 // Jack doesn't provide default devices so we'll use the first available one.
\r
1927 if ( device == 0 && info.outputChannels > 0 )
\r
1928 info.isDefaultOutput = true;
\r
1929 if ( device == 0 && info.inputChannels > 0 )
\r
1930 info.isDefaultInput = true;
\r
1932 jack_client_close(client);
\r
1933 info.probed = true;
\r
1937 int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
1939 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1941 RtApiJack *object = (RtApiJack *) info->object;
\r
1942 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
1947 // This function will be called by a spawned thread when the Jack
\r
1948 // server signals that it is shutting down. It is necessary to handle
\r
1949 // it this way because the jackShutdown() function must return before
\r
1950 // the jack_deactivate() function (in closeStream()) will return.
\r
1951 extern "C" void *jackCloseStream( void *ptr )
\r
1953 CallbackInfo *info = (CallbackInfo *) ptr;
\r
1954 RtApiJack *object = (RtApiJack *) info->object;
\r
1956 object->closeStream();
\r
1958 pthread_exit( NULL );
\r
1960 void jackShutdown( void *infoPointer )
\r
1962 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
1963 RtApiJack *object = (RtApiJack *) info->object;
\r
1965 // Check current stream state. If stopped, then we'll assume this
\r
1966 // was called as a result of a call to RtApiJack::stopStream (the
\r
1967 // deactivation of a client handle causes this function to be called).
\r
1968 // If not, we'll assume the Jack server is shutting down or some
\r
1969 // other problem occurred and we should close the stream.
\r
1970 if ( object->isStreamRunning() == false ) return;
\r
1972 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
1973 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
1976 int jackXrun( void *infoPointer )
\r
1978 JackHandle *handle = (JackHandle *) infoPointer;
\r
1980 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
1981 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
1986 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
1987 unsigned int firstChannel, unsigned int sampleRate,
\r
1988 RtAudioFormat format, unsigned int *bufferSize,
\r
1989 RtAudio::StreamOptions *options, const std::string &aDeviceName )
\r
1991 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
1993 // Look for jack server and try to become a client (only do once per stream).
\r
1994 jack_client_t *client = 0;
\r
1995 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
1996 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1997 jack_status_t *status = NULL;
\r
1998 if ( options && !options->streamName.empty() )
\r
1999 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2001 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2002 if ( client == 0 ) {
\r
2003 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2004 error( RtError::WARNING );
\r
2009 // The handle must have been created on an earlier pass.
\r
2010 client = handle->client;
\r
2013 std::string deviceName = aDeviceName;
\r
2014 const char **ports;
\r
2015 if ( deviceName.size() == 0 ) {
\r
2016 std::string port, previousPort, deviceName;
\r
2017 unsigned int nPorts = 0, nDevices = 0;
\r
2018 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2020 // Parse the port names up to the first colon (:).
\r
2021 size_t iColon = 0;
\r
2023 port = (char *) ports[ nPorts ];
\r
2024 iColon = port.find(":");
\r
2025 if ( iColon != std::string::npos ) {
\r
2026 port = port.substr( 0, iColon );
\r
2027 if ( port != previousPort ) {
\r
2028 if ( nDevices == device ) deviceName = port;
\r
2030 previousPort = port;
\r
2033 } while ( ports[++nPorts] );
\r
2036 if ( device >= nDevices ) {
\r
2037 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2042 // Count the available ports containing the client name as device
\r
2043 // channels. Jack "input ports" equal RtAudio output channels.
\r
2044 unsigned int nChannels = 0;
\r
2045 unsigned long flag = JackPortIsInput;
\r
2046 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2047 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2049 while ( ports[ nChannels ] ) nChannels++;
\r
2053 // Compare the jack ports for specified client to the requested number of channels.
\r
2054 if ( nChannels < (channels + firstChannel) ) {
\r
2055 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2056 errorText_ = errorStream_.str();
\r
2060 // Check the jack server sample rate.
\r
2061 unsigned int jackRate = jack_get_sample_rate( client );
\r
2062 if ( sampleRate != jackRate ) {
\r
2063 jack_client_close( client );
\r
2064 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2065 errorText_ = errorStream_.str();
\r
2068 stream_.sampleRate = jackRate;
\r
2070 // Get the latency of the JACK port.
\r
2071 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2072 if ( ports[ firstChannel ] )
\r
2073 stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2076 // The jack server always uses 32-bit floating-point data.
\r
2077 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2078 stream_.userFormat = format;
\r
2080 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2081 else stream_.userInterleaved = true;
\r
2083 // Jack always uses non-interleaved buffers.
\r
2084 stream_.deviceInterleaved[mode] = false;
\r
2086 // Jack always provides host byte-ordered data.
\r
2087 stream_.doByteSwap[mode] = false;
\r
2089 // Get the buffer size. The buffer size and number of buffers
\r
2090 // (periods) is set when the jack server is started.
\r
2091 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2092 *bufferSize = stream_.bufferSize;
\r
2094 stream_.nDeviceChannels[mode] = channels;
\r
2095 stream_.nUserChannels[mode] = channels;
\r
2097 // Set flags for buffer conversion.
\r
2098 stream_.doConvertBuffer[mode] = false;
\r
2099 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2100 stream_.doConvertBuffer[mode] = true;
\r
2101 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2102 stream_.nUserChannels[mode] > 1 )
\r
2103 stream_.doConvertBuffer[mode] = true;
\r
2105 // Allocate our JackHandle structure for the stream.
\r
2106 if ( handle == 0 ) {
\r
2108 handle = new JackHandle;
\r
2110 catch ( std::bad_alloc& ) {
\r
2111 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2115 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2116 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2119 stream_.apiHandle = (void *) handle;
\r
2120 handle->client = client;
\r
2122 handle->deviceName[mode] = deviceName;
\r
2124 // Allocate necessary internal buffers.
\r
2125 unsigned long bufferBytes;
\r
2126 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2127 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2128 if ( stream_.userBuffer[mode] == NULL ) {
\r
2129 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2133 if ( stream_.doConvertBuffer[mode] ) {
\r
2135 bool makeBuffer = true;
\r
2136 if ( mode == OUTPUT )
\r
2137 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2138 else { // mode == INPUT
\r
2139 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2140 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2141 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2142 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2146 if ( makeBuffer ) {
\r
2147 bufferBytes *= *bufferSize;
\r
2148 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2149 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2150 if ( stream_.deviceBuffer == NULL ) {
\r
2151 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2157 // Allocate memory for the Jack ports (channels) identifiers.
\r
2158 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2159 if ( handle->ports[mode] == NULL ) {
\r
2160 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2164 stream_.device[mode] = device;
\r
2165 stream_.channelOffset[mode] = firstChannel;
\r
2166 stream_.state = STREAM_STOPPED;
\r
2167 stream_.callbackInfo.object = (void *) this;
\r
2169 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2170 // We had already set up the stream for output.
\r
2171 stream_.mode = DUPLEX;
\r
2173 stream_.mode = mode;
\r
2174 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2175 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2176 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2179 // Register our ports.
\r
2181 if ( mode == OUTPUT ) {
\r
2182 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2183 snprintf( label, 64, "outport %d", i );
\r
2184 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2185 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2189 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2190 snprintf( label, 64, "inport %d", i );
\r
2191 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2192 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2196 // Setup the buffer conversion information structure. We don't use
\r
2197 // buffers to do channel offsets, so we override that parameter
\r
2199 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2205 pthread_cond_destroy( &handle->condition );
\r
2206 jack_client_close( handle->client );
\r
2208 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2209 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2212 stream_.apiHandle = 0;
\r
2215 for ( int i=0; i<2; i++ ) {
\r
2216 if ( stream_.userBuffer[i] ) {
\r
2217 free( stream_.userBuffer[i] );
\r
2218 stream_.userBuffer[i] = 0;
\r
2222 if ( stream_.deviceBuffer ) {
\r
2223 free( stream_.deviceBuffer );
\r
2224 stream_.deviceBuffer = 0;
\r
2230 void RtApiJack :: closeStream( void )
\r
2232 if ( stream_.state == STREAM_CLOSED ) {
\r
2233 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2234 error( RtError::WARNING );
\r
2238 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2241 if ( stream_.state == STREAM_RUNNING )
\r
2242 jack_deactivate( handle->client );
\r
2244 jack_client_close( handle->client );
\r
2248 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2249 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2250 pthread_cond_destroy( &handle->condition );
\r
2252 stream_.apiHandle = 0;
\r
2255 for ( int i=0; i<2; i++ ) {
\r
2256 if ( stream_.userBuffer[i] ) {
\r
2257 free( stream_.userBuffer[i] );
\r
2258 stream_.userBuffer[i] = 0;
\r
2262 if ( stream_.deviceBuffer ) {
\r
2263 free( stream_.deviceBuffer );
\r
2264 stream_.deviceBuffer = 0;
\r
2267 stream_.mode = UNINITIALIZED;
\r
2268 stream_.state = STREAM_CLOSED;
\r
2271 void RtApiJack :: startStream( void )
\r
2274 if ( stream_.state == STREAM_RUNNING ) {
\r
2275 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2276 error( RtError::WARNING );
\r
2280 MUTEX_LOCK(&stream_.mutex);
\r
2282 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2283 int result = jack_activate( handle->client );
\r
2285 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2289 const char **ports;
\r
2291 // Get the list of available ports.
\r
2292 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2294 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2295 if ( ports == NULL) {
\r
2296 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2300 // Now make the port connections. Since RtAudio wasn't designed to
\r
2301 // allow the user to select particular channels of a device, we'll
\r
2302 // just open the first "nChannels" ports with offset.
\r
2303 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2305 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2306 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2309 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2316 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2318 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2319 if ( ports == NULL) {
\r
2320 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2324 // Now make the port connections. See note above.
\r
2325 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2327 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2328 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2331 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2338 handle->drainCounter = 0;
\r
2339 handle->internalDrain = false;
\r
2340 stream_.state = STREAM_RUNNING;
\r
2343 MUTEX_UNLOCK(&stream_.mutex);
\r
2345 if ( result == 0 ) return;
\r
2346 error( RtError::SYSTEM_ERROR );
\r
2349 void RtApiJack :: stopStream( void )
\r
2352 if ( stream_.state == STREAM_STOPPED ) {
\r
2353 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2354 error( RtError::WARNING );
\r
2358 MUTEX_LOCK( &stream_.mutex );
\r
2360 if ( stream_.state == STREAM_STOPPED ) {
\r
2361 MUTEX_UNLOCK( &stream_.mutex );
\r
2365 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2366 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2368 if ( handle->drainCounter == 0 ) {
\r
2369 handle->drainCounter = 2;
\r
2370 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2374 jack_deactivate( handle->client );
\r
2375 stream_.state = STREAM_STOPPED;
\r
2377 MUTEX_UNLOCK( &stream_.mutex );
\r
2380 void RtApiJack :: abortStream( void )
\r
2383 if ( stream_.state == STREAM_STOPPED ) {
\r
2384 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2385 error( RtError::WARNING );
\r
2389 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2390 handle->drainCounter = 2;
\r
2395 // This function will be called by a spawned thread when the user
\r
2396 // callback function signals that the stream should be stopped or
\r
2397 // aborted. It is necessary to handle it this way because the
\r
2398 // callbackEvent() function must return before the jack_deactivate()
\r
2399 // function will return.
\r
2400 extern "C" void *jackStopStream( void *ptr )
\r
2402 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2403 RtApiJack *object = (RtApiJack *) info->object;
\r
2405 object->stopStream();
\r
2407 pthread_exit( NULL );
\r
2410 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2412 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
2413 if ( stream_.state == STREAM_CLOSED ) {
\r
2414 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2415 error( RtError::WARNING );
\r
2418 if ( stream_.bufferSize != nframes ) {
\r
2419 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2420 error( RtError::WARNING );
\r
2424 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2425 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2427 // Check if we were draining the stream and signal is finished.
\r
2428 if ( handle->drainCounter > 3 ) {
\r
2429 if ( handle->internalDrain == true )
\r
2430 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2432 pthread_cond_signal( &handle->condition );
\r
2436 MUTEX_LOCK( &stream_.mutex );
\r
2438 // The state might change while waiting on a mutex.
\r
2439 if ( stream_.state == STREAM_STOPPED ) {
\r
2440 MUTEX_UNLOCK( &stream_.mutex );
\r
2444 // Invoke user callback first, to get fresh output data.
\r
2445 if ( handle->drainCounter == 0 ) {
\r
2446 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2447 double streamTime = getStreamTime();
\r
2448 RtAudioStreamStatus status = 0;
\r
2449 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2450 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2451 handle->xrun[0] = false;
\r
2453 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2454 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2455 handle->xrun[1] = false;
\r
2457 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2458 stream_.bufferSize, streamTime, status, info->userData );
\r
2459 if ( handle->drainCounter == 2 ) {
\r
2460 MUTEX_UNLOCK( &stream_.mutex );
\r
2462 pthread_create( &id, NULL, jackStopStream, info );
\r
2465 else if ( handle->drainCounter == 1 )
\r
2466 handle->internalDrain = true;
\r
2469 jack_default_audio_sample_t *jackbuffer;
\r
2470 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2471 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2473 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2475 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2476 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2477 memset( jackbuffer, 0, bufferBytes );
\r
2481 else if ( stream_.doConvertBuffer[0] ) {
\r
2483 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2485 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2486 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2487 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2490 else { // no buffer conversion
\r
2491 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2492 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2493 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2497 if ( handle->drainCounter ) {
\r
2498 handle->drainCounter++;
\r
2503 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2505 if ( stream_.doConvertBuffer[1] ) {
\r
2506 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2507 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2508 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2510 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2512 else { // no buffer conversion
\r
2513 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2514 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2515 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2521 MUTEX_UNLOCK(&stream_.mutex);
\r
2523 RtApi::tickStreamTime();
\r
2526 //******************** End of __UNIX_JACK__ *********************//
\r
2529 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
2531 // The ASIO API is designed around a callback scheme, so this
\r
2532 // implementation is similar to that used for OS-X CoreAudio and Linux
\r
2533 // Jack. The primary constraint with ASIO is that it only allows
\r
2534 // access to a single driver at a time. Thus, it is not possible to
\r
2535 // have more than one simultaneous RtAudio stream.
\r
2537 // This implementation also requires a number of external ASIO files
\r
2538 // and a few global variables. The ASIO callback scheme does not
\r
2539 // allow for the passing of user data, so we must create a global
\r
2540 // pointer to our callbackInfo structure.
\r
2542 // On unix systems, we make use of a pthread condition variable.
\r
2543 // Since there is no equivalent in Windows, I hacked something based
\r
2544 // on information found in
\r
2545 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2547 #include "asiosys.h"
\r
2549 #include "iasiothiscallresolver.h"
\r
2550 #include "asiodrivers.h"
\r
2553 AsioDrivers drivers;
\r
2554 ASIOCallbacks asioCallbacks;
\r
2555 ASIODriverInfo driverInfo;
\r
2556 CallbackInfo *asioCallbackInfo;
\r
2559 struct AsioHandle {
\r
2560 int drainCounter; // Tracks callback counts when draining
\r
2561 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2562 ASIOBufferInfo *bufferInfos;
\r
2566 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2569 // Function declarations (definitions at end of section)
\r
2570 static const char* getAsioErrorString( ASIOError result );
\r
2571 void sampleRateChanged( ASIOSampleRate sRate );
\r
2572 long asioMessages( long selector, long value, void* message, double* opt );
\r
2574 RtApiAsio :: RtApiAsio()
\r
2576 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2577 // CoInitialize beforehand, but it must be for appartment threading
\r
2578 // (in which case, CoInitilialize will return S_FALSE here).
\r
2579 coInitialized_ = false;
\r
2580 HRESULT hr = CoInitialize( NULL );
\r
2581 if ( FAILED(hr) ) {
\r
2582 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2583 error( RtError::WARNING );
\r
2585 coInitialized_ = true;
\r
2587 drivers.removeCurrentDriver();
\r
2588 driverInfo.asioVersion = 2;
\r
2590 // See note in DirectSound implementation about GetDesktopWindow().
\r
2591 driverInfo.sysRef = GetForegroundWindow();
\r
2594 RtApiAsio :: ~RtApiAsio()
\r
2596 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2597 if ( coInitialized_ ) CoUninitialize();
\r
2600 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2602 return (unsigned int) drivers.asioGetNumDev();
\r
2605 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2607 RtAudio::DeviceInfo info;
\r
2608 info.probed = false;
\r
2611 unsigned int nDevices = getDeviceCount();
\r
2612 if ( nDevices == 0 ) {
\r
2613 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2614 error( RtError::INVALID_USE );
\r
2617 if ( device >= nDevices ) {
\r
2618 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2619 error( RtError::INVALID_USE );
\r
2622 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2623 if ( stream_.state != STREAM_CLOSED ) {
\r
2624 if ( device >= devices_.size() ) {
\r
2625 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2626 error( RtError::WARNING );
\r
2629 return devices_[ device ];
\r
2632 char driverName[32];
\r
2633 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2634 if ( result != ASE_OK ) {
\r
2635 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2636 errorText_ = errorStream_.str();
\r
2637 error( RtError::WARNING );
\r
2641 info.name = driverName;
\r
2643 if ( !drivers.loadDriver( driverName ) ) {
\r
2644 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2645 errorText_ = errorStream_.str();
\r
2646 error( RtError::WARNING );
\r
2650 result = ASIOInit( &driverInfo );
\r
2651 if ( result != ASE_OK ) {
\r
2652 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2653 errorText_ = errorStream_.str();
\r
2654 error( RtError::WARNING );
\r
2658 // Determine the device channel information.
\r
2659 long inputChannels, outputChannels;
\r
2660 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2661 if ( result != ASE_OK ) {
\r
2662 drivers.removeCurrentDriver();
\r
2663 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2664 errorText_ = errorStream_.str();
\r
2665 error( RtError::WARNING );
\r
2669 info.outputChannels = outputChannels;
\r
2670 info.inputChannels = inputChannels;
\r
2671 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2672 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2674 // Determine the supported sample rates.
\r
2675 info.sampleRates.clear();
\r
2676 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2677 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2678 if ( result == ASE_OK )
\r
2679 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2682 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2683 ASIOChannelInfo channelInfo;
\r
2684 channelInfo.channel = 0;
\r
2685 channelInfo.isInput = true;
\r
2686 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2687 result = ASIOGetChannelInfo( &channelInfo );
\r
2688 if ( result != ASE_OK ) {
\r
2689 drivers.removeCurrentDriver();
\r
2690 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2691 errorText_ = errorStream_.str();
\r
2692 error( RtError::WARNING );
\r
2696 info.nativeFormats = 0;
\r
2697 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2698 info.nativeFormats |= RTAUDIO_SINT16;
\r
2699 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2700 info.nativeFormats |= RTAUDIO_SINT32;
\r
2701 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2702 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2703 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2704 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2706 if ( info.outputChannels > 0 )
\r
2707 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2708 if ( info.inputChannels > 0 )
\r
2709 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2711 info.probed = true;
\r
2712 drivers.removeCurrentDriver();
\r
2716 void bufferSwitch( long index, ASIOBool processNow )
\r
2718 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2719 object->callbackEvent( index );
\r
2722 void RtApiAsio :: saveDeviceInfo( void )
\r
2726 unsigned int nDevices = getDeviceCount();
\r
2727 devices_.resize( nDevices );
\r
2728 for ( unsigned int i=0; i<nDevices; i++ )
\r
2729 devices_[i] = getDeviceInfo( i );
\r
2732 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2733 unsigned int firstChannel, unsigned int sampleRate,
\r
2734 RtAudioFormat format, unsigned int *bufferSize,
\r
2735 RtAudio::StreamOptions *options, const std::string &deviceName )
\r
2737 // For ASIO, a duplex stream MUST use the same driver.
\r
2738 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2739 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2743 char driverName[32];
\r
2744 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2745 if ( result != ASE_OK ) {
\r
2746 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2747 errorText_ = errorStream_.str();
\r
2751 // Only load the driver once for duplex stream.
\r
2752 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2753 // The getDeviceInfo() function will not work when a stream is open
\r
2754 // because ASIO does not allow multiple devices to run at the same
\r
2755 // time. Thus, we'll probe the system before opening a stream and
\r
2756 // save the results for use by getDeviceInfo().
\r
2757 this->saveDeviceInfo();
\r
2759 if ( !drivers.loadDriver( driverName ) ) {
\r
2760 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2761 errorText_ = errorStream_.str();
\r
2765 result = ASIOInit( &driverInfo );
\r
2766 if ( result != ASE_OK ) {
\r
2767 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2768 errorText_ = errorStream_.str();
\r
2773 // Check the device channel count.
\r
2774 long inputChannels, outputChannels;
\r
2775 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2776 if ( result != ASE_OK ) {
\r
2777 drivers.removeCurrentDriver();
\r
2778 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2779 errorText_ = errorStream_.str();
\r
2783 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2784 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2785 drivers.removeCurrentDriver();
\r
2786 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2787 errorText_ = errorStream_.str();
\r
2790 stream_.nDeviceChannels[mode] = channels;
\r
2791 stream_.nUserChannels[mode] = channels;
\r
2792 stream_.channelOffset[mode] = firstChannel;
\r
2794 // Verify the sample rate is supported.
\r
2795 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2796 if ( result != ASE_OK ) {
\r
2797 drivers.removeCurrentDriver();
\r
2798 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2799 errorText_ = errorStream_.str();
\r
2803 // Get the current sample rate
\r
2804 ASIOSampleRate currentRate;
\r
2805 result = ASIOGetSampleRate( ¤tRate );
\r
2806 if ( result != ASE_OK ) {
\r
2807 drivers.removeCurrentDriver();
\r
2808 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2809 errorText_ = errorStream_.str();
\r
2813 // Set the sample rate only if necessary
\r
2814 if ( currentRate != sampleRate ) {
\r
2815 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2816 if ( result != ASE_OK ) {
\r
2817 drivers.removeCurrentDriver();
\r
2818 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2819 errorText_ = errorStream_.str();
\r
2824 // Determine the driver data type.
\r
2825 ASIOChannelInfo channelInfo;
\r
2826 channelInfo.channel = 0;
\r
2827 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2828 else channelInfo.isInput = true;
\r
2829 result = ASIOGetChannelInfo( &channelInfo );
\r
2830 if ( result != ASE_OK ) {
\r
2831 drivers.removeCurrentDriver();
\r
2832 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2833 errorText_ = errorStream_.str();
\r
2837 // Assuming WINDOWS host is always little-endian.
\r
2838 stream_.doByteSwap[mode] = false;
\r
2839 stream_.userFormat = format;
\r
2840 stream_.deviceFormat[mode] = 0;
\r
2841 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2842 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2843 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2845 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2846 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2847 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2849 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2850 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2851 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2853 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2854 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2855 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2858 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2859 drivers.removeCurrentDriver();
\r
2860 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2861 errorText_ = errorStream_.str();
\r
2865 // Set the buffer size. For a duplex stream, this will end up
\r
2866 // setting the buffer size based on the input constraints, which
\r
2868 long minSize, maxSize, preferSize, granularity;
\r
2869 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2870 if ( result != ASE_OK ) {
\r
2871 drivers.removeCurrentDriver();
\r
2872 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2873 errorText_ = errorStream_.str();
\r
2877 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2878 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2879 else if ( granularity == -1 ) {
\r
2880 // Make sure bufferSize is a power of two.
\r
2881 int log2_of_min_size = 0;
\r
2882 int log2_of_max_size = 0;
\r
2884 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2885 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2886 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
2889 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
2890 int min_delta_num = log2_of_min_size;
\r
2892 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
2893 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
2894 if (current_delta < min_delta) {
\r
2895 min_delta = current_delta;
\r
2896 min_delta_num = i;
\r
2900 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
2901 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2902 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2904 else if ( granularity != 0 ) {
\r
2905 // Set to an even multiple of granularity, rounding up.
\r
2906 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
2909 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
2910 drivers.removeCurrentDriver();
\r
2911 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
2915 stream_.bufferSize = *bufferSize;
\r
2916 stream_.nBuffers = 2;
\r
2918 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2919 else stream_.userInterleaved = true;
\r
2921 // ASIO always uses non-interleaved buffers.
\r
2922 stream_.deviceInterleaved[mode] = false;
\r
2924 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
2925 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
2926 if ( handle == 0 ) {
\r
2928 handle = new AsioHandle;
\r
2930 catch ( std::bad_alloc& ) {
\r
2931 //if ( handle == NULL ) {
\r
2932 drivers.removeCurrentDriver();
\r
2933 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
2936 handle->bufferInfos = 0;
\r
2938 // Create a manual-reset event.
\r
2939 handle->condition = CreateEvent( NULL, // no security
\r
2940 TRUE, // manual-reset
\r
2941 FALSE, // non-signaled initially
\r
2942 NULL ); // unnamed
\r
2943 stream_.apiHandle = (void *) handle;
\r
2946 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
2947 // and output separately, we'll have to dispose of previously
\r
2948 // created output buffers for a duplex stream.
\r
2949 long inputLatency, outputLatency;
\r
2950 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
2951 ASIODisposeBuffers();
\r
2952 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
2955 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
2956 bool buffersAllocated = false;
\r
2957 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
2958 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
2959 if ( handle->bufferInfos == NULL ) {
\r
2960 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
2961 errorText_ = errorStream_.str();
\r
2965 ASIOBufferInfo *infos;
\r
2966 infos = handle->bufferInfos;
\r
2967 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
2968 infos->isInput = ASIOFalse;
\r
2969 infos->channelNum = i + stream_.channelOffset[0];
\r
2970 infos->buffers[0] = infos->buffers[1] = 0;
\r
2972 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
2973 infos->isInput = ASIOTrue;
\r
2974 infos->channelNum = i + stream_.channelOffset[1];
\r
2975 infos->buffers[0] = infos->buffers[1] = 0;
\r
2978 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
2979 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
2980 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
2981 asioCallbacks.asioMessage = &asioMessages;
\r
2982 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
2983 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
2984 if ( result != ASE_OK ) {
\r
2985 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
2986 errorText_ = errorStream_.str();
\r
2989 buffersAllocated = true;
\r
2991 // Set flags for buffer conversion.
\r
2992 stream_.doConvertBuffer[mode] = false;
\r
2993 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2994 stream_.doConvertBuffer[mode] = true;
\r
2995 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2996 stream_.nUserChannels[mode] > 1 )
\r
2997 stream_.doConvertBuffer[mode] = true;
\r
2999 // Allocate necessary internal buffers
\r
3000 unsigned long bufferBytes;
\r
3001 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3002 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3003 if ( stream_.userBuffer[mode] == NULL ) {
\r
3004 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3008 if ( stream_.doConvertBuffer[mode] ) {
\r
3010 bool makeBuffer = true;
\r
3011 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3012 if ( mode == INPUT ) {
\r
3013 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
3014 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3015 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3019 if ( makeBuffer ) {
\r
3020 bufferBytes *= *bufferSize;
\r
3021 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3022 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3023 if ( stream_.deviceBuffer == NULL ) {
\r
3024 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3030 stream_.sampleRate = sampleRate;
\r
3031 stream_.device[mode] = device;
\r
3032 stream_.state = STREAM_STOPPED;
\r
3033 asioCallbackInfo = &stream_.callbackInfo;
\r
3034 stream_.callbackInfo.object = (void *) this;
\r
3035 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3036 // We had already set up an output stream.
\r
3037 stream_.mode = DUPLEX;
\r
3039 stream_.mode = mode;
\r
3041 // Determine device latencies
\r
3042 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3043 if ( result != ASE_OK ) {
\r
3044 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3045 errorText_ = errorStream_.str();
\r
3046 error( RtError::WARNING); // warn but don't fail
\r
3049 stream_.latency[0] = outputLatency;
\r
3050 stream_.latency[1] = inputLatency;
\r
3053 // Setup the buffer conversion information structure. We don't use
\r
3054 // buffers to do channel offsets, so we override that parameter
\r
3056 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3061 if ( buffersAllocated )
\r
3062 ASIODisposeBuffers();
\r
3063 drivers.removeCurrentDriver();
\r
3066 CloseHandle( handle->condition );
\r
3067 if ( handle->bufferInfos )
\r
3068 free( handle->bufferInfos );
\r
3070 stream_.apiHandle = 0;
\r
3073 for ( int i=0; i<2; i++ ) {
\r
3074 if ( stream_.userBuffer[i] ) {
\r
3075 free( stream_.userBuffer[i] );
\r
3076 stream_.userBuffer[i] = 0;
\r
3080 if ( stream_.deviceBuffer ) {
\r
3081 free( stream_.deviceBuffer );
\r
3082 stream_.deviceBuffer = 0;
\r
3088 void RtApiAsio :: closeStream()
\r
3090 if ( stream_.state == STREAM_CLOSED ) {
\r
3091 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3092 error( RtError::WARNING );
\r
3096 if ( stream_.state == STREAM_RUNNING ) {
\r
3097 stream_.state = STREAM_STOPPED;
\r
3100 ASIODisposeBuffers();
\r
3101 drivers.removeCurrentDriver();
\r
3103 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3105 CloseHandle( handle->condition );
\r
3106 if ( handle->bufferInfos )
\r
3107 free( handle->bufferInfos );
\r
3109 stream_.apiHandle = 0;
\r
3112 for ( int i=0; i<2; i++ ) {
\r
3113 if ( stream_.userBuffer[i] ) {
\r
3114 free( stream_.userBuffer[i] );
\r
3115 stream_.userBuffer[i] = 0;
\r
3119 if ( stream_.deviceBuffer ) {
\r
3120 free( stream_.deviceBuffer );
\r
3121 stream_.deviceBuffer = 0;
\r
3124 stream_.mode = UNINITIALIZED;
\r
3125 stream_.state = STREAM_CLOSED;
\r
// Set when a stream stop has been initiated from within the ASIO callback,
// so callbackEvent() ignores further buffer-switch events while stopping.
bool stopThreadCalled = false;
3130 void RtApiAsio :: startStream()
\r
3133 if ( stream_.state == STREAM_RUNNING ) {
\r
3134 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3135 error( RtError::WARNING );
\r
3139 //MUTEX_LOCK( &stream_.mutex );
\r
3141 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3142 ASIOError result = ASIOStart();
\r
3143 if ( result != ASE_OK ) {
\r
3144 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3145 errorText_ = errorStream_.str();
\r
3149 handle->drainCounter = 0;
\r
3150 handle->internalDrain = false;
\r
3151 ResetEvent( handle->condition );
\r
3152 stream_.state = STREAM_RUNNING;
\r
3156 //MUTEX_UNLOCK( &stream_.mutex );
\r
3158 stopThreadCalled = false;
\r
3160 if ( result == ASE_OK ) return;
\r
3161 error( RtError::SYSTEM_ERROR );
\r
3164 void RtApiAsio :: stopStream()
\r
3167 if ( stream_.state == STREAM_STOPPED ) {
\r
3168 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3169 error( RtError::WARNING );
\r
3174 MUTEX_LOCK( &stream_.mutex );
\r
3176 if ( stream_.state == STREAM_STOPPED ) {
\r
3177 MUTEX_UNLOCK( &stream_.mutex );
\r
3182 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3183 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3184 if ( handle->drainCounter == 0 ) {
\r
3185 handle->drainCounter = 2;
\r
3186 // MUTEX_UNLOCK( &stream_.mutex );
\r
3187 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3188 //ResetEvent( handle->condition );
\r
3189 // MUTEX_LOCK( &stream_.mutex );
\r
3193 stream_.state = STREAM_STOPPED;
\r
3195 ASIOError result = ASIOStop();
\r
3196 if ( result != ASE_OK ) {
\r
3197 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3198 errorText_ = errorStream_.str();
\r
3201 // MUTEX_UNLOCK( &stream_.mutex );
\r
3203 if ( result == ASE_OK ) return;
\r
3204 error( RtError::SYSTEM_ERROR );
\r
3207 void RtApiAsio :: abortStream()
\r
3210 if ( stream_.state == STREAM_STOPPED ) {
\r
3211 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3212 error( RtError::WARNING );
\r
3216 // The following lines were commented-out because some behavior was
\r
3217 // noted where the device buffers need to be zeroed to avoid
\r
3218 // continuing sound, even when the device buffers are completely
\r
3219 // disposed. So now, calling abort is the same as calling stop.
\r
3220 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3221 // handle->drainCounter = 2;
\r
3225 // This function will be called by a spawned thread when the user
\r
3226 // callback function signals that the stream should be stopped or
\r
3227 // aborted. It is necessary to handle it this way because the
\r
3228 // callbackEvent() function must return before the ASIOStop()
\r
3229 // function will return.
\r
3230 extern "C" unsigned __stdcall asioStopStream( void *ptr )
\r
3232 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3233 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3235 object->stopStream();
\r
3237 _endthreadex( 0 );
\r
3241 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3243 if ( stream_.state == STREAM_STOPPED ) return SUCCESS;
\r
3244 if ( stopThreadCalled ) return SUCCESS;
\r
3245 if ( stream_.state == STREAM_CLOSED ) {
\r
3246 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3247 error( RtError::WARNING );
\r
3251 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3252 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3254 // Check if we were draining the stream and signal if finished.
\r
3255 if ( handle->drainCounter > 3 ) {
\r
3256 if ( handle->internalDrain == false )
\r
3257 SetEvent( handle->condition );
\r
3258 else { // spawn a thread to stop the stream
\r
3259 unsigned threadId;
\r
3260 stopThreadCalled = true;
\r
3261 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3262 &stream_.callbackInfo, 0, &threadId );
\r
3267 /*MUTEX_LOCK( &stream_.mutex );
\r
3269 // The state might change while waiting on a mutex.
\r
3270 if ( stream_.state == STREAM_STOPPED ) goto unlock; */
\r
3272 // Invoke user callback to get fresh output data UNLESS we are
\r
3273 // draining stream.
\r
3274 if ( handle->drainCounter == 0 ) {
\r
3275 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3276 double streamTime = getStreamTime();
\r
3277 RtAudioStreamStatus status = 0;
\r
3278 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3279 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3282 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3283 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3286 handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3287 stream_.bufferSize, streamTime, status, info->userData );
\r
3288 if ( handle->drainCounter == 2 ) {
\r
3289 // MUTEX_UNLOCK( &stream_.mutex );
\r
3291 unsigned threadId;
\r
3292 stopThreadCalled = true;
\r
3293 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3294 &stream_.callbackInfo, 0, &threadId );
\r
3297 else if ( handle->drainCounter == 1 )
\r
3298 handle->internalDrain = true;
\r
3301 unsigned int nChannels, bufferBytes, i, j;
\r
3302 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3303 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3305 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3307 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3309 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3310 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3311 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3315 else if ( stream_.doConvertBuffer[0] ) {
\r
3317 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3318 if ( stream_.doByteSwap[0] )
\r
3319 byteSwapBuffer( stream_.deviceBuffer,
\r
3320 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3321 stream_.deviceFormat[0] );
\r
3323 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3324 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3325 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3326 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3332 if ( stream_.doByteSwap[0] )
\r
3333 byteSwapBuffer( stream_.userBuffer[0],
\r
3334 stream_.bufferSize * stream_.nUserChannels[0],
\r
3335 stream_.userFormat );
\r
3337 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3338 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3339 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3340 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3345 if ( handle->drainCounter ) {
\r
3346 handle->drainCounter++;
\r
3351 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3353 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3355 if (stream_.doConvertBuffer[1]) {
\r
3357 // Always interleave ASIO input data.
\r
3358 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3359 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3360 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3361 handle->bufferInfos[i].buffers[bufferIndex],
\r
3365 if ( stream_.doByteSwap[1] )
\r
3366 byteSwapBuffer( stream_.deviceBuffer,
\r
3367 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3368 stream_.deviceFormat[1] );
\r
3369 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3373 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3374 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3375 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3376 handle->bufferInfos[i].buffers[bufferIndex],
\r
3381 if ( stream_.doByteSwap[1] )
\r
3382 byteSwapBuffer( stream_.userBuffer[1],
\r
3383 stream_.bufferSize * stream_.nUserChannels[1],
\r
3384 stream_.userFormat );
\r
3389 // The following call was suggested by Malte Clasen. While the API
\r
3390 // documentation indicates it should not be required, some device
\r
3391 // drivers apparently do not function correctly without it.
\r
3392 ASIOOutputReady();
\r
3394 // MUTEX_UNLOCK( &stream_.mutex );
\r
3396 RtApi::tickStreamTime();
\r
3400 void sampleRateChanged( ASIOSampleRate sRate )
\r
3402 // The ASIO documentation says that this usually only happens during
\r
3403 // external sync. Audio processing is not stopped by the driver,
\r
3404 // actual sample rate might not have even changed, maybe only the
\r
3405 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3408 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3410 object->stopStream();
\r
3412 catch ( RtError &exception ) {
\r
3413 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3417 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3420 long asioMessages( long selector, long value, void* message, double* opt )
\r
3424 switch( selector ) {
\r
3425 case kAsioSelectorSupported:
\r
3426 if ( value == kAsioResetRequest
\r
3427 || value == kAsioEngineVersion
\r
3428 || value == kAsioResyncRequest
\r
3429 || value == kAsioLatenciesChanged
\r
3430 // The following three were added for ASIO 2.0, you don't
\r
3431 // necessarily have to support them.
\r
3432 || value == kAsioSupportsTimeInfo
\r
3433 || value == kAsioSupportsTimeCode
\r
3434 || value == kAsioSupportsInputMonitor)
\r
3437 case kAsioResetRequest:
\r
3438 // Defer the task and perform the reset of the driver during the
\r
3439 // next "safe" situation. You cannot reset the driver right now,
\r
3440 // as this code is called from the driver. Reset the driver is
\r
3441 // done by completely destruct is. I.e. ASIOStop(),
\r
3442 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3444 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3447 case kAsioResyncRequest:
\r
3448 // This informs the application that the driver encountered some
\r
3449 // non-fatal data loss. It is used for synchronization purposes
\r
3450 // of different media. Added mainly to work around the Win16Mutex
\r
3451 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3452 // which could lose data because the Mutex was held too long by
\r
3453 // another thread. However a driver can issue it in other
\r
3454 // situations, too.
\r
3455 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3459 case kAsioLatenciesChanged:
\r
3460 // This will inform the host application that the drivers were
\r
3461 // latencies changed. Beware, it this does not mean that the
\r
3462 // buffer sizes have changed! You might need to update internal
\r
3464 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3467 case kAsioEngineVersion:
\r
3468 // Return the supported ASIO version of the host application. If
\r
3469 // a host application does not implement this selector, ASIO 1.0
\r
3470 // is assumed by the driver.
\r
3473 case kAsioSupportsTimeInfo:
\r
3474 // Informs the driver whether the
\r
3475 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3476 // For compatibility with ASIO 1.0 drivers the host application
\r
3477 // should always support the "old" bufferSwitch method, too.
\r
3480 case kAsioSupportsTimeCode:
\r
3481 // Informs the driver whether application is interested in time
\r
3482 // code info. If an application does not need to know about time
\r
3483 // code, the driver has less work to do.
\r
3490 static const char* getAsioErrorString( ASIOError result )
\r
3495 const char*message;
\r
3498 static Messages m[] =
\r
3500 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3501 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3502 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3503 { ASE_InvalidMode, "Invalid mode." },
\r
3504 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3505 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3506 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3509 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3510 if ( m[i].value == result ) return m[i].message;
\r
3512 return "Unknown error.";
\r
3514 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3518 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
3520 // Modified by Robin Davies, October 2005
\r
3521 // - Improvements to DirectX pointer chasing.
\r
3522 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
3523 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
3524 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
3525 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
3527 #include <dsound.h>
\r
3528 #include <assert.h>
\r
3529 #include <algorithm>
\r
3531 #if defined(__MINGW32__)
\r
3532 // missing from latest mingw winapi
\r
3533 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
3534 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
3535 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
3536 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
3543 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
3545 #ifdef _MSC_VER // if Microsoft Visual C++
\r
3546 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
3549 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
3551 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
3552 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
3553 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
3554 return pointer >= earlierPointer && pointer < laterPointer;
\r
3557 // A structure to hold various information related to the DirectSound
\r
3558 // API implementation.
\r
3560 unsigned int drainCounter; // Tracks callback counts when draining
\r
3561 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
3565 UINT bufferPointer[2];
\r
3566 DWORD dsBufferSize[2];
\r
3567 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
3571 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
3574 // Declarations for utility functions, callbacks, and structures
\r
3575 // specific to the DirectSound implementation.
\r
3576 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
3577 LPCTSTR description,
\r
3579 LPVOID lpContext );
\r
3581 static const char* getErrorString( int code );
\r
3583 extern "C" unsigned __stdcall callbackHandler( void *ptr );
\r
3592 : found(false) { validId[0] = false; validId[1] = false; }
\r
3595 std::vector< DsDevice > dsDevices;
\r
3597 RtApiDs :: RtApiDs()
\r
3599 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
3600 // accept whatever the mainline chose for a threading model.
\r
3601 coInitialized_ = false;
\r
3602 HRESULT hr = CoInitialize( NULL );
\r
3603 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
3606 RtApiDs :: ~RtApiDs()
\r
3608 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
3609 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
3612 // The DirectSound default output is always the first device.
\r
3613 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
3618 // The DirectSound default input is always the first input device,
\r
3619 // which is the first capture device enumerated.
\r
3620 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
3625 unsigned int RtApiDs :: getDeviceCount( void )
\r
3627 // Set query flag for previously found devices to false, so that we
\r
3628 // can check for any devices that have disappeared.
\r
3629 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3630 dsDevices[i].found = false;
\r
3632 // Query DirectSound devices.
\r
3633 bool isInput = false;
\r
3634 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3635 if ( FAILED( result ) ) {
\r
3636 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
3637 errorText_ = errorStream_.str();
\r
3638 error( RtError::WARNING );
\r
3641 // Query DirectSoundCapture devices.
\r
3643 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &isInput );
\r
3644 if ( FAILED( result ) ) {
\r
3645 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
3646 errorText_ = errorStream_.str();
\r
3647 error( RtError::WARNING );
\r
3650 // Clean out any devices that may have disappeared.
\r
3651 std::vector< int > indices;
\r
3652 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
3653 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
3654 unsigned int nErased = 0;
\r
3655 for ( unsigned int i=0; i<indices.size(); i++ )
\r
3656 dsDevices.erase( dsDevices.begin()-nErased++ );
\r
3658 return dsDevices.size();
\r
// Probe a single DirectSound device (by index into dsDevices) and fill
// an RtAudio::DeviceInfo: channel counts, supported sample rates, and
// native sample formats.  Failures are warnings; info.probed stays false.
// NOTE(review): several lines are missing from this chunk (the call that
// forces a device query near 3668, various closing braces / returns, the
// "probeInput:" label targeted by the goto below, and an apparent
// input->Release() at 3744) -- confirm against the canonical file.
3661 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
3663 RtAudio::DeviceInfo info;
\r
3664 info.probed = false;
\r
3666 if ( dsDevices.size() == 0 ) {
\r
3667 // Force a query of all devices
\r
3669 if ( dsDevices.size() == 0 ) {
\r
3670 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
3671 error( RtError::INVALID_USE );
\r
3675 if ( device >= dsDevices.size() ) {
\r
3676 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
3677 error( RtError::INVALID_USE );
\r
// Device has no playback GUID: skip straight to the capture probe.
3681 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
3683 LPDIRECTSOUND output;
\r
3685 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3686 if ( FAILED( result ) ) {
\r
3687 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3688 errorText_ = errorStream_.str();
\r
3689 error( RtError::WARNING );
\r
3693 outCaps.dwSize = sizeof( outCaps );
\r
3694 result = output->GetCaps( &outCaps );
\r
3695 if ( FAILED( result ) ) {
\r
3696 output->Release();
\r
3697 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
3698 errorText_ = errorStream_.str();
\r
3699 error( RtError::WARNING );
\r
3703 // Get output channel information.
\r
3704 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
3706 // Get sample rate information.
\r
3707 info.sampleRates.clear();
\r
// Keep only the candidate rates inside the device's secondary-buffer
// rate range.
3708 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
3709 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
3710 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
3711 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
3714 // Get format information.
\r
3715 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3716 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3718 output->Release();
\r
3720 if ( getDefaultOutputDevice() == device )
\r
3721 info.isDefaultOutput = true;
\r
// No capture GUID for this device: it is output-only, we are done.
3723 if ( dsDevices[ device ].validId[1] == false ) {
\r
3724 info.name = dsDevices[ device ].name;
\r
3725 info.probed = true;
\r
3731 LPDIRECTSOUNDCAPTURE input;
\r
3732 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
3733 if ( FAILED( result ) ) {
\r
3734 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
3735 errorText_ = errorStream_.str();
\r
3736 error( RtError::WARNING );
\r
3741 inCaps.dwSize = sizeof( inCaps );
\r
3742 result = input->GetCaps( &inCaps );
\r
3743 if ( FAILED( result ) ) {
\r
3745 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
3746 errorText_ = errorStream_.str();
\r
3747 error( RtError::WARNING );
\r
3751 // Get input channel information.
\r
3752 info.inputChannels = inCaps.dwChannels;
\r
3754 // Get sample rate and format information.
\r
3755 std::vector<unsigned int> rates;
\r
// DirectSoundCapture reports supported formats as WAVE_FORMAT_* bits;
// stereo (xS..) and mono (xM..) variants are decoded separately below.
3756 if ( inCaps.dwChannels >= 2 ) {
\r
3757 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3758 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3759 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3760 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3761 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3762 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3763 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3764 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
// Prefer 16-bit rates when available; fall back to the 8-bit set.
3766 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3767 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
3768 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
3769 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
3770 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
3772 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3773 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
3774 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
3775 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
3776 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
3779 else if ( inCaps.dwChannels == 1 ) {
\r
3780 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3781 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3782 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3783 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
3784 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3785 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3786 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3787 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
3789 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
3790 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
3791 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
3792 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
3793 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
3795 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
3796 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
3797 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
3798 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
3799 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
3802 else info.inputChannels = 0; // technically, this would be an error
\r
3806 if ( info.inputChannels == 0 ) return info;
\r
3808 // Copy the supported rates to the info structure but avoid duplication.
\r
3810 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
3812 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
3813 if ( rates[i] == info.sampleRates[j] ) {
\r
3818 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
3820 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
3822 // If device opens for both playback and capture, we determine the channels.
\r
3823 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
3824 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
3826 if ( device == 0 ) info.isDefaultInput = true;
\r
3828 // Copy name and return.
\r
3829 info.name = dsDevices[ device ].name;
\r
3830 info.probed = true;
\r
// Open one direction (OUTPUT or INPUT) of a DirectSound stream on the
// given device: validate the request, create the DirectSound / capture
// object, create and zero the device buffer, size the internal buffers,
// allocate the DsHandle, and start the callback thread on first open.
// Returns SUCCESS/FAILURE (the return statements and the trailing
// "error:" cleanup label are on lines missing from this chunk --
// NOTE(review): confirm against the canonical file).
3834 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
3835 unsigned int firstChannel, unsigned int sampleRate,
\r
3836 RtAudioFormat format, unsigned int *bufferSize,
\r
3837 RtAudio::StreamOptions *options, const std::string &deviceName )
\r
// DirectSound is limited to mono/stereo per device.
3839 if ( channels + firstChannel > 2 ) {
\r
3840 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
\r
3844 unsigned int nDevices = dsDevices.size();
\r
3845 if ( nDevices == 0 ) {
\r
3846 // This should not happen because a check is made before this function is called.
\r
3847 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
\r
3851 if ( device >= nDevices ) {
\r
3852 // This should not happen because a check is made before this function is called.
\r
3853 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
\r
// The requested direction must have a valid GUID for this device.
3857 if ( mode == OUTPUT ) {
\r
3858 if ( dsDevices[ device ].validId[0] == false ) {
\r
3859 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
\r
3860 errorText_ = errorStream_.str();
\r
3864 else { // mode == INPUT
\r
3865 if ( dsDevices[ device ].validId[1] == false ) {
\r
3866 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
\r
3867 errorText_ = errorStream_.str();
\r
3872 // According to a note in PortAudio, using GetDesktopWindow()
\r
3873 // instead of GetForegroundWindow() is supposed to avoid problems
\r
3874 // that occur when the application's window is not the foreground
\r
3875 // window. Also, if the application window closes before the
\r
3876 // DirectSound buffer, DirectSound can crash. In the past, I had
\r
3877 // problems when using GetDesktopWindow() but it seems fine now
\r
3878 // (January 2010). I'll leave it commented here.
\r
3879 // HWND hWnd = GetForegroundWindow();
\r
3880 HWND hWnd = GetDesktopWindow();
\r
3882 // Check the numberOfBuffers parameter and limit the lowest value to
\r
3883 // two. This is a judgement call and a value of two is probably too
\r
3884 // low for capture, but it should work for playback.
\r
3886 if ( options ) nBuffers = options->numberOfBuffers;
\r
3887 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
\r
3888 if ( nBuffers < 2 ) nBuffers = 3;
\r
3890 // Check the lower range of the user-specified buffer size and set
\r
3891 // (arbitrarily) to a lower bound of 32.
\r
3892 if ( *bufferSize < 32 ) *bufferSize = 32;
\r
3894 // Create the wave format structure. The data format setting will
\r
3895 // be determined later.
\r
3896 WAVEFORMATEX waveFormat;
\r
3897 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
\r
3898 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
\r
3899 waveFormat.nChannels = channels + firstChannel;
\r
3900 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
\r
3902 // Determine the device buffer size. By default, we'll use the value
\r
3903 // defined above (32K), but we will grow it to make allowances for
\r
3904 // very large software buffer sizes.
\r
3905 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;;
\r
3906 DWORD dsPointerLeadTime = 0;
\r
3908 void *ohandle = 0, *bhandle = 0;
\r
3910 if ( mode == OUTPUT ) {
\r
3912 LPDIRECTSOUND output;
\r
3913 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
3914 if ( FAILED( result ) ) {
\r
3915 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
3916 errorText_ = errorStream_.str();
\r
3921 outCaps.dwSize = sizeof( outCaps );
\r
3922 result = output->GetCaps( &outCaps );
\r
3923 if ( FAILED( result ) ) {
\r
3924 output->Release();
\r
3925 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
\r
3926 errorText_ = errorStream_.str();
\r
3930 // Check channel information.
\r
3931 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
\r
3932 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
\r
3933 errorText_ = errorStream_.str();
\r
3937 // Check format information. Use 16-bit format unless not
\r
3938 // supported or user requests 8-bit.
\r
3939 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
\r
3940 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
\r
3941 waveFormat.wBitsPerSample = 16;
\r
3942 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
3945 waveFormat.wBitsPerSample = 8;
\r
3946 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
3948 stream_.userFormat = format;
\r
3950 // Update wave format structure and buffer information.
\r
3951 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
3952 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
3953 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
3955 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
3956 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
3957 dsBufferSize *= 2;
\r
3959 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
\r
3960 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
\r
3961 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
\r
3962 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
\r
3963 if ( FAILED( result ) ) {
\r
3964 output->Release();
\r
3965 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
\r
3966 errorText_ = errorStream_.str();
\r
3970 // Even though we will write to the secondary buffer, we need to
\r
3971 // access the primary buffer to set the correct output format
\r
3972 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
\r
3973 // buffer description.
\r
3974 DSBUFFERDESC bufferDescription;
\r
3975 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
3976 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
3977 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
\r
3979 // Obtain the primary buffer
\r
3980 LPDIRECTSOUNDBUFFER buffer;
\r
3981 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
3982 if ( FAILED( result ) ) {
\r
3983 output->Release();
\r
3984 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
\r
3985 errorText_ = errorStream_.str();
\r
3989 // Set the primary DS buffer sound format.
\r
3990 result = buffer->SetFormat( &waveFormat );
\r
3991 if ( FAILED( result ) ) {
\r
3992 output->Release();
\r
3993 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
\r
3994 errorText_ = errorStream_.str();
\r
3998 // Setup the secondary DS buffer description.
\r
3999 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
\r
4000 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
\r
4001 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4002 DSBCAPS_GLOBALFOCUS |
\r
4003 DSBCAPS_GETCURRENTPOSITION2 |
\r
4004 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
\r
4005 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4006 bufferDescription.lpwfxFormat = &waveFormat;
\r
4008 // Try to create the secondary DS buffer. If that doesn't work,
\r
4009 // try to use software mixing. Otherwise, there's a problem.
\r
4010 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4011 if ( FAILED( result ) ) {
\r
4012 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
\r
4013 DSBCAPS_GLOBALFOCUS |
\r
4014 DSBCAPS_GETCURRENTPOSITION2 |
\r
4015 DSBCAPS_LOCSOFTWARE ); // Force software mixing
\r
4016 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
\r
4017 if ( FAILED( result ) ) {
\r
4018 output->Release();
\r
4019 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
\r
4020 errorText_ = errorStream_.str();
\r
4025 // Get the buffer size ... might be different from what we specified.
\r
// NOTE(review): the "DSBCAPS dsbcaps;" declaration (original 4026) is
// missing from this chunk.
4027 dsbcaps.dwSize = sizeof( DSBCAPS );
\r
4028 result = buffer->GetCaps( &dsbcaps );
\r
4029 if ( FAILED( result ) ) {
\r
4030 output->Release();
\r
4031 buffer->Release();
\r
4032 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4033 errorText_ = errorStream_.str();
\r
4037 dsBufferSize = dsbcaps.dwBufferBytes;
\r
4039 // Lock the DS buffer
\r
// NOTE(review): the audioPtr/dataLen declarations (originals 4040-4041)
// are missing from this chunk.
4042 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4043 if ( FAILED( result ) ) {
\r
4044 output->Release();
\r
4045 buffer->Release();
\r
4046 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
\r
4047 errorText_ = errorStream_.str();
\r
4051 // Zero the DS buffer
\r
4052 ZeroMemory( audioPtr, dataLen );
\r
4054 // Unlock the DS buffer
\r
4055 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4056 if ( FAILED( result ) ) {
\r
4057 output->Release();
\r
4058 buffer->Release();
\r
4059 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
\r
4060 errorText_ = errorStream_.str();
\r
// Stash the COM object and buffer as opaque handles for the DsHandle.
4064 ohandle = (void *) output;
\r
4065 bhandle = (void *) buffer;
\r
4068 if ( mode == INPUT ) {
\r
4070 LPDIRECTSOUNDCAPTURE input;
\r
4071 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
4072 if ( FAILED( result ) ) {
\r
4073 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
4074 errorText_ = errorStream_.str();
\r
4079 inCaps.dwSize = sizeof( inCaps );
\r
4080 result = input->GetCaps( &inCaps );
\r
4081 if ( FAILED( result ) ) {
\r
4083 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
\r
4084 errorText_ = errorStream_.str();
\r
4088 // Check channel information.
\r
4089 if ( inCaps.dwChannels < channels + firstChannel ) {
\r
4090 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
\r
4094 // Check format information. Use 16-bit format unless user
\r
4095 // requests 8-bit.
\r
4096 DWORD deviceFormats;
\r
4097 if ( channels + firstChannel == 2 ) {
\r
4098 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
\r
4099 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4100 waveFormat.wBitsPerSample = 8;
\r
4101 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4103 else { // assume 16-bit is supported
\r
4104 waveFormat.wBitsPerSample = 16;
\r
4105 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4108 else { // channel == 1
\r
4109 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
\r
4110 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
\r
4111 waveFormat.wBitsPerSample = 8;
\r
4112 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
4114 else { // assume 16-bit is supported
\r
4115 waveFormat.wBitsPerSample = 16;
\r
4116 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
4119 stream_.userFormat = format;
\r
4121 // Update wave format structure and buffer information.
\r
4122 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
\r
4123 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
\r
4124 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
\r
4126 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
\r
4127 while ( dsPointerLeadTime * 2U > dsBufferSize )
\r
4128 dsBufferSize *= 2;
\r
4130 // Setup the secondary DS buffer description.
\r
4131 DSCBUFFERDESC bufferDescription;
\r
4132 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
\r
4133 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
\r
4134 bufferDescription.dwFlags = 0;
\r
4135 bufferDescription.dwReserved = 0;
\r
4136 bufferDescription.dwBufferBytes = dsBufferSize;
\r
4137 bufferDescription.lpwfxFormat = &waveFormat;
\r
4139 // Create the capture buffer.
\r
4140 LPDIRECTSOUNDCAPTUREBUFFER buffer;
\r
4141 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
\r
4142 if ( FAILED( result ) ) {
\r
4144 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
\r
4145 errorText_ = errorStream_.str();
\r
4149 // Get the buffer size ... might be different from what we specified.
\r
4150 DSCBCAPS dscbcaps;
\r
4151 dscbcaps.dwSize = sizeof( DSCBCAPS );
\r
4152 result = buffer->GetCaps( &dscbcaps );
\r
4153 if ( FAILED( result ) ) {
\r
4155 buffer->Release();
\r
4156 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
\r
4157 errorText_ = errorStream_.str();
\r
4161 dsBufferSize = dscbcaps.dwBufferBytes;
\r
4163 // NOTE: We could have a problem here if this is a duplex stream
\r
4164 // and the play and capture hardware buffer sizes are different
\r
4165 // (I'm actually not sure if that is a problem or not).
\r
4166 // Currently, we are not verifying that.
\r
4168 // Lock the capture buffer
\r
4171 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
\r
4172 if ( FAILED( result ) ) {
\r
4174 buffer->Release();
\r
4175 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
\r
4176 errorText_ = errorStream_.str();
\r
4180 // Zero the buffer
\r
4181 ZeroMemory( audioPtr, dataLen );
\r
4183 // Unlock the buffer
\r
4184 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
\r
4185 if ( FAILED( result ) ) {
\r
4187 buffer->Release();
\r
4188 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
\r
4189 errorText_ = errorStream_.str();
\r
4193 ohandle = (void *) input;
\r
4194 bhandle = (void *) buffer;
\r
4197 // Set various stream parameters
\r
4198 DsHandle *handle = 0;
\r
4199 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
4200 stream_.nUserChannels[mode] = channels;
\r
4201 stream_.bufferSize = *bufferSize;
\r
4202 stream_.channelOffset[mode] = firstChannel;
\r
4203 stream_.deviceInterleaved[mode] = true;
\r
4204 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
4205 else stream_.userInterleaved = true;
\r
4207 // Set flag for buffer conversion
\r
// Conversion is needed whenever user and device differ in channel
// count, sample format, or interleaving.
4208 stream_.doConvertBuffer[mode] = false;
\r
4209 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
\r
4210 stream_.doConvertBuffer[mode] = true;
\r
4211 if (stream_.userFormat != stream_.deviceFormat[mode])
\r
4212 stream_.doConvertBuffer[mode] = true;
\r
4213 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4214 stream_.nUserChannels[mode] > 1 )
\r
4215 stream_.doConvertBuffer[mode] = true;
\r
4217 // Allocate necessary internal buffers
\r
4218 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
4219 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
4220 if ( stream_.userBuffer[mode] == NULL ) {
\r
4221 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
\r
4225 if ( stream_.doConvertBuffer[mode] ) {
\r
4227 bool makeBuffer = true;
\r
4228 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
// In duplex mode, reuse the existing output device buffer if it is
// already big enough for the input side.
4229 if ( mode == INPUT ) {
\r
4230 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
4231 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
4232 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
\r
4236 if ( makeBuffer ) {
\r
4237 bufferBytes *= *bufferSize;
\r
4238 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
4239 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
4240 if ( stream_.deviceBuffer == NULL ) {
\r
4241 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
\r
4247 // Allocate our DsHandle structures for the stream.
\r
4248 if ( stream_.apiHandle == 0 ) {
\r
4250 handle = new DsHandle;
\r
// NOTE(review): error message says "AsioHandle" but this allocates a
// DsHandle -- apparent copy/paste slip retained verbatim here.
4252 catch ( std::bad_alloc& ) {
\r
4253 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
\r
4257 // Create a manual-reset event.
\r
4258 handle->condition = CreateEvent( NULL, // no security
\r
4259 TRUE, // manual-reset
\r
4260 FALSE, // non-signaled initially
\r
4261 NULL ); // unnamed
\r
4262 stream_.apiHandle = (void *) handle;
\r
4265 handle = (DsHandle *) stream_.apiHandle;
\r
4266 handle->id[mode] = ohandle;
\r
4267 handle->buffer[mode] = bhandle;
\r
4268 handle->dsBufferSize[mode] = dsBufferSize;
\r
4269 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
\r
4271 stream_.device[mode] = device;
\r
4272 stream_.state = STREAM_STOPPED;
\r
4273 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
4274 // We had already set up an output stream.
\r
4275 stream_.mode = DUPLEX;
\r
4277 stream_.mode = mode;
\r
4278 stream_.nBuffers = nBuffers;
\r
4279 stream_.sampleRate = sampleRate;
\r
4281 // Setup the buffer conversion information structure.
\r
4282 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
4284 // Setup the callback thread.
\r
// Only started once per stream; a second (duplex) open reuses it.
4285 if ( stream_.callbackInfo.isRunning == false ) {
\r
4286 unsigned threadId;
\r
4287 stream_.callbackInfo.isRunning = true;
\r
4288 stream_.callbackInfo.object = (void *) this;
\r
4289 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
\r
4290 &stream_.callbackInfo, 0, &threadId );
\r
4291 if ( stream_.callbackInfo.thread == 0 ) {
\r
4292 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
\r
4296 // Boost DS thread priority
\r
4297 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
\r
// Error cleanup (the "error:" label itself is on a missing line):
// release any COM objects and buffers, close the event, and free the
// user/device buffers allocated above.
4303 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4304 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4305 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4306 if ( buffer ) buffer->Release();
\r
4307 object->Release();
\r
4309 if ( handle->buffer[1] ) {
\r
4310 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4311 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4312 if ( buffer ) buffer->Release();
\r
4313 object->Release();
\r
4315 CloseHandle( handle->condition );
\r
4317 stream_.apiHandle = 0;
\r
4320 for ( int i=0; i<2; i++ ) {
\r
4321 if ( stream_.userBuffer[i] ) {
\r
4322 free( stream_.userBuffer[i] );
\r
4323 stream_.userBuffer[i] = 0;
\r
4327 if ( stream_.deviceBuffer ) {
\r
4328 free( stream_.deviceBuffer );
\r
4329 stream_.deviceBuffer = 0;
\r
// Close the open stream: stop the callback thread (by clearing its
// isRunning flag and joining), release the DirectSound COM objects and
// buffers for both directions, close the condition event, and free the
// internal user/device buffers.
4335 void RtApiDs :: closeStream()
\r
4337 if ( stream_.state == STREAM_CLOSED ) {
\r
4338 errorText_ = "RtApiDs::closeStream(): no open stream to close!";
\r
4339 error( RtError::WARNING );
\r
4343 // Stop the callback thread.
\r
// callbackHandler polls isRunning; clearing it makes the thread exit,
// then we join and close the thread handle.
4344 stream_.callbackInfo.isRunning = false;
\r
4345 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
\r
4346 CloseHandle( (HANDLE) stream_.callbackInfo.thread );
\r
4348 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4350 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
\r
4351 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
\r
4352 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4355 buffer->Release();
\r
4357 object->Release();
\r
4359 if ( handle->buffer[1] ) {
\r
4360 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
\r
4361 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4364 buffer->Release();
\r
4366 object->Release();
\r
4368 CloseHandle( handle->condition );
\r
4370 stream_.apiHandle = 0;
\r
4373 for ( int i=0; i<2; i++ ) {
\r
4374 if ( stream_.userBuffer[i] ) {
\r
4375 free( stream_.userBuffer[i] );
\r
4376 stream_.userBuffer[i] = 0;
\r
4380 if ( stream_.deviceBuffer ) {
\r
4381 free( stream_.deviceBuffer );
\r
4382 stream_.deviceBuffer = 0;
\r
4385 stream_.mode = UNINITIALIZED;
\r
4386 stream_.state = STREAM_CLOSED;
\r
// Start the stream: raise the scheduler resolution, arm the drain/xrun
// bookkeeping, start the playback buffer looping and/or the capture
// buffer, and mark the stream running.  Any DirectSound failure is
// reported via error( RtError::SYSTEM_ERROR ) at the end.
4389 void RtApiDs :: startStream()
\r
4392 if ( stream_.state == STREAM_RUNNING ) {
\r
4393 errorText_ = "RtApiDs::startStream(): the stream is already running!";
\r
4394 error( RtError::WARNING );
\r
4398 //MUTEX_LOCK( &stream_.mutex );
\r
4400 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4402 // Increase scheduler frequency on lesser windows (a side-effect of
\r
4403 // increasing timer accuracy). On greater windows (Win2K or later),
\r
4404 // this is already in effect.
\r
4405 timeBeginPeriod( 1 );
\r
4407 buffersRolling = false;
\r
4408 duplexPrerollBytes = 0;
\r
4410 if ( stream_.mode == DUPLEX ) {
\r
4411 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
\r
4412 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
\r
4415 HRESULT result = 0;
\r
4416 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
// Playback buffer loops continuously; the callback thread writes ahead
// of the play cursor.
4418 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
4419 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
\r
4420 if ( FAILED( result ) ) {
\r
4421 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
\r
4422 errorText_ = errorStream_.str();
\r
4427 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
4429 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
4430 result = buffer->Start( DSCBSTART_LOOPING );
\r
4431 if ( FAILED( result ) ) {
\r
4432 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
\r
4433 errorText_ = errorStream_.str();
\r
// Reset drain state and the stop-synchronization event before going live.
4438 handle->drainCounter = 0;
\r
4439 handle->internalDrain = false;
\r
4440 ResetEvent( handle->condition );
\r
4441 stream_.state = STREAM_RUNNING;
\r
4444 // MUTEX_UNLOCK( &stream_.mutex );
\r
4446 if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
// Stops the DirectSound stream.  For output, first performs a drain
// handshake with callbackEvent() (drainCounter == 0 means no drain was
// requested yet: request one and block on the condition event), then
// Stop()s the playback buffer, zeroes it so a restart plays silence,
// and rewinds the write offset.  For input, Stop()s and clears the
// capture buffer likewise.  Finally reverts the timer resolution and
// raises SYSTEM_ERROR if any DirectSound call failed.
// NOTE(review): this chunk is a damaged extraction -- the original
// source line numbers are fused into each line and several lines
// (returns, closing braces, the audioPtr/dataLen declarations) are
// missing, so the text below is not compilable as-is.
4449 void RtApiDs :: stopStream()

4452   if ( stream_.state == STREAM_STOPPED ) {

4453     errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

4454     error( RtError::WARNING );

4459   MUTEX_LOCK( &stream_.mutex );

// Re-check under the lock: another thread may have stopped the stream
// while we were waiting on the mutex.
4461   if ( stream_.state == STREAM_STOPPED ) {

4462     MUTEX_UNLOCK( &stream_.mutex );

4467   HRESULT result = 0;

4470   DsHandle *handle = (DsHandle *) stream_.apiHandle;

4471   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// Request a drain (2) and wait for callbackEvent() to SetEvent() the
// condition once the remaining buffers have played out.
4472     if ( handle->drainCounter == 0 ) {

4473       handle->drainCounter = 2;

4474       // MUTEX_UNLOCK( &stream_.mutex );

4475       WaitForSingleObject( handle->condition, INFINITE );  // block until signaled

4476       //ResetEvent( handle->condition );

4477       // MUTEX_LOCK( &stream_.mutex );

4480     stream_.state = STREAM_STOPPED;

4482     // Stop the buffer and clear memory

4483     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4484     result = buffer->Stop();

4485     if ( FAILED( result ) ) {

4486       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

4487       errorText_ = errorStream_.str();

4491     // Lock the buffer and clear it so that if we start to play again,

4492     // we won't have old data playing.

4493     result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

4494     if ( FAILED( result ) ) {

4495       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

4496       errorText_ = errorStream_.str();

4500     // Zero the DS buffer

4501     ZeroMemory( audioPtr, dataLen );

4503     // Unlock the DS buffer

4504     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4505     if ( FAILED( result ) ) {

4506       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

4507       errorText_ = errorStream_.str();

4511     // If we start playing again, we must begin at beginning of buffer.

4512     handle->bufferPointer[0] = 0;

4515   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

4516     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4520     stream_.state = STREAM_STOPPED;

4522     result = buffer->Stop();

4523     if ( FAILED( result ) ) {

4524       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

4525       errorText_ = errorStream_.str();

4529     // Lock the buffer and clear it so that if we start to play again,

4530     // we won't have old data playing.

4531     result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

4532     if ( FAILED( result ) ) {

4533       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

4534       errorText_ = errorStream_.str();

4538     // Zero the DS buffer

4539     ZeroMemory( audioPtr, dataLen );

4541     // Unlock the DS buffer

4542     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

4543     if ( FAILED( result ) ) {

4544       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

4545       errorText_ = errorStream_.str();

4549     // If we start recording again, we must begin at beginning of buffer.

4550     handle->bufferPointer[1] = 0;

4554   timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

4555   // MUTEX_UNLOCK( &stream_.mutex );

4557   if ( FAILED( result ) ) error( RtError::SYSTEM_ERROR );
\r
4560 void RtApiDs :: abortStream()
\r
4563 if ( stream_.state == STREAM_STOPPED ) {
\r
4564 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
\r
4565 error( RtError::WARNING );
\r
4569 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
4570 handle->drainCounter = 2;
\r
// One service iteration of the DirectSound callback thread: invokes the
// user callback for fresh output data (unless draining), then copies the
// converted output into the playback ring buffer and/or pulls captured
// audio out of the capture ring buffer, handling wrap-around, forbidden
// zones, and under/overrun resynchronization.
// NOTE(review): this chunk is a damaged extraction -- the original
// source line numbers are fused into each line; several lines (braces,
// returns, 'else' lines, declarations of 'result', 'buffer' and
// 'bufferBytes') are missing; and "&current..." was mis-encoded as
// "¤t..." (HTML entity &curren;).  Restore those before compiling.
4575 void RtApiDs :: callbackEvent()

// A stopped stream is idle-polled rather than busy-waited.
4577   if ( stream_.state == STREAM_STOPPED ) {

4578     Sleep( 50 ); // sleep 50 milliseconds

4582   if ( stream_.state == STREAM_CLOSED ) {

4583     errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";

4584     error( RtError::WARNING );

4588   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;

4589   DsHandle *handle = (DsHandle *) stream_.apiHandle;

4591   // Check if we were draining the stream and signal is finished.

// drainCounter is incremented once per iteration while draining (see
// below); once it exceeds nBuffers + 2 the queued audio has played out.
4592   if ( handle->drainCounter > stream_.nBuffers + 2 ) {

// Only an externally requested drain (stopStream) is waiting on the
// condition event; an internal drain (callback returned 1) is not.
4593     if ( handle->internalDrain == false )

4594       SetEvent( handle->condition );

4601   MUTEX_LOCK( &stream_.mutex );

4603   // The state might change while waiting on a mutex.

4604   if ( stream_.state == STREAM_STOPPED ) {

4605     MUTEX_UNLOCK( &stream_.mutex );

4610   // Invoke user callback to get fresh output data UNLESS we are

4611   // draining stream.

4612   if ( handle->drainCounter == 0 ) {

4613     RtAudioCallback callback = (RtAudioCallback) info->callback;

4614     double streamTime = getStreamTime();

4615     RtAudioStreamStatus status = 0;

// Report and clear any xrun flags recorded by earlier iterations.
4616     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {

4617       status |= RTAUDIO_OUTPUT_UNDERFLOW;

4618       handle->xrun[0] = false;

4620     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {

4621       status |= RTAUDIO_INPUT_OVERFLOW;

4622       handle->xrun[1] = false;

// The callback's return value doubles as a drain request:
// 0 = continue, 1 = drain then stop (internal), 2 = abort.
4624     handle->drainCounter = callback( stream_.userBuffer[0], stream_.userBuffer[1],

4625                                      stream_.bufferSize, streamTime, status, info->userData );

4626     if ( handle->drainCounter == 2 ) {

4627       // MUTEX_UNLOCK( &stream_.mutex );

4631     else if ( handle->drainCounter == 1 )

4632       handle->internalDrain = true;

4636   DWORD currentWritePointer, safeWritePointer;

4637   DWORD currentReadPointer, safeReadPointer;

4638   UINT nextWritePointer;

4640   LPVOID buffer1 = NULL;

4641   LPVOID buffer2 = NULL;

4642   DWORD bufferSize1 = 0;

4643   DWORD bufferSize2 = 0;

// First pass only: wait for the DS device pointers to start moving and
// derive our initial ring-buffer offsets from them.
4648   if ( buffersRolling == false ) {

4649     if ( stream_.mode == DUPLEX ) {

4650       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );

4652       // It takes a while for the devices to get rolling. As a result,

4653       // there's no guarantee that the capture and write device pointers

4654       // will move in lockstep. Wait here for both devices to start

4655       // rolling, and then set our buffer pointers accordingly.

4656       // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600

4657       // bytes later than the write buffer.

4659       // Stub: a serious risk of having a pre-emptive scheduling round

4660       // take place between the two GetCurrentPosition calls... but I'm

4661       // really not sure how to solve the problem. Temporarily boost to

4662       // Realtime priority, maybe; but I'm not sure what priority the

4663       // DirectSound service threads run at. We *should* be roughly

4664       // within a ms or so of correct.

4666       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4667       LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4669       DWORD startSafeWritePointer, startSafeReadPointer;

4671       result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );

4672       if ( FAILED( result ) ) {

4673         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4674         errorText_ = errorStream_.str();

4675         error( RtError::SYSTEM_ERROR );

4677       result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );

4678       if ( FAILED( result ) ) {

4679         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4680         errorText_ = errorStream_.str();

4681         error( RtError::SYSTEM_ERROR );

// Poll (loop lines missing here) until BOTH pointers have advanced.
4684       result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );

4685       if ( FAILED( result ) ) {

4686         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4687         errorText_ = errorStream_.str();

4688         error( RtError::SYSTEM_ERROR );

4690       result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );

4691       if ( FAILED( result ) ) {

4692         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4693         errorText_ = errorStream_.str();

4694         error( RtError::SYSTEM_ERROR );

4696       if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;

4700       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );

4702       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];

4703       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];

4704       handle->bufferPointer[1] = safeReadPointer;

4706     else if ( stream_.mode == OUTPUT ) {

4708       // Set the proper nextWritePosition after initial startup.

4709       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// NOTE(review): "¤tWritePointer" below is a mis-encoding of
// "&currentWritePointer" (HTML entity &curren; swallowed "&curr").
4710       result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );

4711       if ( FAILED( result ) ) {

4712         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4713         errorText_ = errorStream_.str();

4714         error( RtError::SYSTEM_ERROR );

4716       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];

4717       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];

4720     buffersRolling = true;

// ---- Playback side: feed the DS ring buffer ----
4723   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

4725     LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

4727     if ( handle->drainCounter > 1 ) { // write zeros to the output stream

4728       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];

4729       bufferBytes *= formatBytes( stream_.userFormat );

4730       memset( stream_.userBuffer[0], 0, bufferBytes );

4733     // Setup parameters and do buffer conversion if necessary.

4734     if ( stream_.doConvertBuffer[0] ) {

4735       buffer = stream_.deviceBuffer;

4736       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );

4737       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];

4738       bufferBytes *= formatBytes( stream_.deviceFormat[0] );

4741       buffer = stream_.userBuffer[0];

4742       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];

4743       bufferBytes *= formatBytes( stream_.userFormat );

4746     // No byte swapping necessary in DirectSound implementation.

4748     // Ahhh ... windoze. 16-bit data is signed but 8-bit data is

4749     // unsigned. So, we need to convert our signed 8-bit data here to

4751     if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )

4752       for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );

4754     DWORD dsBufferSize = handle->dsBufferSize[0];

4755     nextWritePointer = handle->bufferPointer[0];

4757     DWORD endWrite, leadPointer;

4759     // Find out where the read and "safe write" pointers are.

// NOTE(review): "¤tWritePointer" = mis-encoded "&currentWritePointer".
4760     result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );

4761     if ( FAILED( result ) ) {

4762       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";

4763       errorText_ = errorStream_.str();

4764       error( RtError::SYSTEM_ERROR );

4767     // We will copy our output buffer into the region between

4768     // safeWritePointer and leadPointer. If leadPointer is not

4769     // beyond the next endWrite position, wait until it is.

4770     leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];

4771     //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;

4772     if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;

4773     if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset

4774     endWrite = nextWritePointer + bufferBytes;

4776     // Check whether the entire write region is behind the play pointer.

4777     if ( leadPointer >= endWrite ) break;

4779     // If we are here, then we must wait until the leadPointer advances

4780     // beyond the end of our next write region. We use the

4781     // Sleep() function to suspend operation until that happens.

4782     double millis = ( endWrite - leadPointer ) * 1000.0;

4783     millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);

4784     if ( millis < 1.0 ) millis = 1.0;

4785     Sleep( (DWORD) millis );

4788     if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )

4789          || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {

4790       // We've strayed into the forbidden zone ... resync the read pointer.

4791       handle->xrun[0] = true;

4792       nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;

4793       if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;

4794       handle->bufferPointer[0] = nextWritePointer;

4795       endWrite = nextWritePointer + bufferBytes;

4798     // Lock free space in the buffer

4799     result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,

4800                              &bufferSize1, &buffer2, &bufferSize2, 0 );

4801     if ( FAILED( result ) ) {

4802       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";

4803       errorText_ = errorStream_.str();

4804       error( RtError::SYSTEM_ERROR );

4807     // Copy our buffer into the DS buffer

4808     CopyMemory( buffer1, buffer, bufferSize1 );

4809     if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );

4811     // Update our buffer offset and unlock sound buffer

// NOTE(review): Unlock's return value is not assigned to 'result', so
// the FAILED(result) test below re-checks the preceding Lock call.
4812     dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );

4813     if ( FAILED( result ) ) {

4814       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";

4815       errorText_ = errorStream_.str();

4816       error( RtError::SYSTEM_ERROR );

4818     nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;

4819     handle->bufferPointer[0] = nextWritePointer;

// While draining, count iterations so the drain-finished test at the
// top of this function can fire.
4821     if ( handle->drainCounter ) {

4822       handle->drainCounter++;

// ---- Capture side: drain the DS capture ring buffer ----
4827   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

4829     // Setup parameters.

4830     if ( stream_.doConvertBuffer[1] ) {

4831       buffer = stream_.deviceBuffer;

4832       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];

4833       bufferBytes *= formatBytes( stream_.deviceFormat[1] );

4836       buffer = stream_.userBuffer[1];

4837       bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];

4838       bufferBytes *= formatBytes( stream_.userFormat );

4841     LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

4842     long nextReadPointer = handle->bufferPointer[1];

4843     DWORD dsBufferSize = handle->dsBufferSize[1];

4845     // Find out where the write and "safe read" pointers are.

// NOTE(review): "¤tReadPointer" = mis-encoded "&currentReadPointer".
4846     result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );

4847     if ( FAILED( result ) ) {

4848       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4849       errorText_ = errorStream_.str();

4850       error( RtError::SYSTEM_ERROR );

4853     if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset

4854     DWORD endRead = nextReadPointer + bufferBytes;

4856     // Handling depends on whether we are INPUT or DUPLEX.

4857     // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,

4858     // then a wait here will drag the write pointers into the forbidden zone.

4860     // In DUPLEX mode, rather than wait, we will back off the read pointer until

4861     // it's in a safe position. This causes dropouts, but it seems to be the only

4862     // practical way to sync up the read and write pointers reliably, given the

4863     // the very complex relationship between phase and increment of the read and write

4866     // In order to minimize audible dropouts in DUPLEX mode, we will

4867     // provide a pre-roll period of 0.5 seconds in which we return

4868     // zeros from the read buffer while the pointers sync up.

4870     if ( stream_.mode == DUPLEX ) {

4871       if ( safeReadPointer < endRead ) {

4872         if ( duplexPrerollBytes <= 0 ) {

4873           // Pre-roll time over. Be more aggressive.

4874           int adjustment = endRead-safeReadPointer;

4876           handle->xrun[1] = true;

4878           // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,

4879           //   and perform fine adjustments later.

4880           // - small adjustments: back off by twice as much.

4881           if ( adjustment >= 2*bufferBytes )

4882             nextReadPointer = safeReadPointer-2*bufferBytes;

4884             nextReadPointer = safeReadPointer-bufferBytes-adjustment;

4886           if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;

4890           // In pre-roll time. Just do it.

4891           nextReadPointer = safeReadPointer - bufferBytes;

4892           while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;

4894         endRead = nextReadPointer + bufferBytes;

4897     else { // mode == INPUT

4898       while ( safeReadPointer < endRead ) {

4899         // See comments for playback.

4900         double millis = (endRead - safeReadPointer) * 1000.0;

4901         millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);

4902         if ( millis < 1.0 ) millis = 1.0;

4903         Sleep( (DWORD) millis );

4905         // Wake up and find out where we are now.

// NOTE(review): "¤tReadPointer" = mis-encoded "&currentReadPointer".
4906         result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );

4907         if ( FAILED( result ) ) {

4908           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";

4909           errorText_ = errorStream_.str();

4910           error( RtError::SYSTEM_ERROR );

4913         if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset

4917     // Lock free space in the buffer

4918     result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,

4919                              &bufferSize1, &buffer2, &bufferSize2, 0 );

4920     if ( FAILED( result ) ) {

4921       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";

4922       errorText_ = errorStream_.str();

4923       error( RtError::SYSTEM_ERROR );

// During the duplex pre-roll, hand the user zeros instead of the
// not-yet-synchronized capture data.
4926     if ( duplexPrerollBytes <= 0 ) {

4927       // Copy our buffer into the DS buffer

4928       CopyMemory( buffer, buffer1, bufferSize1 );

4929       if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );

4932       memset( buffer, 0, bufferSize1 );

4933       if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );

4934       duplexPrerollBytes -= bufferSize1 + bufferSize2;

4937     // Update our buffer offset and unlock sound buffer

4938     nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;

4939     dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );

4940     if ( FAILED( result ) ) {

4941       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";

4942       errorText_ = errorStream_.str();

4943       error( RtError::SYSTEM_ERROR );

4945     handle->bufferPointer[1] = nextReadPointer;

4947     // No byte swapping necessary in DirectSound implementation.

4949     // If necessary, convert 8-bit data from unsigned to signed.

4950     if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )

4951       for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );

4953     // Do buffer conversion if necessary.

4954     if ( stream_.doConvertBuffer[1] )

4955       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );

4959   // MUTEX_UNLOCK( &stream_.mutex );

4961   RtApi::tickStreamTime();
\r
4964 // Definitions for utility functions and callbacks
\r
4965 // specific to the DirectSound implementation.
\r
4967 extern "C" unsigned __stdcall callbackHandler( void *ptr )
\r
4969 CallbackInfo *info = (CallbackInfo *) ptr;
\r
4970 RtApiDs *object = (RtApiDs *) info->object;
\r
4971 bool* isRunning = &info->isRunning;
\r
4973 while ( *isRunning == true ) {
\r
4974 object->callbackEvent();
\r
4977 _endthreadex( 0 );
\r
4981 #include "tchar.h"
\r
4983 std::string convertTChar( LPCTSTR name )
\r
4985 #if defined( UNICODE ) || defined( _UNICODE )
\r
4986 int length = WideCharToMultiByte(CP_UTF8, 0, name, -1, NULL, 0, NULL, NULL);
\r
4987 std::string s( length, 0 );
\r
4988 length = WideCharToMultiByte(CP_UTF8, 0, name, wcslen(name), &s[0], length, NULL, NULL);
\r
4990 std::string s( name );
\r
// Device-enumeration callback (its signature matches the DSEnumCallback
// used with DirectSoundEnumerate / DirectSoundCaptureEnumerate).
// Probes the device identified by lpguid, normalizes the "Primary"
// names to "Default Device", and records or updates the device (name
// plus per-direction guid) in the global dsDevices list.  Returns TRUE
// so enumeration continues.  lpContext points to a bool that is true
// when enumerating capture devices, false for playback devices.
// NOTE(review): this chunk is a damaged extraction -- original line
// numbers are fused into each line and several lines (declarations of
// hr and caps, 'else' branches, returns and closing braces) are
// missing; the index-0/index-1 assignments below are presumably guarded
// by a missing 'if ( *isInput ) ... else ...' -- confirm when restoring.
4996 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,

4997                                           LPCTSTR description,

4999                                           LPVOID lpContext )

5001   bool *isInput = (bool *) lpContext;

5004   bool validDevice = false;

// Capture probe: device is valid if it reports channels and formats.
5005   if ( *isInput == true ) {

5007     LPDIRECTSOUNDCAPTURE object;

5009     hr = DirectSoundCaptureCreate( lpguid, &object, NULL );

5010     if ( hr != DS_OK ) return TRUE;

5012     caps.dwSize = sizeof(caps);

5013     hr = object->GetCaps( &caps );

5014     if ( hr == DS_OK ) {

5015       if ( caps.dwChannels > 0 && caps.dwFormats > 0 )

5016         validDevice = true;

5018     object->Release();

// Playback probe: device is valid if a primary mono/stereo buffer is
// supported.
5022     LPDIRECTSOUND object;

5023     hr = DirectSoundCreate( lpguid, &object, NULL );

5024     if ( hr != DS_OK ) return TRUE;

5026     caps.dwSize = sizeof(caps);

5027     hr = object->GetCaps( &caps );

5028     if ( hr == DS_OK ) {

5029       if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )

5030         validDevice = true;

5032     object->Release();

5035   // If good device, then save its name and guid.

5036   std::string name = convertTChar( description );

5037   if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )

5038     name = "Default Device";

5039   if ( validDevice ) {

// Update an already-known device in place (id[1]/validId[1] = capture,
// id[0]/validId[0] = playback, matching usage elsewhere in this file).
5040     for ( unsigned int i=0; i<dsDevices.size(); i++ ) {

5041       if ( dsDevices[i].name == name ) {

5042         dsDevices[i].found = true;

5044         dsDevices[i].id[1] = lpguid;

5045         dsDevices[i].validId[1] = true;

5048         dsDevices[i].id[0] = lpguid;

5049         dsDevices[i].validId[0] = true;

// Otherwise append a new DsDevice entry.
5056     device.name = name;

5057     device.found = true;

5059     device.id[1] = lpguid;

5060     device.validId[1] = true;

5063     device.id[0] = lpguid;

5064     device.validId[0] = true;

5066     dsDevices.push_back( device );
\r
5072 static const char* getErrorString( int code )
\r
5076 case DSERR_ALLOCATED:
\r
5077 return "Already allocated";
\r
5079 case DSERR_CONTROLUNAVAIL:
\r
5080 return "Control unavailable";
\r
5082 case DSERR_INVALIDPARAM:
\r
5083 return "Invalid parameter";
\r
5085 case DSERR_INVALIDCALL:
\r
5086 return "Invalid call";
\r
5088 case DSERR_GENERIC:
\r
5089 return "Generic error";
\r
5091 case DSERR_PRIOLEVELNEEDED:
\r
5092 return "Priority level needed";
\r
5094 case DSERR_OUTOFMEMORY:
\r
5095 return "Out of memory";
\r
5097 case DSERR_BADFORMAT:
\r
5098 return "The sample rate or the channel format is not supported";
\r
5100 case DSERR_UNSUPPORTED:
\r
5101 return "Not supported";
\r
5103 case DSERR_NODRIVER:
\r
5104 return "No driver";
\r
5106 case DSERR_ALREADYINITIALIZED:
\r
5107 return "Already initialized";
\r
5109 case DSERR_NOAGGREGATION:
\r
5110 return "No aggregation";
\r
5112 case DSERR_BUFFERLOST:
\r
5113 return "Buffer lost";
\r
5115 case DSERR_OTHERAPPHASPRIO:
\r
5116 return "Another application already has priority";
\r
5118 case DSERR_UNINITIALIZED:
\r
5119 return "Uninitialized";
\r
5122 return "DirectSound unknown error";
\r
5125 //******************** End of __WINDOWS_DS__ *********************//
\r
5129 #if defined(__LINUX_ALSA__)
\r
5131 #include <alsa/asoundlib.h>
\r
5132 #include <unistd.h>
\r
5134 // A structure to hold various information related to the ALSA API
\r
5135 // implementation.
\r
5136 struct AlsaHandle {
\r
5137 snd_pcm_t *handles[2];
\r
5138 bool synchronized;
\r
5140 pthread_cond_t runnable_cv;
\r
5144 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
5147 extern "C" void *alsaCallbackHandler( void * ptr );
\r
5149 RtApiAlsa :: RtApiAlsa()
\r
5151 // Nothing to do here.
\r
5154 RtApiAlsa :: ~RtApiAlsa()
\r
5156 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5159 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
5161 unsigned nDevices = 0;
\r
5162 int result, subdevice, card;
\r
5164 snd_ctl_t *handle;
\r
5166 // Count cards and devices
\r
5168 snd_card_next( &card );
\r
5169 while ( card >= 0 ) {
\r
5170 sprintf( name, "hw:%d", card );
\r
5171 result = snd_ctl_open( &handle, name, 0 );
\r
5172 if ( result < 0 ) {
\r
5173 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5174 errorText_ = errorStream_.str();
\r
5175 error( RtError::WARNING );
\r
5180 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
5181 if ( result < 0 ) {
\r
5182 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5183 errorText_ = errorStream_.str();
\r
5184 error( RtError::WARNING );
\r
5187 if ( subdevice < 0 )
\r
5192 snd_ctl_close( handle );
\r
5193 snd_card_next( &card );
\r
5199 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
5201 RtAudio::DeviceInfo info;
\r
5202 info.probed = false;
\r
5204 unsigned nDevices = 0;
\r
5205 int result, subdevice, card;
\r
5207 snd_ctl_t *chandle;
\r
5209 // Count cards and devices
\r
5211 snd_card_next( &card );
\r
5212 while ( card >= 0 ) {
\r
5213 sprintf( name, "hw:%d", card );
\r
5214 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5215 if ( result < 0 ) {
\r
5216 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5217 errorText_ = errorStream_.str();
\r
5218 error( RtError::WARNING );
\r
5223 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5224 if ( result < 0 ) {
\r
5225 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5226 errorText_ = errorStream_.str();
\r
5227 error( RtError::WARNING );
\r
5230 if ( subdevice < 0 ) break;
\r
5231 if ( nDevices == device ) {
\r
5232 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5238 snd_ctl_close( chandle );
\r
5239 snd_card_next( &card );
\r
5242 if ( nDevices == 0 ) {
\r
5243 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
5244 error( RtError::INVALID_USE );
\r
5247 if ( device >= nDevices ) {
\r
5248 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
5249 error( RtError::INVALID_USE );
\r
5254 // If a stream is already open, we cannot probe the stream devices.
\r
5255 // Thus, use the saved results.
\r
5256 if ( stream_.state != STREAM_CLOSED &&
\r
5257 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
5258 if ( device >= devices_.size() ) {
\r
5259 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
5260 error( RtError::WARNING );
\r
5263 return devices_[ device ];
\r
5266 int openMode = SND_PCM_ASYNC;
\r
5267 snd_pcm_stream_t stream;
\r
5268 snd_pcm_info_t *pcminfo;
\r
5269 snd_pcm_info_alloca( &pcminfo );
\r
5270 snd_pcm_t *phandle;
\r
5271 snd_pcm_hw_params_t *params;
\r
5272 snd_pcm_hw_params_alloca( ¶ms );
\r
5274 // First try for playback
\r
5275 stream = SND_PCM_STREAM_PLAYBACK;
\r
5276 snd_pcm_info_set_device( pcminfo, subdevice );
\r
5277 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
5278 snd_pcm_info_set_stream( pcminfo, stream );
\r
5280 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5281 if ( result < 0 ) {
\r
5282 // Device probably doesn't support playback.
\r
5283 goto captureProbe;
\r
5286 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
5287 if ( result < 0 ) {
\r
5288 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5289 errorText_ = errorStream_.str();
\r
5290 error( RtError::WARNING );
\r
5291 goto captureProbe;
\r
5294 // The device is open ... fill the parameter structure.
\r
5295 result = snd_pcm_hw_params_any( phandle, params );
\r
5296 if ( result < 0 ) {
\r
5297 snd_pcm_close( phandle );
\r
5298 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5299 errorText_ = errorStream_.str();
\r
5300 error( RtError::WARNING );
\r
5301 goto captureProbe;
\r
5304 // Get output channel information.
\r
5305 unsigned int value;
\r
5306 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5307 if ( result < 0 ) {
\r
5308 snd_pcm_close( phandle );
\r
5309 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
5310 errorText_ = errorStream_.str();
\r
5311 error( RtError::WARNING );
\r
5312 goto captureProbe;
\r
5314 info.outputChannels = value;
\r
5315 snd_pcm_close( phandle );
\r
5318 // Now try for capture
\r
5319 stream = SND_PCM_STREAM_CAPTURE;
\r
5320 snd_pcm_info_set_stream( pcminfo, stream );
\r
5322 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
5323 snd_ctl_close( chandle );
\r
5324 if ( result < 0 ) {
\r
5325 // Device probably doesn't support capture.
\r
5326 if ( info.outputChannels == 0 ) return info;
\r
5327 goto probeParameters;
\r
5330 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5331 if ( result < 0 ) {
\r
5332 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5333 errorText_ = errorStream_.str();
\r
5334 error( RtError::WARNING );
\r
5335 if ( info.outputChannels == 0 ) return info;
\r
5336 goto probeParameters;
\r
5339 // The device is open ... fill the parameter structure.
\r
5340 result = snd_pcm_hw_params_any( phandle, params );
\r
5341 if ( result < 0 ) {
\r
5342 snd_pcm_close( phandle );
\r
5343 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5344 errorText_ = errorStream_.str();
\r
5345 error( RtError::WARNING );
\r
5346 if ( info.outputChannels == 0 ) return info;
\r
5347 goto probeParameters;
\r
5350 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
5351 if ( result < 0 ) {
\r
5352 snd_pcm_close( phandle );
\r
5353 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
5354 errorText_ = errorStream_.str();
\r
5355 error( RtError::WARNING );
\r
5356 if ( info.outputChannels == 0 ) return info;
\r
5357 goto probeParameters;
\r
5359 info.inputChannels = value;
\r
5360 snd_pcm_close( phandle );
\r
5362 // If device opens for both playback and capture, we determine the channels.
\r
5363 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5364 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5366 // ALSA doesn't provide default devices so we'll use the first available one.
\r
5367 if ( device == 0 && info.outputChannels > 0 )
\r
5368 info.isDefaultOutput = true;
\r
5369 if ( device == 0 && info.inputChannels > 0 )
\r
5370 info.isDefaultInput = true;
\r
5373 // At this point, we just need to figure out the supported data
\r
5374 // formats and sample rates. We'll proceed by opening the device in
\r
5375 // the direction with the maximum number of channels, or playback if
\r
5376 // they are equal. This might limit our sample rate options, but so
\r
5379 if ( info.outputChannels >= info.inputChannels )
\r
5380 stream = SND_PCM_STREAM_PLAYBACK;
\r
5382 stream = SND_PCM_STREAM_CAPTURE;
\r
5383 snd_pcm_info_set_stream( pcminfo, stream );
\r
5385 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
5386 if ( result < 0 ) {
\r
5387 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5388 errorText_ = errorStream_.str();
\r
5389 error( RtError::WARNING );
\r
5393 // The device is open ... fill the parameter structure.
\r
5394 result = snd_pcm_hw_params_any( phandle, params );
\r
5395 if ( result < 0 ) {
\r
5396 snd_pcm_close( phandle );
\r
5397 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5398 errorText_ = errorStream_.str();
\r
5399 error( RtError::WARNING );
\r
5403 // Test our discrete set of sample rate values.
\r
5404 info.sampleRates.clear();
\r
5405 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
5406 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
5407 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
5409 if ( info.sampleRates.size() == 0 ) {
\r
5410 snd_pcm_close( phandle );
\r
5411 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
5412 errorText_ = errorStream_.str();
\r
5413 error( RtError::WARNING );
\r
5417 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
5418 snd_pcm_format_t format;
\r
5419 info.nativeFormats = 0;
\r
5420 format = SND_PCM_FORMAT_S8;
\r
5421 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5422 info.nativeFormats |= RTAUDIO_SINT8;
\r
5423 format = SND_PCM_FORMAT_S16;
\r
5424 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5425 info.nativeFormats |= RTAUDIO_SINT16;
\r
5426 format = SND_PCM_FORMAT_S24;
\r
5427 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5428 info.nativeFormats |= RTAUDIO_SINT24;
\r
5429 format = SND_PCM_FORMAT_S32;
\r
5430 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5431 info.nativeFormats |= RTAUDIO_SINT32;
\r
5432 format = SND_PCM_FORMAT_FLOAT;
\r
5433 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5434 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
5435 format = SND_PCM_FORMAT_FLOAT64;
\r
5436 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
5437 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
5439 // Check that we have at least one supported format
\r
5440 if ( info.nativeFormats == 0 ) {
\r
5441 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
5442 errorText_ = errorStream_.str();
\r
5443 error( RtError::WARNING );
\r
5447 // Get the device name
\r
5449 result = snd_card_get_name( card, &cardname );
\r
5450 if ( result >= 0 )
\r
5451 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
5454 // That's all ... close the device and return
\r
5455 snd_pcm_close( phandle );
\r
5456 info.probed = true;
\r
5460 void RtApiAlsa :: saveDeviceInfo( void )
\r
5464 unsigned int nDevices = getDeviceCount();
\r
5465 devices_.resize( nDevices );
\r
5466 for ( unsigned int i=0; i<nDevices; i++ )
\r
5467 devices_[i] = getDeviceInfo( i );
\r
5470 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
5471 unsigned int firstChannel, unsigned int sampleRate,
\r
5472 RtAudioFormat format, unsigned int *bufferSize,
\r
5473 RtAudio::StreamOptions *options, const std::string &deviceName )
\r
5476 #if defined(__RTAUDIO_DEBUG__)
\r
5477 snd_output_t *out;
\r
5478 snd_output_stdio_attach(&out, stderr, 0);
\r
5481 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
5483 unsigned nDevices = 0;
\r
5484 int result, subdevice, card;
\r
5486 snd_ctl_t *chandle;
\r
5488 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
5489 snprintf(name, sizeof(name), "%s", "default");
\r
5490 else if ( deviceName.size() > 0 )
\r
5491 snprintf(name, sizeof(name), "%s", deviceName.c_str());
\r
5493 // Count cards and devices
\r
5495 snd_card_next( &card );
\r
5496 while ( card >= 0 ) {
\r
5497 sprintf( name, "hw:%d", card );
\r
5498 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
5499 if ( result < 0 ) {
\r
5500 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
5501 errorText_ = errorStream_.str();
\r
5506 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
5507 if ( result < 0 ) break;
\r
5508 if ( subdevice < 0 ) break;
\r
5509 if ( nDevices == device ) {
\r
5510 sprintf( name, "hw:%d,%d", card, subdevice );
\r
5511 snd_ctl_close( chandle );
\r
5516 snd_ctl_close( chandle );
\r
5517 snd_card_next( &card );
\r
5520 if ( nDevices == 0 ) {
\r
5521 // This should not happen because a check is made before this function is called.
\r
5522 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
5526 if ( device >= nDevices ) {
\r
5527 // This should not happen because a check is made before this function is called.
\r
5528 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
5535 // The getDeviceInfo() function will not work for a device that is
\r
5536 // already open. Thus, we'll probe the system before opening a
\r
5537 // stream and save the results for use by getDeviceInfo().
\r
5538 // if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
5539 // this->saveDeviceInfo();
\r
5541 snd_pcm_stream_t stream;
\r
5542 if ( mode == OUTPUT )
\r
5543 stream = SND_PCM_STREAM_PLAYBACK;
\r
5545 stream = SND_PCM_STREAM_CAPTURE;
\r
5547 snd_pcm_t *phandle;
\r
5548 int openMode = SND_PCM_ASYNC;
\r
5549 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
5550 if ( result < 0 ) {
\r
5551 if ( mode == OUTPUT )
\r
5552 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
5554 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
5555 errorText_ = errorStream_.str();
\r
5559 // Fill the parameter structure.
\r
5560 snd_pcm_hw_params_t *hw_params;
\r
5561 snd_pcm_hw_params_alloca( &hw_params );
\r
5562 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
5563 if ( result < 0 ) {
\r
5564 snd_pcm_close( phandle );
\r
5565 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
5566 errorText_ = errorStream_.str();
\r
5570 #if defined(__RTAUDIO_DEBUG__)
\r
5571 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
5572 snd_pcm_hw_params_dump( hw_params, out );
\r
5575 // Set access ... check user preference.
\r
5576 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
5577 stream_.userInterleaved = false;
\r
5578 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5579 if ( result < 0 ) {
\r
5580 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5581 stream_.deviceInterleaved[mode] = true;
\r
5584 stream_.deviceInterleaved[mode] = false;
\r
5587 stream_.userInterleaved = true;
\r
5588 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
5589 if ( result < 0 ) {
\r
5590 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
5591 stream_.deviceInterleaved[mode] = false;
\r
5594 stream_.deviceInterleaved[mode] = true;
\r
5597 if ( result < 0 ) {
\r
5598 snd_pcm_close( phandle );
\r
5599 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
5600 errorText_ = errorStream_.str();
\r
5604 // Determine how to set the device format.
\r
5605 stream_.userFormat = format;
\r
5606 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
5608 if ( format == RTAUDIO_SINT8 )
\r
5609 deviceFormat = SND_PCM_FORMAT_S8;
\r
5610 else if ( format == RTAUDIO_SINT16 )
\r
5611 deviceFormat = SND_PCM_FORMAT_S16;
\r
5612 else if ( format == RTAUDIO_SINT24 )
\r
5613 deviceFormat = SND_PCM_FORMAT_S24;
\r
5614 else if ( format == RTAUDIO_SINT32 )
\r
5615 deviceFormat = SND_PCM_FORMAT_S32;
\r
5616 else if ( format == RTAUDIO_FLOAT32 )
\r
5617 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5618 else if ( format == RTAUDIO_FLOAT64 )
\r
5619 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5621 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
5622 stream_.deviceFormat[mode] = format;
\r
5626 // The user requested format is not natively supported by the device.
\r
5627 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
5628 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
5629 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
5633 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
5634 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5635 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
5639 deviceFormat = SND_PCM_FORMAT_S32;
\r
5640 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5641 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
5645 deviceFormat = SND_PCM_FORMAT_S24;
\r
5646 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5647 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
5651 deviceFormat = SND_PCM_FORMAT_S16;
\r
5652 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5653 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
5657 deviceFormat = SND_PCM_FORMAT_S8;
\r
5658 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
5659 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
5663 // If we get here, no supported format was found.
\r
5664 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
5665 errorText_ = errorStream_.str();
\r
5669 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
5670 if ( result < 0 ) {
\r
5671 snd_pcm_close( phandle );
\r
5672 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
5673 errorText_ = errorStream_.str();
\r
5677 // Determine whether byte-swaping is necessary.
\r
5678 stream_.doByteSwap[mode] = false;
\r
5679 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
5680 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
5681 if ( result == 0 )
\r
5682 stream_.doByteSwap[mode] = true;
\r
5683 else if (result < 0) {
\r
5684 snd_pcm_close( phandle );
\r
5685 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
5686 errorText_ = errorStream_.str();
\r
5691 // Set the sample rate.
\r
5692 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
5693 if ( result < 0 ) {
\r
5694 snd_pcm_close( phandle );
\r
5695 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5696 errorText_ = errorStream_.str();
\r
5700 // Determine the number of channels for this device. We support a possible
\r
5701 // minimum device channel number > than the value requested by the user.
\r
5702 stream_.nUserChannels[mode] = channels;
\r
5703 unsigned int value;
\r
5704 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
5705 unsigned int deviceChannels = value;
\r
5706 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
5707 snd_pcm_close( phandle );
\r
5708 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
5709 errorText_ = errorStream_.str();
\r
5713 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
5714 if ( result < 0 ) {
\r
5715 snd_pcm_close( phandle );
\r
5716 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5717 errorText_ = errorStream_.str();
\r
5720 deviceChannels = value;
\r
5721 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
5722 stream_.nDeviceChannels[mode] = deviceChannels;
\r
5724 // Set the device channels.
\r
5725 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
5726 if ( result < 0 ) {
\r
5727 snd_pcm_close( phandle );
\r
5728 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5729 errorText_ = errorStream_.str();
\r
5733 // Set the buffer (or period) size.
\r
5735 snd_pcm_uframes_t periodSize = *bufferSize;
\r
5736 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
5737 if ( result < 0 ) {
\r
5738 snd_pcm_close( phandle );
\r
5739 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5740 errorText_ = errorStream_.str();
\r
5743 *bufferSize = periodSize;
\r
5745 // Set the buffer number, which in ALSA is referred to as the "period".
\r
5746 unsigned int periods = 0;
\r
5747 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
5748 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
5749 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
5750 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
5751 if ( result < 0 ) {
\r
5752 snd_pcm_close( phandle );
\r
5753 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
5754 errorText_ = errorStream_.str();
\r
5758 // If attempting to setup a duplex stream, the bufferSize parameter
\r
5759 // MUST be the same in both directions!
\r
5760 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
5761 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
5762 errorText_ = errorStream_.str();
\r
5766 stream_.bufferSize = *bufferSize;
\r
5768 // Install the hardware configuration
\r
5769 result = snd_pcm_hw_params( phandle, hw_params );
\r
5770 if ( result < 0 ) {
\r
5771 snd_pcm_close( phandle );
\r
5772 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5773 errorText_ = errorStream_.str();
\r
5777 #if defined(__RTAUDIO_DEBUG__)
\r
5778 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
5779 snd_pcm_hw_params_dump( hw_params, out );
\r
5782 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
5783 snd_pcm_sw_params_t *sw_params = NULL;
\r
5784 snd_pcm_sw_params_alloca( &sw_params );
\r
5785 snd_pcm_sw_params_current( phandle, sw_params );
\r
5786 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
5787 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
5788 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
5790 // The following two settings were suggested by Theo Veenker
\r
5791 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
5792 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
5794 // here are two options for a fix
\r
5795 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
5796 snd_pcm_uframes_t val;
\r
5797 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
5798 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
5800 result = snd_pcm_sw_params( phandle, sw_params );
\r
5801 if ( result < 0 ) {
\r
5802 snd_pcm_close( phandle );
\r
5803 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
5804 errorText_ = errorStream_.str();
\r
5808 #if defined(__RTAUDIO_DEBUG__)
\r
5809 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
5810 snd_pcm_sw_params_dump( sw_params, out );
\r
5813 // Set flags for buffer conversion
\r
5814 stream_.doConvertBuffer[mode] = false;
\r
5815 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
5816 stream_.doConvertBuffer[mode] = true;
\r
5817 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
5818 stream_.doConvertBuffer[mode] = true;
\r
5819 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
5820 stream_.nUserChannels[mode] > 1 )
\r
5821 stream_.doConvertBuffer[mode] = true;
\r
5823 // Allocate the ApiHandle if necessary and then save.
\r
5824 AlsaHandle *apiInfo = 0;
\r
5825 if ( stream_.apiHandle == 0 ) {
\r
5827 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
5829 catch ( std::bad_alloc& ) {
\r
5830 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
5834 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
5835 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
5839 stream_.apiHandle = (void *) apiInfo;
\r
5840 apiInfo->handles[0] = 0;
\r
5841 apiInfo->handles[1] = 0;
\r
5844 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5846 apiInfo->handles[mode] = phandle;
\r
5848 // Allocate necessary internal buffers.
\r
5849 unsigned long bufferBytes;
\r
5850 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
5851 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
5852 if ( stream_.userBuffer[mode] == NULL ) {
\r
5853 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
5857 if ( stream_.doConvertBuffer[mode] ) {
\r
5859 bool makeBuffer = true;
\r
5860 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
5861 if ( mode == INPUT ) {
\r
5862 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
5863 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
5864 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
5868 if ( makeBuffer ) {
\r
5869 bufferBytes *= *bufferSize;
\r
5870 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
5871 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
5872 if ( stream_.deviceBuffer == NULL ) {
\r
5873 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
5879 stream_.sampleRate = sampleRate;
\r
5880 stream_.nBuffers = periods;
\r
5881 stream_.device[mode] = device;
\r
5882 stream_.state = STREAM_STOPPED;
\r
5884 // Setup the buffer conversion information structure.
\r
5885 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
5887 // Setup thread if necessary.
\r
5888 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
5889 // We had already set up an output stream.
\r
5890 stream_.mode = DUPLEX;
\r
5891 // Link the streams if possible.
\r
5892 apiInfo->synchronized = false;
\r
5893 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
5894 apiInfo->synchronized = true;
\r
5896 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
5897 error( RtError::WARNING );
\r
5901 stream_.mode = mode;
\r
5903 // Setup callback thread.
\r
5904 stream_.callbackInfo.object = (void *) this;
\r
5906 // Set the thread attributes for joinable and realtime scheduling
\r
5907 // priority (optional). The higher priority will only take affect
\r
5908 // if the program is run as root or suid. Note, under Linux
\r
5909 // processes with CAP_SYS_NICE privilege, a user can change
\r
5910 // scheduling policy and priority (thus need not be root). See
\r
5911 // POSIX "capabilities".
\r
5912 pthread_attr_t attr;
\r
5913 pthread_attr_init( &attr );
\r
5914 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
5915 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
5916 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
5917 struct sched_param param;
\r
5918 int priority = options->priority;
\r
5919 int min = sched_get_priority_min( SCHED_RR );
\r
5920 int max = sched_get_priority_max( SCHED_RR );
\r
5921 if ( priority < min ) priority = min;
\r
5922 else if ( priority > max ) priority = max;
\r
5923 param.sched_priority = priority;
\r
5924 pthread_attr_setschedparam( &attr, ¶m );
\r
5925 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
5928 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5930 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
5933 stream_.callbackInfo.isRunning = true;
\r
5934 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
5935 pthread_attr_destroy( &attr );
\r
5937 stream_.callbackInfo.isRunning = false;
\r
5938 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
5947 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5948 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5949 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
5951 stream_.apiHandle = 0;
\r
5954 for ( int i=0; i<2; i++ ) {
\r
5955 if ( stream_.userBuffer[i] ) {
\r
5956 free( stream_.userBuffer[i] );
\r
5957 stream_.userBuffer[i] = 0;
\r
5961 if ( stream_.deviceBuffer ) {
\r
5962 free( stream_.deviceBuffer );
\r
5963 stream_.deviceBuffer = 0;
\r
5969 void RtApiAlsa :: closeStream()
\r
5971 if ( stream_.state == STREAM_CLOSED ) {
\r
5972 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
5973 error( RtError::WARNING );
\r
5977 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
5978 stream_.callbackInfo.isRunning = false;
\r
5979 MUTEX_LOCK( &stream_.mutex );
\r
5980 if ( stream_.state == STREAM_STOPPED ) {
\r
5981 apiInfo->runnable = true;
\r
5982 pthread_cond_signal( &apiInfo->runnable_cv );
\r
5984 MUTEX_UNLOCK( &stream_.mutex );
\r
5985 pthread_join( stream_.callbackInfo.thread, NULL );
\r
5987 if ( stream_.state == STREAM_RUNNING ) {
\r
5988 stream_.state = STREAM_STOPPED;
\r
5989 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
5990 snd_pcm_drop( apiInfo->handles[0] );
\r
5991 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
5992 snd_pcm_drop( apiInfo->handles[1] );
\r
5996 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
5997 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
5998 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
6000 stream_.apiHandle = 0;
\r
6003 for ( int i=0; i<2; i++ ) {
\r
6004 if ( stream_.userBuffer[i] ) {
\r
6005 free( stream_.userBuffer[i] );
\r
6006 stream_.userBuffer[i] = 0;
\r
6010 if ( stream_.deviceBuffer ) {
\r
6011 free( stream_.deviceBuffer );
\r
6012 stream_.deviceBuffer = 0;
\r
6015 stream_.mode = UNINITIALIZED;
\r
6016 stream_.state = STREAM_CLOSED;
\r
6019 void RtApiAlsa :: startStream()
\r
6021 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
6024 if ( stream_.state == STREAM_RUNNING ) {
\r
6025 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
6026 error( RtError::WARNING );
\r
6030 MUTEX_LOCK( &stream_.mutex );
\r
6033 snd_pcm_state_t state;
\r
6034 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6035 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6036 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6037 state = snd_pcm_state( handle[0] );
\r
6038 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6039 result = snd_pcm_prepare( handle[0] );
\r
6040 if ( result < 0 ) {
\r
6041 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
6042 errorText_ = errorStream_.str();
\r
6048 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6049 state = snd_pcm_state( handle[1] );
\r
6050 if ( state != SND_PCM_STATE_PREPARED ) {
\r
6051 result = snd_pcm_prepare( handle[1] );
\r
6052 if ( result < 0 ) {
\r
6053 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
6054 errorText_ = errorStream_.str();
\r
6060 stream_.state = STREAM_RUNNING;
\r
6063 apiInfo->runnable = true;
\r
6064 pthread_cond_signal( &apiInfo->runnable_cv );
\r
6065 MUTEX_UNLOCK( &stream_.mutex );
\r
6067 if ( result >= 0 ) return;
\r
6068 error( RtError::SYSTEM_ERROR );
\r
6071 void RtApiAlsa :: stopStream()
\r
6074 if ( stream_.state == STREAM_STOPPED ) {
\r
6075 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
\r
6076 error( RtError::WARNING );
\r
6080 stream_.state = STREAM_STOPPED;
\r
6081 MUTEX_LOCK( &stream_.mutex );
\r
6083 //if ( stream_.state == STREAM_STOPPED ) {
\r
6084 // MUTEX_UNLOCK( &stream_.mutex );
\r
6089 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6090 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6091 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6092 if ( apiInfo->synchronized )
\r
6093 result = snd_pcm_drop( handle[0] );
\r
6095 result = snd_pcm_drain( handle[0] );
\r
6096 if ( result < 0 ) {
\r
6097 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
\r
6098 errorText_ = errorStream_.str();
\r
6103 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6104 result = snd_pcm_drop( handle[1] );
\r
6105 if ( result < 0 ) {
\r
6106 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
\r
6107 errorText_ = errorStream_.str();
\r
6113 stream_.state = STREAM_STOPPED;
\r
6114 MUTEX_UNLOCK( &stream_.mutex );
\r
6116 if ( result >= 0 ) return;
\r
6117 error( RtError::SYSTEM_ERROR );
\r
6120 void RtApiAlsa :: abortStream()
\r
6123 if ( stream_.state == STREAM_STOPPED ) {
\r
6124 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
6125 error( RtError::WARNING );
\r
6129 stream_.state = STREAM_STOPPED;
\r
6130 MUTEX_LOCK( &stream_.mutex );
\r
6132 //if ( stream_.state == STREAM_STOPPED ) {
\r
6133 // MUTEX_UNLOCK( &stream_.mutex );
\r
6138 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6139 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
6140 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6141 result = snd_pcm_drop( handle[0] );
\r
6142 if ( result < 0 ) {
\r
6143 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
6144 errorText_ = errorStream_.str();
\r
6149 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
6150 result = snd_pcm_drop( handle[1] );
\r
6151 if ( result < 0 ) {
\r
6152 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
6153 errorText_ = errorStream_.str();
\r
6159 stream_.state = STREAM_STOPPED;
\r
6160 MUTEX_UNLOCK( &stream_.mutex );
\r
6162 if ( result >= 0 ) return;
\r
6163 error( RtError::SYSTEM_ERROR );
\r
6166 void RtApiAlsa :: callbackEvent()
\r
6168 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
6169 if ( stream_.state == STREAM_STOPPED ) {
\r
6170 MUTEX_LOCK( &stream_.mutex );
\r
6171 while ( !apiInfo->runnable )
\r
6172 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
6174 if ( stream_.state != STREAM_RUNNING ) {
\r
6175 MUTEX_UNLOCK( &stream_.mutex );
\r
6178 MUTEX_UNLOCK( &stream_.mutex );
\r
6181 if ( stream_.state == STREAM_CLOSED ) {
\r
6182 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6183 error( RtError::WARNING );
\r
6187 int doStopStream = 0;
\r
6188 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
6189 double streamTime = getStreamTime();
\r
6190 RtAudioStreamStatus status = 0;
\r
6191 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
6192 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6193 apiInfo->xrun[0] = false;
\r
6195 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
6196 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6197 apiInfo->xrun[1] = false;
\r
6199 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6200 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
6202 if ( doStopStream == 2 ) {
\r
6207 MUTEX_LOCK( &stream_.mutex );
\r
6209 // The state might change while waiting on a mutex.
\r
6210 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
6215 snd_pcm_t **handle;
\r
6216 snd_pcm_sframes_t frames;
\r
6217 RtAudioFormat format;
\r
6218 handle = (snd_pcm_t **) apiInfo->handles;
\r
6220 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6222 // Setup parameters.
\r
6223 if ( stream_.doConvertBuffer[1] ) {
\r
6224 buffer = stream_.deviceBuffer;
\r
6225 channels = stream_.nDeviceChannels[1];
\r
6226 format = stream_.deviceFormat[1];
\r
6229 buffer = stream_.userBuffer[1];
\r
6230 channels = stream_.nUserChannels[1];
\r
6231 format = stream_.userFormat;
\r
6234 // Read samples from device in interleaved/non-interleaved format.
\r
6235 if ( stream_.deviceInterleaved[1] )
\r
6236 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
6238 void *bufs[channels];
\r
6239 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6240 for ( int i=0; i<channels; i++ )
\r
6241 bufs[i] = (void *) (buffer + (i * offset));
\r
6242 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
6245 if ( result < (int) stream_.bufferSize ) {
\r
6246 // Either an error or overrun occured.
\r
6247 if ( result == -EPIPE ) {
\r
6248 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
6249 if ( state == SND_PCM_STATE_XRUN ) {
\r
6250 apiInfo->xrun[1] = true;
\r
6251 result = snd_pcm_prepare( handle[1] );
\r
6252 if ( result < 0 ) {
\r
6253 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
6254 errorText_ = errorStream_.str();
\r
6258 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6259 errorText_ = errorStream_.str();
\r
6263 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
6264 errorText_ = errorStream_.str();
\r
6266 error( RtError::WARNING );
\r
6270 // Do byte swapping if necessary.
\r
6271 if ( stream_.doByteSwap[1] )
\r
6272 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
6274 // Do buffer conversion if necessary.
\r
6275 if ( stream_.doConvertBuffer[1] )
\r
6276 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6278 // Check stream latency
\r
6279 result = snd_pcm_delay( handle[1], &frames );
\r
6280 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
6285 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6287 // Setup parameters and do buffer conversion if necessary.
\r
6288 if ( stream_.doConvertBuffer[0] ) {
\r
6289 buffer = stream_.deviceBuffer;
\r
6290 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6291 channels = stream_.nDeviceChannels[0];
\r
6292 format = stream_.deviceFormat[0];
\r
6295 buffer = stream_.userBuffer[0];
\r
6296 channels = stream_.nUserChannels[0];
\r
6297 format = stream_.userFormat;
\r
6300 // Do byte swapping if necessary.
\r
6301 if ( stream_.doByteSwap[0] )
\r
6302 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
6304 // Write samples to device in interleaved/non-interleaved format.
\r
6305 if ( stream_.deviceInterleaved[0] )
\r
6306 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
6308 void *bufs[channels];
\r
6309 size_t offset = stream_.bufferSize * formatBytes( format );
\r
6310 for ( int i=0; i<channels; i++ )
\r
6311 bufs[i] = (void *) (buffer + (i * offset));
\r
6312 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
6315 if ( result < (int) stream_.bufferSize ) {
\r
6316 // Either an error or underrun occured.
\r
6317 if ( result == -EPIPE ) {
\r
6318 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
6319 if ( state == SND_PCM_STATE_XRUN ) {
\r
6320 apiInfo->xrun[0] = true;
\r
6321 result = snd_pcm_prepare( handle[0] );
\r
6322 if ( result < 0 ) {
\r
6323 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
6324 errorText_ = errorStream_.str();
\r
6328 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
6329 errorText_ = errorStream_.str();
\r
6333 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
6334 errorText_ = errorStream_.str();
\r
6336 error( RtError::WARNING );
\r
6340 // Check stream latency
\r
6341 result = snd_pcm_delay( handle[0], &frames );
\r
6342 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
6346 MUTEX_UNLOCK( &stream_.mutex );
\r
6348 RtApi::tickStreamTime();
\r
6349 if ( doStopStream == 1 ) this->stopStream();
\r
6352 extern "C" void *alsaCallbackHandler( void *ptr )
\r
6354 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6355 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
6356 bool *isRunning = &info->isRunning;
\r
6358 while ( *isRunning == true ) {
\r
6359 pthread_testcancel();
\r
6360 object->callbackEvent();
\r
6363 pthread_exit( NULL );
\r
6366 //******************** End of __LINUX_ALSA__ *********************//
\r
6370 #if defined(__LINUX_OSS__)
\r
6372 #include <unistd.h>
\r
6373 #include <sys/ioctl.h>
\r
6374 #include <unistd.h>
\r
6375 #include <fcntl.h>
\r
6376 #include <sys/soundcard.h>
\r
6377 #include <errno.h>
\r
6380 extern "C" void *ossCallbackHandler(void * ptr);
\r
6382 // A structure to hold various information related to the OSS API
\r
6383 // implementation.
\r
// Per-stream bookkeeping for the OSS API implementation.
struct OssHandle {
  int id[2];               // playback/capture device file descriptors
  bool xrun[2];            // under/overrun flags, one per direction
  bool triggered;          // duplex trigger has been issued
  pthread_cond_t runnable; // wakes the callback thread out of the stopped state

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
6394 RtApiOss :: RtApiOss()
\r
6396 // Nothing to do here.
\r
6399 RtApiOss :: ~RtApiOss()
\r
6401 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6404 unsigned int RtApiOss :: getDeviceCount( void )
\r
6406 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6407 if ( mixerfd == -1 ) {
\r
6408 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
6409 error( RtError::WARNING );
\r
6413 oss_sysinfo sysinfo;
\r
6414 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
6416 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6417 error( RtError::WARNING );
\r
6422 return sysinfo.numaudios;
\r
6425 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
6427 RtAudio::DeviceInfo info;
\r
6428 info.probed = false;
\r
6430 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6431 if ( mixerfd == -1 ) {
\r
6432 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
6433 error( RtError::WARNING );
\r
6437 oss_sysinfo sysinfo;
\r
6438 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6439 if ( result == -1 ) {
\r
6441 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6442 error( RtError::WARNING );
\r
6446 unsigned nDevices = sysinfo.numaudios;
\r
6447 if ( nDevices == 0 ) {
\r
6449 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
6450 error( RtError::INVALID_USE );
\r
6453 if ( device >= nDevices ) {
\r
6455 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
6456 error( RtError::INVALID_USE );
\r
6459 oss_audioinfo ainfo;
\r
6460 ainfo.dev = device;
\r
6461 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6463 if ( result == -1 ) {
\r
6464 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6465 errorText_ = errorStream_.str();
\r
6466 error( RtError::WARNING );
\r
6471 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
6472 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
6473 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
6474 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
6475 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6478 // Probe data formats ... do for input
\r
6479 unsigned long mask = ainfo.iformats;
\r
6480 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
6481 info.nativeFormats |= RTAUDIO_SINT16;
\r
6482 if ( mask & AFMT_S8 )
\r
6483 info.nativeFormats |= RTAUDIO_SINT8;
\r
6484 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
6485 info.nativeFormats |= RTAUDIO_SINT32;
\r
6486 if ( mask & AFMT_FLOAT )
\r
6487 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
6488 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
6489 info.nativeFormats |= RTAUDIO_SINT24;
\r
6491 // Check that we have at least one supported format
\r
6492 if ( info.nativeFormats == 0 ) {
\r
6493 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6494 errorText_ = errorStream_.str();
\r
6495 error( RtError::WARNING );
\r
6499 // Probe the supported sample rates.
\r
6500 info.sampleRates.clear();
\r
6501 if ( ainfo.nrates ) {
\r
6502 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
6503 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6504 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
6505 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6512 // Check min and max rate values;
\r
6513 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
6514 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
6515 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
6519 if ( info.sampleRates.size() == 0 ) {
\r
6520 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
6521 errorText_ = errorStream_.str();
\r
6522 error( RtError::WARNING );
\r
6525 info.probed = true;
\r
6526 info.name = ainfo.name;
\r
6533 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
6534 unsigned int firstChannel, unsigned int sampleRate,
\r
6535 RtAudioFormat format, unsigned int *bufferSize,
\r
6536 RtAudio::StreamOptions *options, const std::string &deviceName )
\r
6538 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
6539 if ( mixerfd == -1 ) {
\r
6540 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
6544 oss_sysinfo sysinfo;
\r
6545 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
6546 if ( result == -1 ) {
\r
6548 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
6552 unsigned nDevices = sysinfo.numaudios;
\r
6553 if ( nDevices == 0 ) {
\r
6554 // This should not happen because a check is made before this function is called.
\r
6556 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
6560 if ( device >= nDevices ) {
\r
6561 // This should not happen because a check is made before this function is called.
\r
6563 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
6567 oss_audioinfo ainfo;
\r
6568 ainfo.dev = device;
\r
6569 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
6571 if ( result == -1 ) {
\r
6572 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
6573 errorText_ = errorStream_.str();
\r
6577 // Check if device supports input or output
\r
6578 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
6579 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
6580 if ( mode == OUTPUT )
\r
6581 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
6583 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
6584 errorText_ = errorStream_.str();
\r
6589 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6590 if ( mode == OUTPUT )
\r
6591 flags |= O_WRONLY;
\r
6592 else { // mode == INPUT
\r
6593 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6594 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
6595 close( handle->id[0] );
\r
6596 handle->id[0] = 0;
\r
6597 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
6598 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
6599 errorText_ = errorStream_.str();
\r
6602 // Check that the number previously set channels is the same.
\r
6603 if ( stream_.nUserChannels[0] != channels ) {
\r
6604 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
6605 errorText_ = errorStream_.str();
\r
6611 flags |= O_RDONLY;
\r
6614 // Set exclusive access if specified.
\r
6615 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
6617 // Try to open the device.
\r
6619 fd = open( ainfo.devnode, flags, 0 );
\r
6621 if ( errno == EBUSY )
\r
6622 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
6624 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
6625 errorText_ = errorStream_.str();
\r
6629 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
6631 if ( flags | O_RDWR ) {
\r
6632 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
6633 if ( result == -1) {
\r
6634 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
6635 errorText_ = errorStream_.str();
\r
6641 // Check the device channel support.
\r
6642 stream_.nUserChannels[mode] = channels;
\r
6643 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
6645 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
6646 errorText_ = errorStream_.str();
\r
6650 // Set the number of channels.
\r
6651 int deviceChannels = channels + firstChannel;
\r
6652 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
6653 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
6655 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
6656 errorText_ = errorStream_.str();
\r
6659 stream_.nDeviceChannels[mode] = deviceChannels;
\r
6661 // Get the data format mask
\r
6663 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
6664 if ( result == -1 ) {
\r
6666 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
6667 errorText_ = errorStream_.str();
\r
6671 // Determine how to set the device format.
\r
6672 stream_.userFormat = format;
\r
6673 int deviceFormat = -1;
\r
6674 stream_.doByteSwap[mode] = false;
\r
6675 if ( format == RTAUDIO_SINT8 ) {
\r
6676 if ( mask & AFMT_S8 ) {
\r
6677 deviceFormat = AFMT_S8;
\r
6678 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6681 else if ( format == RTAUDIO_SINT16 ) {
\r
6682 if ( mask & AFMT_S16_NE ) {
\r
6683 deviceFormat = AFMT_S16_NE;
\r
6684 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6686 else if ( mask & AFMT_S16_OE ) {
\r
6687 deviceFormat = AFMT_S16_OE;
\r
6688 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6689 stream_.doByteSwap[mode] = true;
\r
6692 else if ( format == RTAUDIO_SINT24 ) {
\r
6693 if ( mask & AFMT_S24_NE ) {
\r
6694 deviceFormat = AFMT_S24_NE;
\r
6695 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6697 else if ( mask & AFMT_S24_OE ) {
\r
6698 deviceFormat = AFMT_S24_OE;
\r
6699 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6700 stream_.doByteSwap[mode] = true;
\r
6703 else if ( format == RTAUDIO_SINT32 ) {
\r
6704 if ( mask & AFMT_S32_NE ) {
\r
6705 deviceFormat = AFMT_S32_NE;
\r
6706 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6708 else if ( mask & AFMT_S32_OE ) {
\r
6709 deviceFormat = AFMT_S32_OE;
\r
6710 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6711 stream_.doByteSwap[mode] = true;
\r
6715 if ( deviceFormat == -1 ) {
\r
6716 // The user requested format is not natively supported by the device.
\r
6717 if ( mask & AFMT_S16_NE ) {
\r
6718 deviceFormat = AFMT_S16_NE;
\r
6719 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6721 else if ( mask & AFMT_S32_NE ) {
\r
6722 deviceFormat = AFMT_S32_NE;
\r
6723 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6725 else if ( mask & AFMT_S24_NE ) {
\r
6726 deviceFormat = AFMT_S24_NE;
\r
6727 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6729 else if ( mask & AFMT_S16_OE ) {
\r
6730 deviceFormat = AFMT_S16_OE;
\r
6731 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
6732 stream_.doByteSwap[mode] = true;
\r
6734 else if ( mask & AFMT_S32_OE ) {
\r
6735 deviceFormat = AFMT_S32_OE;
\r
6736 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
6737 stream_.doByteSwap[mode] = true;
\r
6739 else if ( mask & AFMT_S24_OE ) {
\r
6740 deviceFormat = AFMT_S24_OE;
\r
6741 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
6742 stream_.doByteSwap[mode] = true;
\r
6744 else if ( mask & AFMT_S8) {
\r
6745 deviceFormat = AFMT_S8;
\r
6746 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
6750 if ( stream_.deviceFormat[mode] == 0 ) {
\r
6751 // This really shouldn't happen ...
\r
6753 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
6754 errorText_ = errorStream_.str();
\r
6758 // Set the data format.
\r
6759 int temp = deviceFormat;
\r
6760 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
6761 if ( result == -1 || deviceFormat != temp ) {
\r
6763 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
6764 errorText_ = errorStream_.str();
\r
6768 // Attempt to set the buffer size. According to OSS, the minimum
\r
6769 // number of buffers is two. The supposed minimum buffer size is 16
\r
6770 // bytes, so that will be our lower bound. The argument to this
\r
6771 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
6772 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
6773 // We'll check the actual value used near the end of the setup
\r
6775 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
6776 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
6778 if ( options ) buffers = options->numberOfBuffers;
\r
6779 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
6780 if ( buffers < 2 ) buffers = 3;
\r
6781 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
6782 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
6783 if ( result == -1 ) {
\r
6785 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
6786 errorText_ = errorStream_.str();
\r
6789 stream_.nBuffers = buffers;
\r
6791 // Save buffer size (in sample frames).
\r
6792 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
6793 stream_.bufferSize = *bufferSize;
\r
6795 // Set the sample rate.
\r
6796 int srate = sampleRate;
\r
6797 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
6798 if ( result == -1 ) {
\r
6800 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
6801 errorText_ = errorStream_.str();
\r
6805 // Verify the sample rate setup worked.
\r
6806 if ( abs( srate - sampleRate ) > 100 ) {
\r
6808 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
6809 errorText_ = errorStream_.str();
\r
6812 stream_.sampleRate = sampleRate;
\r
6814 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
6815 // We're doing duplex setup here.
\r
6816 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
6817 stream_.nDeviceChannels[0] = deviceChannels;
\r
6820 // Set interleaving parameters.
\r
6821 stream_.userInterleaved = true;
\r
6822 stream_.deviceInterleaved[mode] = true;
\r
6823 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
6824 stream_.userInterleaved = false;
\r
6826 // Set flags for buffer conversion
\r
6827 stream_.doConvertBuffer[mode] = false;
\r
6828 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
6829 stream_.doConvertBuffer[mode] = true;
\r
6830 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
6831 stream_.doConvertBuffer[mode] = true;
\r
6832 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
6833 stream_.nUserChannels[mode] > 1 )
\r
6834 stream_.doConvertBuffer[mode] = true;
\r
6836 // Allocate the stream handles if necessary and then save.
\r
6837 if ( stream_.apiHandle == 0 ) {
\r
6839 handle = new OssHandle;
\r
6841 catch ( std::bad_alloc& ) {
\r
6842 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
6846 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
6847 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
6851 stream_.apiHandle = (void *) handle;
\r
6854 handle = (OssHandle *) stream_.apiHandle;
\r
6856 handle->id[mode] = fd;
\r
6858 // Allocate necessary internal buffers.
\r
6859 unsigned long bufferBytes;
\r
6860 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
6861 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
6862 if ( stream_.userBuffer[mode] == NULL ) {
\r
6863 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
6867 if ( stream_.doConvertBuffer[mode] ) {
\r
6869 bool makeBuffer = true;
\r
6870 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
6871 if ( mode == INPUT ) {
\r
6872 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
6873 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
6874 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
6878 if ( makeBuffer ) {
\r
6879 bufferBytes *= *bufferSize;
\r
6880 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
6881 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
6882 if ( stream_.deviceBuffer == NULL ) {
\r
6883 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
6889 stream_.device[mode] = device;
\r
6890 stream_.state = STREAM_STOPPED;
\r
6892 // Setup the buffer conversion information structure.
\r
6893 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
6895 // Setup thread if necessary.
\r
6896 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
6897 // We had already set up an output stream.
\r
6898 stream_.mode = DUPLEX;
\r
6899 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
6902 stream_.mode = mode;
\r
6904 // Setup callback thread.
\r
6905 stream_.callbackInfo.object = (void *) this;
\r
6907 // Set the thread attributes for joinable and realtime scheduling
\r
6908 // priority. The higher priority will only take affect if the
\r
6909 // program is run as root or suid.
\r
6910 pthread_attr_t attr;
\r
6911 pthread_attr_init( &attr );
\r
6912 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
6913 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
6914 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
6915 struct sched_param param;
\r
6916 int priority = options->priority;
\r
6917 int min = sched_get_priority_min( SCHED_RR );
\r
6918 int max = sched_get_priority_max( SCHED_RR );
\r
6919 if ( priority < min ) priority = min;
\r
6920 else if ( priority > max ) priority = max;
\r
6921 param.sched_priority = priority;
\r
6922 pthread_attr_setschedparam( &attr, ¶m );
\r
6923 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
6926 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6928 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
6931 stream_.callbackInfo.isRunning = true;
\r
6932 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
6933 pthread_attr_destroy( &attr );
\r
6935 stream_.callbackInfo.isRunning = false;
\r
6936 errorText_ = "RtApiOss::error creating callback thread!";
\r
6945 pthread_cond_destroy( &handle->runnable );
\r
6946 if ( handle->id[0] ) close( handle->id[0] );
\r
6947 if ( handle->id[1] ) close( handle->id[1] );
\r
6949 stream_.apiHandle = 0;
\r
6952 for ( int i=0; i<2; i++ ) {
\r
6953 if ( stream_.userBuffer[i] ) {
\r
6954 free( stream_.userBuffer[i] );
\r
6955 stream_.userBuffer[i] = 0;
\r
6959 if ( stream_.deviceBuffer ) {
\r
6960 free( stream_.deviceBuffer );
\r
6961 stream_.deviceBuffer = 0;
\r
6967 void RtApiOss :: closeStream()
\r
6969 if ( stream_.state == STREAM_CLOSED ) {
\r
6970 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
6971 error( RtError::WARNING );
\r
6975 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
6976 stream_.callbackInfo.isRunning = false;
\r
6977 MUTEX_LOCK( &stream_.mutex );
\r
6978 if ( stream_.state == STREAM_STOPPED )
\r
6979 pthread_cond_signal( &handle->runnable );
\r
6980 MUTEX_UNLOCK( &stream_.mutex );
\r
6981 pthread_join( stream_.callbackInfo.thread, NULL );
\r
6983 if ( stream_.state == STREAM_RUNNING ) {
\r
6984 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
6985 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
6987 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
6988 stream_.state = STREAM_STOPPED;
\r
6992 pthread_cond_destroy( &handle->runnable );
\r
6993 if ( handle->id[0] ) close( handle->id[0] );
\r
6994 if ( handle->id[1] ) close( handle->id[1] );
\r
6996 stream_.apiHandle = 0;
\r
6999 for ( int i=0; i<2; i++ ) {
\r
7000 if ( stream_.userBuffer[i] ) {
\r
7001 free( stream_.userBuffer[i] );
\r
7002 stream_.userBuffer[i] = 0;
\r
7006 if ( stream_.deviceBuffer ) {
\r
7007 free( stream_.deviceBuffer );
\r
7008 stream_.deviceBuffer = 0;
\r
7011 stream_.mode = UNINITIALIZED;
\r
7012 stream_.state = STREAM_CLOSED;
\r
7015 void RtApiOss :: startStream()
\r
7018 if ( stream_.state == STREAM_RUNNING ) {
\r
7019 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
7020 error( RtError::WARNING );
\r
7024 MUTEX_LOCK( &stream_.mutex );
\r
7026 stream_.state = STREAM_RUNNING;
\r
7028 // No need to do anything else here ... OSS automatically starts
\r
7029 // when fed samples.
\r
7031 MUTEX_UNLOCK( &stream_.mutex );
\r
7033 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7034 pthread_cond_signal( &handle->runnable );
\r
7037 void RtApiOss :: stopStream()
\r
7040 if ( stream_.state == STREAM_STOPPED ) {
\r
7041 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
7042 error( RtError::WARNING );
\r
7046 MUTEX_LOCK( &stream_.mutex );
\r
7048 // The state might change while waiting on a mutex.
\r
7049 if ( stream_.state == STREAM_STOPPED ) {
\r
7050 MUTEX_UNLOCK( &stream_.mutex );
\r
7055 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7056 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7058 // Flush the output with zeros a few times.
\r
7061 RtAudioFormat format;
\r
7063 if ( stream_.doConvertBuffer[0] ) {
\r
7064 buffer = stream_.deviceBuffer;
\r
7065 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7066 format = stream_.deviceFormat[0];
\r
7069 buffer = stream_.userBuffer[0];
\r
7070 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7071 format = stream_.userFormat;
\r
7074 memset( buffer, 0, samples * formatBytes(format) );
\r
7075 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
7076 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7077 if ( result == -1 ) {
\r
7078 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
7079 error( RtError::WARNING );
\r
7083 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7084 if ( result == -1 ) {
\r
7085 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7086 errorText_ = errorStream_.str();
\r
7089 handle->triggered = false;
\r
7092 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7093 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7094 if ( result == -1 ) {
\r
7095 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7096 errorText_ = errorStream_.str();
\r
7102 stream_.state = STREAM_STOPPED;
\r
7103 MUTEX_UNLOCK( &stream_.mutex );
\r
7105 if ( result != -1 ) return;
\r
7106 error( RtError::SYSTEM_ERROR );
\r
7109 void RtApiOss :: abortStream()
\r
7112 if ( stream_.state == STREAM_STOPPED ) {
\r
7113 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
7114 error( RtError::WARNING );
\r
7118 MUTEX_LOCK( &stream_.mutex );
\r
7120 // The state might change while waiting on a mutex.
\r
7121 if ( stream_.state == STREAM_STOPPED ) {
\r
7122 MUTEX_UNLOCK( &stream_.mutex );
\r
7127 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7128 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7129 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
7130 if ( result == -1 ) {
\r
7131 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
7132 errorText_ = errorStream_.str();
\r
7135 handle->triggered = false;
\r
7138 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
7139 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
7140 if ( result == -1 ) {
\r
7141 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
7142 errorText_ = errorStream_.str();
\r
7148 stream_.state = STREAM_STOPPED;
\r
7149 MUTEX_UNLOCK( &stream_.mutex );
\r
7151 if ( result != -1 ) return;
\r
7152 error( RtError::SYSTEM_ERROR );
\r
7155 void RtApiOss :: callbackEvent()
\r
7157 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
7158 if ( stream_.state == STREAM_STOPPED ) {
\r
7159 MUTEX_LOCK( &stream_.mutex );
\r
7160 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
7161 if ( stream_.state != STREAM_RUNNING ) {
\r
7162 MUTEX_UNLOCK( &stream_.mutex );
\r
7165 MUTEX_UNLOCK( &stream_.mutex );
\r
7168 if ( stream_.state == STREAM_CLOSED ) {
\r
7169 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7170 error( RtError::WARNING );
\r
7174 // Invoke user callback to get fresh output data.
\r
7175 int doStopStream = 0;
\r
7176 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7177 double streamTime = getStreamTime();
\r
7178 RtAudioStreamStatus status = 0;
\r
7179 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
7180 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7181 handle->xrun[0] = false;
\r
7183 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
7184 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7185 handle->xrun[1] = false;
\r
7187 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7188 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7189 if ( doStopStream == 2 ) {
\r
7190 this->abortStream();
\r
7194 MUTEX_LOCK( &stream_.mutex );
\r
7196 // The state might change while waiting on a mutex.
\r
7197 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7202 RtAudioFormat format;
\r
7204 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7206 // Setup parameters and do buffer conversion if necessary.
\r
7207 if ( stream_.doConvertBuffer[0] ) {
\r
7208 buffer = stream_.deviceBuffer;
\r
7209 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7210 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
7211 format = stream_.deviceFormat[0];
\r
7214 buffer = stream_.userBuffer[0];
\r
7215 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
7216 format = stream_.userFormat;
\r
7219 // Do byte swapping if necessary.
\r
7220 if ( stream_.doByteSwap[0] )
\r
7221 byteSwapBuffer( buffer, samples, format );
\r
7223 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
7225 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7226 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7227 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
7228 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
7229 handle->triggered = true;
\r
7232 // Write samples to device.
\r
7233 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
7235 if ( result == -1 ) {
\r
7236 // We'll assume this is an underrun, though there isn't a
\r
7237 // specific means for determining that.
\r
7238 handle->xrun[0] = true;
\r
7239 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
7240 error( RtError::WARNING );
\r
7241 // Continue on to input section.
\r
7245 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7247 // Setup parameters.
\r
7248 if ( stream_.doConvertBuffer[1] ) {
\r
7249 buffer = stream_.deviceBuffer;
\r
7250 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
7251 format = stream_.deviceFormat[1];
\r
7254 buffer = stream_.userBuffer[1];
\r
7255 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
7256 format = stream_.userFormat;
\r
7259 // Read samples from device.
\r
7260 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
7262 if ( result == -1 ) {
\r
7263 // We'll assume this is an overrun, though there isn't a
\r
7264 // specific means for determining that.
\r
7265 handle->xrun[1] = true;
\r
7266 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
7267 error( RtError::WARNING );
\r
7271 // Do byte swapping if necessary.
\r
7272 if ( stream_.doByteSwap[1] )
\r
7273 byteSwapBuffer( buffer, samples, format );
\r
7275 // Do buffer conversion if necessary.
\r
7276 if ( stream_.doConvertBuffer[1] )
\r
7277 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7281 MUTEX_UNLOCK( &stream_.mutex );
\r
7283 RtApi::tickStreamTime();
\r
7284 if ( doStopStream == 1 ) this->stopStream();
\r
7287 extern "C" void *ossCallbackHandler( void *ptr )
\r
7289 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7290 RtApiOss *object = (RtApiOss *) info->object;
\r
7291 bool *isRunning = &info->isRunning;
\r
7293 while ( *isRunning == true ) {
\r
7294 pthread_testcancel();
\r
7295 object->callbackEvent();
\r
7298 pthread_exit( NULL );
\r
7301 //******************** End of __LINUX_OSS__ *********************//
\r
7305 // *************************************************** //
\r
7307 // Protected common (OS-independent) RtAudio methods.
\r
7309 // *************************************************** //
\r
7311 // This method can be modified to control the behavior of error
\r
7312 // message printing.
\r
7313 void RtApi :: error( RtError::Type type )
\r
7315 errorStream_.str(""); // clear the ostringstream
\r
7316 if ( type == RtError::WARNING && showWarnings_ == true )
\r
7317 std::cerr << '\n' << errorText_ << "\n\n";
\r
7318 else if ( type != RtError::WARNING )
\r
7319 throw( RtError( errorText_, type ) );
\r
7322 void RtApi :: verifyStream()
\r
7324 if ( stream_.state == STREAM_CLOSED ) {
\r
7325 errorText_ = "RtApi:: a stream is not open!";
\r
7326 error( RtError::INVALID_USE );
\r
7330 void RtApi :: clearStreamInfo()
\r
7332 stream_.mode = UNINITIALIZED;
\r
7333 stream_.state = STREAM_CLOSED;
\r
7334 stream_.sampleRate = 0;
\r
7335 stream_.bufferSize = 0;
\r
7336 stream_.nBuffers = 0;
\r
7337 stream_.userFormat = 0;
\r
7338 stream_.userInterleaved = true;
\r
7339 stream_.streamTime = 0.0;
\r
7340 stream_.apiHandle = 0;
\r
7341 stream_.deviceBuffer = 0;
\r
7342 stream_.callbackInfo.callback = 0;
\r
7343 stream_.callbackInfo.userData = 0;
\r
7344 stream_.callbackInfo.isRunning = false;
\r
7345 for ( int i=0; i<2; i++ ) {
\r
7346 stream_.device[i] = 11111;
\r
7347 stream_.doConvertBuffer[i] = false;
\r
7348 stream_.deviceInterleaved[i] = true;
\r
7349 stream_.doByteSwap[i] = false;
\r
7350 stream_.nUserChannels[i] = 0;
\r
7351 stream_.nDeviceChannels[i] = 0;
\r
7352 stream_.channelOffset[i] = 0;
\r
7353 stream_.deviceFormat[i] = 0;
\r
7354 stream_.latency[i] = 0;
\r
7355 stream_.userBuffer[i] = 0;
\r
7356 stream_.convertInfo[i].channels = 0;
\r
7357 stream_.convertInfo[i].inJump = 0;
\r
7358 stream_.convertInfo[i].outJump = 0;
\r
7359 stream_.convertInfo[i].inFormat = 0;
\r
7360 stream_.convertInfo[i].outFormat = 0;
\r
7361 stream_.convertInfo[i].inOffset.clear();
\r
7362 stream_.convertInfo[i].outOffset.clear();
\r
7366 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
7368 if ( format == RTAUDIO_SINT16 )
\r
7370 else if ( format == RTAUDIO_SINT24 || format == RTAUDIO_SINT32 ||
\r
7371 format == RTAUDIO_FLOAT32 )
\r
7373 else if ( format == RTAUDIO_FLOAT64 )
\r
7375 else if ( format == RTAUDIO_SINT8 )
\r
7378 errorText_ = "RtApi::formatBytes: undefined format.";
\r
7379 error( RtError::WARNING );
\r
7384 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
7386 if ( mode == INPUT ) { // convert device to user buffer
\r
7387 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
7388 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
7389 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
7390 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
7392 else { // convert user to device buffer
\r
7393 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
7394 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
7395 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
7396 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
7399 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
7400 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
7402 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
7404 // Set up the interleave/deinterleave offsets.
\r
7405 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
7406 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
7407 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
7408 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7409 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7410 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7411 stream_.convertInfo[mode].inJump = 1;
\r
7415 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7416 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7417 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7418 stream_.convertInfo[mode].outJump = 1;
\r
7422 else { // no (de)interleaving
\r
7423 if ( stream_.userInterleaved ) {
\r
7424 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7425 stream_.convertInfo[mode].inOffset.push_back( k );
\r
7426 stream_.convertInfo[mode].outOffset.push_back( k );
\r
7430 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
7431 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
7432 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
7433 stream_.convertInfo[mode].inJump = 1;
\r
7434 stream_.convertInfo[mode].outJump = 1;
\r
7439 // Add channel offset.
\r
7440 if ( firstChannel > 0 ) {
\r
7441 if ( stream_.deviceInterleaved[mode] ) {
\r
7442 if ( mode == OUTPUT ) {
\r
7443 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7444 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
7447 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7448 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
7452 if ( mode == OUTPUT ) {
\r
7453 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7454 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7457 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
7458 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
7464 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
7466 // This function does format conversion, input/output channel compensation, and
\r
7467 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
7468 // the lower three bytes of a 32-bit integer.
\r
7470 // Clear our device buffer when in/out duplex device channels are different
\r
7471 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
7472 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
7473 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
7476 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
7478 Float64 *out = (Float64 *)outBuffer;
\r
7480 if (info.inFormat == RTAUDIO_SINT8) {
\r
7481 signed char *in = (signed char *)inBuffer;
\r
7482 scale = 1.0 / 127.5;
\r
7483 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7484 for (j=0; j<info.channels; j++) {
\r
7485 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7486 out[info.outOffset[j]] += 0.5;
\r
7487 out[info.outOffset[j]] *= scale;
\r
7489 in += info.inJump;
\r
7490 out += info.outJump;
\r
7493 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7494 Int16 *in = (Int16 *)inBuffer;
\r
7495 scale = 1.0 / 32767.5;
\r
7496 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7497 for (j=0; j<info.channels; j++) {
\r
7498 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7499 out[info.outOffset[j]] += 0.5;
\r
7500 out[info.outOffset[j]] *= scale;
\r
7502 in += info.inJump;
\r
7503 out += info.outJump;
\r
7506 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7507 Int32 *in = (Int32 *)inBuffer;
\r
7508 scale = 1.0 / 8388607.5;
\r
7509 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7510 for (j=0; j<info.channels; j++) {
\r
7511 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]] & 0x00ffffff);
\r
7512 out[info.outOffset[j]] += 0.5;
\r
7513 out[info.outOffset[j]] *= scale;
\r
7515 in += info.inJump;
\r
7516 out += info.outJump;
\r
7519 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7520 Int32 *in = (Int32 *)inBuffer;
\r
7521 scale = 1.0 / 2147483647.5;
\r
7522 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7523 for (j=0; j<info.channels; j++) {
\r
7524 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7525 out[info.outOffset[j]] += 0.5;
\r
7526 out[info.outOffset[j]] *= scale;
\r
7528 in += info.inJump;
\r
7529 out += info.outJump;
\r
7532 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7533 Float32 *in = (Float32 *)inBuffer;
\r
7534 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7535 for (j=0; j<info.channels; j++) {
\r
7536 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
7538 in += info.inJump;
\r
7539 out += info.outJump;
\r
7542 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7543 // Channel compensation and/or (de)interleaving only.
\r
7544 Float64 *in = (Float64 *)inBuffer;
\r
7545 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7546 for (j=0; j<info.channels; j++) {
\r
7547 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7549 in += info.inJump;
\r
7550 out += info.outJump;
\r
7554 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
7556 Float32 *out = (Float32 *)outBuffer;
\r
7558 if (info.inFormat == RTAUDIO_SINT8) {
\r
7559 signed char *in = (signed char *)inBuffer;
\r
7560 scale = (Float32) ( 1.0 / 127.5 );
\r
7561 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7562 for (j=0; j<info.channels; j++) {
\r
7563 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7564 out[info.outOffset[j]] += 0.5;
\r
7565 out[info.outOffset[j]] *= scale;
\r
7567 in += info.inJump;
\r
7568 out += info.outJump;
\r
7571 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7572 Int16 *in = (Int16 *)inBuffer;
\r
7573 scale = (Float32) ( 1.0 / 32767.5 );
\r
7574 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7575 for (j=0; j<info.channels; j++) {
\r
7576 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7577 out[info.outOffset[j]] += 0.5;
\r
7578 out[info.outOffset[j]] *= scale;
\r
7580 in += info.inJump;
\r
7581 out += info.outJump;
\r
7584 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7585 Int32 *in = (Int32 *)inBuffer;
\r
7586 scale = (Float32) ( 1.0 / 8388607.5 );
\r
7587 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7588 for (j=0; j<info.channels; j++) {
\r
7589 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]] & 0x00ffffff);
\r
7590 out[info.outOffset[j]] += 0.5;
\r
7591 out[info.outOffset[j]] *= scale;
\r
7593 in += info.inJump;
\r
7594 out += info.outJump;
\r
7597 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7598 Int32 *in = (Int32 *)inBuffer;
\r
7599 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
7600 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7601 for (j=0; j<info.channels; j++) {
\r
7602 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7603 out[info.outOffset[j]] += 0.5;
\r
7604 out[info.outOffset[j]] *= scale;
\r
7606 in += info.inJump;
\r
7607 out += info.outJump;
\r
7610 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7611 // Channel compensation and/or (de)interleaving only.
\r
7612 Float32 *in = (Float32 *)inBuffer;
\r
7613 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7614 for (j=0; j<info.channels; j++) {
\r
7615 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7617 in += info.inJump;
\r
7618 out += info.outJump;
\r
7621 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7622 Float64 *in = (Float64 *)inBuffer;
\r
7623 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7624 for (j=0; j<info.channels; j++) {
\r
7625 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
7627 in += info.inJump;
\r
7628 out += info.outJump;
\r
7632 else if (info.outFormat == RTAUDIO_SINT32) {
\r
7633 Int32 *out = (Int32 *)outBuffer;
\r
7634 if (info.inFormat == RTAUDIO_SINT8) {
\r
7635 signed char *in = (signed char *)inBuffer;
\r
7636 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7637 for (j=0; j<info.channels; j++) {
\r
7638 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7639 out[info.outOffset[j]] <<= 24;
\r
7641 in += info.inJump;
\r
7642 out += info.outJump;
\r
7645 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7646 Int16 *in = (Int16 *)inBuffer;
\r
7647 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7648 for (j=0; j<info.channels; j++) {
\r
7649 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7650 out[info.outOffset[j]] <<= 16;
\r
7652 in += info.inJump;
\r
7653 out += info.outJump;
\r
7656 else if (info.inFormat == RTAUDIO_SINT24) { // Hmmm ... we could just leave it in the lower 3 bytes
\r
7657 Int32 *in = (Int32 *)inBuffer;
\r
7658 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7659 for (j=0; j<info.channels; j++) {
\r
7660 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7661 out[info.outOffset[j]] <<= 8;
\r
7663 in += info.inJump;
\r
7664 out += info.outJump;
\r
7667 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7668 // Channel compensation and/or (de)interleaving only.
\r
7669 Int32 *in = (Int32 *)inBuffer;
\r
7670 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7671 for (j=0; j<info.channels; j++) {
\r
7672 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7674 in += info.inJump;
\r
7675 out += info.outJump;
\r
7678 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7679 Float32 *in = (Float32 *)inBuffer;
\r
7680 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7681 for (j=0; j<info.channels; j++) {
\r
7682 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7684 in += info.inJump;
\r
7685 out += info.outJump;
\r
7688 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7689 Float64 *in = (Float64 *)inBuffer;
\r
7690 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7691 for (j=0; j<info.channels; j++) {
\r
7692 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
7694 in += info.inJump;
\r
7695 out += info.outJump;
\r
7699 else if (info.outFormat == RTAUDIO_SINT24) {
\r
7700 Int32 *out = (Int32 *)outBuffer;
\r
7701 if (info.inFormat == RTAUDIO_SINT8) {
\r
7702 signed char *in = (signed char *)inBuffer;
\r
7703 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7704 for (j=0; j<info.channels; j++) {
\r
7705 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7706 out[info.outOffset[j]] <<= 16;
\r
7708 in += info.inJump;
\r
7709 out += info.outJump;
\r
7712 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7713 Int16 *in = (Int16 *)inBuffer;
\r
7714 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7715 for (j=0; j<info.channels; j++) {
\r
7716 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7717 out[info.outOffset[j]] <<= 8;
\r
7719 in += info.inJump;
\r
7720 out += info.outJump;
\r
7723 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7724 // Channel compensation and/or (de)interleaving only.
\r
7725 Int32 *in = (Int32 *)inBuffer;
\r
7726 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7727 for (j=0; j<info.channels; j++) {
\r
7728 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7730 in += info.inJump;
\r
7731 out += info.outJump;
\r
7734 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7735 Int32 *in = (Int32 *)inBuffer;
\r
7736 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7737 for (j=0; j<info.channels; j++) {
\r
7738 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
7739 out[info.outOffset[j]] >>= 8;
\r
7741 in += info.inJump;
\r
7742 out += info.outJump;
\r
7745 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7746 Float32 *in = (Float32 *)inBuffer;
\r
7747 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7748 for (j=0; j<info.channels; j++) {
\r
7749 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7751 in += info.inJump;
\r
7752 out += info.outJump;
\r
7755 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7756 Float64 *in = (Float64 *)inBuffer;
\r
7757 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7758 for (j=0; j<info.channels; j++) {
\r
7759 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
7761 in += info.inJump;
\r
7762 out += info.outJump;
\r
7766 else if (info.outFormat == RTAUDIO_SINT16) {
\r
7767 Int16 *out = (Int16 *)outBuffer;
\r
7768 if (info.inFormat == RTAUDIO_SINT8) {
\r
7769 signed char *in = (signed char *)inBuffer;
\r
7770 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7771 for (j=0; j<info.channels; j++) {
\r
7772 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
7773 out[info.outOffset[j]] <<= 8;
\r
7775 in += info.inJump;
\r
7776 out += info.outJump;
\r
7779 else if (info.inFormat == RTAUDIO_SINT16) {
\r
7780 // Channel compensation and/or (de)interleaving only.
\r
7781 Int16 *in = (Int16 *)inBuffer;
\r
7782 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7783 for (j=0; j<info.channels; j++) {
\r
7784 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7786 in += info.inJump;
\r
7787 out += info.outJump;
\r
7790 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7791 Int32 *in = (Int32 *)inBuffer;
\r
7792 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7793 for (j=0; j<info.channels; j++) {
\r
7794 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 8) & 0x0000ffff);
\r
7796 in += info.inJump;
\r
7797 out += info.outJump;
\r
7800 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7801 Int32 *in = (Int32 *)inBuffer;
\r
7802 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7803 for (j=0; j<info.channels; j++) {
\r
7804 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
7806 in += info.inJump;
\r
7807 out += info.outJump;
\r
7810 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7811 Float32 *in = (Float32 *)inBuffer;
\r
7812 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7813 for (j=0; j<info.channels; j++) {
\r
7814 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7816 in += info.inJump;
\r
7817 out += info.outJump;
\r
7820 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7821 Float64 *in = (Float64 *)inBuffer;
\r
7822 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7823 for (j=0; j<info.channels; j++) {
\r
7824 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
7826 in += info.inJump;
\r
7827 out += info.outJump;
\r
7831 else if (info.outFormat == RTAUDIO_SINT8) {
\r
7832 signed char *out = (signed char *)outBuffer;
\r
7833 if (info.inFormat == RTAUDIO_SINT8) {
\r
7834 // Channel compensation and/or (de)interleaving only.
\r
7835 signed char *in = (signed char *)inBuffer;
\r
7836 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7837 for (j=0; j<info.channels; j++) {
\r
7838 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
7840 in += info.inJump;
\r
7841 out += info.outJump;
\r
7844 if (info.inFormat == RTAUDIO_SINT16) {
\r
7845 Int16 *in = (Int16 *)inBuffer;
\r
7846 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7847 for (j=0; j<info.channels; j++) {
\r
7848 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
7850 in += info.inJump;
\r
7851 out += info.outJump;
\r
7854 else if (info.inFormat == RTAUDIO_SINT24) {
\r
7855 Int32 *in = (Int32 *)inBuffer;
\r
7856 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7857 for (j=0; j<info.channels; j++) {
\r
7858 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 16) & 0x000000ff);
\r
7860 in += info.inJump;
\r
7861 out += info.outJump;
\r
7864 else if (info.inFormat == RTAUDIO_SINT32) {
\r
7865 Int32 *in = (Int32 *)inBuffer;
\r
7866 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7867 for (j=0; j<info.channels; j++) {
\r
7868 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
7870 in += info.inJump;
\r
7871 out += info.outJump;
\r
7874 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
7875 Float32 *in = (Float32 *)inBuffer;
\r
7876 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7877 for (j=0; j<info.channels; j++) {
\r
7878 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7880 in += info.inJump;
\r
7881 out += info.outJump;
\r
7884 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
7885 Float64 *in = (Float64 *)inBuffer;
\r
7886 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
7887 for (j=0; j<info.channels; j++) {
\r
7888 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
7890 in += info.inJump;
\r
7891 out += info.outJump;
\r
//static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
//static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
//static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
7901 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
7903 register char val;
\r
7904 register char *ptr;
\r
7907 if ( format == RTAUDIO_SINT16 ) {
\r
7908 for ( unsigned int i=0; i<samples; i++ ) {
\r
7909 // Swap 1st and 2nd bytes.
\r
7911 *(ptr) = *(ptr+1);
\r
7914 // Increment 2 bytes.
\r
7918 else if ( format == RTAUDIO_SINT24 ||
\r
7919 format == RTAUDIO_SINT32 ||
\r
7920 format == RTAUDIO_FLOAT32 ) {
\r
7921 for ( unsigned int i=0; i<samples; i++ ) {
\r
7922 // Swap 1st and 4th bytes.
\r
7924 *(ptr) = *(ptr+3);
\r
7927 // Swap 2nd and 3rd bytes.
\r
7930 *(ptr) = *(ptr+1);
\r
7933 // Increment 3 more bytes.
\r
7937 else if ( format == RTAUDIO_FLOAT64 ) {
\r
7938 for ( unsigned int i=0; i<samples; i++ ) {
\r
7939 // Swap 1st and 8th bytes
\r
7941 *(ptr) = *(ptr+7);
\r
7944 // Swap 2nd and 7th bytes
\r
7947 *(ptr) = *(ptr+5);
\r
7950 // Swap 3rd and 6th bytes
\r
7953 *(ptr) = *(ptr+3);
\r
7956 // Swap 4th and 5th bytes
\r
7959 *(ptr) = *(ptr+1);
\r
7962 // Increment 5 more bytes.
\r
// Indentation settings for Vim and Emacs
//
// Local Variables:
// c-basic-offset: 2
// indent-tabs-mode: nil
// End:
//
// vim: et sts=2 sw=2
\r