diff --git a/lib/native/mac/libjnmaccoreaudio.jnilib b/lib/native/mac/libjnmaccoreaudio.jnilib index 777733cb2ecfbae27e33546c3989077d02742dcd..f3585c1aea4392041a6ed689e75522b5b99a0388 100755 Binary files a/lib/native/mac/libjnmaccoreaudio.jnilib and b/lib/native/mac/libjnmaccoreaudio.jnilib differ diff --git a/lib/native/mac/libjnwebrtc.dylib b/lib/native/mac/libjnwebrtc.dylib new file mode 100755 index 0000000000000000000000000000000000000000..ef5ee9fb71629b345410e55343497f27a76fa970 Binary files /dev/null and b/lib/native/mac/libjnwebrtc.dylib differ diff --git a/lib/native/mac/libjnwebrtcaec.dylib b/lib/native/mac/libjnwebrtcaec.dylib new file mode 100755 index 0000000000000000000000000000000000000000..502d640b8b4e4f2e143972fb3b2d7192cf70ce02 Binary files /dev/null and b/lib/native/mac/libjnwebrtcaec.dylib differ diff --git a/src/native/build.xml b/src/native/build.xml index 6e43b3c9e8d3f49dc3c35d513e7d2e767bb13326..97f40f95f974b78509dd99d03dd9dea0b105f3ae 100644 --- a/src/native/build.xml +++ b/src/native/build.xml @@ -919,8 +919,8 @@ <compilerarg value="x86_64" /> <compilerarg value="-arch" /> <compilerarg value="i386" /> - <compilerarg value="-I/System/Library/Frameworks/JavaVM.framework/Headers" - /> + <compilerarg + value="-I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers" /> <linkerarg value="-o" location="end" /> <linkerarg value="libjnmaccoreaudio.jnilib" location="end" /> @@ -934,6 +934,10 @@ <linkerarg value="Coreaudio" /> <linkerarg value="-framework" /> <linkerarg value="AudioToolbox" /> + <linkerarg value="${basedir}/${native_install_dir}/libjnwebrtc.dylib" /> + <linkerarg + value="${basedir}/${native_install_dir}/libjnwebrtcaec.dylib" /> + <linkerarg value="-lstdc++" location="end" /> <fileset dir="${src}/native/macosx/coreaudio/lib" includes="*.c"/> <fileset dir="${src}/native/macosx/coreaudio/jni" includes="*.c"/> @@ -943,6 +947,252 @@ <delete file="${native_install_dir}/history.xml" failonerror="false" /> </target> + <target name="webrtc_aec" description="Build Webrtc AEC shared library for Mac OS X" if="is.running.macos" + depends="init-native"> + <fail message="webrtcSrc not set!" 
+ unless="webrtcSrc" /> + <cc outtype="shared" name="gcc" outfile="${native_install_dir}/jnwebrtc" objdir="${obj}"> + <compilerarg value="-DWEBRTC_AEC_DEBUG_DUMP" /> + <compilerarg value="-DWEBRTC_MAC" /> + + <!--compilerarg value="-xc++" /--> + <compilerarg value="-Wall" /> + <compilerarg value="-O2" /> + <compilerarg value="-arch" /> + <compilerarg value="x86_64" /> + <compilerarg value="-arch" /> + <compilerarg value="i386" /> + <compilerarg value="-I${basedir}/${webrtcSrc}/trunk" /> + <compilerarg value="-I${basedir}/${webrtcSrc}/trunk/webrtc" /> + + <linkerarg value="-arch" /> + <linkerarg value="x86_64" /> + <linkerarg value="-arch" /> + <linkerarg value="i386" /> + + <!--fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="*.cc"/--> + + <!--fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="*.cc"/--> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="cpu_features.cc"/> + + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/agc/" + includes="*.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/aec/" + includes="*.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/utility/" + includes="*.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/vad/" + includes="*.c"/> + + <!--fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/aecm/" + includes="*.c"/--> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/aecm/" + includes="aecm_core.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/aecm/" + includes="echo_control_mobile.c"/> + + <!--fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/ns/" + includes="*.c"/--> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/ns/" + includes="noise_suppression.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/ns/" + includes="noise_suppression_x.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/ns/" + includes="ns_core.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/ns/" + includes="nsx_core.c"/> + + <!--fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="*.c"/--> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="splitting_filter.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="real_fft.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="complex_bit_reverse.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="complex_fft.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="spl_init.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="cross_correlation.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="division_operations.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="dot_product_with_scale.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="resample_by_2.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="energy.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="downsample_fast.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="auto_corr_to_refl_coef.c"/> + <fileset 
dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="auto_correlation.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="copy_set_operations.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="filter_ar.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="filter_ar_fast_q12.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="filter_ma_fast_q12.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="get_hanning_window.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="get_scaling_square.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="ilbc_specific_functions.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="levinson_durbin.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="lpc_to_refl_coef.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="min_max_operations.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="randomization_functions.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="refl_coef_to_lpc.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="resample.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="resample_48khz.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="resample_by_2_internal.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="resample_fractional.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="spl_sqrt.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="spl_sqrt_floor.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="spl_version.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="sqrt_of_one_minus_x_squared.c"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/common_audio/signal_processing/" + includes="vector_scaling_operations.c"/> + </cc> + + + <cc outtype="shared" name="gcc" outfile="${native_install_dir}/jnwebrtcaec" objdir="${obj}"> + <compilerarg value="-DWEBRTC_NS_FIXED" /> + <compilerarg value="-DWEBRTC_MAC" /> + <compilerarg value="-DWEBRTC_CLOCK_TYPE_REALTIME" /> + + <compilerarg value="-xc++" /> + <compilerarg value="-Wall" /> + <compilerarg value="-O2" /> + <compilerarg value="-arch" /> + <compilerarg value="x86_64" /> + <compilerarg value="-arch" /> + <compilerarg value="i386" /> + <compilerarg value="-I${basedir}/${webrtcSrc}/trunk" /> + <compilerarg value="-I${basedir}/${webrtcSrc}/trunk/webrtc" /> + <compilerarg + value="-I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers" /> + + <linkerarg value="-arch" /> + <linkerarg value="x86_64" /> + <linkerarg value="-arch" /> + <linkerarg value="i386" /> + <linkerarg value="-lstdc++" location="end" /> + <linkerarg value="${basedir}/${native_install_dir}/libjnwebrtc.dylib" /> + <!--linkerarg + value="${basedir}/${src}/native/macosx/coreaudio/lib/webrtc/obj/*.o" + location="end"/--> + + 
<!--fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="*.cc"/--> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="audio_buffer.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="audio_processing_impl.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="echo_cancellation_impl.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="echo_control_mobile_impl.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="gain_control_impl.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="high_pass_filter_impl.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="level_estimator_impl.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="noise_suppression_impl.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="processing_component.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="splitting_filter.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/modules/audio_processing/" + includes="voice_detection_impl.cc"/> + + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="cpu_features.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="aligned_malloc.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="clock.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="condition_variable.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="condition_variable_posix.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="cpu_info.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="critical_section.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="critical_section_posix.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="data_log.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="data_log_c.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="event.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="event_posix.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="event_tracer.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="file_impl.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="list_no_stl.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="logging.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="rw_lock.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="rw_lock_generic.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="rw_lock_posix.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="sleep.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="sort.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="thread.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="thread_posix.cc"/> + <fileset 
dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="tick_util.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="trace_impl.cc"/> + <fileset dir="${webrtcSrc}/trunk/webrtc/system_wrappers/source/" + includes="trace_posix.cc"/> + + + + <fileset dir="${src}/native/macosx/coreaudio/lib/webrtc/" + includes="*.c"/> + </cc> + + <delete dir="${obj}" failonerror="false" /> + <delete file="${native_install_dir}/history.xml" failonerror="false" /> + </target> + <!-- compile jnquicktime library for Mac OS X (32-bit/64-bit/ppc) --> <target name="quicktime" description="Build jnquicktime shared library for Mac OS X" if="is.running.macos" depends="init-native"> diff --git a/src/native/macosx/coreaudio/lib/device.c b/src/native/macosx/coreaudio/lib/device.c index f9c901319986862da7ee1888ab5e2ff1c8c459e5..015c298da073460012d6a5c5112bce0eae3c0ed5 100644 --- a/src/native/macosx/coreaudio/lib/device.c +++ b/src/native/macosx/coreaudio/lib/device.c @@ -6,11 +6,17 @@ */ #include "device.h" +#include "webrtc/libjitsi_webrtc_aec.h" + #include <CoreAudio/CoreAudio.h> #include <CoreFoundation/CFString.h> +#include <math.h> #include <pthread.h> #include <stdio.h> +unsigned char newMethod = 1; +unsigned char activateAEC = 0; + extern void maccoreaudio_log( const char * error_format, ...); @@ -79,10 +85,7 @@ static OSStatus maccoreaudio_devicesChangedCallback( OSStatus maccoreaudio_initConverter( const char * deviceUID, - const AudioStreamBasicDescription * javaFormat, - unsigned char isJavaFormatSource, - AudioConverterRef * converter, - double * conversionRatio); + maccoreaudio_stream * stream); inline UInt32 CalculateLPCMFlags ( UInt32 inValidBitsPerChannel, @@ -156,6 +159,20 @@ void maccoreaudio_getDefaultFormat( AudioStreamBasicDescription * deviceFormat); +OSStatus +maccoreaudio_converterComplexInputDataProc( + AudioConverterRef inAudioConverter, + UInt32* ioNumberDataPackets, + AudioBufferList* ioData, + AudioStreamPacketDescription** ioDataPacketDescription, + void* inUserData); + +int +maccoreaudio_initAudioBuffer( + AudioBuffer * audioBuffer, + int nbChannels, + int length); + /** * Do nothing: there is no need to initializes anything to get device * information on MacOsX. 
@@ -1391,7 +1408,7 @@ maccoreaudio_stream * maccoreaudio_startStream( void* callbackObject, void* callbackMethod, void* readWriteFunction, - unsigned char isJavaFormatSource, + unsigned char isOutputStream, float sampleRate, UInt32 nbChannels, UInt32 bitsPerChannel, @@ -1399,6 +1416,16 @@ maccoreaudio_stream * maccoreaudio_startStream( unsigned char isBigEndian, unsigned char isNonInterleaved) { + fprintf(stderr, "CHENZO: maccoreaudio_startStream: \ + \n\tisOutputStream: %d\ + \n\tsampleRate: %f\ + \n\tnbChannels: %d\n", + isOutputStream, + sampleRate, + (int) nbChannels + ); + fflush(stderr); + AudioDeviceID device; OSStatus err = noErr; @@ -1428,6 +1455,9 @@ maccoreaudio_stream * maccoreaudio_startStream( stream->callbackFunction = callbackFunction; stream->callbackObject = callbackObject; stream->callbackMethod = callbackMethod; + stream->isOutputStream = isOutputStream; + stream->step = 0; + memset(&stream->audioBuffer, 0, sizeof(AudioBuffer)); if(pthread_mutex_init(&stream->mutex, NULL) != 0) { @@ -1440,9 +1470,49 @@ maccoreaudio_stream * maccoreaudio_startStream( return NULL; } - AudioStreamBasicDescription javaFormat; + // Init AEC + stream->isAECActivated = activateAEC; + stream->aecConversionRatio = 1; + if(stream->isAECActivated) + { + float aecSampleRate = 32000; + UInt32 aecNbChannels = 2; + UInt32 aecBitsPerChannel = 16; + unsigned char aecIsFloat = false; + unsigned char aecIsBigEndian = isBigEndian; + unsigned char aecIsNonInterleaved = false; + FillOutASBDForLPCM( + &stream->aecFormat, + aecSampleRate, + aecNbChannels, + aecBitsPerChannel, + aecBitsPerChannel, + aecIsFloat, + aecIsBigEndian, + aecIsNonInterleaved); + if((err = libjitsi_webrtc_aec_initAudioProcessing( + aecSampleRate, //int sample_rate, + aecNbChannels, //int nb_capture_channels, + aecNbChannels //int nb_render_channels + )) + != 0) + { + maccoreaudio_log( + "maccoreaudio_startStream (coreaudio/device.c): \ + \n\tlibjitsi_webrtc_aec_initAudioProcessing: 0x%x for device %s", + (int) err, + deviceUID); + AudioDeviceDestroyIOProcID(device, stream->ioProcId); + pthread_mutex_destroy(&stream->mutex); + free(stream); + return NULL; + } + } + + + // Init the converter. 
FillOutASBDForLPCM( - &javaFormat, + &stream->javaFormat, sampleRate, nbChannels, bitsPerChannel, @@ -1450,12 +1520,8 @@ maccoreaudio_stream * maccoreaudio_startStream( isFloat, isBigEndian, isNonInterleaved); - if((err = maccoreaudio_initConverter( - deviceUID, - &javaFormat, - isJavaFormatSource, - &stream->converter, - &stream->conversionRatio)) + + if((err = maccoreaudio_initConverter(deviceUID, stream)) != noErr) { maccoreaudio_log( @@ -1468,6 +1534,41 @@ maccoreaudio_stream * maccoreaudio_startStream( return NULL; } + fprintf(stderr, + "CHENZO: startStream\ + \n\tstream->deviceFormat.mSampleRate: %f\ + \n\tstream->deviceFormat.mChannelsPerFrame: %d\ + \n\tstream->deviceFormat.mBitsPerChannel: %d\ + \n\tstream->deviceFormat.mBytesPerPacket: %d\ + \n\tstream->deviceFormat.mBytesPerFrame: %d\ + \n\tstream->aecFormat.mSampleRate: %f\ + \n\tstream->aecFormat.mChannelsPerFrame: %d\ + \n\tstream->aecFormat.mBitsPerChannel: %d\ + \n\tstream->aecFormat.mBytesPerPacket: %d\ + \n\tstream->aecFormat.mBytesPerFrame: %d\ + \n\tstream->javaFormat.mSampleRate: %f\ + \n\tstream->javaFormat.mChannelsPerFrame: %d\ + \n\tstream->javaFormat.mBitsPerChannel: %d\ + \n\tstream->javaFormat.mBytesPerPacket: %d\ + \n\tstream->javaFormat.mBytesPerFrame: %d\ + \n", + stream->deviceFormat.mSampleRate, + (int) stream->deviceFormat.mChannelsPerFrame, + (int) stream->deviceFormat.mBitsPerChannel, + (int) stream->deviceFormat.mBytesPerPacket, + (int) stream->deviceFormat.mBytesPerFrame, + stream->aecFormat.mSampleRate, + (int) stream->aecFormat.mChannelsPerFrame, + (int) stream->aecFormat.mBitsPerChannel, + (int) stream->aecFormat.mBytesPerPacket, + (int) stream->aecFormat.mBytesPerFrame, + stream->javaFormat.mSampleRate, + (int) stream->javaFormat.mChannelsPerFrame, + (int) stream->javaFormat.mBitsPerChannel, + (int) stream->javaFormat.mBytesPerPacket, + (int) stream->javaFormat.mBytesPerFrame); + fflush(stderr); + // register the IOProc if((err = AudioDeviceCreateIOProcID( device, @@ -1480,7 +1581,11 @@ maccoreaudio_stream * maccoreaudio_startStream( \n\tAudioDeviceIOProcID: 0x%x for device %s", (int) err, deviceUID); - AudioConverterDispose(stream->converter); + if(stream->isAECActivated) + { + AudioConverterDispose(stream->aecConverter); + } + AudioConverterDispose(stream->outConverter); pthread_mutex_destroy(&stream->mutex); free(stream); return NULL; @@ -1495,7 +1600,11 @@ maccoreaudio_stream * maccoreaudio_startStream( (int) err, deviceUID); AudioDeviceDestroyIOProcID(device, stream->ioProcId); - AudioConverterDispose(stream->converter); + if(stream->isAECActivated) + { + AudioConverterDispose(stream->aecConverter); + } + AudioConverterDispose(stream->outConverter); pthread_mutex_destroy(&stream->mutex); free(stream); return NULL; @@ -1557,7 +1666,18 @@ void maccoreaudio_stopStream( } } - if((err = AudioConverterDispose(stream->converter)) != noErr) + if(stream->isAECActivated) + { + if((err = AudioConverterDispose(stream->aecConverter)) != noErr) + { + maccoreaudio_log( + "maccoreaudio_stopStream (coreaudio/device.c): \ + \n\tAudioConverterDispose: 0x%x for device %s", + (int) err, + deviceUID); + } + } + if((err = AudioConverterDispose(stream->outConverter)) != noErr) { maccoreaudio_log( "maccoreaudio_stopStream (coreaudio/device.c): \ @@ -1596,16 +1716,66 @@ OSStatus maccoreaudio_readInputStream( const AudioTimeStamp* inOutputTime, void* inClientData) { + //fprintf(stderr, "CHENZO: readIntputStream START\n"); fflush(stderr); + OSStatus err = noErr; int error = 0; maccoreaudio_stream * stream = 
(maccoreaudio_stream*) inClientData; void (*callbackFunction) (char*, int, void*, void*) = stream->callbackFunction; - UInt32 tmpLength - = inInputData->mBuffers[0].mDataByteSize * stream->conversionRatio; - char tmpBuffer[tmpLength]; + + + UInt32 tmpSize = sizeof(UInt32); + UInt32 aecTmpLength + = inInputData->mBuffers[0].mDataByteSize; + //= inInputData->mBuffers[0].mDataByteSize + // / stream->deviceFormat.mBytesPerPacket + // * stream->aecFormat.mBytesPerPacket; + //= inInputData->mBuffers[0].mDataByteSize * 1; + //= inInputData->mBuffers[0].mDataByteSize * 2; + //= inInputData->mBuffers[0].mDataByteSize * stream->aecConversionRatio; + //= outOutputData->mBuffers[0].mDataByteSize * 2; + //= outOutputData->mBuffers[0].mDataByteSize * stream->aecConversionRatio; + AudioConverterGetProperty( + stream->aecConverter, + kAudioConverterPropertyCalculateOutputBufferSize, + &tmpSize, + &aecTmpLength); + + + UInt32 outTmpLength + = aecTmpLength; + //= aecTmpLength + // / stream->aecFormat.mBytesPerPacket + // * stream->javaFormat.mBytesPerPacket; + //= aecTmpLength * stream->outConversionRatio; + AudioConverterGetProperty( + stream->outConverter, + kAudioConverterPropertyCalculateOutputBufferSize, + &tmpSize, + &outTmpLength); + + + char aecTmpBuffer[aecTmpLength]; + char outTmpBuffer[outTmpLength]; int i; + /*fprintf(stderr, + "CHENZO: readIntputStream 0: dev:%d, aec:%d, java:%d\n", + (int) inInputData->mBuffers[0].mDataByteSize, + (int) aecTmpLength, + (int) outTmpLength); + fflush(stderr); + fprintf(stderr, + "CHENZO: readIntputStream 0.0: dev:%d, aec:%d, java:%d\n", + (int) inInputData->mBuffers[0].mDataByteSize + / stream->deviceFormat.mBytesPerPacket, + (int) aecTmpLength + / stream->aecFormat.mBytesPerPacket, + (int) outTmpLength + / stream->javaFormat.mBytesPerPacket); + fflush(stderr);*/ + if((error = pthread_mutex_trylock(&stream->mutex)) == 0) { if(stream->ioProcId != 0) @@ -1615,25 +1785,242 @@ OSStatus maccoreaudio_readInputStream( if(inInputData->mBuffers[i].mData != NULL && inInputData->mBuffers[i].mDataByteSize > 0) { - if((err = AudioConverterConvertBuffer( - stream->converter, - inInputData->mBuffers[i].mDataByteSize, - inInputData->mBuffers[i].mData, - &tmpLength, - tmpBuffer)) - != noErr) + if(stream->isAECActivated) { - maccoreaudio_log( + /*fprintf(stderr, + "CHENZO: readIntputStream 1: %d, ratio: %f, %f\n", + i, + stream->aecConversionRatio, + stream->outConversionRatio); + fflush(stderr);*/ + + stream->step = 0; + + stream->audioBuffer = inInputData->mBuffers[i]; + + UInt32 outputDataPacketSize + = aecTmpLength / stream->aecFormat.mBytesPerPacket; + AudioBufferList aecBufferList; + aecBufferList.mNumberBuffers = 1; + aecBufferList.mBuffers[0].mNumberChannels + = stream->aecFormat.mChannelsPerFrame; + aecBufferList.mBuffers[0].mDataByteSize = aecTmpLength; + aecBufferList.mBuffers[0].mData = aecTmpBuffer; + + if((err = AudioConverterFillComplexBuffer( + stream->aecConverter, + maccoreaudio_converterComplexInputDataProc, + stream, // corresponding to inUserData + &outputDataPacketSize, + &aecBufferList, + NULL)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_readInputStream (coreaudio/device.c): \ + \n\tAudioConverterFillComplexBuffer: 0x%x", + (int) err); + pthread_mutex_unlock(&stream->mutex); + return err; + } + + /*fprintf(stderr, + "CHENZO: readIntputStream 2: nbPacketAec:%d/%d/%d\n", + (int) outputDataPacketSize, + (int) aecBufferList.mBuffers[0].mDataByteSize, + (int) aecTmpLength / + stream->aecFormat.mBytesPerPacket); + fflush(stderr);*/ + + if((err = 
libjitsi_webrtc_aec_process( + 1, // isCaptureStream, + (short*) aecTmpBuffer, //int16_t * data, + aecTmpLength, //int data_length, + // int sample_rate + stream->aecFormat.mSampleRate, + //int nb_channels + stream->aecFormat.mChannelsPerFrame)) + != 0) + { + maccoreaudio_log( + "maccoreaudio_readInputStream (coreaudio/device.c): \ + \n\tlibjitsi_webrtc_aec_process: 0x%x", + (int) err); + } + + /*fprintf(stderr, + "CHENZO: readIntputStream 3: nbPacketAec:%d/%d/%d\n", + (int) outputDataPacketSize, + (int) aecBufferList.mBuffers[0].mDataByteSize, + (int) aecTmpLength / + stream->aecFormat.mBytesPerPacket); + fflush(stderr);*/ + + stream->step = 1; + + stream->audioBuffer = aecBufferList.mBuffers[0]; + + outputDataPacketSize + = outTmpLength / stream->javaFormat.mBytesPerPacket; + AudioBufferList outputBufferList; + outputBufferList.mNumberBuffers = 1; + outputBufferList.mBuffers[0].mNumberChannels + = stream->javaFormat.mChannelsPerFrame; + outputBufferList.mBuffers[0].mDataByteSize + = outTmpLength; + outputBufferList.mBuffers[0].mData = outTmpBuffer; + + if((err = AudioConverterFillComplexBuffer( + stream->outConverter, + maccoreaudio_converterComplexInputDataProc, + stream, // corresponding to inUserData + &outputDataPacketSize, + &outputBufferList, + NULL)) + != noErr) + { + fprintf(stderr, "CHENZO: readIntputStream log\n"); + fflush(stderr); + maccoreaudio_log( "maccoreaudio_readInputStream (coreaudio/device.c): \ - \n\tAudioConverterConvertBuffer: 0x%x", - (int) err); - pthread_mutex_unlock(&stream->mutex); - return err; + \n\tAudioConverterFillComplexBuffer: 0x%x", + (int) err); + pthread_mutex_unlock(&stream->mutex); + return err; + } + + /*fprintf(stderr, "CHENZO: readIntputStream 4\n"); + fflush(stderr);*/ + + + /*if((err = AudioConverterConvertBuffer( + stream->aecConverter, + inInputData->mBuffers[i].mDataByteSize, + inInputData->mBuffers[i].mData, + &aecTmpLength, + aecTmpBuffer)) + != noErr) + { + fprintf( + stderr, + "maccoreaudio_readInputStream (coreaudio/device.c): \ + \n\tAudioConverterConvertBuffer: 0x%x\n", + (int) err); + fflush(stderr); + maccoreaudio_log( + "maccoreaudio_readInputStream (coreaudio/device.c): \ + \n\tAudioConverterConvertBuffer: 0x%x", + (int) err); + pthread_mutex_unlock(&stream->mutex); + return err; + } + + fprintf(stderr, "CHENZO: readIntputStream 2\n"); + fflush(stderr); + if((err = libjitsi_webrtc_aec_process( + 1, // isCaptureStream, + (short*) aecTmpBuffer, //int16_t * data, + aecTmpLength, //int data_length, + 32000, // int sample_rate, + 1 //int nb_channels + )) + != 0) + { + maccoreaudio_log( + "maccoreaudio_readInputStream (coreaudio/device.c): \ + \n\tlibjitsi_webrtc_aec_process: 0x%x", + (int) err); + } + + fprintf(stderr, "CHENZO: readIntputStream 3\n"); + fflush(stderr); + if((err = AudioConverterConvertBuffer( + stream->outConverter, + aecTmpLength, + aecTmpBuffer, + &outTmpLength, + outTmpBuffer)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_readInputStream (coreaudio/device.c): \ + \n\tAudioConverterConvertBuffer: 0x%x", + (int) err); + pthread_mutex_unlock(&stream->mutex); + return err; + }*/ + + /*fprintf(stderr, "CHENZO: readIntputStream 5\n"); + fflush(stderr);*/ + } + else if(newMethod) + { + stream->step = 0; + + stream->audioBuffer = inInputData->mBuffers[i]; + + UInt32 outputDataPacketSize + = outTmpLength / stream->javaFormat.mBytesPerPacket; + AudioBufferList outputBufferList; + outputBufferList.mNumberBuffers = 1; + outputBufferList.mBuffers[0].mNumberChannels + = stream->javaFormat.mChannelsPerFrame; + 
outputBufferList.mBuffers[0].mDataByteSize + = outTmpLength; + outputBufferList.mBuffers[0].mData = outTmpBuffer; + + /*fprintf(stderr, + "CHENZO: readIntputStream NEW METHOD %d - A: %d/%d\n", + i, + (int) outputBufferList.mBuffers[0].mDataByteSize, + (int) outputDataPacketSize); + fflush(stderr);*/ + + if((err = AudioConverterFillComplexBuffer( + stream->outConverter, + maccoreaudio_converterComplexInputDataProc, + stream, // corresponding to inUserData + &outputDataPacketSize, + &outputBufferList, + NULL)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_readInputStream (coreaudio/device.c): \ + \n\tAudioConverterFillComplexBuffer: 0x%x", + (int) err); + pthread_mutex_unlock(&stream->mutex); + return err; + } + + /*fprintf(stderr, + "CHENZO: readIntputStream NEW METHOD - B: %d/%d\n\n", + (int) outputBufferList.mBuffers[0].mDataByteSize, + (int) outputDataPacketSize); + fflush(stderr);*/ + } + else + { + if((err = AudioConverterConvertBuffer( + stream->outConverter, + inInputData->mBuffers[i].mDataByteSize, + inInputData->mBuffers[i].mData, + &outTmpLength, + outTmpBuffer)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_readInputStream (coreaudio/device.c): \ + \n\tAudioConverterConvertBuffer: 0x%x", + (int) err); + pthread_mutex_unlock(&stream->mutex); + return err; + } } callbackFunction( - tmpBuffer, - tmpLength, + outTmpBuffer, + outTmpLength, stream->callbackObject, stream->callbackMethod); } @@ -1659,6 +2046,8 @@ OSStatus maccoreaudio_readInputStream( strerror(errno)); } + //fprintf(stderr, "CHENZO: readIntputStream END\n"); fflush(stderr); + return noErr; } @@ -1671,6 +2060,8 @@ OSStatus maccoreaudio_writeOutputStream( const AudioTimeStamp* inOutputTime, void* inClientData) { + //fprintf(stderr, "CHENZO: writeOutputStream START\n"); fflush(stderr); + OSStatus err = noErr; int error = 0; @@ -1683,37 +2074,330 @@ OSStatus maccoreaudio_writeOutputStream( return err; } - int tmpLength - = outOutputData->mBuffers[0].mDataByteSize * stream->conversionRatio; - char tmpBuffer[tmpLength]; + UInt32 tmpSize = sizeof(UInt32); + UInt32 aecTmpLength + = outOutputData->mBuffers[0].mDataByteSize; + //= outOutputData->mBuffers[0].mDataByteSize * 2; + //= outOutputData->mBuffers[0].mDataByteSize * stream->aecConversionRatio; + AudioConverterGetProperty( + stream->outConverter, + kAudioConverterPropertyCalculateInputBufferSize, + &tmpSize, + &aecTmpLength); + + + + UInt32 outTmpLength + = aecTmpLength; + //= (outOutputData->mBuffers[0].mDataByteSize + // / stream->deviceFormat.mBytesPerPacket) + // * stream->javaFormat.mBytesPerPacket; + //= aecTmpLength * stream->outConversionRatio; + AudioConverterGetProperty( + stream->aecConverter, + kAudioConverterPropertyCalculateInputBufferSize, + &tmpSize, + &outTmpLength); + + + char aecTmpBuffer[aecTmpLength]; + char outTmpBuffer[outTmpLength]; + + /*fprintf(stderr, + "CHENZO: writeOutputStream 0: dev:%d, aec:%d, java:%d\n", + (int) outOutputData->mBuffers[0].mDataByteSize, + (int) aecTmpLength, + (int) outTmpLength); + fflush(stderr); + fprintf(stderr, + "CHENZO: wrtieOutputStream 0.0: dev:%d, aec:%d, java:%d\n", + (int) outOutputData->mBuffers[0].mDataByteSize + / stream->deviceFormat.mBytesPerPacket, + (int) aecTmpLength + / stream->aecFormat.mBytesPerPacket, + (int) outTmpLength + / stream->javaFormat.mBytesPerPacket); + fflush(stderr);*/ + if((error = pthread_mutex_trylock(&stream->mutex)) == 0) { if(stream->ioProcId != 0) { - callbackFunction( - tmpBuffer, - tmpLength, - stream->callbackObject, - stream->callbackMethod); - - if((err = 
AudioConverterConvertBuffer( - stream->converter, - tmpLength, - tmpBuffer, - &outOutputData->mBuffers[0].mDataByteSize, - outOutputData->mBuffers[0].mData)) - != noErr) + if(stream->isAECActivated) { - maccoreaudio_log( + /*fprintf(stderr, "CHENZO: writeOutputStream 1\n"); + fflush(stderr);*/ + + callbackFunction( + outTmpBuffer, + outTmpLength, + stream->callbackObject, + stream->callbackMethod); + + /*fprintf(stderr, "CHENZO: writeOutputStream 2\n"); + fflush(stderr);*/ + + stream->step = 0; + + stream->audioBuffer.mNumberChannels + = stream->javaFormat.mChannelsPerFrame; + stream->audioBuffer.mDataByteSize = outTmpLength; + stream->audioBuffer.mData = outTmpBuffer; + + UInt32 outputDataPacketSize + = aecTmpLength + / stream->aecFormat.mBytesPerPacket; + AudioBufferList outputBufferList; + outputBufferList.mNumberBuffers = 1; + outputBufferList.mBuffers[0].mNumberChannels + = stream->aecFormat.mChannelsPerFrame; + outputBufferList.mBuffers[0].mDataByteSize + = aecTmpLength; + outputBufferList.mBuffers[0].mData = aecTmpBuffer; + + UInt32 tmp = outOutputData->mBuffers[0].mDataByteSize; + UInt32 tmpSize = sizeof(tmp); + AudioConverterGetProperty( + stream->aecConverter, + kAudioConverterPropertyCalculateOutputBufferSize, + //kAudioConverterPropertyMaximumOutputPacketSize, + &tmpSize, + &tmp); + + /*fprintf(stderr, + "CHENZO: writeOutputStream 2.1: nbPacketAec:%d/%d/%d, new:%d\n", + (int) outputDataPacketSize, + (int) outputBufferList.mBuffers[0].mDataByteSize, + (int) aecTmpLength / + stream->aecFormat.mBytesPerPacket, + tmp); + fflush(stderr);*/ + + if((err = AudioConverterFillComplexBuffer( + stream->aecConverter, + maccoreaudio_converterComplexInputDataProc, + stream, // corresponding to inUserData + &outputDataPacketSize, + &outputBufferList, + NULL)) + != noErr) + { + maccoreaudio_log( "maccoreaudio_writeOutputStream (coreaudio/device.c): \ - \n\tAudioConverterConvertBuffer: 0x%x", (int) err); - memset( - outOutputData->mBuffers[0].mData, - 0, - outOutputData->mBuffers[0].mDataByteSize); - pthread_mutex_unlock(&stream->mutex); - return err; + \n\tAudioConverterFillComplexBuffer: 0x%x", + (int) err); + pthread_mutex_unlock(&stream->mutex); + return err; + } + + /*fprintf(stderr, + "CHENZO: writeOutputStream 3: nbPacketAec:%d/%d/%d\n", + (int) outputDataPacketSize, + (int) outputBufferList.mBuffers[0].mDataByteSize, + (int) aecTmpLength / + stream->aecFormat.mBytesPerPacket); + fflush(stderr); + + + fprintf(stderr, "CHENZO: writeOutputStream 3\n"); + fflush(stderr);*/ + + if((err = libjitsi_webrtc_aec_process( + 0, // isCaptureStream, + (short*) aecTmpBuffer, //int16_t * data, + aecTmpLength, //int data_length, + // int sample_rate + stream->aecFormat.mSampleRate, + //int nb_channels + stream->aecFormat.mChannelsPerFrame)) + != 0) + { + maccoreaudio_log( + "maccoreaudio_readInputStream (coreaudio/device.c): \ + \n\tlibjitsi_webrtc_aec_process: 0x%x", + (int) err); + } + + /*fprintf(stderr, "CHENZO: writeOutputStream 3\n"); + fflush(stderr);*/ + + + stream->step = 1; + + stream->audioBuffer.mNumberChannels + = stream->aecFormat.mChannelsPerFrame; + stream->audioBuffer.mDataByteSize = aecTmpLength; + stream->audioBuffer.mData = aecTmpBuffer; + + outputDataPacketSize + = outOutputData->mBuffers[0].mDataByteSize + / stream->deviceFormat.mBytesPerPacket; + + if((err = AudioConverterFillComplexBuffer( + stream->outConverter, + maccoreaudio_converterComplexInputDataProc, + stream, // corresponding to inUserData + &outputDataPacketSize, + //&outputBufferList, + outOutputData, + NULL)) + != 
noErr) + { + maccoreaudio_log( + "maccoreaudio_writeOutputStream (coreaudio/device.c): \ + \n\tAudioConverterFillComplexBuffer: 0x%x", + (int) err); + pthread_mutex_unlock(&stream->mutex); + return err; + } + + + /*fprintf(stderr, "CHENZO: writeOutputStream 4\n"); + fflush(stderr);*/ + + + + + /*if((err = AudioConverterConvertBuffer( + stream->aecConverter, + aecTmpLength, + aecTmpBuffer, + &outTmpLength, + outTmpBuffer)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_writeOutputStream (coreaudio/device.c): \ + \n\tAudioConverterConvertBuffer: 0x%x", (int) err); + memset( + outOutputData->mBuffers[0].mData, + 0, + outOutputData->mBuffers[0].mDataByteSize); + pthread_mutex_unlock(&stream->mutex); + return err; + } + fprintf(stderr, "CHENZO: writeOutputStream 3\n"); + fflush(stderr); + + if((err = libjitsi_webrtc_aec_process( + 0, // isCaptureStream, + (short*) outTmpBuffer, //int16_t * data, + outTmpLength, //int data_length, + 32000, // int sample_rate, + 1 //int nb_channels + )) + != 0) + { + maccoreaudio_log( + "maccoreaudio_writeOutputStream (coreaudio/device.c): \ + \n\tlibjitsi_webrtc_aec_process: 0x%x", (int) err); + } + fprintf(stderr, "CHENZO: writeOutputStream 4\n"); + fflush(stderr); + + if((err = AudioConverterConvertBuffer( + stream->outConverter, + outTmpLength, + outTmpBuffer, + &outOutputData->mBuffers[0].mDataByteSize, + outOutputData->mBuffers[0].mData)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_writeOutputStream (coreaudio/device.c): \ + \n\tAudioConverterConvertBuffer: 0x%x", (int) err); + memset( + outOutputData->mBuffers[0].mData, + 0, + outOutputData->mBuffers[0].mDataByteSize); + pthread_mutex_unlock(&stream->mutex); + return err; + } + fprintf(stderr, "CHENZO: writeOutputStream 5\n"); + fflush(stderr);*/ + } + else if(newMethod) + { + callbackFunction( + outTmpBuffer, + outTmpLength, + stream->callbackObject, + stream->callbackMethod); + /* int + maccoreaudio_initAudioBuffer( + &stream->audioBuffer, + 1, //int nbChannels, + int length);*/ + + stream->step = 0; + + stream->audioBuffer.mNumberChannels + = stream->javaFormat.mChannelsPerFrame; + stream->audioBuffer.mDataByteSize = outTmpLength; + stream->audioBuffer.mData = outTmpBuffer; + + + UInt32 outputDataPacketSize + = outOutputData->mBuffers[0].mDataByteSize + / stream->deviceFormat.mBytesPerPacket; + + /*fprintf(stderr, + "CHENZO: writeOutputStream NEW METHOD - A: %d/%d\n", + (int) outOutputData->mBuffers[0].mDataByteSize, + (int) outputDataPacketSize); + fflush(stderr);*/ + + if((err = AudioConverterFillComplexBuffer( + stream->outConverter, + maccoreaudio_converterComplexInputDataProc, + stream, // corresponding to inUserData + &outputDataPacketSize, + //&outputBufferList, + outOutputData, + NULL)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_writeOutputStream (coreaudio/device.c): \ + \n\tAudioConverterFillComplexBuffer: 0x%x", + (int) err); + pthread_mutex_unlock(&stream->mutex); + return err; + } + + /*fprintf(stderr, + "CHENZO: writeOutputStream NEW METHOD - B: %d/%d\n\n", + (int) outOutputData->mBuffers[0].mDataByteSize, + (int) outputDataPacketSize); + fflush(stderr);*/ + } + else + { + callbackFunction( + outTmpBuffer, + outTmpLength, + stream->callbackObject, + stream->callbackMethod); + + if((err = AudioConverterConvertBuffer( + stream->outConverter, + outTmpLength, + outTmpBuffer, + &outOutputData->mBuffers[0].mDataByteSize, + outOutputData->mBuffers[0].mData)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_writeOutputStream (coreaudio/device.c): \ + 
\n\tAudioConverterConvertBuffer: 0x%x", (int) err); + memset( + outOutputData->mBuffers[0].mData, + 0, + outOutputData->mBuffers[0].mDataByteSize); + pthread_mutex_unlock(&stream->mutex); + return err; + } } } if(pthread_mutex_unlock(&stream->mutex) != 0) @@ -1770,6 +2454,8 @@ OSStatus maccoreaudio_writeOutputStream( } } + //fprintf(stderr, "CHENZO: writeOutputStream END\n"); fflush(stderr); + return noErr; } @@ -1816,27 +2502,19 @@ OSStatus maccoreaudio_getStreamVirtualFormat( * format description. * * @param deviceUID The device identifier. - * @param javaFormat The format needed by the upper layer Java aplication. - * @param isJavaFormatSource True if the Java format is the source of this * converter and the device the ouput. False otherwise. - * @param converter A pointer to the converter used to store the new created - * converter. * * @return noErr if everything works correctly. Any other vlue otherwise. */ OSStatus maccoreaudio_initConverter( const char * deviceUID, - const AudioStreamBasicDescription * javaFormat, - unsigned char isJavaFormatSource, - AudioConverterRef * converter, - double * conversionRatio) + maccoreaudio_stream * stream) { - AudioStreamBasicDescription deviceFormat; OSStatus err = noErr; if((err = maccoreaudio_getDeviceFormat( deviceUID, - isJavaFormatSource, - &deviceFormat)) + stream->isOutputStream, + &stream->deviceFormat)) != noErr) { maccoreaudio_log( @@ -1846,8 +2524,8 @@ OSStatus maccoreaudio_initConverter( if((err = maccoreaudio_getDeviceFormatDeprecated( deviceUID, - isJavaFormatSource, - &deviceFormat)) != noErr) + stream->isOutputStream, + &stream->deviceFormat)) != noErr) { maccoreaudio_log( "maccoreaudio_initConverter (coreaudio/device.c): \ @@ -1857,31 +2535,74 @@ OSStatus maccoreaudio_initConverter( // Everything has failed to retrieve the device format, then try // with the default one. 
- maccoreaudio_getDefaultFormat(&deviceFormat); + maccoreaudio_getDefaultFormat(&stream->deviceFormat); } } - const AudioStreamBasicDescription *inFormat = javaFormat; - const AudioStreamBasicDescription *outFormat = &deviceFormat; - if(!isJavaFormatSource) + const AudioStreamBasicDescription *inFormat = &stream->javaFormat; + const AudioStreamBasicDescription *outFormat = &stream->deviceFormat; + if(!stream->isOutputStream) { - inFormat = &deviceFormat; - outFormat = javaFormat; + inFormat = &stream->deviceFormat; + outFormat = &stream->javaFormat; } - if((err = AudioConverterNew(inFormat, outFormat, converter)) - != noErr) + if(!stream->isAECActivated) { - maccoreaudio_log( - "maccoreaudio_initConverter (coreaudio/device.c): \ - \n\tAudioConverterNew, err: 0x%x", - ((int) err)); - return err; - } + if((err = AudioConverterNew(inFormat, outFormat, &stream->outConverter)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_initConverter (coreaudio/device.c): \ + \n\tAudioConverterNew, err: 0x%x", + ((int) err)); + return err; + } - *conversionRatio = - ((double) javaFormat->mBytesPerFrame * javaFormat->mSampleRate) - / ((double) deviceFormat.mBytesPerFrame * deviceFormat.mSampleRate); + stream->outConversionRatio = ( + ((double) stream->javaFormat.mBytesPerFrame + * stream->javaFormat.mSampleRate) + / ((double) stream->deviceFormat.mBytesPerFrame + * stream->deviceFormat.mSampleRate)); + } + else + { + if((err = AudioConverterNew( + inFormat, + &stream->aecFormat, + &stream->aecConverter)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_initConverter (coreaudio/device.c): \ + \n\tAudioConverterNew, err: 0x%x", + ((int) err)); + return err; + } + stream->aecConversionRatio = ( + ((double) stream->javaFormat.mBytesPerFrame + * stream->javaFormat.mSampleRate) + / ((double) stream->aecFormat.mBytesPerFrame + * stream->aecFormat.mSampleRate)); + + if((err = AudioConverterNew( + &stream->aecFormat, + outFormat, + &stream->outConverter)) + != noErr) + { + maccoreaudio_log( + "maccoreaudio_initConverter (coreaudio/device.c): \ + \n\tAudioConverterNew, err: 0x%x", + ((int) err)); + return err; + } + stream->outConversionRatio = ( + ((double) stream->aecFormat.mBytesPerFrame + * stream->aecFormat.mSampleRate) + / ((double) stream->deviceFormat.mBytesPerFrame + * stream->deviceFormat.mSampleRate)); + } return err; } @@ -2113,3 +2834,120 @@ maccoreaudio_getDefaultFormat( false, false); } + + +OSStatus +maccoreaudio_converterComplexInputDataProc( + AudioConverterRef inAudioConverter, + UInt32* ioNumberDataPackets, + AudioBufferList* ioData, + AudioStreamPacketDescription** ioDataPacketDescription, + void* inUserData) +{ + /*fprintf(stderr, + "CHENZO: readIntputStream NEW METHOD - CALLBAKC - A: %d\n", + (int) *ioNumberDataPackets); + fflush(stderr);*/ + + if(ioDataPacketDescription) + { + fprintf(stderr, "_converterComplexInputDataProc cannot \ + provide input data; it doesn't know how to \ + provide packet descriptions\n"); + fflush(stderr); + maccoreaudio_log("_converterComplexInputDataProc cannot \ + provide input data; it doesn't know how to \ + provide packet descriptions"); + *ioDataPacketDescription = NULL; + *ioNumberDataPackets = 0; + ioData->mNumberBuffers = 0; + return 501; + } + + maccoreaudio_stream * stream = (maccoreaudio_stream*) inUserData; + + ioData->mNumberBuffers = 1; + ioData->mBuffers[0] = stream->audioBuffer; + + if(stream->isOutputStream) + { + if(stream->isAECActivated) + { + if(stream->step == 0) + { + *ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize + / 
stream->javaFormat.mBytesPerPacket; + } + else // if (stream->step == 1) + { + *ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize + / stream->aecFormat.mBytesPerPacket; + } + } + else + { + *ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize + / stream->javaFormat.mBytesPerPacket; + } + } + else + { + if(stream->isAECActivated) + { + if(stream->step == 0) + { + *ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize + / stream->deviceFormat.mBytesPerPacket; + } + else // if (stream->step == 1) + { + *ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize + / stream->aecFormat.mBytesPerPacket; + } + } + else + { + *ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize + / stream->deviceFormat.mBytesPerPacket; + } + } + + /*fprintf(stderr, + "CHENZO: readIntputStream NEW METHOD - CALLBAKC - B: %d/%d\n", + (int) ioData->mBuffers[0].mDataByteSize, + (int) *ioNumberDataPackets); + fflush(stderr);*/ + + return 0; +} + + + +int +maccoreaudio_initAudioBuffer( + AudioBuffer * audioBuffer, + int nbChannels, + int length) +{ + audioBuffer->mNumberChannels = nbChannels; + if(audioBuffer->mDataByteSize < length) + { + if(audioBuffer->mData != NULL) + { + free(audioBuffer->mData); + audioBuffer->mData = NULL; + } + + if((audioBuffer->mData = (char*) malloc(length * sizeof(char))) == NULL) + { + maccoreaudio_log( + "%s: %s\n", + "maccoreaudio_initAudioBuffer (coreaudio/device.c): \ + \n\tmalloc", + strerror(errno)); + return -1; + } + audioBuffer->mDataByteSize = length; + } + return 0; +} diff --git a/src/native/macosx/coreaudio/lib/device.h b/src/native/macosx/coreaudio/lib/device.h index 829375bfe29b1082fb6b95c9709b95ef76ffd5a0..3e01ba7772b9ae1d4497e573572054200eb2a474 100644 --- a/src/native/macosx/coreaudio/lib/device.h +++ b/src/native/macosx/coreaudio/lib/device.h @@ -24,8 +24,17 @@ typedef struct void* callbackFunction; void* callbackObject; void* callbackMethod; - AudioConverterRef converter; - double conversionRatio; + unsigned char isOutputStream; + unsigned char isAECActivated; + unsigned short step; + AudioConverterRef aecConverter; + AudioConverterRef outConverter; + double aecConversionRatio; + double outConversionRatio; + AudioStreamBasicDescription deviceFormat; + AudioStreamBasicDescription aecFormat; + AudioStreamBasicDescription javaFormat; + AudioBuffer audioBuffer; pthread_mutex_t mutex; } maccoreaudio_stream; diff --git a/src/native/macosx/coreaudio/lib/webrtc/libjitsi_webrtc_aec.c b/src/native/macosx/coreaudio/lib/webrtc/libjitsi_webrtc_aec.c new file mode 100644 index 0000000000000000000000000000000000000000..307bdc73d6c067b1fdd4a533a2f3ff468ae8c929 --- /dev/null +++ b/src/native/macosx/coreaudio/lib/webrtc/libjitsi_webrtc_aec.c @@ -0,0 +1,411 @@ +/* + * Jitsi, the OpenSource Java VoIP and Instant Messaging client. + * + * Distributable under LGPL license. + * See terms of license at gnu.org. + */ +#include "libjitsi_webrtc_aec.h" + +#include "webrtc/modules/audio_processing/include/audio_processing.h" +#include "webrtc/modules/interface/module_common_types.h" + + +//#include <webrtc/modules/audio_processing/aec/include/echo_cancellation.h> + +#include <errno.h> +#include <jni.h> +#include <math.h> +#include <stdio.h> + +using namespace webrtc; + +/** + * Functions to use acoustic echo cancelling with webrtc. 
+ * + * @author Vincent Lucas + */ +typedef struct +{ + // 0 = cqpture, 1 = render + int16_t * data[2]; + int dataLength[2]; + int dataUsed[2]; + AudioProcessing * audioProcessing; +} libjitsi_webrtc_aec; + +static libjitsi_webrtc_aec * aec = NULL; + +static JavaVM * libjitsi_webrtc_aec_VM = NULL; + +int libjitsi_webrtc_aec_init( + void); + +void libjitsi_webrtc_aec_free( + void); + +void libjitsi_webrtc_aec_log( + const char * error_format, + ...); + +JNIEXPORT jint JNICALL +JNI_OnLoad(JavaVM *vm, void *pvt) +{ + libjitsi_webrtc_aec_VM = vm; + libjitsi_webrtc_aec_init(); + return JNI_VERSION_1_6; +} + +JNIEXPORT void JNICALL +JNI_OnUnload(JavaVM *vm, void *pvt) +{ + libjitsi_webrtc_aec_free(); + libjitsi_webrtc_aec_VM = NULL; +} + + +/** + * Initiates a new webrtc_aec capable instance. + * + * @return 0 if everything works fine. -1 otherwise. + */ +int +libjitsi_webrtc_aec_init( + void) +{ + int err; + int id = 0; + + // If AEC is already active, frees it before the reinitialization. + if(aec != NULL) + { + libjitsi_webrtc_aec_free(); + } + + // Starts the initialization. + if((aec = (libjitsi_webrtc_aec*) malloc(sizeof(libjitsi_webrtc_aec))) + == NULL) + { + libjitsi_webrtc_aec_log( + "%s: %s\n", + "libjitsi_webrtc_aec_init (libjitsi_webrtc_aec.c): \ + \n\tmalloc", + strerror(errno)); + return -1; + } + + // Inits the capture and render buffer to default values + for(int i = 0; i < 2; ++i) + { + aec->data[i] = NULL; + aec->dataLength[i] = 0; + aec->dataUsed[i] = 0; + } + + // Creates WEBRTC AudioProcessing + if((aec->audioProcessing = AudioProcessing::Create(id)) == NULL) + { + libjitsi_webrtc_aec_log( + "%s: 0x%x\n", + "libjitsi_webrtc_aec_init (libjitsi_webrtc_aec.c): \ + \n\tAudioProcessing::Create", + 0); + libjitsi_webrtc_aec_free(); + return -1; + } + + // Enables high pass filter. + if((err = aec->audioProcessing->high_pass_filter()->Enable(true)) + != AudioProcessing::kNoError) + { + libjitsi_webrtc_aec_log( + "%s: 0x%x\n", + "libjitsi_webrtc_aec_init (libjitsi_webrtc_aec.c): \ + \n\tAudioProcessing::high_pass_filter::Enable", + err); + libjitsi_webrtc_aec_free(); + return -1; + } + + return 0; +} + +/** + * Frees the webrtc_aec instance. + */ +void libjitsi_webrtc_aec_free( + void) +{ + if(aec != NULL) + { + if(aec->audioProcessing != NULL) + { + delete(aec->audioProcessing); + } + + for(int i = 0; i < 2; ++i) + { + if(aec->data[i] != NULL) + { + free(aec->data[i]); + } + } + + free(aec); + } +} + +/** + * Analyzes or processes a given stream to removes echo. + * + * @param isCaptureStream True if the given buffer comes from the capture + * device. False if it comes from the render device. + * @param data The buffer containing the stream data. + * @param data_length The size of the valid data contained in the buffer. + * @param sample_rate The sample rate used to get the given buffer. + * @param nb_cahnnels The number of channels contained in the buffer: 1 = mono, + * 2 = stereo, etc. + * + * @return 0 if everything works fine. -1 otherwise. 
+ */ +int libjitsi_webrtc_aec_process( + int isCaptureStream, + int16_t * data, + int data_length, + int sample_rate, + int nb_channels) +{ + int stream_index = 0; + if(!isCaptureStream) + { + stream_index = 1; + } + + int nb_shift = 0; + + while(nb_shift < data_length) + { + int nb_data = + aec->dataLength[stream_index] - aec->dataUsed[stream_index]; + if(nb_data > data_length) + { + nb_data = data_length; + } + memcpy(aec->data[stream_index], data, nb_data); + aec->dataUsed[stream_index] += nb_data; + nb_shift += nb_data; + + if(aec->dataLength[stream_index] == aec->dataUsed[stream_index]) + { + int err; + AudioFrame * frame = new AudioFrame(); + + frame->UpdateFrame( + -1, // id + -1, //j * nb_ms_sample_length, //timestamp + aec->data[stream_index], + aec->dataLength[stream_index] / nb_channels, + sample_rate, + AudioFrame::kNormalSpeech, + AudioFrame::kVadActive, + nb_channels); + + // Process capture stream. + if(isCaptureStream) + { + if((err = aec->audioProcessing->ProcessStream(frame)) != 0) + { + libjitsi_webrtc_aec_log( + "%s: 0x%x\n", + "libjitsi_webrtc_aec_process (libjitsi_webrtc_aec.c): \ + \n\tAudioProcessing::ProccessStream", + err); + return -1; + } + } + // Process render stream. + // TODO: webrc can resample this render stream : need to set device + // rate to echo sampler. + else + { + if((err = aec->audioProcessing->AnalyzeReverseStream(frame)) + != 0) + { + libjitsi_webrtc_aec_log( + "%s: 0x%x\n", + "libjitsi_webrtc_aec_process (libjitsi_webrtc_aec.c): \ + \n\tAudioProcessing::AnalyzeReverseStream", + err); + return -1; + } + } + + + aec->dataUsed[stream_index] = 0; + } + } + + + return 0; +} + +/** + * Logs the corresponding error message. + * + * @param error_format The format of the error message. + * @param ... The list of variable specified in the format argument. + */ +void libjitsi_webrtc_aec_log( + const char * error_format, + ...) 
+{ + JNIEnv *env = NULL; + + if(libjitsi_webrtc_aec_VM->AttachCurrentThreadAsDaemon( + (void**) &env, + NULL) + == 0) + { + jclass clazz = env->FindClass("org/jitsi/impl/neomedia/WebrtcAec"); + if (clazz) + { + jmethodID methodID = env->GetStaticMethodID(clazz, "log", "([B)V"); + + int error_length = 2048; + char error[error_length]; + va_list arg; + va_start (arg, error_format); + vsnprintf(error, error_length, error_format, arg); + va_end (arg); + + int str_len = strlen(error); + jbyteArray bufferBytes = env->NewByteArray(str_len); + env->SetByteArrayRegion(bufferBytes, 0, str_len, (jbyte *) error); + env->CallStaticVoidMethod(clazz, methodID, bufferBytes); + } + + libjitsi_webrtc_aec_VM->DetachCurrentThread(); + } +} + +int libjitsi_webrtc_aec_initAudioProcessing( + int sample_rate, + int nb_capture_channels, + int nb_render_channels + ) +{ + // TODO: deal with + // - same input / ouput sample rate + // - same input / ouput nb channels for capture stream + int err; + + if((err = aec->audioProcessing->set_sample_rate_hz(sample_rate)) + != AudioProcessing::kNoError) + { + libjitsi_webrtc_aec_log( + "%s: 0x%x\n", + "libjitsi_webrtc_aec_initAudioProcessing (libjitsi_webrtc_aec.c): \ + \n\tAudioProcessing::set_sample_rate_hz", + err); + return -1; + } + + // Inits the capture and render buffer to default values + for(int i = 0; i < 2; ++i) + { + if(aec->data[i] != NULL) + { + free(aec->data[i]); + } + aec->dataUsed[i] = 0; + // capture + if(i == 0) + { + aec->dataLength[i] = sample_rate / 100 * nb_capture_channels; + } + // render + else + { + aec->dataLength[i] = sample_rate / 100 * nb_render_channels; + } + if((aec->data[i] + = (int16_t*) malloc(aec->dataLength[i] * sizeof(int16_t))) + == NULL) + { + libjitsi_webrtc_aec_log( + "%s: %s\n", + "libjitsi_webrtc_aec_initAudioProcessing (libjitsi_webrtc_aec.c): \ + \n\tmalloc", + strerror(errno)); + return -1; + } + } + + // CAPTURE: Mono and stereo render only. 
+ if((err = aec->audioProcessing->set_num_channels( + nb_capture_channels, + nb_capture_channels)) + != AudioProcessing::kNoError) + { + libjitsi_webrtc_aec_log( + "%s: 0x%x\n", + "libjitsi_webrtc_aec_initAudioProcessing (libjitsi_webrtc_aec.c): \ + \n\tAudioProcessing::set_num_channels", + err); + return -1; + } + + // RENDER + if((err = aec->audioProcessing->set_num_reverse_channels( + nb_render_channels)) + != AudioProcessing::kNoError) + { + libjitsi_webrtc_aec_log( + "%s: 0x%x\n", + "libjitsi_webrtc_aec_initAudioProcessing (libjitsi_webrtc_aec.c): \ + \n\tAudioProcessing::set_num_reverse_channels", + err); + return -1; + } + + + + + + // AEC + /*if((err = audioProcessing->echo_cancellation()->set_device_sample_rate_hz( + sample_rate)) + != AudioProcessing::kNoError) + { + libjitsi_webrtc_aec_log( + "%s: 0x%x\n", + "libjitsi_webrtc_aec_initAudioProcessing (libjitsi_webrtc_aec.c): \ + \n\tAudioProcessing::echo_cancellation::set_device_sample_rate_hz", + err); + return -1; + }*/ + aec->audioProcessing->echo_cancellation()->set_stream_drift_samples(100); + if((err = aec->audioProcessing->echo_cancellation() + ->enable_drift_compensation(true)) + != AudioProcessing::kNoError) + { + libjitsi_webrtc_aec_log( + "%s: 0x%x\n", + "libjitsi_webrtc_aec_initAudioProcessing (libjitsi_webrtc_aec.c): \ + \n\tAudioProcessing::echo_cancellation::enable_drift_compensation", + err); + return -1; + } + if((err = aec->audioProcessing->echo_cancellation()->Enable(true)) + != AudioProcessing::kNoError) + { + libjitsi_webrtc_aec_log( + "%s: 0x%x\n", + "libjitsi_webrtc_aec_initAudioProcessing (libjitsi_webrtc_aec.c): \ + \n\tAudioProcessing::echo_cancellation::Enable", + err); + return -1; + } + + return 0; +} + diff --git a/src/native/macosx/coreaudio/lib/webrtc/libjitsi_webrtc_aec.h b/src/native/macosx/coreaudio/lib/webrtc/libjitsi_webrtc_aec.h new file mode 100644 index 0000000000000000000000000000000000000000..64a25992172ecfd683457f5653f689aa6ef99860 --- /dev/null +++ b/src/native/macosx/coreaudio/lib/webrtc/libjitsi_webrtc_aec.h @@ -0,0 +1,38 @@ +/* + * Jitsi, the OpenSource Java VoIP and Instant Messaging client. + * + * Distributable under LGPL license. + * See terms of license at gnu.org. + */ +#ifndef libjitsi_webrtc_aec_h +#define libjitsi_webrtc_aec_h + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" +{ +#endif + +/** + * Functions to use acoustic echo cancelling with webrtc. 
+ * + * @author Vincent Lucas + */ +int libjitsi_webrtc_aec_process( + int isCaptureStream, + int16_t * data, + int data_length, + int sample_rate, + int nb_channels); + +int libjitsi_webrtc_aec_initAudioProcessing( + int sample_rate, + int nb_capture_channels, + int nb_render_channels); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/native/macosx/coreaudio/lib/webrtc/mine.c b/src/native/macosx/coreaudio/lib/webrtc/mine.c new file mode 100644 index 0000000000000000000000000000000000000000..8d6748c495466ab729a2cc0f41ce2ca150eac9e1 --- /dev/null +++ b/src/native/macosx/coreaudio/lib/webrtc/mine.c @@ -0,0 +1,214 @@ +#include "webrtc/modules/audio_processing/include/audio_processing.h" +#include "webrtc/modules/interface/module_common_types.h" + +#include <math.h> + +// README webrtc/modules/audio_processing/include/audio_processing.h + +using namespace webrtc; + +void gen_sin(int16_t * buff, int length); + +int pos = 0; + +int main(int arc, char **argv) +{ + int id = 0; + + AudioFrame * render_frame = new AudioFrame(); + AudioFrame * capture_frame = new AudioFrame(); + + int nb_channels = 1; + int nb_ms_sample_length = 10; //10 ms + int sample_rate = 32000; + //int sample_rate = 44100; + int samples_per_channel = (nb_ms_sample_length * sample_rate) / 1000; + int data_length = samples_per_channel * nb_channels; + int nb_decal = (data_length * 7) / nb_ms_sample_length; + int16_t render_data[data_length]; + int16_t capture_data[data_length]; + //memset(render_data, 0, data_length); + //memset(capture_data, 0, data_length); + + + int analog_level = 10; + //int has_voice = 0; + + //memcpy(render_data, capture_data, data_length * sizeof(int16_t)); + + /*gen_sin(render_data, data_length); + + for(int i = 0; i < nb_decal; ++i) + { + capture_data[i] = render_data[data_length - nb_decal + i]; + } + for(int i = nb_decal; i < data_length; ++i) + { + capture_data[i] = render_data[i - nb_decal]; + }*/ + + AudioProcessing * audioProcessing = AudioProcessing::Create(id); + + audioProcessing->set_sample_rate_hz(sample_rate); + + // // Mono capture and stereo render. + audioProcessing->set_num_channels(1, 1); + audioProcessing->set_num_reverse_channels(1); + //audioProcessing->set_num_reverse_channels(2); + + audioProcessing->high_pass_filter()->Enable(true); + + audioProcessing->echo_cancellation()->set_stream_drift_samples(100); + + audioProcessing->echo_cancellation()->enable_drift_compensation(false); + audioProcessing->echo_cancellation()->Enable(true); + + //audioProcessing->noise_reduction()->set_level(kHighSuppression); + //audioProcessing->noise_reduction()->Enable(true); + + audioProcessing->gain_control()->set_analog_level_limits(0, 255); + //audioProcessing->gain_control()->set_mode(kAdaptiveAnalog); + audioProcessing->gain_control()->Enable(true); + + audioProcessing->voice_detection()->Enable(true); + + // Start a voice call... 
+
+    for(int j = 0; j < 5; ++j)
+    {
+        for(int i = 0; i < nb_decal; ++i)
+        {
+            capture_data[i] = render_data[data_length - nb_decal + i];
+        }
+        gen_sin(render_data, data_length);
+        for(int i = nb_decal; i < data_length; ++i)
+        {
+            capture_data[i] = render_data[i - nb_decal];
+        }
+
+        render_frame->UpdateFrame(
+                -1, // id
+                -1,
+                //j * nb_ms_sample_length, // timestamp
+                render_data,
+                samples_per_channel,
+                sample_rate,
+                AudioFrame::kNormalSpeech,
+                AudioFrame::kVadActive,
+                nb_channels);
+        capture_frame->UpdateFrame(
+                -1, // id
+                -1,
+                //j * nb_ms_sample_length, // timestamp
+                capture_data,
+                samples_per_channel,
+                sample_rate,
+                AudioFrame::kNormalSpeech,
+                AudioFrame::kVadActive,
+                nb_channels);
+
+        // ... Render frame arrives bound for the audio HAL ...
+        audioProcessing->AnalyzeReverseStream(render_frame);
+
+        // ... Capture frame arrives from the audio HAL ...
+        // Call required set_stream_ functions.
+        audioProcessing->set_stream_delay_ms(100);
+        //audioProcessing->set_stream_delay_ms(delay_ms);
+        audioProcessing->gain_control()->set_stream_analog_level(analog_level);
+
+        audioProcessing->ProcessStream(capture_frame);
+
+        // Call required stream_ functions.
+        analog_level = audioProcessing->gain_control()->stream_analog_level();
+        //has_voice = audioProcessing->stream_has_voice();
+
+        // Repeat render and capture processing for the duration of the call...
+        // Start a new call...
+        //audioProcessing->Initialize();
+
+        for(int i = 0; i < nb_decal; ++i)
+        {
+            if(capture_data[i] != render_data[data_length - nb_decal + i])
+            {
+                fprintf(stderr, "render/capture[%d]: %d/%d\n",
+                        i,
+                        render_data[data_length - nb_decal + i],
+                        capture_data[i]);
+                fflush(stderr);
+            }
+        }
+        for(int i = nb_decal; i < data_length; ++i)
+        {
+            if(capture_data[i] != render_data[i - nb_decal])
+            {
+                fprintf(stderr, "render/capture[%d]: %d/%d\n",
+                        i,
+                        render_data[i - nb_decal],
+                        capture_data[i]);
+                fflush(stderr);
+            }
+        }
+        fprintf(stderr, "analog_level: %d\n", analog_level);
+        //fprintf(stderr, "has_voice: %d\n", has_voice);
+        fprintf(stderr, "\n\n\n");
+        fflush(stderr);
+
+        /*for(int i = 0; i < data_length; ++i)
+        {
+            if(render_data[i] != capture_data[i])
+            {
+                fprintf(stderr, "render/capture[%d]: %d/%d\n",
+                        i,
+                        render_data[i],
+                        capture_data[i]);
+                fflush(stderr);
+            }
+        }
+        fprintf(stderr, "\n\n\n");
+        fflush(stderr);*/
+    }
+
+    // Close the application...
+    delete audioProcessing;
+    delete render_frame;
+    delete capture_frame;
+
+    return 0;
+}
+
+void gen_sin(int16_t * buff, int length)
+{
+    int nb_channels = 1;
+    //int nb_ms_sample_length = 10; //10 ms
+    int sample_rate = 32000;
+    double freq = 440.0;
+
+    //int kHz = sample_rate / 1000;
+    //int nbToneSamples = kHz * nb_ms_sample_length;
+
+    /*
+     * Generates a continuous sine tone, carrying the phase over between calls
+     * via the global sample counter pos.
+     */
+    for(int sampleNumber = 0;
+            sampleNumber < length;
+            sampleNumber += nb_channels)
+    {
+        for(int i = 0; i < nb_channels; ++i)
+        {
+            buff[sampleNumber + i]
+                = sin((pos + sampleNumber) * 2.0 * M_PI * freq / sample_rate)
+                * 3;
+        }
+    }
+    pos += length / nb_channels;
+}
diff --git a/src/org/jitsi/impl/neomedia/CoreAudioDevice.java b/src/org/jitsi/impl/neomedia/CoreAudioDevice.java
index 01c4f67e9db18162992804037fa9ea91217f1de2..86af1a58ba4fceb09818dacc1e6e39f9e0cf94d6 100644
--- a/src/org/jitsi/impl/neomedia/CoreAudioDevice.java
+++ b/src/org/jitsi/impl/neomedia/CoreAudioDevice.java
@@ -41,6 +41,7 @@ public class CoreAudioDevice
         {
             System.loadLibrary("jnmaccoreaudio");
             isLoaded = true;
+            WebrtcAec.init();
         }
         else if (OSUtils.IS_WINDOWS_VISTA
                 || OSUtils.IS_WINDOWS_7
diff --git a/src/org/jitsi/impl/neomedia/WebrtcAec.java b/src/org/jitsi/impl/neomedia/WebrtcAec.java
new file mode 100644
index 0000000000000000000000000000000000000000..abf9a1acbe6035dc9ceddab8ca973373e62f17c8
--- /dev/null
+++ b/src/org/jitsi/impl/neomedia/WebrtcAec.java
@@ -0,0 +1,66 @@
+/*
+ * Jitsi, the OpenSource Java VoIP and Instant Messaging client.
+ *
+ * Distributable under LGPL license.
+ * See terms of license at gnu.org.
+ */
+package org.jitsi.impl.neomedia;
+
+import org.jitsi.util.*;
+
+/**
+ * JNI bridge to the WebRTC acoustic echo cancellation (AEC) native libraries.
+ *
+ * @author Vincent Lucas
+ */
+public class WebrtcAec
+{
+    /**
+     * The <tt>Logger</tt> used by the <tt>WebrtcAec</tt> class and
+     * its instances for logging output.
+     */
+    private static final Logger logger = Logger.getLogger(WebrtcAec.class);
+
+    /**
+     * Tells whether the WebRTC libraries have been loaded correctly.
+     */
+    public static boolean isLoaded;
+
+    /**
+     * Loads the jnwebrtc and jnwebrtcaec libraries.
+     */
+    static
+    {
+        try
+        {
+            System.loadLibrary("jnwebrtc");
+            System.loadLibrary("jnwebrtcaec");
+            isLoaded = true;
+        }
+        catch (NullPointerException npe)
+        {
+            // Swallow whatever exceptions are known to be thrown by
+            // System.loadLibrary() because the class has to be loaded in order
+            // to not prevent the loading of its users and isLoaded will remain
+            // false eventually.
+        }
+        catch (SecurityException se)
+        {
+        }
+        catch (UnsatisfiedLinkError ule)
+        {
+        }
+    }
+
+    public static void init()
+    {
+        // Nothing to do: the first call to this method merely triggers the
+        // static initializer above, which loads the webrtc and webrtcaec
+        // libraries.
+    }
+
+    public static void log(byte[] error)
+    {
+        String errorString = StringUtils.newString(error);
+        logger.info(errorString);
+    }
+}
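
For reference, a minimal usage sketch (not part of the patch) of how the two entry points declared in libjitsi_webrtc_aec.h could be driven from a capture/render loop. The 10 ms framing, the 32 kHz mono format, treating data_length as the total sample count, and the convention that isCaptureStream is 0 for render (far-end) data and 1 for capture (near-end) data are illustrative assumptions, not something the patch itself defines.

/* Hypothetical driver for libjitsi_webrtc_aec.h; the frame size, sample rate
 * and the 0 = render / 1 = capture convention below are assumptions. */
#include <stdint.h>
#include <stdio.h>
#include "libjitsi_webrtc_aec.h"

#define SAMPLE_RATE   32000
#define CHANNELS      1
#define FRAME_SAMPLES (SAMPLE_RATE / 100) /* one 10 ms frame per channel */

int main(void)
{
    int16_t render[FRAME_SAMPLES * CHANNELS] = {0};
    int16_t capture[FRAME_SAMPLES * CHANNELS] = {0};

    /* Configure the shared AudioProcessing instance once. */
    if(libjitsi_webrtc_aec_initAudioProcessing(SAMPLE_RATE, CHANNELS, CHANNELS)
            != 0)
    {
        fprintf(stderr, "AEC initialization failed\n");
        return 1;
    }

    /* In the real CoreAudio callbacks this would run once per 10 ms buffer:
     * first the far-end (render) frame, then the near-end (capture) frame. */
    libjitsi_webrtc_aec_process(
            0, render, FRAME_SAMPLES * CHANNELS, SAMPLE_RATE, CHANNELS);
    libjitsi_webrtc_aec_process(
            1, capture, FRAME_SAMPLES * CHANNELS, SAMPLE_RATE, CHANNELS);

    return 0;
}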