diff --git a/src/org/jitsi/impl/neomedia/device/WASAPISystem.java b/src/org/jitsi/impl/neomedia/device/WASAPISystem.java
index 9626cfb298f4e4beeefb2b1d75a692f36b74420e..8432676f8a58caaa89bb08948ab50d976bf7fd5c 100644
--- a/src/org/jitsi/impl/neomedia/device/WASAPISystem.java
+++ b/src/org/jitsi/impl/neomedia/device/WASAPISystem.java
@@ -272,7 +272,10 @@ public static void WAVEFORMATEX_fill(
     {
         super(
                 LOCATOR_PROTOCOL,
-                FEATURE_NOTIFY_AND_PLAYBACK_DEVICES | FEATURE_REINITIALIZE);
+                FEATURE_DENOISE
+                    | FEATURE_ECHO_CANCELLATION
+                    | FEATURE_NOTIFY_AND_PLAYBACK_DEVICES
+                    | FEATURE_REINITIALIZE);
     }
 
     /**
@@ -413,6 +416,20 @@ private void configureSupportedFormats(
     protected void doInitialize()
         throws Exception
     {
+        List<CaptureDeviceInfo2> captureDevices;
+        List<CaptureDeviceInfo2> playbackDevices;
+
+        /*
+         * We want to protect iMMDeviceEnumerator because it may be accessed by
+         * multiple threads. While the method doInitialize will not be invoked
+         * more than once at a time, it may be invoked concurrently with other
+         * methods. We do not want the methods setCaptureDevices and
+         * setPlaybackDevices in the synchronized block because they may fire
+         * events which may in turn lead to deadlocks.
+         */
+        synchronized (this)
+        {
+
         /*
          * XXX Multiple threads may invoke the initialization of a DeviceSystem
          * so we cannot be sure that the COM library has been initialized for
@@ -445,8 +462,6 @@ protected void doInitialize()
                     iMMDeviceEnumerator,
                     eAll,
                     DEVICE_STATE_ACTIVE);
-        List<CaptureDeviceInfo2> captureDevices;
-        List<CaptureDeviceInfo2> playbackDevices;
 
         if (iMMDeviceCollection == 0)
         {
@@ -513,6 +528,8 @@ protected void doInitialize()
             IMMDeviceCollection_Release(iMMDeviceCollection);
         }
 
+        } // synchronized (this)
+
         setCaptureDevices(captureDevices);
         setPlaybackDevices(playbackDevices);
     }
@@ -628,10 +645,13 @@ protected void finalize()
     {
         try
         {
-            if (iMMDeviceEnumerator != 0)
+            synchronized (this)
             {
-                IMMDeviceEnumerator_Release(iMMDeviceEnumerator);
-                iMMDeviceEnumerator = 0;
+                if (iMMDeviceEnumerator != 0)
+                {
+                    IMMDeviceEnumerator_Release(iMMDeviceEnumerator);
+                    iMMDeviceEnumerator = 0;
+                }
             }
         }
         finally
@@ -918,15 +938,15 @@ private List<AudioFormat> getIMediaObjectSupportedFormats(long iMediaObject)
      * Gets an audio endpoint device that is identified by a specific endpoint
      * ID string.
      *
-     * @param id the endpoing ID string which identifies the audio endpoint
+     * @param id the endpoint ID string which identifies the audio endpoint
      * device to be retrieved
      * @return an <tt>IMMDevice</tt> instance which represents the audio
-     * endpoint device that is identified by the specified enpoint ID string
+     * endpoint device that is identified by the specified endpoint ID string
      * @throws HResultException if an error occurs while retrieving the audio
      * endpoint device that is identified by the specified endpoint ID string in
      * a native WASAPI function which returns an <tt>HRESULT</tt> value
      */
-    public long getIMMDevice(String id)
+    public synchronized long getIMMDevice(String id)
         throws HResultException
     {
         long iMMDeviceEnumerator = this.iMMDeviceEnumerator;
@@ -1012,6 +1032,94 @@ private String getIMMDeviceFriendlyName(long iMMDevice)
         return deviceFriendlyName;
     }
 
+    /**
+     * Gets the zero-based index within the <tt>IMMDeviceCollection</tt>
+     * interface of an audio endpoint device specified by an endpoint ID string.
+     *
+     * @param id the endpoint ID string which specifies the audio endpoint
+     * device whose zero-based index within the <tt>IMMDeviceCollection</tt>
+     * interface is to be retrieved
+     * @param dataFlow the data flow direction (e.g. <tt>eCapture</tt> or
+     * <tt>eRender</tt>) of the audio endpoint devices to be enumerated during
+     * the determination of the index
+     * @return the zero-based index within the <tt>IMMDeviceCollection</tt>
+     * interface of an audio endpoint device identified by the specified
+     * endpoint ID string if the specified endpoint ID string identifies an
+     * actual audio endpoint device within the <tt>IMMDeviceCollection</tt>
+     * interface; otherwise, <tt>-1</tt>
+     * @throws HResultException if an error occurs while determining the
+     * zero-based index within the <tt>IMMDeviceCollection</tt> interface of the
+     * audio endpoint device identified by the specified endpoint ID string in a
+     * native WASAPI function which returns an <tt>HRESULT</tt> value
+     */
+    public synchronized int getIMMDeviceIndex(String id, int dataFlow)
+        throws HResultException
+    {
+        long iMMDeviceEnumerator = this.iMMDeviceEnumerator;
+
+        if (iMMDeviceEnumerator == 0)
+            throw new IllegalStateException("iMMDeviceEnumerator");
+
+        long iMMDeviceCollection
+            = IMMDeviceEnumerator_EnumAudioEndpoints(
+                    iMMDeviceEnumerator,
+                    dataFlow,
+                    DEVICE_STATE_ACTIVE);
+
+        if (iMMDeviceCollection == 0)
+        {
+            throw new RuntimeException(
+                    "IMMDeviceEnumerator_EnumAudioEndpoints");
+        }
+
+        int iMMDeviceIndex = -1;
+
+        try
+        {
+            int count = IMMDeviceCollection_GetCount(iMMDeviceCollection);
+
+            if (count > 0)
+            {
+                for (int i = 0; i < count; i++)
+                {
+                    long iMMDevice
+                        = IMMDeviceCollection_Item(iMMDeviceCollection, i);
+
+                    if (iMMDevice == 0)
+                    {
+                        throw new RuntimeException(
+                                "IMMDeviceCollection_Item");
+                    }
+
+                    String iMMDeviceID;
+
+                    try
+                    {
+                        iMMDeviceID = IMMDevice_GetId(iMMDevice);
+                    }
+                    finally
+                    {
+                        IMMDevice_Release(iMMDevice);
+                    }
+                    /*
+                     * The endpoint ID strings include GUIDs so case-insensitive
+                     * comparison should be appropriate. If we wanted to be more
+                     * strict, we would have invoked IMMDeviceCollection_GetDevice
+                     * in order to have Windows Audio Session API (WASAPI) make
+                     * the comparison of the endpoint ID strings.
+                     */
+                    if (id.equalsIgnoreCase(iMMDeviceID))
+                    {
+                        iMMDeviceIndex = i;
+                        break;
+                    }
+                }
+            }
+        }
+        finally
+        {
+            IMMDeviceCollection_Release(iMMDeviceCollection);
+        }
+        return iMMDeviceIndex;
+    }
+
     /**
      * {@inheritDoc}
      */
diff --git a/src/org/jitsi/impl/neomedia/jmfext/media/protocol/wasapi/VoiceCaptureDSP.java b/src/org/jitsi/impl/neomedia/jmfext/media/protocol/wasapi/VoiceCaptureDSP.java
index 72e6e62094be9cdbaf987626156b37cb551694d6..16215407172ad28a66a68298c51b772f89e91cab 100644
--- a/src/org/jitsi/impl/neomedia/jmfext/media/protocol/wasapi/VoiceCaptureDSP.java
+++ b/src/org/jitsi/impl/neomedia/jmfext/media/protocol/wasapi/VoiceCaptureDSP.java
@@ -55,6 +55,19 @@ public class VoiceCaptureDSP
     public static final String MEDIATYPE_Audio
         = "{73647561-0000-0010-8000-00aa00389b71}";
 
+    /**
+     * Specifies which audio devices the Voice Capture DSP uses for capturing
+     * and rendering audio. Set this property if you are using the DSP in source
+     * mode. The DSP ignores this property in filter mode. The value of the
+     * property is two 16-bit <tt>WORD</tt>s packed into a <tt>DWORD</tt> (i.e.
+     * a Java <tt>int</tt>). The upper 16 bits specify the audio rendering
+     * device (typically a speaker), and the lower 16 bits specify the capture
+     * device (typically a microphone). Each device is specified as an index
+     * into the audio device collection. If the index is <tt>-1</tt>, the
+     * default device is used.
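+     * For example (with illustrative index values), a capture device at
+     * index <tt>2</tt> and a render device at index <tt>1</tt> are packed as
+     * the <tt>DWORD</tt> <tt>0x00010002</tt> i.e. the capture index occupies
+     * the low-order 16 bits and the render index occupies the high-order 16
+     * bits.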
+     */
+    public static final long MFPKEY_WMAAECMA_DEVICE_INDEXES;
+
     /**
      * Specifies whether the Voice Capture DSP uses source mode (<tt>true</tt>)
      * or filter mode (<tt>false</tt>). In source mode, the application does not
@@ -165,6 +178,7 @@ public class VoiceCaptureDSP
     {
         String fmtid = "{6f52c567-0360-4bd2-9617-ccbf1421c939} ";
         String pszString = null;
+        long _MFPKEY_WMAAECMA_DEVICE_INDEXES = 0;
         long _MFPKEY_WMAAECMA_DMO_SOURCE_MODE = 0;
         long _MFPKEY_WMAAECMA_SYSTEM_MODE = 0;
         /*
@@ -175,6 +189,15 @@ public class VoiceCaptureDSP
 
         try
         {
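+            /*
+             * PSPropertyKeyFromString parses a string of the form
+             * "{fmtid} pid" i.e. the GUID of the property set followed by the
+             * property identifier within that set (here, 4 for
+             * MFPKEY_WMAAECMA_DEVICE_INDEXES).
+             */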
+            pszString = fmtid + "4";
+            _MFPKEY_WMAAECMA_DEVICE_INDEXES
+                = PSPropertyKeyFromString(pszString);
+            if (_MFPKEY_WMAAECMA_DEVICE_INDEXES == 0)
+            {
+                throw new IllegalStateException(
+                        "MFPKEY_WMAAECMA_DEVICE_INDEXES");
+            }
+
             pszString = fmtid + "3";
             _MFPKEY_WMAAECMA_DMO_SOURCE_MODE
                 = PSPropertyKeyFromString(pszString);
@@ -219,6 +242,7 @@ public class VoiceCaptureDSP
             }
         }
 
+        MFPKEY_WMAAECMA_DEVICE_INDEXES = _MFPKEY_WMAAECMA_DEVICE_INDEXES;
         MFPKEY_WMAAECMA_DMO_SOURCE_MODE = _MFPKEY_WMAAECMA_DMO_SOURCE_MODE;
         MFPKEY_WMAAECMA_SYSTEM_MODE = _MFPKEY_WMAAECMA_SYSTEM_MODE;
 
diff --git a/src/org/jitsi/impl/neomedia/jmfext/media/protocol/wasapi/WASAPIStream.java b/src/org/jitsi/impl/neomedia/jmfext/media/protocol/wasapi/WASAPIStream.java
index 567e7c48769ffffb6a58cf11c78cc060c821bbf4..d95837ffa57a5c1ff4d256d8d3ce16fdc9048174 100644
--- a/src/org/jitsi/impl/neomedia/jmfext/media/protocol/wasapi/WASAPIStream.java
+++ b/src/org/jitsi/impl/neomedia/jmfext/media/protocol/wasapi/WASAPIStream.java
@@ -41,6 +41,13 @@ public class WASAPIStream
      */
     private static final int CAPTURE_INPUT_STREAM_INDEX = 0;
 
+    /**
+     * The default value of the property
+     * <tt>MFPKEY_WMAAECMA_DMO_SOURCE_MODE</tt> to be set on the Voice Capture
+     * DSP.
+     */
+    private static final boolean DEFAULT_SOURCE_MODE = true;
+
     /**
      * The <tt>Logger</tt> used by the <tt>WASAPIStream</tt> class and its
      * instances to log debug information.
@@ -396,6 +403,12 @@ static void throwNewIOException(String message, HResultException hre)
         throw ioe;
     }
 
+    /**
+     * The indicator which determines whether this <tt>SourceStream</tt> has
+     * been connected with acoustic echo cancellation (AEC) enabled.
+     */
+    private boolean aec;
+
     /**
      * The maximum capacity/number of bytes of {@link #iMediaBuffer}.
      */
@@ -435,7 +448,7 @@ static void throwNewIOException(String message, HResultException hre)
     private boolean captureIsBusy;
 
     /**
-     * The number of nonseconds of audio encoded in the <tt>outFormat</tt> of
+     * The number of nanoseconds of audio encoded in the <tt>outFormat</tt> of
      * {@link #capture} represented by a <tt>byte</tt>.
      */
     private double captureNanosPerByte;
@@ -540,12 +553,26 @@ static void throwNewIOException(String message, HResultException hre)
      */
     private boolean renderIsBusy;
 
+    /**
+     * The <tt>WASAPIRenderer</tt> which maintains an active stream on the
+     * rendering device used by the Voice Capture DSP implementing acoustic
+     * echo cancellation (AEC) in order to ensure that the DSP can produce
+     * output.
+     */
+    private Renderer renderer;
+
     /**
      * The indicator which determines whether no reading from {@link #render} is
      * to be performed until it reaches a certain threshold of availability. 
      */
     private boolean replenishRender;
 
+    /**
+     * The indicator which determines whether {@link #iMediaObject} has been
+     * initialized to operate in source (as opposed to filter) mode.
+     */
+    private boolean sourceMode;
+
     /**
      * The indicator which determines whether this <tt>SourceStream</tt> is
      * started i.e. there has been a successful invocation of {@link #start()}
@@ -623,17 +650,42 @@ private void configureAEC(long iPropertyStore)
                 IPropertyStore_SetValue(
                         iPropertyStore,
                         MFPKEY_WMAAECMA_FEATURE_MODE, true);
+
+                AudioSystem audioSystem = dataSource.audioSystem;
+
+                /*
+                 * Perform acoustic echo suppression (AEC) on the residual
+                 * signal a maximum number of times.
+                 */
                 if (MFPKEY_WMAAECMA_FEATR_AES != 0)
                 {
                     IPropertyStore_SetValue(
                             iPropertyStore,
-                            MFPKEY_WMAAECMA_FEATR_AES, 2);
+                            MFPKEY_WMAAECMA_FEATR_AES,
+                            audioSystem.isEchoCancel() ? 2 : 0);
+                }
+                // Perform automatic gain control (AGC).
+                if (MFPKEY_WMAAECMA_FEATR_AGC != 0)
+                {
+                    IPropertyStore_SetValue(
+                            iPropertyStore,
+                            MFPKEY_WMAAECMA_FEATR_AGC,
+                            true);
+                }
+                // Perform noise suppression (NS).
+                if (MFPKEY_WMAAECMA_FEATR_NS != 0)
+                {
+                    IPropertyStore_SetValue(
+                            iPropertyStore,
+                            MFPKEY_WMAAECMA_FEATR_NS,
+                            audioSystem.isDenoise() ? 1 : 0);
                 }
                 if (MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH != 0)
                 {
                     IPropertyStore_SetValue(
                             iPropertyStore,
-                            MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH, 256);
+                            MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH,
+                            256);
                 }
             }
         }
@@ -716,6 +768,7 @@ private void disconnect()
              * connect.
              */
             format = null;
+            sourceMode = false;
         }
     }
 
@@ -725,7 +778,7 @@ private void disconnect()
      * endpoint device has been passed i.e. it is certain that this instance is
      * disconnected.
      *
-     * @throws Exception if the <tt>SourceStream</tt> fails to connect to the
+     * @throws Exception if this <tt>SourceStream</tt> fails to connect to the
      * associated audio endpoint device. The <tt>Exception</tt> is logged by the
      * <tt>connect()</tt> method.
      */
@@ -743,96 +796,214 @@ private void doConnect()
             throw new NullPointerException("No format set.");
         if (dataSource.aec)
         {
-            /*
-             * This SourceStream will output in an AudioFormat supported by the
-             * voice capture DMO which implements the acoustic echo cancellation
-             * (AEC) feature. The IAudioClients will be initialized with
-             * AudioFormats based on thisFormat
-             */
-            AudioFormat captureFormat
-                = findClosestMatchCaptureSupportedFormat(thisFormat);
+            aec = true;
+            try
+            {
+                CaptureDeviceInfo2 captureDevice
+                    = dataSource.audioSystem.getDevice(
+                            AudioSystem.DataFlow.CAPTURE,
+                            locator);
+
+                /*
+                 * If the information about the capture device cannot be found,
+                 * acoustic echo cancellation (AEC) cannot be enabled. That
+                 * should not happen because the locator/MediaLocator is sure to
+                 * be set. Anyway, leave the error detection to the non-AEC
+                 * branch.
+                 */
+                if (captureDevice != null)
+                {
+                    /*
+                     * If the information about the render endpoint device
+                     * cannot be found, AEC cannot be enabled. Period.
+                     */
+                    CaptureDeviceInfo2 renderDevice
+                        = dataSource.audioSystem.getSelectedDevice(
+                                AudioSystem.DataFlow.PLAYBACK);
+
+                    if (renderDevice != null)
+                    {
+                        boolean sourceMode = DEFAULT_SOURCE_MODE;
 
-            if (captureFormat == null)
+                        if (sourceMode)
+                        {
+                            doConnectInSourceMode(
+                                    captureDevice,
+                                    renderDevice,
+                                    thisFormat);
+                        }
+                        else
+                        {
+                            doConnectInFilterMode(
+                                    captureDevice,
+                                    renderDevice,
+                                    thisFormat);
+                        }
+
+                        this.sourceMode = sourceMode;
+                    }
+                }
+            }
+            catch (Throwable t)
             {
-                throw new IllegalStateException(
-                        "Failed to determine an AudioFormat with which to"
-                            + " initialize IAudioClient for MediaLocator "
-                            + locator + " based on AudioFormat " + thisFormat);
+                if (t instanceof ThreadDeath)
+                    throw (ThreadDeath) t;
+                else
+                {
+                    logger.error(
+                            "Failed to enable acoustic echo cancellation (AEC)."
+                                + " Will try without it.",
+                            t);
+                }
             }
+        }
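+        /*
+         * If the Voice Capture DSP has not been initialized (either because
+         * AEC is not to be utilized or because its initialization failed),
+         * fall back to capturing without AEC.
+         */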
+        if (iMediaObject == 0)
+        {
+            aec = false;
+            initializeCapture(locator, thisFormat);
+        }
 
+        this.format = thisFormat;
+    }
 
-            CaptureDeviceInfo2 renderDeviceInfo
-                = dataSource.audioSystem.getSelectedDevice(
-                        AudioSystem.DataFlow.PLAYBACK);
-            MediaLocator renderLocator;
-            AudioFormat renderFormat;
+    /**
+     * Invoked by {@link #doConnect()} after it has been determined that
+     * acoustic echo cancellation (AEC) is to be utilized and the implementing
+     * Voice Capture DSP is to be initialized in filter mode.
+     *
+     * @param captureDevice a <tt>CaptureDeviceInfo2</tt> which identifies the
+     * capture endpoint device to be used
+     * @param renderDevice a <tt>CaptureDeviceInfo2</tt> which identifies the
+     * render endpoint device to be used
+     * @param outFormat the <tt>Format</tt> of the media data in which the Voice
+     * Capture DSP is to output
+     * @throws Exception if this <tt>SourceStream</tt> fails to connect to the
+     * associated audio endpoint device. The <tt>Exception</tt> is logged by the
+     * <tt>connect()</tt> method.
+     */
+    private void doConnectInFilterMode(
+            CaptureDeviceInfo2 captureDevice,
+            CaptureDeviceInfo2 renderDevice,
+            AudioFormat outFormat)
+        throws Exception
+    {
+        /*
+         * This SourceStream will output in an AudioFormat supported by the
+         * voice capture DMO which implements the acoustic echo cancellation
+         * (AEC) feature. The IAudioClients will be initialized with
+         * AudioFormats based on outFormat.
+         */
+        AudioFormat captureFormat
+            = findClosestMatchCaptureSupportedFormat(outFormat);
+
+        if (captureFormat == null)
+        {
+            throw new IllegalStateException(
+                    "Failed to determine an AudioFormat with which to"
+                        + " initialize IAudioClient for MediaLocator " + locator
+                        + " based on AudioFormat " + outFormat);
+        }
 
-            if (renderDeviceInfo == null)
+        MediaLocator renderLocator;
+        AudioFormat renderFormat;
+
+        if (renderDevice == null)
+        {
+            /*
+             * We explicitly want to support the case in which the user has
+             * selected "none" for the playback/render endpoint device.
+             */
+            renderLocator = null;
+            renderFormat = captureFormat;
+        }
+        else
+        {
+            renderLocator = renderDevice.getLocator();
+            if (renderLocator == null)
             {
-                /*
-                 * We explicitly want to support the case in which the user has
-                 * selected "none" for the playback/render endpoint device.
-                 */
-                renderLocator = null;
-                renderFormat = captureFormat;
+                throw new IllegalStateException(
+                        "A CaptureDeviceInfo2 instance which describes a"
+                            + " Windows Audio Session API (WASAPI) render"
+                            + " endpoint device and which does not have an"
+                            + " actual locator/MediaLocator is illegal.");
             }
             else
             {
-                renderLocator = renderDeviceInfo.getLocator();
-                if (renderLocator == null)
+                renderFormat
+                    = findClosestMatch(
+                            renderDevice.getFormats(),
+                            outFormat,
+                            NativelySupportedAudioFormat.class);
+                if (renderFormat == null)
                 {
                     throw new IllegalStateException(
-                            "A CaptureDeviceInfo2 instance which describes a"
-                                + " Windows Audio Session API (WASAPI) render"
-                                + " endpoint device and which does not have an"
-                                + " actual locator/MediaLocator is illegal.");
-                }
-                else
-                {
-                    renderFormat
-                        = findClosestMatch(
-                                renderDeviceInfo.getFormats(),
-                                thisFormat,
-                                NativelySupportedAudioFormat.class);
-                    if (renderFormat == null)
-                    {
-                        throw new IllegalStateException(
-                                "Failed to determine an AudioFormat with which"
-                                    + " to initialize IAudioClient for"
-                                    + " MediaLocator " + renderLocator
-                                    + " based on AudioFormat " + thisFormat);
-                    }
+                            "Failed to determine an AudioFormat with which to"
+                                + " initialize IAudioClient for MediaLocator "
+                                + renderLocator + " based on AudioFormat "
+                                + outFormat);
                 }
             }
+        }
 
-            boolean uninitialize = true;
+        boolean uninitialize = true;
 
-            initializeCapture(locator, captureFormat);
+        initializeCapture(locator, captureFormat);
+        try
+        {
+            if (renderLocator != null)
+                initializeRender(renderLocator, renderFormat);
             try
             {
-                if (renderLocator != null)
-                    initializeRender(renderLocator, renderFormat);
-                try
-                {
-                    initializeAEC(captureFormat, renderFormat, thisFormat);
-                    uninitialize = false;
-                }
-                finally
-                {
-                    if (uninitialize)
-                        uninitializeRender();
-                }
+                initializeAEC(
+                        /* sourceMode */ false,
+                        captureDevice,
+                        captureFormat,
+                        renderDevice,
+                        renderFormat,
+                        outFormat);
+                uninitialize = false;
             }
             finally
             {
                 if (uninitialize)
-                    uninitializeCapture();
+                    uninitializeRender();
             }
         }
-        else
-            initializeCapture(locator, thisFormat);
+        finally
+        {
+            if (uninitialize)
+                uninitializeCapture();
+        }
+    }
 
-        this.format = thisFormat;
+    /**
+     * Invoked by {@link #doConnect()} after it has been determined that
+     * acoustic echo cancellation (AEC) is to be utilized and the implementing
+     * Voice Capture DSP is to be initialized in source mode.
+     *
+     * @param captureDevice a <tt>CaptureDeviceInfo2</tt> which identifies the
+     * capture endpoint device to be used
+     * @param renderDevice a <tt>CaptureDeviceInfo2</tt> which identifies the
+     * render endpoint device to be used
+     * @param outFormat the <tt>Format</tt> of the media data in which the Voice
+     * Capture DSP is to output
+     * @throws Exception if this <tt>SourceStream</tt> fails to connect to the
+     * associated audio endpoint device. The <tt>Exception</tt> is logged by the
+     * <tt>connect()</tt> method.
+     */
+    private void doConnectInSourceMode(
+            CaptureDeviceInfo2 captureDevice,
+            CaptureDeviceInfo2 renderDevice,
+            AudioFormat outFormat)
+        throws Exception
+    {
+        initializeAEC(
+                /* sourceMode */ true,
+                captureDevice,
+                /* captureFormat */ null,
+                renderDevice,
+                /* renderFormat */ null,
+                outFormat);
     }
 
     /**
@@ -844,6 +1015,19 @@ protected Format doGetFormat()
         return (format == null) ? super.doGetFormat() : format;
     }
 
+    /**
+     * Finds an <tt>AudioFormat</tt> in the list of supported <tt>Format</tt>s
+     * of the associated capture endpoint device which is as similar to a
+     * specific <tt>AudioFormat</tt> as possible.
+     *
+     * @param format the <tt>AudioFormat</tt> for which a similar
+     * <tt>AudioFormat</tt> is to be found in the list of supported formats of
+     * the associated capture endpoint device
+     * @return an <tt>AudioFormat</tt> which is an element of the list of
+     * supported formats of the associated capture endpoint device and is as
+     * similar to the specified <tt>format</tt> as possible or <tt>null</tt> if
+     * no similarity could be established
+     */
     private AudioFormat findClosestMatchCaptureSupportedFormat(
             AudioFormat format)
     {
@@ -870,9 +1054,16 @@ private MediaLocator getLocator()
      * Initializes the <tt>IMediaObject</tt> which is to perform acoustic echo
      * cancellation.
      *
-     * @param inFormat0 the <tt>AudioFormat</tt> of the media which will be
+     * @param sourceMode <tt>true</tt> if the Voice Capture DSP is to be
+     * initialized in source mode or <tt>false</tt> if the Voice Capture DSP is
+     * to be initialized in filter mode
+     * @param captureDevice a <tt>CaptureDeviceInfo2</tt> which identifies the
+     * capture endpoint device to be used
+     * @param captureFormat the <tt>AudioFormat</tt> of the media which will be
      * delivered to the input stream representing the audio from the microphone
-     * @param inFormat1 the <tt>AudioFormat</tt> of the media which will be
+     * @param renderDevice a <tt>CaptureDeviceInfo2</tt> which identifies the
+     * render endpoint device to be used
+     * @param renderFormat the <tt>AudioFormat</tt> of the media which will be
      * delivered to the input stream representing the audio from the speaker
      * (line)
      * @param outFormat the <tt>AudioFormat</tt> of the media which is to be
@@ -881,7 +1072,11 @@ private MediaLocator getLocator()
      * implementing acoustic echo cancellation fails
      */
     private void initializeAEC(
-            AudioFormat inFormat0, AudioFormat inFormat1,
+            boolean sourceMode,
+            CaptureDeviceInfo2 captureDevice,
+            AudioFormat captureFormat,
+            CaptureDeviceInfo2 renderDevice,
+            AudioFormat renderFormat,
             AudioFormat outFormat)
         throws Exception
     {
@@ -895,55 +1090,8 @@ private void initializeAEC(
         }
         try
         {
-            int dwInputStreamIndex = CAPTURE_INPUT_STREAM_INDEX;
-            int hresult
-                = IMediaObject_SetXXXputType(
-                        iMediaObject,
-                        /* IMediaObject_SetInputType */ true,
-                        dwInputStreamIndex,
-                        inFormat0,
-                        /* dwFlags */ 0);
-
-            if (FAILED(hresult))
-            {
-                throw new HResultException(
-                        hresult,
-                        "IMediaObject_SetInputType, dwInputStreamIndex "
-                            + dwInputStreamIndex + ", " + inFormat0);
-            }
-            dwInputStreamIndex = RENDER_INPUT_STREAM_INDEX;
-            hresult
-                = IMediaObject_SetXXXputType(
-                        iMediaObject,
-                        /* IMediaObject_SetInputType */ true,
-                        dwInputStreamIndex,
-                        inFormat1,
-                        /* dwFlags */ 0);
-            if (FAILED(hresult))
-            {
-                throw new HResultException(
-                        hresult,
-                        "IMediaObject_SetInputType, dwInputStreamIndex "
-                            + dwInputStreamIndex + ", " + inFormat1);
-            }
-            hresult
-                = IMediaObject_SetXXXputType(
-                        iMediaObject,
-                        /* IMediaObject_SetOutputType */ false,
-                        /* dwOutputStreamIndex */ 0,
-                        outFormat,
-                        /* dwFlags */ 0);
-            if (FAILED(hresult))
-            {
-                throw new HResultException(
-                        hresult,
-                        "IMediaObject_SetOutputType, " + outFormat);
-            }
-
             long iPropertyStore
-                = IMediaObject_QueryInterface(
-                        iMediaObject,
-                        IID_IPropertyStore);
+                = IMediaObject_QueryInterface(iMediaObject, IID_IPropertyStore);
 
             if (iPropertyStore == 0)
             {
@@ -952,11 +1100,12 @@ private void initializeAEC(
             }
             try
             {
-                hresult
+                int hresult
                     = IPropertyStore_SetValue(
                             iPropertyStore,
                             MFPKEY_WMAAECMA_DMO_SOURCE_MODE,
-                            false);
+                            sourceMode);
+
                 if (FAILED(hresult))
                 {
                     throw new HResultException(
@@ -966,148 +1115,87 @@ private void initializeAEC(
                 }
                 configureAEC(iPropertyStore);
 
-                long captureIMediaBuffer
-                    = MediaBuffer_alloc(capture.bufferSize);
+                hresult
+                    = IMediaObject_SetXXXputType(
+                            iMediaObject,
+                            /* IMediaObject_SetOutputType */ false,
+                            /* dwOutputStreamIndex */ 0,
+                            outFormat,
+                            /* dwFlags */ 0);
+                if (FAILED(hresult))
+                {
+                    throw new HResultException(
+                            hresult,
+                            "IMediaObject_SetOutputType, " + outFormat);
+                }
 
-                if (captureIMediaBuffer == 0)
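+                /*
+                 * Size the output media buffer to hold DEFAULT_BUFFER_DURATION
+                 * milliseconds of audio in outFormat. For illustration, a
+                 * 20 ms duration at a 48000 Hz sample rate yields 960 frames
+                 * of outFrameSize bytes each.
+                 */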
+                int outFrameSize
+                    = WASAPISystem.getSampleSizeInBytes(outFormat)
+                        * outFormat.getChannels();
+                int outFrames
+                    = (int)
+                        (WASAPISystem.DEFAULT_BUFFER_DURATION
+                            * ((int) outFormat.getSampleRate()) / 1000);
+                long iMediaBuffer = MediaBuffer_alloc(outFrameSize * outFrames);
+
+                if (iMediaBuffer == 0)
                     throw new OutOfMemoryError("MediaBuffer_alloc");
                 try
                 {
-                    /*
-                     * We explicitly want to support the case in which the user
-                     * has selected "none" for the playback/render endpoint
-                     * device.
-                     */
-                    long renderIMediaBuffer
-                        = MediaBuffer_alloc(
-                                ((render == null) ? capture : render)
-                                    .bufferSize);
+                    long dmoOutputDataBuffer
+                        = DMO_OUTPUT_DATA_BUFFER_alloc(
+                                iMediaBuffer,
+                                /* dwStatus */ 0,
+                                /* rtTimestamp */ 0,
+                                /* rtTimelength */ 0);
 
-                    if (renderIMediaBuffer == 0)
-                        throw new OutOfMemoryError("MediaBuffer_alloc");
+                    if (dmoOutputDataBuffer == 0)
+                    {
+                        throw new OutOfMemoryError(
+                                "DMO_OUTPUT_DATA_BUFFER_alloc");
+                    }
                     try
                     {
-                        int outFrameSize
-                            = WASAPISystem.getSampleSizeInBytes(outFormat)
-                                * outFormat.getChannels();
-                        int outFrames
-                            = (int)
-                                (WASAPISystem.DEFAULT_BUFFER_DURATION
-                                    * ((int) outFormat.getSampleRate()) / 1000);
-                        long iMediaBuffer
-                            = MediaBuffer_alloc(outFrameSize * outFrames);
-
-                        if (iMediaBuffer == 0)
-                            throw new OutOfMemoryError("MediaBuffer_alloc");
-                        try
+                        bufferMaxLength
+                            = IMediaBuffer_GetMaxLength(iMediaBuffer);
+
+                        processed = new byte[bufferMaxLength * 3];
+                        processedLength = 0;
+
+                        if (sourceMode)
                         {
-                            long dmoOutputDataBuffer
-                                = DMO_OUTPUT_DATA_BUFFER_alloc(
-                                        iMediaBuffer,
-                                        /* dwStatus */ 0,
-                                        /* rtTimestamp */ 0,
-                                        /* rtTimelength */ 0);
-
-                            if (dmoOutputDataBuffer == 0)
-                            {
-                                throw new OutOfMemoryError(
-                                        "DMO_OUTPUT_DATA_BUFFER_alloc");
-                            }
-                            try
-                            {
-                                bufferMaxLength
-                                    = IMediaBuffer_GetMaxLength(iMediaBuffer);
-                                captureBufferMaxLength
-                                    = IMediaBuffer_GetMaxLength(
-                                            captureIMediaBuffer);
-                                renderBufferMaxLength
-                                    = IMediaBuffer_GetMaxLength(
-                                            renderIMediaBuffer);
-
-                                processed = new byte[bufferMaxLength * 3];
-                                processedLength = 0;
-
-                                this.captureIMediaBuffer
-                                    = new PtrMediaBuffer(captureIMediaBuffer);
-                                captureIMediaBuffer = 0;
-                                this.dmoOutputDataBuffer = dmoOutputDataBuffer;
-                                dmoOutputDataBuffer = 0;
-                                this.iMediaBuffer = iMediaBuffer;
-                                iMediaBuffer = 0;
-                                this.iMediaObject = iMediaObject;
-                                iMediaObject = 0;
-                                this.renderIMediaBuffer
-                                    = new PtrMediaBuffer(renderIMediaBuffer);
-                                renderIMediaBuffer = 0;
-
-                                /*
-                                 * Prepare to be ready to compute/determine the
-                                 * duration in nanoseconds of a specific number
-                                 * of bytes representing audio samples encoded
-                                 * in the outFormat of capture.
-                                 */
-                                {
-                                    AudioFormat af = capture.outFormat;
-                                    double sampleRate = af.getSampleRate();
-                                    int sampleSizeInBits
-                                        = af.getSampleSizeInBits();
-                                    int channels = af.getChannels();
-
-                                    captureNanosPerByte
-                                        = (8d * 1000d * 1000d * 1000d)
-                                            / (sampleRate
-                                                    * sampleSizeInBits
-                                                    * channels);
-                                }
-                                /*
-                                 * Prepare to be ready to compute/determine the
-                                 * number of bytes representing a specific
-                                 * duration in nanoseconds of audio samples
-                                 * encoded in the outFormat of render.
-                                 */
-                                {
-                                    /*
-                                     * We explicitly want to support the case in
-                                     * which the user has selected "none" for
-                                     * the playback/render endpoint device.
-                                     */
-                                    AudioFormat af
-                                        = ((render == null) ? capture : render)
-                                            .outFormat;
-                                    double sampleRate = af.getSampleRate();
-                                    int sampleSizeInBits
-                                        = af.getSampleSizeInBits();
-                                    int channels = af.getChannels();
-
-                                    renderBytesPerNano
-                                        = (sampleRate
-                                                * sampleSizeInBits
-                                                * channels)
-                                            / (8d * 1000d * 1000d * 1000d);
-                                }
-                            }
-                            finally
-                            {
-                                if (dmoOutputDataBuffer != 0)
-                                    CoTaskMemFree(dmoOutputDataBuffer);
-                            }
+                            initializeAECInSourceMode(
+                                    iPropertyStore,
+                                    captureDevice,
+                                    renderDevice,
+                                    outFormat);
                         }
-                        finally
+                        else
                         {
-                            if (iMediaBuffer != 0)
-                                IMediaBuffer_Release(iMediaBuffer);
+                            initializeAECInFilterMode(
+                                    iMediaObject,
+                                    captureFormat,
+                                    renderFormat,
+                                    outFormat);
                         }
+
+                        this.dmoOutputDataBuffer = dmoOutputDataBuffer;
+                        dmoOutputDataBuffer = 0;
+                        this.iMediaBuffer = iMediaBuffer;
+                        iMediaBuffer = 0;
+                        this.iMediaObject = iMediaObject;
+                        iMediaObject = 0;
                     }
                     finally
                     {
-                        if (renderIMediaBuffer != 0)
-                            IMediaBuffer_Release(renderIMediaBuffer);
+                        if (dmoOutputDataBuffer != 0)
+                            CoTaskMemFree(dmoOutputDataBuffer);
                     }
                 }
                 finally
                 {
-                    if (captureIMediaBuffer != 0)
-                        IMediaBuffer_Release(captureIMediaBuffer);
+                    if (iMediaBuffer != 0)
+                        IMediaBuffer_Release(iMediaBuffer);
                 }
             }
             finally
@@ -1123,6 +1211,227 @@ private void initializeAEC(
         }
     }
 
+    /**
+     * Initializes the Voice Capture DSP which is to perform acoustic echo
+     * cancellation. The method is invoked in case the Voice Capture DSP is to
+     * be used in filter mode.
+     *
+     * @param iMediaObject the <tt>IMediaObject</tt> interface to the Voice
+     * Capture DSP to be initialized in filter mode
+     * @param inFormat0 the <tt>AudioFormat</tt> of the media which will be
+     * delivered to the input stream representing the audio from the microphone
+     * @param inFormat1 the <tt>AudioFormat</tt> of the media which will be
+     * delivered to the input stream representing the audio from the speaker
+     * (line)
+     * @param outFormat the <tt>AudioFormat</tt> of the media which is to be
+     * output by the <tt>IMediaObject</tt>/acoustic echo cancellation
+     * @throws Exception if the initialization of the <tt>IMediaObject</tt>
+     * implementing acoustic echo cancellation fails
+     */
+    private void initializeAECInFilterMode(
+            long iMediaObject,
+            AudioFormat inFormat0, AudioFormat inFormat1,
+            AudioFormat outFormat)
+        throws Exception
+    {
+        int dwInputStreamIndex = CAPTURE_INPUT_STREAM_INDEX;
+        int hresult
+            = IMediaObject_SetXXXputType(
+                    iMediaObject,
+                    /* IMediaObject_SetInputType */ true,
+                    dwInputStreamIndex,
+                    inFormat0,
+                    /* dwFlags */ 0);
+
+        if (FAILED(hresult))
+        {
+            throw new HResultException(
+                    hresult,
+                    "IMediaObject_SetInputType, dwInputStreamIndex "
+                        + dwInputStreamIndex + ", " + inFormat0);
+        }
+        dwInputStreamIndex = RENDER_INPUT_STREAM_INDEX;
+        hresult
+            = IMediaObject_SetXXXputType(
+                    iMediaObject,
+                    /* IMediaObject_SetInputType */ true,
+                    dwInputStreamIndex,
+                    inFormat1,
+                    /* dwFlags */ 0);
+        if (FAILED(hresult))
+        {
+            throw new HResultException(
+                    hresult,
+                    "IMediaObject_SetInputType, dwInputStreamIndex "
+                        + dwInputStreamIndex + ", " + inFormat1);
+        }
+
+        long captureIMediaBuffer
+            = MediaBuffer_alloc(capture.bufferSize);
+
+        if (captureIMediaBuffer == 0)
+            throw new OutOfMemoryError("MediaBuffer_alloc");
+        try
+        {
+            /*
+             * We explicitly want to support the case in which the user has
+             * selected "none" for the playback/render endpoint device.
+             */
+            long renderIMediaBuffer
+                = MediaBuffer_alloc(
+                        ((render == null) ? capture : render).bufferSize);
+
+            if (renderIMediaBuffer == 0)
+                throw new OutOfMemoryError("MediaBuffer_alloc");
+            try
+            {
+                captureBufferMaxLength
+                    = IMediaBuffer_GetMaxLength(captureIMediaBuffer);
+                renderBufferMaxLength
+                    = IMediaBuffer_GetMaxLength(renderIMediaBuffer);
+
+                this.captureIMediaBuffer
+                    = new PtrMediaBuffer(captureIMediaBuffer);
+                captureIMediaBuffer = 0;
+                this.renderIMediaBuffer
+                    = new PtrMediaBuffer(renderIMediaBuffer);
+                renderIMediaBuffer = 0;
+
+                /*
+                 * Prepare to be ready to compute/determine the duration in
+                 * nanoseconds of a specific number of bytes representing audio
+                 * samples encoded in the outFormat of capture.
+                 */
+                {
+                    AudioFormat af = capture.outFormat;
+                    double sampleRate = af.getSampleRate();
+                    int sampleSizeInBits = af.getSampleSizeInBits();
+                    int channels = af.getChannels();
+
+                    captureNanosPerByte
+                        = (8d * 1000d * 1000d * 1000d)
+                            / (sampleRate * sampleSizeInBits * channels);
+                }
+                /*
+                 * Prepare to be ready to compute/determine the number of bytes
+                 * representing a specific duration in nanoseconds of audio
+                 * samples encoded in the outFormat of render.
+                 */
+                {
+                    /*
+                     * We explicitly want to support the case in which the user
+                     * has selected "none" for the playback/render endpoint
+                     * device.
+                     */
+                    AudioFormat af
+                        = ((render == null) ? capture : render).outFormat;
+                    double sampleRate = af.getSampleRate();
+                    int sampleSizeInBits = af.getSampleSizeInBits();
+                    int channels = af.getChannels();
+
+                    renderBytesPerNano
+                        = (sampleRate * sampleSizeInBits * channels)
+                            / (8d * 1000d * 1000d * 1000d);
+                }
+            }
+            finally
+            {
+                if (renderIMediaBuffer != 0)
+                    IMediaBuffer_Release(renderIMediaBuffer);
+            }
+        }
+        finally
+        {
+            if (captureIMediaBuffer != 0)
+                IMediaBuffer_Release(captureIMediaBuffer);
+        }
+    }
+
+    /**
+     * Initializes the Voice Capture DSP which is to perform acoustic echo
+     * cancellation. The method is invoked in case the Voice Capture DSP is to
+     * be used in source mode.
+     *
+     * @param iPropertyStore the <tt>IPropertyStore</tt> interface to the Voice
+     * Capture DSP to be initialized in source mode
+     * @param captureDevice a <tt>CaptureDeviceInfo2</tt> which identifies the
+     * capture endpoint device to be used
+     * @param renderDevice a <tt>CaptureDeviceInfo2</tt> which identifies the
+     * render endpoint device to be used
+     * @param outFormat the <tt>AudioFormat</tt> of the media which is to be
+     * output by the <tt>IMediaObject</tt>/acoustic echo cancellation
+     * @throws Exception if the initialization of the <tt>IMediaObject</tt>
+     * implementing acoustic echo cancellation fails
+     */
+    private void initializeAECInSourceMode(
+            long iPropertyStore,
+            CaptureDeviceInfo2 captureDevice,
+            CaptureDeviceInfo2 renderDevice,
+            AudioFormat outFormat)
+        throws Exception
+    {
+        WASAPISystem audioSystem = dataSource.audioSystem;
+        int captureDeviceIndex
+            = audioSystem.getIMMDeviceIndex(
+                    captureDevice.getLocator().getRemainder(),
+                    eCapture);
+
+        if (captureDeviceIndex == -1)
+        {
+            throw new IllegalStateException(
+                    "Acoustic echo cancellation (AEC) cannot be initialized"
+                        + " without a microphone.");
+        }
+
+        MediaLocator renderLocator = renderDevice.getLocator();
+        int renderDeviceIndex
+            = audioSystem.getIMMDeviceIndex(
+                    renderLocator.getRemainder(),
+                    eRender);
+
+        if (renderDeviceIndex == -1)
+        {
+            throw new IllegalStateException(
+                    "Acoustic echo cancellation (AEC) cannot be initialized"
+                        + " without a speaker (line).");
+        }
+
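+        /*
+         * Pack the two zero-based device indexes into a single DWORD: the
+         * low-order 16 bits specify the capture device and the high-order 16
+         * bits specify the render device (cf.
+         * MFPKEY_WMAAECMA_DEVICE_INDEXES).
+         */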
+        int hresult
+            = IPropertyStore_SetValue(
+                    iPropertyStore,
+                    MFPKEY_WMAAECMA_DEVICE_INDEXES,
+                    ((0x0000ffff & captureDeviceIndex))
+                        | ((0x0000ffff & renderDeviceIndex) << 16));
+
+        if (FAILED(hresult))
+        {
+            throw new HResultException(
+                    hresult,
+                    "IPropertyStore_SetValue MFPKEY_WMAAECMA_DEVICE_INDEXES");
+        }
+
+        /*
+         * If the selected rendering device does not have an active stream, the
+         * DSP cannot process any output.
+         */
+        AbstractAudioRenderer<?> renderer = new WASAPIRenderer();
+
+        renderer.setLocator(renderLocator);
+
+        Format[] rendererSupportedInputFormats
+            = renderer.getSupportedInputFormats();
+
+        if ((rendererSupportedInputFormats != null)
+                && (rendererSupportedInputFormats.length != 0))
+        {
+            renderer.setInputFormat(rendererSupportedInputFormats[0]);
+        }
+        renderer.open();
+
+        devicePeriod = WASAPISystem.DEFAULT_DEVICE_PERIOD / 2;
+        this.renderer = renderer;
+    }
+
     /**
      * Initializes the delivery of audio data/samples from a capture endpoint
      * device identified by a specific <tt>MediaLocator</tt> into this instance.
@@ -1134,11 +1443,13 @@ private void initializeAEC(
      * @throws Exception if the initialization of the delivery of audio samples
      * from the specified capture endpoint into this instance fails
      */
-    private void initializeCapture(MediaLocator locator, AudioFormat format)
+    private void initializeCapture(
+            MediaLocator locator,
+            AudioFormat format)
         throws Exception
     {
         long hnsBufferDuration
-            = dataSource.aec
+            = aec
                 ? Format.NOT_SPECIFIED
                 : WASAPISystem.DEFAULT_BUFFER_DURATION;
         BufferTransferHandler transferHandler
@@ -1227,8 +1538,8 @@ private void popFromProcessed(int length)
      * <tt>iMediaObject</tt> to which audio samples are to be delivered
-     * @param maxLength the maximum number of bytes to the delivered through the
-     * specified input stream. Ignored if negative or greater than the actual
-     * capacity/maximum length of the <tt>IMediaBuffer</tt> associated with the specified
-     * <tt>dwInputStreamIndex</tt>.
+     * @param maxLength the maximum number of bytes to be delivered through the
+     * specified input stream. Ignored if negative or greater than the actual
+     * capacity/maximum length of the <tt>IMediaBuffer</tt> associated with the
+     * specified <tt>dwInputStreamIndex</tt>.
      */
     private void processInput(int dwInputStreamIndex, int maxLength)
     {
@@ -1485,7 +1796,7 @@ public void read(Buffer buffer)
         throws IOException
     {
         // Reduce relocations as much as possible.
-        int capacity = dataSource.aec ? bufferMaxLength : bufferSize;
+        int capacity = aec ? bufferMaxLength : bufferSize;
         byte[] data
             = AbstractCodec2.validateByteArraySize(buffer, capacity, false);
         int length = 0;
@@ -1505,14 +1816,16 @@ public void read(Buffer buffer)
                  * Otherwise, we could have added a check
                  * (dataSource.aec && (render == null)). 
                  */
-                if (capture == null)
-                    message = getClass().getName() + " is disconnected.";
-                else
+                boolean connected = (capture != null) || sourceMode;
+
+                if (connected)
                 {
                     message = null;
                     captureIsBusy = true;
                     renderIsBusy = true;
                 }
+                else
+                    message = getClass().getName() + " is disconnected.";
             }
             /*
              * The caller shouldn't call #read(Buffer) if this instance is
@@ -1537,9 +1850,9 @@ public void read(Buffer buffer)
                  * selected "none" for the playback/render endpoint device.
                  * Otherwise, we could have used a check (render == null).
                  */
-                if (!dataSource.aec)
-                    read = capture.read(data, length, toRead);
-                else
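+                /*
+                 * A non-zero iMediaObject means that the Voice Capture DSP
+                 * has been initialized i.e. the read is to be served from the
+                 * output of the DSP rather than directly from capture.
+                 */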
+                boolean aec = (iMediaObject != 0);
+
+                if (aec)
                 {
                     toRead = Math.min(toRead, processedLength);
                     if (toRead == 0)
@@ -1551,6 +1864,8 @@ public void read(Buffer buffer)
                         read = toRead;
                     }
                 }
+                else
+                    read = capture.read(data, length, toRead);
                 cause = null;
             }
             catch (Throwable t)
@@ -1634,32 +1949,42 @@ private BufferTransferHandler runInProcessThread()
 
         do
         {
-            processInput(CAPTURE_INPUT_STREAM_INDEX, captureMaxLength);
-
-            /*
-             * If the capture endpoint device has not made any audio samples
-             * available, there is no input to be processed. Moreover, inputting
-             * from the render endpoint device in such a case will be
-             * inappropriate because it will (repeatedly) introduce random skew
-             * in the audio delivered by the render endpoint device.
-             */
-            int captureLength = maybeIMediaBufferGetLength(captureIMediaBuffer);
             boolean flush;
 
-            if (captureLength < captureMaxLength)
+            if (sourceMode)
+            {
                 flush = false;
+            }
             else
             {
-                int renderMaxLength
-                    = computeRenderLength(
-                            computeCaptureDuration(captureLength));
+                processInput(CAPTURE_INPUT_STREAM_INDEX, captureMaxLength);
 
-                processInput(RENDER_INPUT_STREAM_INDEX, renderMaxLength);
+                /*
+                 * If the capture endpoint device has not made any audio samples
+                 * available, there is no input to be processed. Moreover,
+                 * inputting from the render endpoint device in such a case will
+                 * be inappropriate because it will (repeatedly) introduce
+                 * random skew in the audio delivered by the render endpoint
+                 * device.
+                 */
+                int captureLength
+                    = maybeIMediaBufferGetLength(captureIMediaBuffer);
+
+                if (captureLength < captureMaxLength)
+                    flush = false;
+                else
+                {
+                    int renderMaxLength
+                        = computeRenderLength(
+                                computeCaptureDuration(captureLength));
 
-                processOutput();
-                flush = true;
+                    processInput(RENDER_INPUT_STREAM_INDEX, renderMaxLength);
+                    flush = true;
+                }
             }
 
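+            /*
+             * In source mode, the Voice Capture DSP captures from the audio
+             * devices itself so there may be output to process at any time.
+             * In filter mode, the input was delivered above.
+             */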
+            processOutput();
+
             /*
              * IMediaObject::ProcessOutput has completed which means that, as
              * far as it is concerned, it does not have any input data to
@@ -1732,7 +2057,9 @@ private void runInProcessThread(Thread processThread)
                      * device. Otherwise, we could have added a check
                      * (render == null).
                      */
-                    if ((capture == null) || !started)
+                    boolean connected = (capture != null) || sourceMode;
+
+                    if (!connected || !started)
                         break;
 
                     waitWhileCaptureIsBusy();
@@ -1843,17 +2170,26 @@ public synchronized void start()
          * "none" for the playback/render endpoint device. Otherwise, we could
          * have replaced the dataSource.aec check with (render != null).
          */
-        if (dataSource.aec && (capture != null) && (processThread == null))
+        if (aec
+                && ((capture != null) || sourceMode)
+                && (processThread == null))
         {
+            /*
+             * If the selected rendering device does not have an active stream,
+             * the DSP cannot process any output in source mode.
+             */
+            if (renderer != null)
+                renderer.start();
+
             processThread
                 = new Thread(WASAPIStream.class + ".processThread")
+                {
+                    @Override
+                    public void run()
                     {
-                        @Override
-                        public void run()
-                        {
-                            runInProcessThread(this);
-                        }
-                    };
+                        runInProcessThread(this);
+                    }
+                };
             processThread.setDaemon(true);
             processThread.start();
         }
@@ -1881,6 +2217,12 @@ public synchronized void stop()
 
         waitWhileProcessThread();
         processedLength = 0;
+        /*
+         * If the selected rendering device does not have an active stream, the
+         * DSP cannot process any output in source mode.
+         */
+        if (renderer != null)
+            renderer.stop();
     }
 
     /**
@@ -1889,7 +2231,7 @@ public synchronized void stop()
      */
     private void transferCaptureData()
     {
-        if (dataSource.aec)
+        if (aec)
         {
             synchronized (this)
             {
@@ -1948,6 +2290,12 @@ private void uninitializeAEC()
             captureIMediaBuffer.Release();
             captureIMediaBuffer = null;
         }
+
+        Renderer renderer = this.renderer;
+
+        this.renderer = null;
+        if (renderer != null)
+            renderer.close();
     }
 
     private void uninitializeCapture()
diff --git a/src/org/jitsi/impl/neomedia/jmfext/media/renderer/audio/WASAPIRenderer.java b/src/org/jitsi/impl/neomedia/jmfext/media/renderer/audio/WASAPIRenderer.java
index 7f9d1b177680c366bc264d2924b7d1b67a5b0455..909b6ba81182614b803b4e0ea69ac3306087a837 100644
--- a/src/org/jitsi/impl/neomedia/jmfext/media/renderer/audio/WASAPIRenderer.java
+++ b/src/org/jitsi/impl/neomedia/jmfext/media/renderer/audio/WASAPIRenderer.java
@@ -234,7 +234,7 @@ public class WASAPIRenderer
      * The maximum interval of time in milliseconds that the writing to the
      * render endpoint buffer is allowed to be under suspicion that it is
      * malfunctioning. If it remains under suspicion after the maximum interval
-     * of time has elapsed, the writing to the render endpoing buffer is to be
+     * of time has elapsed, the writing to the render endpoint buffer is to be
      * considered malfunctioning for real. 
      */
     private long writeIsMalfunctioningTimeout;
@@ -856,6 +856,18 @@ private void popFromRemainder(int length)
         remainderLength = pop(remainder, remainderLength, length);
     }
 
+    /**
+     * Pops a specific number of bytes from (the head of) a specific array of
+     * <tt>byte</tt>s.
+     *
+     * @param array the array of <tt>byte</tt> from which the specified number
+     * of bytes are to be popped
+     * @param arrayLength the number of elements in <tt>array</tt> which contain
+     * valid data
+     * @param length the number of bytes to be popped from <tt>array</tt>
+     * @return the number of elements in <tt>array</tt> which contain valid data
+     * after the specified number of bytes have been popped from it
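+     * <p>
+     * For example, popping <tt>4</tt> bytes from an array holding <tt>10</tt>
+     * valid bytes leaves the remaining <tt>6</tt> bytes at the head of the
+     * array and returns <tt>6</tt>.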
+     */
     public static int pop(byte[] array, int arrayLength, int length)
     {
         if (length < 0)