@@ -339,20 +339,12 @@ public:
         jshort* const src = env->GetShortArrayElements (audioBuffer, nullptr);

-        for (int chan = 0; chan < inputChannelBuffer.getNumChannels(); ++chan)
-        {
-            AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::NonConst> d (inputChannelBuffer.getWritePointer (chan));
-
-            if (chan < numDeviceInputChannels)
-            {
-                AudioData::Pointer <AudioData::Int16, AudioData::NativeEndian, AudioData::Interleaved, AudioData::Const> s (src + chan, numDeviceInputChannels);
-                d.convertSamples (s, actualBufferSize);
-            }
-            else
-            {
-                d.clearSamples (actualBufferSize);
-            }
-        }
+        AudioData::deinterleaveSamples<AudioData::Int16, AudioData::NativeEndian,
+                                       AudioData::Float32, AudioData::NativeEndian> (reinterpret_cast<const uint16*> (src),
+                                                                                     numDeviceInputChannels,
+                                                                                     inputChannelBuffer.getArrayOfWritePointers(),
+                                                                                     inputChannelBuffer.getNumChannels(),
+                                                                                     actualBufferSize);

         env->ReleaseShortArrayElements (audioBuffer, src, 0);
     }
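
Note on the hunk above: the removed loop also cleared any JUCE input channels beyond numDeviceInputChannels, whereas the replacement simply passes both channel counts (numDeviceInputChannels for the interleaved source, inputChannelBuffer.getNumChannels() for the destination) and relies on the new helper to silence the surplus destination channels. A rough sketch of that assumed behaviour, written in terms of the old per-channel API with placeholder names (not the actual JUCE implementation):

    // Sketch only: what the deinterleaveSamples call above is assumed to do at this
    // call site. 'src', 'dest', 'numSrcChans', 'numDstChans' and 'numSamples' are
    // placeholder names, not identifiers from the patch.
    void deinterleaveInt16ToFloat (const int16* src, int numSrcChans,
                                   float* const* dest, int numDstChans, int numSamples)
    {
        for (int chan = 0; chan < numDstChans; ++chan)
        {
            AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian,
                               AudioData::NonInterleaved, AudioData::NonConst> d (dest[chan]);

            if (chan < numSrcChans)
            {
                // Read every numSrcChans-th sample, starting at this channel's offset
                AudioData::Pointer<AudioData::Int16, AudioData::NativeEndian,
                                   AudioData::Interleaved, AudioData::Const> s (src + chan, numSrcChans);
                d.convertSamples (s, numSamples);
            }
            else
            {
                // Destination channels with no matching device channel are silenced
                d.clearSamples (numSamples);
            }
        }
    }
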
@@ -382,14 +374,12 @@ public:
         jshort* const dest = env->GetShortArrayElements (audioBuffer, nullptr);

-        for (int chan = 0; chan < numDeviceOutputChannels; ++chan)
-        {
-            AudioData::Pointer <AudioData::Int16, AudioData::NativeEndian, AudioData::Interleaved, AudioData::NonConst> d (dest + chan, numDeviceOutputChannels);
-
-            const float* const sourceChanData = outputChannelBuffer.getReadPointer (jmin (chan, outputChannelBuffer.getNumChannels() - 1));
-            AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const> s (sourceChanData);
-            d.convertSamples (s, actualBufferSize);
-        }
+        AudioData::interleaveSamples<AudioData::Float32, AudioData::NativeEndian,
+                                     AudioData::Int16, AudioData::NativeEndian> (outputChannelBuffer.getArrayOfReadPointers(),
+                                                                                 outputChannelBuffer.getNumChannels(),
+                                                                                 reinterpret_cast<uint16*> (dest),
+                                                                                 numDeviceOutputChannels,
+                                                                                 actualBufferSize);

         env->ReleaseShortArrayElements (audioBuffer, dest, 0);
         jint numWritten = env->CallIntMethod (outputDevice, AudioTrack.write, audioBuffer, 0, actualBufferSize * numDeviceOutputChannels);
@@ -46,28 +46,26 @@ struct OboeAudioIODeviceBufferHelpers<int16>
     static void convertFromOboe (const int16* srcInterleaved, AudioBuffer<float>& audioBuffer, int numSamples)
     {
-        for (int i = 0; i < audioBuffer.getNumChannels(); ++i)
-        {
-            using DstSampleType = AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::NonConst>;
-            using SrcSampleType = AudioData::Pointer<AudioData::Int16, AudioData::NativeEndian, AudioData::Interleaved, AudioData::Const>;
-
-            DstSampleType dstData (audioBuffer.getWritePointer (i));
-            SrcSampleType srcData (srcInterleaved + i, audioBuffer.getNumChannels());
-            dstData.convertSamples (srcData, numSamples);
-        }
+        const auto numChannels = audioBuffer.getNumChannels();
+
+        AudioData::deinterleaveSamples<AudioData::Int16, AudioData::NativeEndian,
+                                       AudioData::Float32, AudioData::NativeEndian> (reinterpret_cast<const uint16*> (srcInterleaved),
+                                                                                     numChannels,
+                                                                                     audioBuffer.getArrayOfWritePointers(),
+                                                                                     numChannels,
+                                                                                     numSamples);
     }

     static void convertToOboe (const AudioBuffer<float>& audioBuffer, int16* dstInterleaved, int numSamples)
     {
-        for (int i = 0; i < audioBuffer.getNumChannels(); ++i)
-        {
-            using DstSampleType = AudioData::Pointer<AudioData::Int16, AudioData::NativeEndian, AudioData::Interleaved, AudioData::NonConst>;
-            using SrcSampleType = AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const>;
-
-            DstSampleType dstData (dstInterleaved + i, audioBuffer.getNumChannels());
-            SrcSampleType srcData (audioBuffer.getReadPointer (i));
-            dstData.convertSamples (srcData, numSamples);
-        }
+        const auto numChannels = audioBuffer.getNumChannels();
+
+        AudioData::interleaveSamples<AudioData::Float32, AudioData::NativeEndian,
+                                     AudioData::Int16, AudioData::NativeEndian> (audioBuffer.getArrayOfReadPointers(),
+                                                                                 numChannels,
+                                                                                 reinterpret_cast<uint16*> (dstInterleaved),
+                                                                                 numChannels,
+                                                                                 numSamples);
     }
 };
@@ -98,15 +96,12 @@ struct OboeAudioIODeviceBufferHelpers<float>
             // No need to convert, we instructed the buffer to point to the src data directly already
             jassert (audioBuffer.getWritePointer (0) != srcInterleaved);

-            for (int i = 0; i < numChannels; ++i)
-            {
-                using DstSampleType = AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::NonConst>;
-                using SrcSampleType = AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::Interleaved, AudioData::Const>;
-
-                DstSampleType dstData (audioBuffer.getWritePointer (i));
-                SrcSampleType srcData (srcInterleaved + i, audioBuffer.getNumChannels());
-                dstData.convertSamples (srcData, numSamples);
-            }
+            AudioData::deinterleaveSamples<AudioData::Float32, AudioData::NativeEndian,
+                                           AudioData::Float32, AudioData::NativeEndian> (srcInterleaved,
+                                                                                         numChannels,
+                                                                                         audioBuffer.getArrayOfWritePointers(),
+                                                                                         numChannels,
+                                                                                         numSamples);
         }
     }
@@ -119,15 +114,12 @@ struct OboeAudioIODeviceBufferHelpers<float>
             // No need to convert, we instructed the buffer to point to the src data directly already
             jassert (audioBuffer.getReadPointer (0) != dstInterleaved);

-            for (int i = 0; i < numChannels; ++i)
-            {
-                using DstSampleType = AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::Interleaved, AudioData::NonConst>;
-                using SrcSampleType = AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const>;
-
-                DstSampleType dstData (dstInterleaved + i, audioBuffer.getNumChannels());
-                SrcSampleType srcData (audioBuffer.getReadPointer (i));
-                dstData.convertSamples (srcData, numSamples);
-            }
+            AudioData::interleaveSamples<AudioData::Float32, AudioData::NativeEndian,
+                                         AudioData::Float32, AudioData::NativeEndian> (audioBuffer.getArrayOfReadPointers(),
+                                                                                       numChannels,
+                                                                                       dstInterleaved,
+                                                                                       numChannels,
+                                                                                       numSamples);
         }
     }
 };
@@ -196,29 +196,26 @@ struct BufferHelpers<int16>
     static void convertFromOpenSL (const int16* srcInterleaved, AudioBuffer<float>& audioBuffer)
     {
-        for (int i = 0; i < audioBuffer.getNumChannels(); ++i)
-        {
-            using DstSampleType = AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::NonConst>;
-            using SrcSampleType = AudioData::Pointer<AudioData::Int16, AudioData::LittleEndian, AudioData::Interleaved, AudioData::Const>;
-
-            DstSampleType dstData (audioBuffer.getWritePointer (i));
-            SrcSampleType srcData (srcInterleaved + i, audioBuffer.getNumChannels());
-            dstData.convertSamples (srcData, audioBuffer.getNumSamples());
-        }
+        const auto numChannels = audioBuffer.getNumChannels();
+
+        AudioData::deinterleaveSamples<AudioData::Int16, AudioData::LittleEndian,
+                                       AudioData::Float32, AudioData::NativeEndian> (reinterpret_cast<const uint16*> (srcInterleaved),
+                                                                                     numChannels,
+                                                                                     audioBuffer.getArrayOfWritePointers(),
+                                                                                     numChannels,
+                                                                                     audioBuffer.getNumSamples());
     }

     static void convertToOpenSL (const AudioBuffer<float>& audioBuffer, int16* dstInterleaved)
     {
-        for (int i = 0; i < audioBuffer.getNumChannels(); ++i)
-        {
-            using DstSampleType = AudioData::Pointer<AudioData::Int16, AudioData::LittleEndian, AudioData::Interleaved, AudioData::NonConst>;
-            using SrcSampleType = AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const>;
-
-            DstSampleType dstData (dstInterleaved + i, audioBuffer.getNumChannels());
-            SrcSampleType srcData (audioBuffer.getReadPointer (i));
-            dstData.convertSamples (srcData, audioBuffer.getNumSamples());
-        }
+        const auto numChannels = audioBuffer.getNumChannels();
+
+        AudioData::interleaveSamples<AudioData::Float32, AudioData::NativeEndian,
+                                     AudioData::Int16, AudioData::LittleEndian> (audioBuffer.getArrayOfReadPointers(),
+                                                                                 numChannels,
+                                                                                 reinterpret_cast<uint16*> (dstInterleaved),
+                                                                                 numChannels,
+                                                                                 audioBuffer.getNumSamples());
     }
 };
@@ -249,41 +246,38 @@ struct BufferHelpers<float>
     static void convertFromOpenSL (const float* srcInterleaved, AudioBuffer<float>& audioBuffer)
     {
-        if (audioBuffer.getNumChannels() == 1)
+        const auto numChannels = audioBuffer.getNumChannels();
+
+        if (numChannels == 1)
         {
             jassert (srcInterleaved == audioBuffer.getWritePointer (0));
             return;
         }

-        for (int i = 0; i < audioBuffer.getNumChannels(); ++i)
-        {
-            using DstSampleType = AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::NonConst>;
-            using SrcSampleType = AudioData::Pointer<AudioData::Float32, AudioData::LittleEndian, AudioData::Interleaved, AudioData::Const>;
-
-            DstSampleType dstData (audioBuffer.getWritePointer (i));
-            SrcSampleType srcData (srcInterleaved + i, audioBuffer.getNumChannels());
-            dstData.convertSamples (srcData, audioBuffer.getNumSamples());
-        }
+        AudioData::deinterleaveSamples<AudioData::Float32, AudioData::LittleEndian,
+                                       AudioData::Float32, AudioData::NativeEndian> (srcInterleaved,
+                                                                                     numChannels,
+                                                                                     audioBuffer.getArrayOfWritePointers(),
+                                                                                     numChannels,
+                                                                                     audioBuffer.getNumSamples());
     }

     static void convertToOpenSL (const AudioBuffer<float>& audioBuffer, float* dstInterleaved)
     {
-        if (audioBuffer.getNumChannels() == 1)
+        const auto numChannels = audioBuffer.getNumChannels();
+
+        if (numChannels == 1)
         {
             jassert (dstInterleaved == audioBuffer.getReadPointer (0));
             return;
         }

-        for (int i = 0; i < audioBuffer.getNumChannels(); ++i)
-        {
-            using DstSampleType = AudioData::Pointer<AudioData::Float32, AudioData::LittleEndian, AudioData::Interleaved, AudioData::NonConst>;
-            using SrcSampleType = AudioData::Pointer<AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const>;
-
-            DstSampleType dstData (dstInterleaved + i, audioBuffer.getNumChannels());
-            SrcSampleType srcData (audioBuffer.getReadPointer (i));
-            dstData.convertSamples (srcData, audioBuffer.getNumSamples());
-        }
+        AudioData::interleaveSamples<AudioData::Float32, AudioData::NativeEndian,
+                                     AudioData::Float32, AudioData::LittleEndian> (audioBuffer.getArrayOfReadPointers(),
+                                                                                   numChannels,
+                                                                                   dstInterleaved,
+                                                                                   numChannels,
+                                                                                   audioBuffer.getNumSamples());
     }
 };
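
Note on the two BufferHelpers<float> hunks above: source and destination are both Float32, and Android is little-endian, so the LittleEndian/NativeEndian tags describe the same byte layout in practice and each call reduces to a strided copy between interleaved and planar storage. A plain sketch of the interleaving direction, with placeholder names, for readers unfamiliar with the AudioData helpers:

    // Sketch only: the effect of the Float32 -> Float32 interleaveSamples call above,
    // ignoring the endianness conversion (a no-op on little-endian targets).
    // 'src', 'numChannels', 'dstInterleaved' and 'numSamples' are placeholder names.
    void interleaveFloats (const float* const* src, int numChannels,
                           float* dstInterleaved, int numSamples)
    {
        for (int ch = 0; ch < numChannels; ++ch)
            for (int i = 0; i < numSamples; ++i)
                dstInterleaved[i * numChannels + ch] = src[ch][i];
    }
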
@@ -132,13 +132,10 @@ private:
            source->source->getNextAudioBlock (info);

-            typedef AudioData::Pointer <AudioData::Int16, AudioData::LittleEndian, AudioData::Interleaved, AudioData::NonConst> CDSampleFormat;
-            typedef AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian, AudioData::NonInterleaved, AudioData::Const> SourceSampleFormat;
-
-            CDSampleFormat left (buffer, 2);
-            left.convertSamples (SourceSampleFormat (tempBuffer.getReadPointer (0)), numSamples);
-
-            CDSampleFormat right (buffer + 2, 2);
-            right.convertSamples (SourceSampleFormat (tempBuffer.getReadPointer (1)), numSamples);
+            AudioData::interleaveSamples<AudioData::Float32, AudioData::NativeEndian,
+                                         AudioData::Int16, AudioData::LittleEndian> (tempBuffer.getArrayOfReadPointers(), 2,
+                                                                                     reinterpret_cast<uint16*> (buffer), 2,
+                                                                                     numSamples);

            source->readPosition += numSamples;
        }
@@ -388,16 +388,10 @@ bool AudioCDBurner::addAudioTrack (AudioSource* audioSource, int numSamples)
        buffer.clear (bytesPerBlock);

-        typedef AudioData::Pointer <AudioData::Int16, AudioData::LittleEndian,
-                                    AudioData::Interleaved, AudioData::NonConst> CDSampleFormat;
-        typedef AudioData::Pointer <AudioData::Float32, AudioData::NativeEndian,
-                                    AudioData::NonInterleaved, AudioData::Const> SourceSampleFormat;
-
-        CDSampleFormat left (buffer, 2);
-        left.convertSamples (SourceSampleFormat (sourceBuffer.getReadPointer (0)), samplesPerBlock);
-
-        CDSampleFormat right (buffer + 2, 2);
-        right.convertSamples (SourceSampleFormat (sourceBuffer.getReadPointer (1)), samplesPerBlock);
+        AudioData::interleaveSamples<AudioData::Float32, AudioData::NativeEndian,
+                                     AudioData::Int16, AudioData::LittleEndian> (sourceBuffer.getArrayOfReadPointers(), 2,
+                                                                                 reinterpret_cast<uint16*> (buffer), 2,
+                                                                                 samplesPerBlock);

        hr = pimpl->redbook->AddAudioTrackBlocks (buffer, bytesPerBlock);
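
Note on the two CD hunks above: both channel counts are hard-coded to 2 because Red Book CD audio is interleaved 16-bit little-endian stereo, so a block of bytesPerBlock bytes holds bytesPerBlock / 4 sample frames. A small sketch of that relationship (an assumption about the surrounding code, not taken from the patch):

    // Red Book audio: 2 channels x 2 bytes per sample = 4 bytes per frame,
    // hence the fixed '2' source and destination channel counts above.
    const int samplesPerBlock = bytesPerBlock / (2 * (int) sizeof (int16));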