The JUCE cross-platform C++ framework, with DISTRHO/KXStudio specific changes
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

401 lines
13KB

  1. /*!
  2. @file AudioUnitSDK/AUEffectBase.cpp
  3. @copyright © 2000-2021 Apple Inc. All rights reserved.
  4. */
  5. #include <AudioUnitSDK/AUEffectBase.h>
  6. #include <cstddef>
/*
	This class does not handle N-to-M channel effects as well as it should.
	The problem areas, when the input and output channel counts differ, are:
	- ProcessInPlace: there will be problems if InputChan != OutputChan.
	- Bypass: it's just passing the buffers through when not processing them.
*/
  13. namespace ausdk {
  14. //_____________________________________________________________________________
  15. //
  16. AUEffectBase::AUEffectBase(AudioComponentInstance audioUnit, bool inProcessesInPlace)
  17. : AUBase(audioUnit, 1, 1), // 1 in bus, 1 out bus
  18. mProcessesInPlace(inProcessesInPlace)
  19. #if TARGET_OS_IPHONE
  20. ,
  21. mOnlyOneKernel(false)
  22. #endif
  23. {
  24. }
  25. //_____________________________________________________________________________
  26. //
  27. void AUEffectBase::Cleanup()
  28. {
  29. mKernelList.clear();
  30. mMainOutput = nullptr;
  31. mMainInput = nullptr;
  32. }
  33. //_____________________________________________________________________________
  34. //
  35. OSStatus AUEffectBase::Initialize()
  36. {
  37. // get our current numChannels for input and output
  38. const auto auNumInputs = static_cast<SInt16>(Input(0).GetStreamFormat().mChannelsPerFrame);
  39. const auto auNumOutputs = static_cast<SInt16>(Output(0).GetStreamFormat().mChannelsPerFrame);
  40. // does the unit publish specific information about channel configurations?
  41. const AUChannelInfo* auChannelConfigs = nullptr;
  42. const UInt32 numIOconfigs = SupportedNumChannels(&auChannelConfigs);
  43. if ((numIOconfigs > 0) && (auChannelConfigs != nullptr)) {
  44. bool foundMatch = false;
  45. for (UInt32 i = 0; (i < numIOconfigs) && !foundMatch; ++i) {
  46. const SInt16 configNumInputs = auChannelConfigs[i].inChannels; // NOLINT
  47. const SInt16 configNumOutputs = auChannelConfigs[i].outChannels; // NOLINT
  48. if ((configNumInputs < 0) && (configNumOutputs < 0)) {
  49. // unit accepts any number of channels on input and output
  50. if (((configNumInputs == -1) && (configNumOutputs == -2)) ||
  51. ((configNumInputs == -2) &&
  52. (configNumOutputs == -1))) { // NOLINT repeated branch below
  53. foundMatch = true;
  54. // unit accepts any number of channels on input and output IFF they are the same
  55. // number on both scopes
  56. } else if (((configNumInputs == -1) && (configNumOutputs == -1)) &&
  57. (auNumInputs == auNumOutputs)) {
  58. foundMatch = true;
  59. // unit has specified a particular number of channels on both scopes
  60. } else {
  61. continue;
  62. }
  63. } else {
  64. // the -1 case on either scope is saying that the unit doesn't care about the
  65. // number of channels on that scope
  66. const bool inputMatch = (auNumInputs == configNumInputs) || (configNumInputs == -1);
  67. const bool outputMatch =
  68. (auNumOutputs == configNumOutputs) || (configNumOutputs == -1);
  69. if (inputMatch && outputMatch) {
  70. foundMatch = true;
  71. }
  72. }
  73. }
  74. if (!foundMatch) {
  75. return kAudioUnitErr_FormatNotSupported;
  76. }
  77. } else {
  78. // there is no specifically published channel info
  79. // so for those kinds of effects, the assumption is that the channels (whatever their
  80. // number) should match on both scopes
  81. if ((auNumOutputs != auNumInputs) || (auNumOutputs == 0)) {
  82. return kAudioUnitErr_FormatNotSupported;
  83. }
  84. }
  85. MaintainKernels();
  86. mMainOutput = &Output(0);
  87. mMainInput = &Input(0);
  88. const AudioStreamBasicDescription format = GetStreamFormat(kAudioUnitScope_Output, 0);
  89. mBytesPerFrame = format.mBytesPerFrame;
  90. return noErr;
  91. }
  92. OSStatus AUEffectBase::Reset(AudioUnitScope inScope, AudioUnitElement inElement)
  93. {
  94. for (auto& kernel : mKernelList) {
  95. if (kernel) {
  96. kernel->Reset();
  97. }
  98. }
  99. return AUBase::Reset(inScope, inElement);
  100. }
  101. OSStatus AUEffectBase::GetPropertyInfo(AudioUnitPropertyID inID, AudioUnitScope inScope,
  102. AudioUnitElement inElement, UInt32& outDataSize, bool& outWritable)
  103. {
  104. if (inScope == kAudioUnitScope_Global) {
  105. switch (inID) {
  106. case kAudioUnitProperty_BypassEffect:
  107. case kAudioUnitProperty_InPlaceProcessing:
  108. outWritable = true;
  109. outDataSize = sizeof(UInt32);
  110. return noErr;
  111. default:
  112. break;
  113. }
  114. }
  115. return AUBase::GetPropertyInfo(inID, inScope, inElement, outDataSize, outWritable);
  116. }
  117. OSStatus AUEffectBase::GetProperty(
  118. AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void* outData)
  119. {
  120. if (inScope == kAudioUnitScope_Global) {
  121. switch (inID) {
  122. case kAudioUnitProperty_BypassEffect:
  123. *static_cast<UInt32*>(outData) = (IsBypassEffect() ? 1 : 0); // NOLINT
  124. return noErr;
  125. case kAudioUnitProperty_InPlaceProcessing:
  126. *static_cast<UInt32*>(outData) = (mProcessesInPlace ? 1 : 0); // NOLINT
  127. return noErr;
  128. default:
  129. break;
  130. }
  131. }
  132. return AUBase::GetProperty(inID, inScope, inElement, outData);
  133. }
  134. OSStatus AUEffectBase::SetProperty(AudioUnitPropertyID inID, AudioUnitScope inScope,
  135. AudioUnitElement inElement, const void* inData, UInt32 inDataSize)
  136. {
  137. if (inScope == kAudioUnitScope_Global) {
  138. switch (inID) {
  139. case kAudioUnitProperty_BypassEffect: {
  140. if (inDataSize < sizeof(UInt32)) {
  141. return kAudioUnitErr_InvalidPropertyValue;
  142. }
  143. const bool tempNewSetting = *static_cast<const UInt32*>(inData) != 0;
  144. // we're changing the state of bypass
  145. if (tempNewSetting != IsBypassEffect()) {
  146. if (!tempNewSetting && IsBypassEffect() &&
  147. IsInitialized()) { // turning bypass off and we're initialized
  148. Reset(kAudioUnitScope_Global, 0);
  149. }
  150. SetBypassEffect(tempNewSetting);
  151. }
  152. return noErr;
  153. }
  154. case kAudioUnitProperty_InPlaceProcessing:
  155. mProcessesInPlace = *static_cast<const UInt32*>(inData) != 0;
  156. return noErr;
  157. default:
  158. break;
  159. }
  160. }
  161. return AUBase::SetProperty(inID, inScope, inElement, inData, inDataSize);
  162. }
  163. void AUEffectBase::MaintainKernels()
  164. {
  165. #if TARGET_OS_IPHONE
  166. const UInt32 nKernels = mOnlyOneKernel ? 1 : GetNumberOfChannels();
  167. #else
  168. const UInt32 nKernels = GetNumberOfChannels();
  169. #endif
  170. if (mKernelList.size() < nKernels) {
  171. mKernelList.reserve(nKernels);
  172. for (auto i = static_cast<UInt32>(mKernelList.size()); i < nKernels; ++i) {
  173. mKernelList.push_back(NewKernel());
  174. }
  175. } else {
  176. while (mKernelList.size() > nKernels) {
  177. mKernelList.pop_back();
  178. }
  179. }
  180. for (UInt32 i = 0; i < nKernels; i++) {
  181. if (mKernelList[i]) {
  182. mKernelList[i]->SetChannelNum(i);
  183. }
  184. }
  185. }
  186. bool AUEffectBase::StreamFormatWritable(AudioUnitScope /*scope*/, AudioUnitElement /*element*/)
  187. {
  188. return !IsInitialized();
  189. }
  190. OSStatus AUEffectBase::ChangeStreamFormat(AudioUnitScope inScope, AudioUnitElement inElement,
  191. const AudioStreamBasicDescription& inPrevFormat, const AudioStreamBasicDescription& inNewFormat)
  192. {
  193. const OSStatus result =
  194. AUBase::ChangeStreamFormat(inScope, inElement, inPrevFormat, inNewFormat);
  195. if (result == noErr) {
  196. // for the moment this only dependency we know about
  197. // where a parameter's range may change is with the sample rate
  198. // and effects are only publishing parameters in the global scope!
  199. if (GetParamHasSampleRateDependency() &&
  200. inPrevFormat.mSampleRate != inNewFormat.mSampleRate) {
  201. PropertyChanged(kAudioUnitProperty_ParameterList, kAudioUnitScope_Global, 0);
  202. }
  203. }
  204. return result;
  205. }
  206. // ____________________________________________________________________________
  207. //
  208. // This method is called (potentially repeatedly) by ProcessForScheduledParams()
  209. // in order to perform the actual DSP required for this portion of the entire buffer
  210. // being processed. The entire buffer can be divided up into smaller "slices"
  211. // according to the timestamps on the scheduled parameters...
  212. //
  213. OSStatus AUEffectBase::ProcessScheduledSlice(void* inUserData, UInt32 /*inStartFrameInBuffer*/,
  214. UInt32 inSliceFramesToProcess, UInt32 /*inTotalBufferFrames*/)
  215. {
  216. const ScheduledProcessParams& sliceParams = *static_cast<ScheduledProcessParams*>(inUserData);
  217. AudioUnitRenderActionFlags& actionFlags = *sliceParams.actionFlags;
  218. AudioBufferList& inputBufferList = *sliceParams.inputBufferList;
  219. AudioBufferList& outputBufferList = *sliceParams.outputBufferList;
  220. UInt32 channelSize = inSliceFramesToProcess * mBytesPerFrame;
  221. // fix the size of the buffer we're operating on before we render this slice of time
  222. for (UInt32 i = 0; i < inputBufferList.mNumberBuffers; i++) {
  223. inputBufferList.mBuffers[i].mDataByteSize = // NOLINT
  224. inputBufferList.mBuffers[i].mNumberChannels * channelSize; // NOLINT
  225. }
  226. for (UInt32 i = 0; i < outputBufferList.mNumberBuffers; i++) {
  227. outputBufferList.mBuffers[i].mDataByteSize = // NOLINT
  228. outputBufferList.mBuffers[i].mNumberChannels * channelSize; // NOLINT
  229. }
  230. // process the buffer
  231. const OSStatus result =
  232. ProcessBufferLists(actionFlags, inputBufferList, outputBufferList, inSliceFramesToProcess);
  233. // we just partially processed the buffers, so increment the data pointers to the next part of
  234. // the buffer to process
  235. for (UInt32 i = 0; i < inputBufferList.mNumberBuffers; i++) {
  236. inputBufferList.mBuffers[i].mData = // NOLINT
  237. static_cast<std::byte*>(inputBufferList.mBuffers[i].mData) + // NOLINT
  238. inputBufferList.mBuffers[i].mNumberChannels * channelSize; // NOLINT
  239. }
  240. for (UInt32 i = 0; i < outputBufferList.mNumberBuffers; i++) {
  241. outputBufferList.mBuffers[i].mData = // NOLINT
  242. static_cast<std::byte*>(outputBufferList.mBuffers[i].mData) + // NOLINT
  243. outputBufferList.mBuffers[i].mNumberChannels * channelSize; // NOLINT
  244. }
  245. return result;
  246. }
  247. // ____________________________________________________________________________
  248. //
// Top-level render entry: pulls input for the single input bus, then either
// passes it through (bypass), processes the whole buffer in one call, or
// slices it at scheduled-parameter boundaries via ProcessForScheduledParams().
OSStatus AUEffectBase::Render(
	AudioUnitRenderActionFlags& ioActionFlags, const AudioTimeStamp& inTimeStamp, UInt32 nFrames)
{
	if (!HasInput(0)) {
		return kAudioUnitErr_NoConnection;
	}
	OSStatus result = noErr;
	// Fetch this cycle's input samples from the upstream connection.
	result = mMainInput->PullInput(ioActionFlags, inTimeStamp, 0 /* element */, nFrames);
	if (result == noErr) {
		// In-place mode: if the output would otherwise allocate its own buffer,
		// make it alias the input's buffer list instead.
		if (ProcessesInPlace() && mMainOutput->WillAllocateBuffer()) {
			mMainOutput->SetBufferList(mMainInput->GetBufferList());
		}
		if (ShouldBypassEffect()) {
			// leave silence bit alone
			// Bypass: in-place output already aliases the input, so only a
			// distinct output buffer needs the samples copied across.
			if (!ProcessesInPlace()) {
				mMainInput->CopyBufferContentsTo(mMainOutput->GetBufferList());
			}
		} else {
			auto& paramEventList = GetParamEventList();
			if (paramEventList.empty()) {
				// No scheduled parameter events: process the full buffer at once.
				// this will read/write silence bit
				result = ProcessBufferLists(ioActionFlags, mMainInput->GetBufferList(),
					mMainOutput->GetBufferList(), nFrames);
			} else {
				// deal with scheduled parameters...
				AudioBufferList& inputBufferList = mMainInput->GetBufferList();
				AudioBufferList& outputBufferList = mMainOutput->GetBufferList();
				ScheduledProcessParams processParams{ .actionFlags = &ioActionFlags,
					.inputBufferList = &inputBufferList,
					.outputBufferList = &outputBufferList };
				// divide up the buffer into slices according to scheduled params then
				// do the DSP for each slice (ProcessScheduledSlice() called for each slice)
				result = ProcessForScheduledParams(paramEventList, nFrames, &processParams);
				// ProcessScheduledSlice() advanced mData and shrank mDataByteSize
				// slice by slice; rewind the pointers and restore the sizes so the
				// lists once again describe the entire nFrames buffer.
				const UInt32 channelSize = nFrames * mBytesPerFrame;
				for (UInt32 i = 0; i < inputBufferList.mNumberBuffers; i++) {
					const UInt32 size =
						inputBufferList.mBuffers[i].mNumberChannels * channelSize; // NOLINT
					inputBufferList.mBuffers[i].mData = // NOLINT
						static_cast<std::byte*>(inputBufferList.mBuffers[i].mData) - size; // NOLINT
					inputBufferList.mBuffers[i].mDataByteSize = size; // NOLINT
				}
				for (UInt32 i = 0; i < outputBufferList.mNumberBuffers; i++) {
					const UInt32 size =
						outputBufferList.mBuffers[i].mNumberChannels * channelSize; // NOLINT
					outputBufferList.mBuffers[i].mData = // NOLINT
						static_cast<std::byte*>(outputBufferList.mBuffers[i].mData) -
						size; // NOLINT
					outputBufferList.mBuffers[i].mDataByteSize = size; // NOLINT
				}
			}
		}
		// If the render is flagged silent and the output does not alias the
		// input, actually zero the output so its contents match the flag.
		if (((ioActionFlags & kAudioUnitRenderAction_OutputIsSilence) != 0u) &&
			!ProcessesInPlace()) {
			AUBufferList::ZeroBuffer(mMainOutput->GetBufferList());
		}
	}
	return result;
}
  308. OSStatus AUEffectBase::ProcessBufferLists(AudioUnitRenderActionFlags& ioActionFlags,
  309. const AudioBufferList& inBuffer, AudioBufferList& outBuffer, UInt32 inFramesToProcess)
  310. {
  311. if (ShouldBypassEffect()) {
  312. return noErr;
  313. }
  314. bool ioSilence = false;
  315. const bool silentInput = IsInputSilent(ioActionFlags, inFramesToProcess);
  316. ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
  317. for (UInt32 channel = 0; channel < mKernelList.size(); ++channel) {
  318. auto& kernel = mKernelList[channel];
  319. if (!kernel) {
  320. continue;
  321. }
  322. ioSilence = silentInput;
  323. const AudioBuffer* const srcBuffer = &inBuffer.mBuffers[channel]; // NOLINT subscript
  324. AudioBuffer* const destBuffer = &outBuffer.mBuffers[channel]; // NOLINT subscript
  325. kernel->Process(static_cast<const Float32*>(srcBuffer->mData),
  326. static_cast<Float32*>(destBuffer->mData), inFramesToProcess, ioSilence);
  327. if (!ioSilence) {
  328. ioActionFlags &= ~kAudioUnitRenderAction_OutputIsSilence;
  329. }
  330. }
  331. return noErr;
  332. }
  333. Float64 AUEffectBase::GetSampleRate() { return Output(0).GetStreamFormat().mSampleRate; }
  334. UInt32 AUEffectBase::GetNumberOfChannels() { return Output(0).GetStreamFormat().mChannelsPerFrame; }
  335. } // namespace ausdk