diff --git a/src/audio-contexts/audio-context.ts b/src/audio-contexts/audio-context.ts
index a8fbdf7cf..41af85186 100644
--- a/src/audio-contexts/audio-context.ts
+++ b/src/audio-contexts/audio-context.ts
@@ -1,12 +1,11 @@
 import { Injector } from '@angular/core';
-import { AnalyserNode } from '../audio-nodes/analyser-node';
 import { ChannelMergerNode } from '../audio-nodes/channel-merger-node';
 import { ChannelSplitterNode } from '../audio-nodes/channel-splitter-node';
 import { MediaElementAudioSourceNode } from '../audio-nodes/media-element-audio-source-node';
 import { MediaStreamAudioSourceNode } from '../audio-nodes/media-stream-audio-source-node';
 import { INVALID_STATE_ERROR_FACTORY_PROVIDER, InvalidStateErrorFactory } from '../factories/invalid-state-error';
 import { isValidLatencyHint } from '../helpers/is-valid-latency-hint';
-import { IAnalyserNode, IAudioContext, IAudioContextOptions } from '../interfaces';
+import { IAudioContext, IAudioContextOptions } from '../interfaces';
 import {
     UNPATCHED_AUDIO_CONTEXT_CONSTRUCTOR_PROVIDER,
     unpatchedAudioContextConstructor as nptchdDCntxtCnstrctr
@@ -75,10 +74,6 @@ export class AudioContext extends BaseAudioContext implements IAudioContext {
         return (this._state !== null) ? this._state : this._unpatchedAudioContext.state;
     }
 
-    public createAnalyser (): IAnalyserNode {
-        return new AnalyserNode(this);
-    }
-
     public createChannelMerger (numberOfInputs = 6) {
         return new ChannelMergerNode(this, { numberOfInputs });
     }
diff --git a/src/audio-contexts/base-audio-context.ts b/src/audio-contexts/base-audio-context.ts
index d22d4a27c..0044ab830 100644
--- a/src/audio-contexts/base-audio-context.ts
+++ b/src/audio-contexts/base-audio-context.ts
@@ -1,5 +1,6 @@
 import { addAudioWorkletModule } from '../add-audio-worklet-module';
 import { AudioBuffer } from '../audio-buffer';
+import { AnalyserNode } from '../audio-nodes/analyser-node';
 import { AudioBufferSourceNode } from '../audio-nodes/audio-buffer-source-node';
 import { BiquadFilterNode } from '../audio-nodes/biquad-filter-node';
 import { ConstantSourceNode } from '../audio-nodes/constant-source-node';
@@ -8,6 +9,7 @@ import { IIRFilterNode } from '../audio-nodes/iir-filter-node';
 import { OscillatorNode } from '../audio-nodes/oscillator-node';
 import { decodeAudioData } from '../decode-audio-data';
 import {
+    IAnalyserNode,
     IAudioBuffer,
     IAudioBufferSourceNode,
     IAudioWorklet,
@@ -43,6 +45,10 @@ export class BaseAudioContext extends MinimalBaseAudioContext implements IBaseAu
         return this._audioWorklet;
     }
 
+    public createAnalyser (): IAnalyserNode {
+        return new AnalyserNode(this);
+    }
+
     public createBiquadFilter (): IBiquadFilterNode {
         return new BiquadFilterNode(this);
     }
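Moving `createAnalyser()` from `AudioContext` into `BaseAudioContext` makes the factory method available on every context that extends the base class, most notably `OfflineAudioContext`. A minimal sketch of the call sites this enables, assuming the classes are imported straight from the source tree shown in this diff:

```ts
import { AudioContext } from './src/audio-contexts/audio-context';
import { OfflineAudioContext } from './src/audio-contexts/offline-audio-context';

// Before this change only an AudioContext exposed createAnalyser(). The
// method is now inherited from BaseAudioContext, so an OfflineAudioContext
// offers it as well.
const audioContext = new AudioContext();
const onlineAnalyserNode = audioContext.createAnalyser();

const offlineAudioContext = new OfflineAudioContext({ length: 44100, sampleRate: 44100 });
const offlineAnalyserNode = offlineAudioContext.createAnalyser();
```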
diff --git a/src/audio-nodes/analyser-node.ts b/src/audio-nodes/analyser-node.ts
index ec2fa97f0..c7d642630 100644
--- a/src/audio-nodes/analyser-node.ts
+++ b/src/audio-nodes/analyser-node.ts
@@ -1,22 +1,10 @@
-import { Injector } from '@angular/core';
-import { cacheTestResult } from '../helpers/cache-test-result';
+import { AUDIO_NODE_RENDERER_STORE } from '../globals';
+import { createNativeAnalyserNode } from '../helpers/create-native-analyser-node';
 import { getNativeContext } from '../helpers/get-native-context';
 import { isOfflineAudioContext } from '../helpers/is-offline-audio-context';
 import { IAnalyserNode, IAnalyserOptions, IMinimalBaseAudioContext } from '../interfaces';
-import {
-    ANALYSER_NODE_GET_FLOAT_TIME_DOMAIN_DATA_SUPPORT_TESTER_PROVIDER,
-    AnalyserNodeGetFloatTimeDomainDataSupportTester
-} from '../support-testers/analyser-node-get-float-time-domain-data';
-import {
-    TChannelCountMode,
-    TChannelInterpretation,
-    TNativeAnalyserNode,
-    TUnpatchedAudioContext,
-    TUnpatchedOfflineAudioContext
-} from '../types';
-import {
-    ANALYSER_NODE_GET_FLOAT_TIME_DOMAIN_DATA_METHOD_WRAPPER_PROVIDER,
-    AnalyserNodeGetFloatTimeDomainDataMethodWrapper
-} from '../wrappers/analyser-node-get-float-time-domain-data-method';
+import { AnalyserNodeRenderer } from '../renderers/analyser-node';
+import { TChannelCountMode, TChannelInterpretation, TNativeAnalyserNode } from '../types';
 import { NoneAudioDestinationNode } from './none-audio-destination-node';
 
 const DEFAULT_OPTIONS: IAnalyserOptions = {
@@ -29,49 +17,20 @@
     smoothingTimeConstant: 0.8
 };
 
-const injector = Injector.create({
-    providers: [
-        ANALYSER_NODE_GET_FLOAT_TIME_DOMAIN_DATA_METHOD_WRAPPER_PROVIDER,
-        ANALYSER_NODE_GET_FLOAT_TIME_DOMAIN_DATA_SUPPORT_TESTER_PROVIDER
-    ]
-});
-
-const analyserNodeGetFloatTimeDomainDataMethodWrapper = injector.get(AnalyserNodeGetFloatTimeDomainDataMethodWrapper);
-const analyserNodeGetFloatTimeDomainDataSupportTester = injector.get(AnalyserNodeGetFloatTimeDomainDataSupportTester);
-
-const isSupportingAnalyserNodeGetFloatTimeDomainData = (context: TUnpatchedAudioContext | TUnpatchedOfflineAudioContext) => cacheTestResult(
-    AnalyserNodeGetFloatTimeDomainDataSupportTester,
-    () => analyserNodeGetFloatTimeDomainDataSupportTester.test(context)
-);
-
-const createNativeNode = (nativeContext: TUnpatchedAudioContext | TUnpatchedOfflineAudioContext) => {
-    if (isOfflineAudioContext(nativeContext)) {
-        throw new Error('This is not yet supported.');
-    }
-
-    const nativeNode = nativeContext.createAnalyser();
-
-    // Bug #37: Only Edge and Safari create an AnalyserNode with the default properties.
-    if (nativeNode.channelCount === 1) {
-        nativeNode.channelCount = 2;
-    }
-
-    // Bug #36: Safari does not support getFloatTimeDomainData() yet.
-    if (!isSupportingAnalyserNodeGetFloatTimeDomainData(nativeContext)) {
-        analyserNodeGetFloatTimeDomainDataMethodWrapper.wrap(nativeNode);
-    }
-
-    return nativeNode;
-};
-
 export class AnalyserNode extends NoneAudioDestinationNode implements IAnalyserNode {
 
     constructor (context: IMinimalBaseAudioContext, options: Partial<IAnalyserOptions> = DEFAULT_OPTIONS) {
         const nativeContext = getNativeContext(context);
-        const { channelCount } = { ...DEFAULT_OPTIONS, ...options };
-        const nativeNode = createNativeNode(nativeContext);
+        const mergedOptions = { ...DEFAULT_OPTIONS, ...options };
+        const nativeNode = createNativeAnalyserNode(nativeContext, mergedOptions);
 
-        super(context, nativeNode, channelCount);
+        super(context, nativeNode, mergedOptions.channelCount);
+
+        if (isOfflineAudioContext(nativeContext)) {
+            const analyserNodeRenderer = new AnalyserNodeRenderer(this);
+
+            AUDIO_NODE_RENDERER_STORE.set(this, analyserNodeRenderer);
+        }
     }
 
     public get fftSize () {
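The constructor now merges the caller's options with `DEFAULT_OPTIONS` and, when the node belongs to an offline context, registers an `AnalyserNodeRenderer` in `AUDIO_NODE_RENDERER_STORE`. A condensed sketch of that deferred-rendering pattern; only the names are taken from the diff, and the `WeakMap` shape of the store is an assumption:

```ts
interface IRenderer {
    render (offlineAudioContext: OfflineAudioContext): Promise<AudioNode>;
}

// Hypothetical, simplified shape of the store: a WeakMap keyed by the proxy
// node, so that a renderer can be garbage collected together with its node.
const AUDIO_NODE_RENDERER_STORE = new WeakMap<object, IRenderer>();

class ExampleNode {
    constructor (isOffline: boolean) {
        if (isOffline) {
            // Nothing is rendered here. The renderer is only looked up and
            // invoked once the offline context starts rendering the graph.
            AUDIO_NODE_RENDERER_STORE.set(this, {
                render: async (offlineAudioContext) => offlineAudioContext.createAnalyser()
            });
        }
    }
}
```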
diff --git a/src/helpers/create-native-analyser-node.ts b/src/helpers/create-native-analyser-node.ts
new file mode 100644
index 000000000..f0e672199
--- /dev/null
+++ b/src/helpers/create-native-analyser-node.ts
@@ -0,0 +1,65 @@
+import { Injector } from '@angular/core';
+import { assignNativeAudioNodeOptions } from '../helpers/assign-native-audio-node-options';
+import { cacheTestResult } from '../helpers/cache-test-result';
+import { IAnalyserOptions } from '../interfaces';
+import {
+    ANALYSER_NODE_GET_FLOAT_TIME_DOMAIN_DATA_SUPPORT_TESTER_PROVIDER,
+    AnalyserNodeGetFloatTimeDomainDataSupportTester
+} from '../support-testers/analyser-node-get-float-time-domain-data';
+import { TNativeAnalyserNode, TUnpatchedAudioContext, TUnpatchedOfflineAudioContext } from '../types';
+import {
+    ANALYSER_NODE_GET_FLOAT_TIME_DOMAIN_DATA_METHOD_WRAPPER_PROVIDER,
+    AnalyserNodeGetFloatTimeDomainDataMethodWrapper
+} from '../wrappers/analyser-node-get-float-time-domain-data-method';
+
+const injector = Injector.create({
+    providers: [
+        ANALYSER_NODE_GET_FLOAT_TIME_DOMAIN_DATA_METHOD_WRAPPER_PROVIDER,
+        ANALYSER_NODE_GET_FLOAT_TIME_DOMAIN_DATA_SUPPORT_TESTER_PROVIDER
+    ]
+});
+
+const analyserNodeGetFloatTimeDomainDataMethodWrapper = injector.get(AnalyserNodeGetFloatTimeDomainDataMethodWrapper);
+const analyserNodeGetFloatTimeDomainDataSupportTester = injector.get(AnalyserNodeGetFloatTimeDomainDataSupportTester);
+
+const isSupportingAnalyserNodeGetFloatTimeDomainData = (context: TUnpatchedAudioContext | TUnpatchedOfflineAudioContext) => cacheTestResult(
+    AnalyserNodeGetFloatTimeDomainDataSupportTester,
+    () => analyserNodeGetFloatTimeDomainDataSupportTester.test(context)
+);
+
+export const createNativeAnalyserNode = (
+    nativeContext: TUnpatchedAudioContext | TUnpatchedOfflineAudioContext,
+    options: Partial<IAnalyserOptions> = { }
+): TNativeAnalyserNode => {
+    const nativeNode = nativeContext.createAnalyser();
+
+    assignNativeAudioNodeOptions(nativeNode, options);
+
+    if (options.fftSize !== undefined) {
+        nativeNode.fftSize = options.fftSize;
+    }
+
+    if (options.maxDecibels !== undefined) {
+        nativeNode.maxDecibels = options.maxDecibels;
+    }
+
+    if (options.minDecibels !== undefined) {
+        nativeNode.minDecibels = options.minDecibels;
+    }
+
+    if (options.smoothingTimeConstant !== undefined) {
+        nativeNode.smoothingTimeConstant = options.smoothingTimeConstant;
+    }
+
+    // Bug #37: Only Edge and Safari create an AnalyserNode with the default properties.
+    if (nativeNode.channelCount === 1) {
+        nativeNode.channelCount = 2;
+    }
+
+    // Bug #36: Safari does not support getFloatTimeDomainData() yet.
+    if (!isSupportingAnalyserNodeGetFloatTimeDomainData(nativeContext)) {
+        analyserNodeGetFloatTimeDomainDataMethodWrapper.wrap(nativeNode);
+    }
+
+    return nativeNode;
+};
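`createNativeAnalyserNode()` now owns the two browser workarounds: forcing the default `channelCount` of 2 (Bug #37) and wrapping `getFloatTimeDomainData()` where it is missing (Bug #36). The actual wrapper lives in `src/wrappers/`; the following is only a minimal standalone sketch of the usual emulation technique, not the library's implementation:

```ts
// getByteTimeDomainData() is available everywhere and its unsigned bytes map
// linearly onto the [-1, 1] range that getFloatTimeDomainData() reports: a
// byte value of 128 corresponds to a sample value of 0.
const wrapGetFloatTimeDomainData = (analyserNode: AnalyserNode): void => {
    analyserNode.getFloatTimeDomainData = (array: Float32Array) => {
        const byteTimeDomainData = new Uint8Array(array.length);

        analyserNode.getByteTimeDomainData(byteTimeDomainData);

        for (let i = 0; i < array.length; i += 1) {
            array[i] = (byteTimeDomainData[i] - 128) / 128;
        }
    };
};
```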
diff --git a/src/interfaces/audio-context.ts b/src/interfaces/audio-context.ts
index 16c323290..b342eb5ee 100644
--- a/src/interfaces/audio-context.ts
+++ b/src/interfaces/audio-context.ts
@@ -1,4 +1,3 @@
-import { IAnalyserNode } from './analyser-node';
 import { IAudioNode } from './audio-node';
 import { IBaseAudioContext } from './base-audio-context';
 import { IMediaElementAudioSourceNode } from './media-element-audio-source-node';
@@ -7,9 +6,6 @@ import { IMinimalAudioContext } from './minimal-audio-context';
 
 export interface IAudioContext extends IBaseAudioContext, IMinimalAudioContext {
 
-    // @todo This should move into the IBaseAudioContext interface.
-    createAnalyser (): IAnalyserNode;
-
     // @todo This should move into the IBaseAudioContext interface.
     createChannelMerger (numberOfInputs?: number): IAudioNode;
 
diff --git a/src/interfaces/base-audio-context.ts b/src/interfaces/base-audio-context.ts
index aad0554bc..d89a74261 100644
--- a/src/interfaces/base-audio-context.ts
+++ b/src/interfaces/base-audio-context.ts
@@ -1,4 +1,5 @@
 import { TDecodeErrorCallback, TDecodeSuccessCallback } from '../types';
+import { IAnalyserNode } from './analyser-node';
 import { IAudioBufferSourceNode } from './audio-buffer-source-node';
 import { IAudioWorklet } from './audio-worklet';
 import { IBiquadFilterNode } from './biquad-filter-node';
@@ -14,6 +15,8 @@ export interface IBaseAudioContext extends IMinimalBaseAudioContext {
 
     audioWorklet: IAudioWorklet;
 
+    createAnalyser (): IAnalyserNode;
+
     createBiquadFilter (): IBiquadFilterNode;
 
     createBuffer (numberOfChannels: number, length: number, sampleRate: number): AudioBuffer;
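Declaring `createAnalyser()` on `IBaseAudioContext` resolves the `@todo` above and lets helper code be typed against the shared interface instead of special-casing `IAudioContext`. A small illustration; the import path mirrors this source tree and is an assumption:

```ts
import { IBaseAudioContext } from './src/interfaces';

// Accepts an AudioContext as well as an OfflineAudioContext, since both
// implement IBaseAudioContext.
const createConfiguredAnalyser = (context: IBaseAudioContext) => {
    const analyserNode = context.createAnalyser();

    analyserNode.minDecibels = -90;
    analyserNode.maxDecibels = -20;

    return analyserNode;
};
```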
diff --git a/src/renderers/analyser-node.ts b/src/renderers/analyser-node.ts
new file mode 100644
index 000000000..69dd7f535
--- /dev/null
+++ b/src/renderers/analyser-node.ts
@@ -0,0 +1,38 @@
+import { createNativeAnalyserNode } from '../helpers/create-native-analyser-node';
+import { getNativeNode } from '../helpers/get-native-node';
+import { isOwnedByContext } from '../helpers/is-owned-by-context';
+import { IAnalyserNode } from '../interfaces';
+import { TNativeAnalyserNode, TNativeAudioNode, TUnpatchedOfflineAudioContext } from '../types';
+import { AudioNodeRenderer } from './audio-node';
+
+export class AnalyserNodeRenderer extends AudioNodeRenderer {
+
+    private _nativeNode: null | TNativeAnalyserNode;
+
+    private _proxy: IAnalyserNode;
+
+    constructor (proxy: IAnalyserNode) {
+        super();
+
+        this._nativeNode = null;
+        this._proxy = proxy;
+    }
+
+    public async render (offlineAudioContext: TUnpatchedOfflineAudioContext): Promise<TNativeAudioNode> {
+        if (this._nativeNode !== null) {
+            return this._nativeNode;
+        }
+
+        this._nativeNode = <TNativeAnalyserNode> getNativeNode(this._proxy);
+
+        // If the initially used nativeNode was not constructed on the same OfflineAudioContext it needs to be created again.
+        if (!isOwnedByContext(this._nativeNode, offlineAudioContext)) {
+            this._nativeNode = createNativeAnalyserNode(offlineAudioContext);
+        }
+
+        await this._connectSources(offlineAudioContext, this._nativeNode);
+
+        return this._nativeNode;
+    }
+
+}
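`render()` memoizes its result and rebuilds the native node when the one created during graph construction belongs to a different context. The `isOwnedByContext()` helper is not part of this diff; presumably it reduces to a context comparison along these lines (an assumption, not the actual helper):

```ts
// Native AudioNodes cannot be connected across contexts, so a node that was
// created on a realtime AudioContext has to be recreated on the
// OfflineAudioContext which performs the rendering.
const isOwnedByContext = (nativeNode: AudioNode, nativeContext: BaseAudioContext): boolean => {
    return nativeNode.context === nativeContext;
};
```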
diff --git a/test/unit/audio-contexts/audio-context.js b/test/unit/audio-contexts/audio-context.js
index 756db6db6..2983edcc6 100644
--- a/test/unit/audio-contexts/audio-context.js
+++ b/test/unit/audio-contexts/audio-context.js
@@ -259,120 +259,8 @@ describe('AudioContext', () => {
 
     describe('createAnalyser()', () => {
 
-        it('should return an instance of the AnalyserNode interface', () => {
-            const analyserNode = audioContext.createAnalyser();
-
-            expect(analyserNode.channelCount).to.equal(2);
-            expect(analyserNode.channelCountMode).to.equal('max');
-            expect(analyserNode.channelInterpretation).to.equal('speakers');
-
-            expect(analyserNode.fftSize).to.equal(2048);
-            expect(analyserNode.frequencyBinCount).to.equal(1024);
-
-            expect(analyserNode.getByteFrequencyData).to.be.a('function');
-            expect(analyserNode.getByteTimeDomainData).to.be.a('function');
-
-            expect(analyserNode.getFloatFrequencyData).to.be.a('function');
-            expect(analyserNode.getFloatTimeDomainData).to.be.a('function');
-
-            expect(analyserNode.maxDecibels).to.equal(-30);
-            expect(analyserNode.minDecibels).to.equal(-100);
-
-            expect(analyserNode.numberOfInputs).to.equal(1);
-            expect(analyserNode.numberOfOutputs).to.equal(1);
-
-            expect(analyserNode.smoothingTimeConstant).to.closeTo(0.8, 0.0000001);
-        });
-
-        it('should throw an error if the AudioContext is closed', (done) => {
-            audioContext
-                .close()
-                .then(() => {
-                    audioContext.createAnalyser();
-                })
-                .catch((err) => {
-                    expect(err.code).to.equal(11);
-                    expect(err.name).to.equal('InvalidStateError');
-
-                    audioContext = new AudioContext();
-
-                    done();
-                });
-        });
-
-        it('should be chainable', () => {
-            const analyserNode = audioContext.createAnalyser();
-            const gainNode = audioContext.createGain();
-
-            expect(analyserNode.connect(gainNode)).to.equal(gainNode);
-        });
-
-        it('should be disconnectable', (done) => {
-            const candidate = audioContext.createAnalyser();
-            const dummy = audioContext.createGain();
-            const analyzer = createScriptProcessor(audioContext, 256, 1, 1);
-            // Safari does not play buffers which contain just one frame.
-            const ones = audioContext.createBuffer(1, 2, 44100);
-
-            ones.copyToChannel(new Float32Array([ 1, 1 ]), 0);
-
-            const source = audioContext.createBufferSource();
-
-            source.buffer = ones;
-            source.loop = true;
-
-            source.connect(candidate);
-            candidate.connect(analyzer);
-            analyzer.connect(audioContext.destination);
-            candidate.connect(dummy);
-            candidate.disconnect(dummy);
-
-            analyzer.onaudioprocess = (event) => {
-                const channelData = event.inputBuffer.getChannelData(0);
-
-                if (Array.from(channelData).indexOf(1) > -1) {
-                    source.stop();
-
-                    analyzer.onaudioprocess = null;
-
-                    source.disconnect(candidate);
-                    candidate.disconnect(analyzer);
-                    analyzer.disconnect(audioContext.destination);
-
-                    done();
-                }
-            };
-
-            source.start();
-        });
-
-        it('should not be connectable to a node of another AudioContext', (done) => {
-            const analyserNode = audioContext.createAnalyser();
-            const anotherAudioContext = new AudioContext();
-
-            try {
-                analyserNode.connect(anotherAudioContext.destination);
-            } catch (err) {
-                expect(err.code).to.equal(15);
-                expect(err.name).to.equal('InvalidAccessError');
-
-                done();
-            } finally {
-                anotherAudioContext.close();
-            }
-        });
-
-        describe('getFloatTimeDomainData()', () => {
-
-            it('should return time-domain data', () => {
-                const analyserNode = audioContext.createAnalyser();
-                const data = new Float32Array(analyserNode.fftSize);
-
-                analyserNode.getFloatTimeDomainData(data);
-
-                expect(data[0]).to.equal(0);
-            });
-
+        it('should be a function', () => {
+            expect(audioContext.createAnalyser).to.be.a('function');
         });
 
     });
diff --git a/test/unit/audio-contexts/offline-audio-context.js b/test/unit/audio-contexts/offline-audio-context.js
index 84ca58ff7..16f82f8a9 100644
--- a/test/unit/audio-contexts/offline-audio-context.js
+++ b/test/unit/audio-contexts/offline-audio-context.js
@@ -355,6 +355,20 @@ describe('OfflineAudioContext', () => {
 
     });
 
+    describe('createAnalyser()', () => {
+
+        let offlineAudioContext;
+
+        beforeEach(() => {
+            offlineAudioContext = new OfflineAudioContext({ length: 1, sampleRate: 44100 });
+        });
+
+        it('should be a function', () => {
+            expect(offlineAudioContext.createAnalyser).to.be.a('function');
+        });
+
+    });
+
     describe('createBiquadFilter()', () => {
 
         let offlineAudioContext;
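The detailed `createAnalyser()` assertions that were removed from the two context test files above now live in the dedicated suite in `test/unit/audio-nodes/analyser-node.js` below, which uses leche's data provider to run every test against each context flavour and construction style. The pattern in a nutshell, with illustrative data that is not taken from the suite:

```ts
// leche.withData() repeats the enclosed tests once per data row. The first
// element of each row doubles as a unique label for the generated test names.
leche.withData([
    [ 'with a plain Array', [ 1, 2, 3 ] ],
    [ 'with a Float32Array', new Float32Array([ 1, 2, 3 ]) ]
], (_, values) => {

    it('should contain three values', () => {
        expect(values.length).to.equal(3);
    });

});
```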
diff --git a/test/unit/audio-nodes/analyser-node.js b/test/unit/audio-nodes/analyser-node.js
new file mode 100644
index 000000000..b97c79435
--- /dev/null
+++ b/test/unit/audio-nodes/analyser-node.js
@@ -0,0 +1,471 @@
+import { AnalyserNode } from '../../../src/audio-nodes/analyser-node';
+import { AudioBuffer } from '../../../src/audio-buffer';
+import { AudioBufferSourceNode } from '../../../src/audio-nodes/audio-buffer-source-node';
+import { AudioContext } from '../../../src/audio-contexts/audio-context';
+import { GainNode } from '../../../src/audio-nodes/gain-node';
+import { MinimalAudioContext } from '../../../src/audio-contexts/minimal-audio-context';
+import { MinimalOfflineAudioContext } from '../../../src/audio-contexts/minimal-offline-audio-context';
+import { OfflineAudioContext } from '../../../src/audio-contexts/offline-audio-context';
+import { createRenderer } from '../../helper/create-renderer';
+
+describe('AnalyserNode', () => {
+
+    // @todo leche seems to need a unique string as identifier as first argument.
+    leche.withData([
+        [
+            'constructor with AudioContext',
+            () => new AudioContext(),
+            (context, options = null) => {
+                if (options === null) {
+                    return new AnalyserNode(context);
+                }
+
+                return new AnalyserNode(context, options);
+            }
+        ], [
+            'constructor with MinimalAudioContext',
+            () => new MinimalAudioContext(),
+            (context, options = null) => {
+                if (options === null) {
+                    return new AnalyserNode(context);
+                }
+
+                return new AnalyserNode(context, options);
+            }
+        ], [
+            'constructor with OfflineAudioContext',
+            () => new OfflineAudioContext({ length: 5, sampleRate: 44100 }),
+            (context, options = null) => {
+                if (options === null) {
+                    return new AnalyserNode(context);
+                }
+
+                return new AnalyserNode(context, options);
+            }
+        ], [
+            'constructor with MinimalOfflineAudioContext',
+            () => new MinimalOfflineAudioContext({ length: 5, sampleRate: 44100 }),
+            (context, options = null) => {
+                if (options === null) {
+                    return new AnalyserNode(context);
+                }
+
+                return new AnalyserNode(context, options);
+            }
+        ], [
+            'factory function of AudioContext',
+            () => new AudioContext(),
+            (context, options = null) => {
+                const analyserNode = context.createAnalyser();
+
+                if (options !== null && options.channelCount !== undefined) {
+                    analyserNode.channelCount = options.channelCount;
+                }
+
+                if (options !== null && options.channelCountMode !== undefined) {
+                    analyserNode.channelCountMode = options.channelCountMode;
+                }
+
+                if (options !== null && options.channelInterpretation !== undefined) {
+                    analyserNode.channelInterpretation = options.channelInterpretation;
+                }
+
+                if (options !== null && options.fftSize !== undefined) {
+                    analyserNode.fftSize = options.fftSize;
+                }
+
+                if (options !== null && options.maxDecibels !== undefined) {
+                    analyserNode.maxDecibels = options.maxDecibels;
+                }
+
+                if (options !== null && options.minDecibels !== undefined) {
+                    analyserNode.minDecibels = options.minDecibels;
+                }
+
+                if (options !== null && options.smoothingTimeConstant !== undefined) {
+                    analyserNode.smoothingTimeConstant = options.smoothingTimeConstant;
+                }
+
+                return analyserNode;
+            }
+        ], [
+            'factory function of OfflineAudioContext',
+            () => new OfflineAudioContext({ length: 5, sampleRate: 44100 }),
+            (context, options = null) => {
+                const analyserNode = context.createAnalyser();
+
+                if (options !== null && options.channelCount !== undefined) {
+                    analyserNode.channelCount = options.channelCount;
+                }
+
+                if (options !== null && options.channelCountMode !== undefined) {
+                    analyserNode.channelCountMode = options.channelCountMode;
+                }
+
+                if (options !== null && options.channelInterpretation !== undefined) {
+                    analyserNode.channelInterpretation = options.channelInterpretation;
+                }
+
+                if (options !== null && options.fftSize !== undefined) {
+                    analyserNode.fftSize = options.fftSize;
+                }
+
+                if (options !== null && options.maxDecibels !== undefined) {
+                    analyserNode.maxDecibels = options.maxDecibels;
+                }
+
+                if (options !== null && options.minDecibels !== undefined) {
+                    analyserNode.minDecibels = options.minDecibels;
+                }
+
+                if (options !== null && options.smoothingTimeConstant !== undefined) {
+                    analyserNode.smoothingTimeConstant = options.smoothingTimeConstant;
+                }
+
+                return analyserNode;
+            }
+        ]
+    ], (_, createContext, createAnalyserNode) => {
+
+        let context;
+
+        afterEach(() => {
+            if (context.close !== undefined) {
+                return context.close();
+            }
+        });
+
+        beforeEach(() => context = createContext());
+
+        describe('constructor()', () => {
+
+            describe('without any options', () => {
+
+                let analyserNode;
+
+                beforeEach(() => {
+                    analyserNode = createAnalyserNode(context);
+                });
+
+                it('should be an instance of the EventTarget interface', () => {
+                    expect(analyserNode.addEventListener).to.be.a('function');
+                    expect(analyserNode.dispatchEvent).to.be.a('function');
+                    expect(analyserNode.removeEventListener).to.be.a('function');
+                });
+
+                it('should be an instance of the AudioNode interface', () => {
+                    expect(analyserNode.channelCount).to.equal(2);
+                    expect(analyserNode.channelCountMode).to.equal('max');
+                    expect(analyserNode.channelInterpretation).to.equal('speakers');
+                    expect(analyserNode.connect).to.be.a('function');
+                    expect(analyserNode.context).to.be.an.instanceOf(context.constructor);
+                    expect(analyserNode.disconnect).to.be.a('function');
+                    expect(analyserNode.numberOfInputs).to.equal(1);
+                    expect(analyserNode.numberOfOutputs).to.equal(1);
+                });
+
+                it('should return an instance of the AnalyserNode interface', () => {
+                    expect(analyserNode.fftSize).to.equal(2048);
+                    expect(analyserNode.frequencyBinCount).to.equal(1024);
+                    expect(analyserNode.getByteFrequencyData).to.be.a('function');
+                    expect(analyserNode.getByteTimeDomainData).to.be.a('function');
+                    expect(analyserNode.getFloatFrequencyData).to.be.a('function');
+                    expect(analyserNode.getFloatTimeDomainData).to.be.a('function');
+                    expect(analyserNode.maxDecibels).to.equal(-30);
+                    expect(analyserNode.minDecibels).to.equal(-100);
+                    expect(analyserNode.smoothingTimeConstant).to.closeTo(0.8, 0.0000001);
+                });
+
+                it('should throw an error if the AudioContext is closed', (done) => {
+                    ((context.close === undefined) ? context.startRendering() : context.close())
+                        .then(() => createAnalyserNode(context))
+                        .catch((err) => {
+                            expect(err.code).to.equal(11);
+                            expect(err.name).to.equal('InvalidStateError');
+
+                            context.close = undefined;
+
+                            done();
+                        });
+                });
+
+            });
+
+            describe('with valid options', () => {
+
+                it('should return an instance with the given channelCount', () => {
+                    const channelCount = 4;
+                    const analyserNode = createAnalyserNode(context, { channelCount });
+
+                    expect(analyserNode.channelCount).to.equal(channelCount);
+                });
+
+                it('should return an instance with the given channelCountMode', () => {
+                    const channelCountMode = 'explicit';
+                    const analyserNode = createAnalyserNode(context, { channelCountMode });
+
+                    expect(analyserNode.channelCountMode).to.equal(channelCountMode);
+                });
+
+                it('should return an instance with the given channelInterpretation', () => {
+                    const channelInterpretation = 'discrete';
+                    const analyserNode = createAnalyserNode(context, { channelInterpretation });
+
+                    expect(analyserNode.channelInterpretation).to.equal(channelInterpretation);
+                });
+
+                it('should return an instance with the given fftSize', () => {
+                    const fftSize = 1024;
+                    const analyserNode = createAnalyserNode(context, { fftSize });
+
+                    expect(analyserNode.fftSize).to.equal(fftSize);
+                });
+
+                it('should return an instance with the given maxDecibels', () => {
+                    const maxDecibels = -20;
+                    const analyserNode = createAnalyserNode(context, { maxDecibels });
+
+                    expect(analyserNode.maxDecibels).to.equal(maxDecibels);
+                });
+
+                it('should return an instance with the given minDecibels', () => {
+                    const minDecibels = -90;
+                    const analyserNode = createAnalyserNode(context, { minDecibels });
+
+                    expect(analyserNode.minDecibels).to.equal(minDecibels);
+                });
+
+                it('should return an instance with the given smoothingTimeConstant', () => {
+                    const smoothingTimeConstant = 0.5;
+                    const analyserNode = createAnalyserNode(context, { smoothingTimeConstant });
+
+                    expect(analyserNode.smoothingTimeConstant).to.equal(smoothingTimeConstant);
+                });
+
+            });
+
+        });
+
+        describe('channelCount', () => {
+
+            let analyserNode;
+
+            beforeEach(() => {
+                analyserNode = createAnalyserNode(context);
+            });
+
+            it('should be assignable to another value', () => {
+                const channelCount = 4;
+
+                analyserNode.channelCount = channelCount;
+
+                expect(analyserNode.channelCount).to.equal(channelCount);
+            });
+
+        });
+
+        describe('channelCountMode', () => {
+
+            let analyserNode;
+
+            beforeEach(() => {
+                analyserNode = createAnalyserNode(context);
+            });
+
+            it('should be assignable to another value', () => {
+                const channelCountMode = 'explicit';
+
+                analyserNode.channelCountMode = channelCountMode;
+
+                expect(analyserNode.channelCountMode).to.equal(channelCountMode);
+            });
+
+        });
+
+        describe('channelInterpretation', () => {
+
+            let analyserNode;
+
+            beforeEach(() => {
+                analyserNode = createAnalyserNode(context);
+            });
+
+            it('should be assignable to another value', () => {
+                const channelInterpretation = 'discrete';
+
+                analyserNode.channelInterpretation = channelInterpretation;
+
+                expect(analyserNode.channelInterpretation).to.equal(channelInterpretation);
+            });
+
+        });
+
+        describe('connect()', () => {
+
+            let analyserNode;
+
+            beforeEach(() => {
+                analyserNode = createAnalyserNode(context);
+            });
+
+            it('should be chainable', () => {
+                const gainNode = new GainNode(context);
+
+                expect(analyserNode.connect(gainNode)).to.equal(gainNode);
+            });
+
+            it('should not be connectable to an AudioNode of another AudioContext', (done) => {
+                const anotherContext = createContext();
+
+                try {
+                    analyserNode.connect(anotherContext.destination);
+                } catch (err) {
+                    expect(err.code).to.equal(15);
+                    expect(err.name).to.equal('InvalidAccessError');
+
+                    done();
+                } finally {
+                    if (anotherContext.close !== undefined) {
+                        anotherContext.close();
+                    }
+                }
+            });
+
+            it('should not be connectable to an AudioParam of another AudioContext', (done) => {
+                const anotherContext = createContext();
+                const gainNode = new GainNode(anotherContext);
+
+                try {
+                    analyserNode.connect(gainNode.gain);
+                } catch (err) {
+                    expect(err.code).to.equal(15);
+                    expect(err.name).to.equal('InvalidAccessError');
+
+                    done();
+                } finally {
+                    if (anotherContext.close !== undefined) {
+                        anotherContext.close();
+                    }
+                }
+            });
+
+            it('should throw an IndexSizeError if the output is out-of-bound', (done) => {
+                const gainNode = new GainNode(context);
+
+                try {
+                    analyserNode.connect(gainNode.gain, -1);
+                } catch (err) {
+                    expect(err.code).to.equal(1);
+                    expect(err.name).to.equal('IndexSizeError');
+
+                    done();
+                }
+            });
+
+        });
+
+        describe('disconnect()', () => {
+
+            let renderer;
+            let values;
+
+            beforeEach(() => {
+                values = [ 1, 1, 1, 1, 1 ];
+
+                renderer = createRenderer({
+                    context,
+                    length: (context.length === undefined) ? 5 : undefined,
+                    prepare (destination) {
+                        const analyserNode = createAnalyserNode(context);
+                        const audioBuffer = new AudioBuffer({ length: 5, sampleRate: context.sampleRate });
+                        const audioBufferSourceNode = new AudioBufferSourceNode(context);
+                        const firstDummyGainNode = new GainNode(context);
+                        const secondDummyGainNode = new GainNode(context);
+
+                        audioBuffer.copyToChannel(new Float32Array(values), 0);
+
+                        audioBufferSourceNode.buffer = audioBuffer;
+
+                        audioBufferSourceNode
+                            .connect(analyserNode)
+                            .connect(firstDummyGainNode)
+                            .connect(destination);
+
+                        analyserNode.connect(secondDummyGainNode);
+
+                        return { analyserNode, audioBufferSourceNode, firstDummyGainNode, secondDummyGainNode };
+                    }
+                });
+            });
+
+            it('should be possible to disconnect a destination', function () {
+                this.timeout(5000);
+
+                return renderer({
+                    prepare ({ analyserNode, firstDummyGainNode }) {
+                        analyserNode.disconnect(firstDummyGainNode);
+                    },
+                    start (startTime, { audioBufferSourceNode }) {
+                        audioBufferSourceNode.start(startTime);
+                    }
+                })
+                    .then((channelData) => {
+                        expect(Array.from(channelData)).to.deep.equal([ 0, 0, 0, 0, 0 ]);
+                    });
+            });
+
+            it('should be possible to disconnect another destination in isolation', function () {
+                this.timeout(5000);
+
+                return renderer({
+                    prepare ({ analyserNode, secondDummyGainNode }) {
+                        analyserNode.disconnect(secondDummyGainNode);
+                    },
+                    start (startTime, { audioBufferSourceNode }) {
+                        audioBufferSourceNode.start(startTime);
+                    }
+                })
+                    .then((channelData) => {
+                        expect(Array.from(channelData)).to.deep.equal(values);
+                    });
+            });
+
+            it('should be possible to disconnect all destinations', function () {
+                this.timeout(5000);
+
+                return renderer({
+                    prepare ({ analyserNode }) {
+                        analyserNode.disconnect();
+                    },
+                    start (startTime, { audioBufferSourceNode }) {
+                        audioBufferSourceNode.start(startTime);
+                    }
+                })
+                    .then((channelData) => {
+                        expect(Array.from(channelData)).to.deep.equal([ 0, 0, 0, 0, 0 ]);
+                    });
+            });
+
+        });
+
+        describe('getFloatTimeDomainData()', () => {
+
+            let analyserNode;
+
+            beforeEach(() => {
+                analyserNode = createAnalyserNode(context);
+            });
+
+            it('should return time-domain data', () => {
+                const data = new Float32Array(analyserNode.fftSize);
+
+                analyserNode.getFloatTimeDomainData(data);
+
+                expect(data[0]).to.equal(0);
+            });
+
+        });
+
+    });
+
+});
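Taken together, the new helper, the renderer and the relocated factory method mean an `AnalyserNode` can now take part in an offline rendering; previously `createNativeNode()` simply threw `'This is not yet supported.'` for offline contexts. An end-to-end sketch, with import paths that mirror the source tree and are therefore an assumption:

```ts
import { OfflineAudioContext } from './src/audio-contexts/offline-audio-context';
import { OscillatorNode } from './src/audio-nodes/oscillator-node';

const offlineAudioContext = new OfflineAudioContext({ length: 44100, sampleRate: 44100 });
const analyserNode = offlineAudioContext.createAnalyser();
const oscillatorNode = new OscillatorNode(offlineAudioContext);

// The AnalyserNodeRenderer recreates the native AnalyserNode on the offline
// context when the rendering starts, so this graph no longer throws.
oscillatorNode.connect(analyserNode).connect(offlineAudioContext.destination);
oscillatorNode.start(0);

offlineAudioContext
    .startRendering()
    .then((renderedBuffer) => console.log(renderedBuffer.length)); // 44100
```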