// analyser.js
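// Live spectrum "waterfall": each animation frame grabs byte frequency data
// from a Web Audio AnalyserNode and draws the last 32 frames as stacked,
// colour-cycling traces on a canvas.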
const audioContext = new AudioContext()
const analyser = audioContext.createAnalyser()
analyser.fftSize = 256 // 128 bins (frequencyBinCount = fftSize / 2)

// Canvas setup: the context is scaled up so everything below draws in
// small "box" units rather than raw pixels.
const canvas = document.getElementById('analyser')
const renderCtx = canvas.getContext('2d')
const scale = 8
const numRows = 32 // frames of history kept on screen
const boxHeight = 32 // vertical span of one trace, in box units
const lineWidth = 3
const rowWidth = analyser.frequencyBinCount / 4 // keep only the lowest quarter of the spectrum
canvas.width = (rowWidth + numRows + lineWidth) * scale
canvas.height = (numRows + boxHeight + lineWidth) * scale
renderCtx.lineCap = 'round'
renderCtx.lineJoin = 'round'
renderCtx.scale(scale, scale)

// The last numRows frames of scaled frequency data, oldest first.
const freqDataRows = [...Array(numRows)].map(_ => new Uint8Array(rowWidth))
// ======= INPUT =======
// Snapshot the analyser's current byte frequency data (0-255 per bin).
// Passing an array shorter than frequencyBinCount keeps only the lowest bins.
const getFreqData = () => {
  const freqData = new Uint8Array(rowWidth)
  analyser.getByteFrequencyData(freqData)
  return freqData
}

// Append the newest frame and drop the oldest.
const updateRows = () => {
  freqDataRows.push(scaleBins(getFreqData()))
  freqDataRows.shift()
}
// ======= PURE =======
// Map raw byte values (0-255) into box units (0-boxHeight). Uint8Array.map
// stores integers, which quantises the trace slightly; fine at this scale.
const scaleBins = (bins) => {
  return bins.map(binVal => binVal * (boxHeight / 256))
}

// Three-stop gradient spanning half the hue wheel, running bottom to top.
const verticalGradient = (baseHue, saturation, lightness) => {
  const gradient = renderCtx.createLinearGradient(0, boxHeight, 0, 0)
  gradient.addColorStop(0.0, `hsl(${baseHue}, ${saturation}%, ${lightness}%)`)
  gradient.addColorStop(0.5, `hsl(${baseHue + 90}, ${saturation}%, ${lightness}%)`)
  gradient.addColorStop(1.0, `hsl(${baseHue + 180}, ${saturation}%, ${lightness}%)`)
  return gradient
}

// Polyline through the bins; y is flipped so louder bins plot higher.
const pathFromFreqData = (freqData) => {
  const path = new Path2D()
  path.moveTo(0, boxHeight - freqData[0])
  for (let i = 1; i < freqData.length; i++) {
    path.lineTo(i, boxHeight - freqData[i])
  }
  return path
}
// ======= OUTPUT =======
let frame = 0

// Stroke each trace twice: a wide, light pass underneath and a narrower,
// darker pass on top, for a glowing outline. The base hue cycles through
// the full wheel once every 1800 frames.
const drawFreqData = (freqData, saturation) => {
  const path = pathFromFreqData(freqData)
  const baseHue = (frame % 1800) / 5
  renderCtx.strokeStyle = verticalGradient(baseHue, saturation, 85)
  renderCtx.lineWidth = lineWidth
  renderCtx.stroke(path)
  renderCtx.strokeStyle = verticalGradient(baseHue, saturation, 50)
  renderCtx.lineWidth = lineWidth * 0.75
  renderCtx.stroke(path)
}

// Draw the history oldest-first, offsetting each newer row down and to the
// left for a pseudo-3D waterfall; newer rows are drawn more saturated.
const drawFrame = () => {
  const initialTransform = renderCtx.getTransform()
  renderCtx.clearRect(0, 0, canvas.width, canvas.height)
  renderCtx.translate(numRows + lineWidth / 2, lineWidth / 2)
  for (let i = 0; i < numRows; i++) {
    const saturation = (i / numRows) * 100 + 25
    drawFreqData(freqDataRows[i], saturation)
    renderCtx.translate(-1, 1)
  }
  renderCtx.setTransform(initialTransform)
}

const animate = () => {
  updateRows()
  drawFrame()
  frame++ // count frames directly rather than reusing the rAF request id
  requestAnimationFrame(animate)
}
// ======= ACT =======
// Default input: the microphone. Browsers typically keep a new AudioContext
// suspended until a user gesture, so resume() here may still be a no-op
// until the page has been interacted with.
let streamSource
navigator.mediaDevices
  .getUserMedia({ audio: true, video: false })
  .then(stream => {
    streamSource = audioContext.createMediaStreamSource(stream)
    streamSource.connect(analyser)
    audioContext.resume()
  })
  .catch(err => console.error('microphone unavailable:', err))
animate()
// ======= EXTRAS =======
// Test tone: a sine oscillator swept between 0 and 6000 Hz by a 0.25 Hz LFO,
// feeding the analyser only (no connection to the speakers).
const demo = () => {
  const osc = audioContext.createOscillator()
  const oscGain = audioContext.createGain()
  osc.frequency.value = 3000
  oscGain.gain.value = 0.3
  osc.connect(oscGain).connect(analyser)
  osc.start()
  const lfo = audioContext.createOscillator()
  const lfoGain = audioContext.createGain()
  lfo.frequency.value = 0.25
  lfoGain.gain.value = 3000 // modulation depth: +/-3000 Hz around the carrier
  lfo.connect(lfoGain).connect(osc.frequency)
  lfo.start()
}
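// e.g. call demo() from the console to feed the analyser a test sweep
// instead of the microphone.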
// Loop an audio file through the analyser; a gain node also routes it to
// the speakers at a barely audible level.
let bufferSource
const connectFile = fileName => {
  fetch(fileName)
    .then(response => response.arrayBuffer())
    .then(arrayBuffer => audioContext.decodeAudioData(arrayBuffer))
    .then(audioBuffer => {
      bufferSource = audioContext.createBufferSource()
      bufferSource.buffer = audioBuffer
      bufferSource.loop = true
      bufferSource.start()
      bufferSource.connect(analyser)
      const cut = audioContext.createGain()
      cut.gain.value = 0.001 // gain is an AudioParam, so set .value, not the property
      bufferSource.connect(cut).connect(audioContext.destination)
    })
    .catch(err => console.error(`could not load ${fileName}:`, err))
}
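// Usage (the file name is illustrative): connectFile('loop.ogg')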
// use web audio to schedule ticks
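// A minimal sketch of that TODO, not part of the original file: the Web Audio
// clock (audioContext.currentTime) is sample-accurate, unlike setTimeout, so
// each tick is booked slightly ahead and a coarse timer just refills the
// window. The names (scheduleTicks, tickPeriod, lookahead) are illustrative
// assumptions.
const scheduleTicks = (tickPeriod = 0.5, lookahead = 0.1) => {
  let nextTick = audioContext.currentTime
  const pump = () => {
    // Book every tick that falls inside the lookahead window.
    while (nextTick < audioContext.currentTime + lookahead) {
      const tick = audioContext.createOscillator()
      tick.frequency.value = 1000
      tick.connect(audioContext.destination)
      tick.start(nextTick) // sample-accurate start
      tick.stop(nextTick + 0.03) // a 30 ms blip
      nextTick += tickPeriod
    }
    setTimeout(pump, (lookahead / 2) * 1000) // refill before the window drains
  }
  pump()
}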