Soundwave component

This commit is contained in:
tonyrewin 2022-11-14 12:30:20 +03:00
parent 6fc26283bd
commit faab8ad544
3 changed files with 128 additions and 120 deletions

View File

@@ -1,7 +1,8 @@
import { createMemo, createSignal, onMount } from 'solid-js'
import { createEffect, createMemo, createSignal, onMount } from 'solid-js'
import { For } from 'solid-js/web'
import type { Shout } from '../../graphql/types.gen'
import { drawAudio } from '../../utils/soundwave'
import { Soundwave } from './Soundwave'
type MediaItem = any
export default (props: { shout: Shout }) => {
@@ -12,23 +13,17 @@ export default (props: { shout: Shout }) => {
}
return []
})
let audioRef: HTMLAudioElement
const [currentTrack, setCurrentTrack] = createSignal(media()[0])
const [paused, setPaused] = createSignal(true)
const togglePlayPause = () => setPaused(!paused())
const playMedia = (m: MediaItem) => {}
const playMedia = (m: MediaItem) => {
audioRef.src = m.get('src')
audioRef.play()
}
const [audioContext, setAudioContext] = createSignal<AudioContext>()
const currentTimer = createMemo(() => {
// TODO: return current audio player track position
return 1
})
onMount(() => {
const actx = new AudioContext()
setAudioContext(actx)
drawAudio(actx, currentTrack().src)
})
const SoundWave = () => <canvas></canvas>
onMount(() => setAudioContext(new AudioContext()))
createEffect(() => (paused() ? audioRef.play : audioRef.pause)())
return (
<div class="audio-container">
<div class="audio-img">
@@ -43,27 +38,24 @@ export default (props: { shout: Shout }) => {
</div>
<div class="audio-player-list">
<div class="player ng-scope">
<div class="player-title ng-binding ng-scope">{currentTrack().title}</div>
<i class="fas fa-pause fa-3x fa-fw ng-scope" onClick={togglePlayPause} style=""></i>
<div class="player current-track">
<div class="player-title">{currentTrack().title}</div>
<i class="fas fa-pause fa-3x fa-fw" onClick={togglePlayPause}></i>
<div class="player-progress">
<SoundWave />
<span class="position ng-binding">{currentTimer() / currentTrack().length}</span>
<Soundwave context={audioContext()} url={currentTrack().src} />
<span class="track-position">{`${audioRef.currentTime} / ${audioRef.duration}`}</span>
</div>
<audio ref={audioRef} />
</div>
<ul
class="other-songs ng-scope is-playing"
ng-class="{ 'is-playing': post._id === $root.currentMusicPostId }"
style=""
>
<ul class="all-tracks">
<For each={media()}>
{(m: MediaItem) => (
<li ng-repeat="mediaItem in post.media" class="ng-scope">
<li>
<div class="player-status">
<i class="fas fa-play fa-fw ng-scope" onClick={() => playMedia(m)}></i>
<i class="fas fa-play fa-fw" onClick={() => playMedia(m)}></i>
</div>
<span class="track-title ng-binding">{m.title}</span>
<span class="track-title">{m.title}</span>
</li>
)}
</For>

View File

@@ -0,0 +1,108 @@
import { onMount } from 'solid-js'
// Props for the Soundwave visualizer: the audio file to draw and the
// AudioContext used to decode it.
interface SoundwaveProps {
  url: string // primitive `string`, not the boxed wrapper type `String`
  context: AudioContext
}
// Soundwave: draws the waveform of the audio file at `props.url` into a
// <canvas>, decoding it with the AudioContext supplied by the parent.
// NOTE(review): props are only read once, inside onMount — if the parent
// creates the AudioContext in its own onMount, `props.context` may still be
// undefined here; confirm mount ordering with the caller.
export const Soundwave = (props: SoundwaveProps) => {
  // Assigned by Solid through the `ref` attribute on the returned <canvas>.
  let canvasRef: HTMLCanvasElement
  /**
   * A utility function for drawing our line segments
   * @param {CanvasRenderingContext2D} ctx the canvas 2D drawing context
   * @param {number} x the x coordinate of the beginning of the line segment
   * @param {number} height the desired height of the line segment
   * @param {number} width the desired width of the line segment
   * @param {number} isEven 1 for even-numbered segments (drawn upward), 0 otherwise
   */
  const drawLineSegment = (ctx, x, height, width, isEven) => {
    ctx.lineWidth = 1 // how thick the line is
    ctx.strokeStyle = '#fff' // what color our line is
    ctx.beginPath()
    // even segments extend one way, odd segments are mirrored across the axis
    const h = isEven ? height : -height
    ctx.moveTo(x, 0)
    ctx.lineTo(x, h)
    // half-circle cap joining this segment's two vertical edges
    ctx.arc(x + width / 2, h, width / 2, Math.PI, 0, isEven)
    ctx.lineTo(x + width, 0)
    ctx.stroke()
  }
  /**
   * Draws the audio file into a canvas element.
   * @param {Array} normalizedData the normalized array returned from normalizeData()
   */
  const draw = (normalizedData) => {
    // set up the canvas
    const canvas = canvasRef
    const dpr = window.devicePixelRatio || 1
    const padding = 20
    // size the backing store for the device pixel ratio so lines stay crisp
    canvas.width = canvas.offsetWidth * dpr
    canvas.height = (canvas.offsetHeight + padding * 2) * dpr
    const ctx = canvas.getContext('2d')
    ctx.scale(dpr, dpr)
    ctx.translate(0, canvas.offsetHeight / 2 + padding) // set Y = 0 to be in the middle of the canvas
    // draw the line segments
    const width = canvas.offsetWidth / normalizedData.length
    // eslint-disable-next-line unicorn/no-for-loop
    for (let i = 0; i < normalizedData.length; i++) {
      const x = width * i
      let height = normalizedData[i] * canvas.offsetHeight - padding
      if (height < 0) {
        height = 0
      } else if (height > canvas.offsetHeight / 2) {
        // clamp segments that would overflow the upper half of the canvas
        height = height - canvas.offsetHeight / 2
      }
      drawLineSegment(ctx, x, height, width, (i + 1) % 2)
    }
  }
  /**
   * Filters the AudioBuffer retrieved from an external source
   * @param {AudioBuffer} audioBuffer the AudioBuffer from drawAudio()
   * @returns {Array} an array of floating point numbers (mean |amplitude| per block)
   */
  const filterData = (audioBuffer) => {
    const rawData = audioBuffer.getChannelData(0) // We only need to work with one channel of data
    const samples = 70 // Number of samples we want to have in our final data set
    const blockSize = Math.floor(rawData.length / samples) // the number of samples in each subdivision
    const filteredData = []
    for (let i = 0; i < samples; i++) {
      const blockStart = blockSize * i // the location of the first sample in the block
      let sum = 0
      for (let j = 0; j < blockSize; j++) {
        sum = sum + Math.abs(rawData[blockStart + j]) // find the sum of all the samples in the block
      }
      filteredData.push(sum / blockSize) // divide the sum by the block size to get the average
    }
    return filteredData
  }
  /**
   * Normalizes the audio data to make a cleaner illustration
   * @param {Array} filteredData the data from filterData()
   * @returns {Array} a normalized array of floating point numbers, peak scaled to 1
   */
  const normalizeData = (filteredData) => {
    const multiplier = Math.pow(Math.max(...filteredData), -1)
    return filteredData.map((n) => n * multiplier)
  }
  /**
   * Retrieves audio from an external source, then initializes the drawing function
   * @param {AudioContext} audioContext the audio context
   * @param {String} url the url of the audio we'd like to fetch
   */
  const drawAudio = (audioContext, url) => {
    fetch(url)
      .then((response) => response.arrayBuffer())
      .then((arrayBuffer) => audioContext.decodeAudioData(arrayBuffer))
      .then((audioBuffer) => draw(normalizeData(filterData(audioBuffer))))
      .catch(console.error) // log failures instead of leaving an unhandled rejection
  }
  onMount(() => {
    // Runs once after the <canvas> ref is attached.
    // NOTE(review): assumes props.context is already defined at mount time —
    // decodeAudioData on an undefined context will reject and be swallowed by
    // the .catch above; verify with the parent component.
    drawAudio(props.context, props.url)
  })
  return <canvas ref={canvasRef}></canvas>
}

View File

@@ -1,92 +0,0 @@
/**
* A utility function for drawing our line segments
* @param {AudioContext} ctx the audio context
* @param {number} x the x coordinate of the beginning of the line segment
* @param {number} height the desired height of the line segment
* @param {number} width the desired width of the line segment
* @param {boolean} isEven whether or not the segmented is even-numbered
*/
/**
 * Strokes one rounded segment of the waveform onto a 2D canvas context.
 * @param {CanvasRenderingContext2D} ctx the canvas 2D drawing context
 * @param {number} x the left edge of the segment
 * @param {number} height the magnitude of the segment
 * @param {number} width the horizontal space the segment occupies
 * @param {boolean} isEven truthy segments point upward, the rest are mirrored downward
 */
export const drawLineSegment = (ctx, x, height, width, isEven) => {
  const signedHeight = isEven ? height : -height
  const halfWidth = width / 2
  ctx.lineWidth = 1 // stroke thickness
  ctx.strokeStyle = '#fff' // stroke color
  ctx.beginPath()
  ctx.moveTo(x, 0)
  ctx.lineTo(x, signedHeight)
  // half-circle cap joining the segment's two vertical edges
  ctx.arc(x + halfWidth, signedHeight, halfWidth, Math.PI, 0, isEven)
  ctx.lineTo(x + width, 0)
  ctx.stroke()
}
/**
* Draws the audio file into a canvas element.
* @param {Array} normalizedData The filtered array returned from filterData()
* @returns {Array} a normalized array of data
*/
/**
 * Renders the normalized waveform into the page's first <canvas> element.
 * Each sample becomes one rounded line segment, alternating above/below the
 * horizontal axis. Returns nothing (the original doc's "@returns {Array}"
 * claim was wrong).
 * @param {Array} normalizedData the normalized samples from normalizeData()
 */
export const draw = (normalizedData) => {
  // set up the canvas
  const canvas = document.querySelector('canvas')
  if (!canvas) return // no canvas in the document yet — nothing to draw into
  const dpr = window.devicePixelRatio || 1
  const padding = 20
  // size the backing store for the device pixel ratio so lines stay crisp
  canvas.width = canvas.offsetWidth * dpr
  canvas.height = (canvas.offsetHeight + padding * 2) * dpr
  const ctx = canvas.getContext('2d')
  if (!ctx) return // 2d context unavailable (should not happen in browsers)
  ctx.scale(dpr, dpr)
  ctx.translate(0, canvas.offsetHeight / 2 + padding) // set Y = 0 to be in the middle of the canvas
  // draw the line segments, one per sample
  const width = canvas.offsetWidth / normalizedData.length
  for (let i = 0; i < normalizedData.length; i++) {
    const x = width * i
    let height = normalizedData[i] * canvas.offsetHeight - padding
    if (height < 0) {
      height = 0
    } else if (height > canvas.offsetHeight / 2) {
      // clamp segments that would overflow the upper half of the canvas
      height = height - canvas.offsetHeight / 2
    }
    drawLineSegment(ctx, x, height, width, (i + 1) % 2)
  }
}
/**
* Filters the AudioBuffer retrieved from an external source
* @param {AudioBuffer} audioBuffer the AudioBuffer from drawAudio()
* @returns {Array} an array of floating point numbers
*/
/**
 * Reduces an AudioBuffer's first channel to a fixed number of samples,
 * where each sample is the mean absolute amplitude of one block of raw data.
 * @param {AudioBuffer} audioBuffer the decoded audio from drawAudio()
 * @returns {Array} an array of floating point block averages
 */
export const filterData = (audioBuffer) => {
  const rawData = audioBuffer.getChannelData(0) // only the first channel is visualized
  const samples = 70 // size of the final data set
  const blockSize = Math.floor(rawData.length / samples) // raw samples per block
  return Array.from({ length: samples }, (_, block) => {
    const start = blockSize * block // index of the first raw sample in this block
    let total = 0
    for (let offset = 0; offset < blockSize; offset++) {
      total += Math.abs(rawData[start + offset])
    }
    return total / blockSize // mean absolute amplitude of the block
  })
}
/**
* Normalizes the audio data to make a cleaner illustration
* @param {Array} filteredData the data from filterData()
* @returns {Array} an normalized array of floating point numbers
*/
/**
 * Rescales the filtered samples so the loudest one becomes 1.
 * @param {Array} filteredData the block averages from filterData()
 * @returns {Array} the same samples scaled relative to the peak value
 */
export const normalizeData = (filteredData) => {
  const peak = Math.max(...filteredData)
  const scale = Math.pow(peak, -1) // reciprocal of the peak amplitude
  return filteredData.map((sample) => sample * scale)
}
/**
* Retrieves audio from an external source, the initializes the drawing function
* @param {AudioContext} audioContext the audio context
* @param {String} url the url of the audio we'd like to fetch
*/
/**
 * Retrieves audio from an external source, decodes it, then draws its
 * waveform via draw(). Failures in any stage (network, decode, draw) are
 * logged instead of surfacing as unhandled promise rejections.
 * @param {AudioContext} audioContext the audio context used for decoding
 * @param {String} url the url of the audio we'd like to fetch
 */
export const drawAudio = (audioContext, url) => {
  fetch(url)
    .then((response) => response.arrayBuffer())
    .then((arrayBuffer) => audioContext.decodeAudioData(arrayBuffer))
    .then((audioBuffer) => draw(normalizeData(filterData(audioBuffer))))
    .catch(console.error) // previously missing: a failed fetch/decode became an unhandled rejection
}