node.js - Web Audio API: stream audio tags' output via HTTP as they are played


I'm working on an Electron app that takes song/voice requests via the Telegram Bot API and plays them in audio objects, jukebox/radio style.

What I'd like to achieve is live-streaming the app's audio output over HTTP, so that clients can connect to a local (Node.js) server.

So I need to process the PCM of the audio tags as they are played, mix them together (and maybe convert the result to MP3 format?), and pipe the result to the clients. At least, that's the idea right now.
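To illustrate, this is roughly the mixing stage I have in mind (untested sketch, all names made up by me): every <audio> element is routed through one shared gain node, so everything connected there gets summed into a single mixed signal:

var ctx = new AudioContext();
var mix_bus = ctx.createGain();
mix_bus.connect(ctx.destination); // keep hearing the jukebox locally

function add_track(audio_element) {
    // each <audio> tag becomes a source node feeding the shared bus
    var src = ctx.createMediaElementSource(audio_element);
    src.connect(mix_bus);
}

As far as I understand, connecting several sources to the same node makes the Web Audio graph sum them, which would do the mixing for me.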

Unfortunately, I'm stuck on capturing the audio objects' outputs. I have read about Recorder.js and how it can record audio from an AudioNode object, but I haven't found an example of mixing multiple audio tags into one outgoing stream yet.

Can anyone help me with this?

When the Web Audio API is rendering audio, the raw PCM (uncompressed) is available in a memory buffer which gets emptied/reloaded based on its allocated size - you can intercept and copy that buffer, then process it downstream for publication to your clients.
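To make that concrete, here is a minimal sketch of the intercept-and-copy idea (it assumes the script_processor_node that the full listing further down sets up):

script_processor_node.onaudioprocess = function(event) {
    // event.inputBuffer holds the raw PCM for this render quantum
    var pcm = event.inputBuffer.getChannelData(0); // Float32Array, mono
    var copy = new Float32Array(pcm); // copy it out before the buffer is recycled
    // ... hand off "copy" for downstream processing / publication
};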

Save the full listing below into an HTML file, then from the same dir serve it up using

python -m SimpleHTTPServer

Point your browser at http://localhost:8000/ and pick the new HTML file ... the browser will prompt you to acknowledge use of the microphone ... view the JavaScript console (Ctrl-Shift-I) ... there you will see the first 3 elements of the FFT and time-domain audio array buffers ... in the code, search for

array_time_domain

which is the raw PCM audio (to be copied and sent to your subscribed clients (left as an exercise for the reader ;-)) ... comment out the FFT-related code if it's not needed, to lower CPU/battery drain.
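If you want a starting point for that exercise, one possible (untested) approach is to ship each copied buffer to your Node.js server over a WebSocket, something like:

// hypothetical sketch - assumes your Node.js server listens on ws://localhost:8080
var socket = new WebSocket("ws://localhost:8080");
socket.binaryType = "arraybuffer";

function publish_buffer(pcm_copy) { // pcm_copy is a typed array you copied out
    if (socket.readyState === WebSocket.OPEN) {
        socket.send(pcm_copy.buffer); // raw PCM bytes; the server can mix them,
    }                                 // encode to mp3 and re-serve over HTTP
}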

Note - the onaudioprocess callback gets called repeatedly as audio is pumped through, so to assure the copy process mentioned above is efficient, make sure it completes in less than the cycle period between audio buffer refreshes (hint: Web Worker).
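A rough sketch of that Web Worker hand-off (untested; worker.js is a hypothetical script you would write to do the heavy lifting off the audio callback path):

var pcm_worker = new Worker("worker.js"); // hypothetical worker script

function hand_off(microphone_output_buffer) {
    var copy = new Float32Array(microphone_output_buffer);
    // transfer ownership of the underlying ArrayBuffer - no second copy is made,
    // so the onaudioprocess callback returns as quickly as possible
    pcm_worker.postMessage(copy.buffer, [copy.buffer]);
}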

Here I use as input the source audio coming from the microphone. The inner callback of the rendering event loop is the same irrespective of the audio source.
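For instance (untested), to feed it from an <audio> tag instead of the microphone - as in your jukebox case - only the source node changes, while the same onaudioprocess callback fires:

// same graph, different source: an <audio> element instead of getUserMedia
var audio_tag = document.getElementById('my_audio'); // hypothetical element id
var element_source = audioContext.createMediaElementSource(audio_tag);
element_source.connect(script_processor_node); // same callback as in the listing below
element_source.connect(audioContext.destination); // still audible locally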

<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>

<script type="text/javascript">

var webaudio_tooling_obj = function () {

    var audioContext = new AudioContext();

    console.log("audio is starting up ...");

    var buff_size_renderer = 16384;
    var size_show = 3; // number of array elements to show in console output

    var audioInput = null,
        microphone_stream = null,
        gain_node = null,
        script_processor_node = null,
        script_processor_analysis_node = null,
        analyser_node = null;

    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
                                 navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia){

        navigator.getUserMedia({audio:true},
            function(stream) {
                start_microphone(stream);
            },
            function(e) {
                alert('Error capturing audio.');
            }
        );

    } else { alert('getUserMedia not supported in this browser.'); }

    // ---

    function show_some_data(given_typed_array, num_row_to_display, label) {

        var size_buffer = given_typed_array.length;
        var index = 0;

        console.log("__________ " + label);

        if (label === "time") {

            for (; index < num_row_to_display && index < size_buffer; index += 1) {

                var curr_value_time = (given_typed_array[index] / 128) - 1.0;

                console.log(curr_value_time);
            }

        } else if (label === "frequency") {

            for (; index < num_row_to_display && index < size_buffer; index += 1) {

                console.log(given_typed_array[index]);
            }

        } else {

            throw new Error("ERROR - must pass time or frequency");
        }
    }

    function process_microphone_buffer(event) {

        var i, n, inp, microphone_output_buffer;

        microphone_output_buffer = event.inputBuffer.getChannelData(0); // mono - 1 channel
    }

    function start_microphone(stream){

        gain_node = audioContext.createGain();
        gain_node.connect( audioContext.destination );

        microphone_stream = audioContext.createMediaStreamSource(stream);
        microphone_stream.connect(gain_node);

        script_processor_node = audioContext.createScriptProcessor(buff_size_renderer, 1, 1);
        script_processor_node.onaudioprocess = process_microphone_buffer;

        microphone_stream.connect(script_processor_node);

        // --- enable volume control for output speakers

        document.getElementById('volume').addEventListener('change', function() {

            var curr_volume = this.value;
            gain_node.gain.value = curr_volume;

            console.log("curr_volume ", curr_volume);
        });

        // --- setup FFT

        script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
        script_processor_analysis_node.connect(gain_node);

        analyser_node = audioContext.createAnalyser();
        analyser_node.smoothingTimeConstant = 0;
        analyser_node.fftSize = 2048;

        microphone_stream.connect(analyser_node);

        analyser_node.connect(script_processor_analysis_node);

        var buffer_length = analyser_node.frequencyBinCount;

        var array_freq_domain = new Uint8Array(buffer_length);
        var array_time_domain = new Uint8Array(buffer_length);

        console.log("buffer_length " + buffer_length);

        script_processor_analysis_node.onaudioprocess = function() {

            // get the data for the first channel
            analyser_node.getByteFrequencyData(array_freq_domain);
            analyser_node.getByteTimeDomainData(array_time_domain);

            // draw the spectrogram
            if (microphone_stream.playbackState == microphone_stream.PLAYING_STATE) {

                show_some_data(array_freq_domain, size_show, "frequency");
                show_some_data(array_time_domain, size_show, "time"); // store this to record to aggregate buffer/file
            }
        };
    }

}(); //  webaudio_tooling_obj = function()

</script>

</head>
<body>

    <p>Volume</p>
    <input id="volume" type="range" min="0" max="1" step="0.1" value="0.0"/>

</body>
</html>
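On the Node.js side, a bare-bones (untested) sketch of fanning the received PCM out to HTTP clients, assuming the WebSocket idea from above and the ws npm package (no mp3 encoding here - that could be done with an external encoder before writing to the listeners):

// server.js - hypothetical sketch: receive raw PCM over WebSocket,
// fan it out to connected HTTP clients as a chunked stream
var http = require('http');
var WebSocketServer = require('ws').Server;

var listeners = []; // http responses currently subscribed

http.createServer(function (req, res) {
    res.writeHead(200, {'Content-Type': 'application/octet-stream'});
    listeners.push(res);
    req.on('close', function () {
        listeners = listeners.filter(function (l) { return l !== res; });
    });
}).listen(8000);

new WebSocketServer({ port: 8080 }).on('connection', function (ws) {
    ws.on('message', function (pcm_chunk) {
        listeners.forEach(function (res) { res.write(pcm_chunk); }); // fan out
    });
});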
