sbGStreamerAudioProcessor.cpp
1 /*
2  *=BEGIN SONGBIRD GPL
3  *
4  * This file is part of the Songbird web player.
5  *
6  * Copyright(c) 2005-2010 POTI, Inc.
7  * http://www.songbirdnest.com
8  *
9  * This file may be licensed under the terms of the
10  * GNU General Public License Version 2 (the ``GPL'').
11  *
12  * Software distributed under the License is distributed
13  * on an ``AS IS'' basis, WITHOUT WARRANTY OF ANY KIND, either
14  * express or implied. See the GPL for the specific language
15  * governing rights and limitations.
16  *
17  * You should have received a copy of the GPL along with this
18  * program. If not, go to http://www.gnu.org/licenses/gpl.html
19  * or write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21  *
22  *=END SONGBIRD GPL
23  */
24 
25 #include "sbGStreamerAudioProcessor.h"
26 
27 #include <sbIGStreamerService.h>
28 #include <sbIMediaFormatMutable.h>
29 
30 #include <sbClassInfoUtils.h>
31 #include <sbStandardProperties.h>
32 #include <sbStringBundle.h>
33 #include <sbThreadUtils.h>
34 #include <sbVariantUtils.h>
35 
36 #include <nsServiceManagerUtils.h>
37 #include <nsThreadUtils.h>
38 #include <nsStringAPI.h>
39 #include <prlog.h>
40 
41 #include <gst/base/gstadapter.h>
42 #include <gst/app/gstappsink.h>
43 
44 /**
45  * To log this module, set the following environment variable:
46  *   NSPR_LOG_MODULES=sbGStreamerAudioProcessor:5
47  */
48 #ifdef PR_LOGGING
49 static PRLogModuleInfo* gGStreamerAudioProcessor = PR_NewLogModule("sbGStreamerAudioProcessor");
50 #define LOG(args) PR_LOG(gGStreamerAudioProcessor, PR_LOG_WARNING, args)
51 #define TRACE(args) PR_LOG(gGStreamerAudioProcessor, PR_LOG_DEBUG, args)
52 #else /* PR_LOGGING */
53 #define LOG(args) /* nothing */
54 #define TRACE(args) /* nothing */
55 #endif /* PR_LOGGING */
56 
57 NS_IMPL_THREADSAFE_ISUPPORTS3(sbGStreamerAudioProcessor,
58  sbIGStreamerPipeline,
59  sbIMediacoreAudioProcessor,
60  nsIClassInfo)
61 
62 NS_IMPL_CI_INTERFACE_GETTER2(sbGStreamerAudioProcessor,
63  sbIGStreamerPipeline,
64  sbIMediacoreAudioProcessor)
65 
66 NS_DECL_CLASSINFO(sbGstreamerAudioProcessor);
67 NS_IMPL_THREADSAFE_CI(sbGStreamerAudioProcessor);
68 
69 sbGStreamerAudioProcessor::sbGStreamerAudioProcessor() :
71  mConstraintChannelCount(0),
72  mConstraintSampleRate(0),
73  mConstraintAudioFormat(sbIMediacoreAudioProcessor::FORMAT_ANY),
74  mConstraintBlockSize(0),
75  mConstraintBlockSizeBytes(0),
76  mMonitor(NULL),
77  mAdapter(NULL),
78  mAppSink(NULL),
79  mCapsFilter(NULL),
80  mSuspended(PR_FALSE),
81  mFoundAudioPad(PR_FALSE),
82  mHasStarted(PR_FALSE),
83  mIsEOS(PR_FALSE),
84  mIsEndOfSection(PR_FALSE),
85  mHasSentError(PR_FALSE),
86  mSendGap(PR_FALSE),
87  mSampleNumber(0),
88  mExpectedNextSampleNumber(0),
89  mAudioFormat(0),
90  mSampleRate(0),
91  mChannels(0),
92  mBuffersAvailable(0),
93  mPendingBuffer(NULL)
94 {
95 }
96 
97 sbGStreamerAudioProcessor::~sbGStreamerAudioProcessor()
98 {
99  if (mMonitor) {
100  nsAutoMonitor::DestroyMonitor(mMonitor);
101  }
102 }
103 
104 /* sbIMediacoreAudioProcessor interface implementation */
105 
106 /* void init (in sbIMediacoreAudioProcessorListener aListener); */
107 NS_IMETHODIMP
108 sbGStreamerAudioProcessor::Init(sbIMediacoreAudioProcessorListener *aListener)
109 {
110  NS_ENSURE_ARG_POINTER(aListener);
111  NS_ENSURE_FALSE(mListener, NS_ERROR_ALREADY_INITIALIZED);
112 
113  mMonitor = nsAutoMonitor::NewMonitor("AudioProcessor::mMonitor");
114 
115  mListener = aListener;
116  return NS_OK;
117 }
118 
119 /* attribute unsigned long constraintSampleRate; */
120 NS_IMETHODIMP
121 sbGStreamerAudioProcessor::GetConstraintSampleRate(PRUint32 *aConstraintSampleRate)
122 {
123  NS_ENSURE_ARG_POINTER(aConstraintSampleRate);
124 
125  *aConstraintSampleRate = mConstraintSampleRate;
126  return NS_OK;
127 }
128 
129 NS_IMETHODIMP
130 sbGStreamerAudioProcessor::SetConstraintSampleRate(PRUint32 aConstraintSampleRate)
131 {
132  NS_ENSURE_FALSE(mPipeline, NS_ERROR_ALREADY_INITIALIZED);
133 
134  mConstraintSampleRate = aConstraintSampleRate;
135  return NS_OK;
136 }
137 
138 /* attribute unsigned long constraintChannelCount; */
139 NS_IMETHODIMP
140 sbGStreamerAudioProcessor::GetConstraintChannelCount(PRUint32 *aConstraintChannelCount)
141 {
142  NS_ENSURE_ARG_POINTER(aConstraintChannelCount);
143 
144  *aConstraintChannelCount = mConstraintChannelCount;
145  return NS_OK;
146 }
147 
148 NS_IMETHODIMP
149 sbGStreamerAudioProcessor::SetConstraintChannelCount(PRUint32 aConstraintChannelCount)
150 {
151  NS_ENSURE_FALSE(mPipeline, NS_ERROR_ALREADY_INITIALIZED);
152 
153  // More than 2 channels is not currently supported.
154  if (aConstraintChannelCount > 2)
155  return NS_ERROR_INVALID_ARG;
156 
157  mConstraintChannelCount = aConstraintChannelCount;
158  return NS_OK;
159 }
160 
161 /* attribute unsigned long constraintBlockSize; */
162 NS_IMETHODIMP
163 sbGStreamerAudioProcessor::GetConstraintBlockSize(PRUint32 *aConstraintBlockSize)
164 {
165  NS_ENSURE_ARG_POINTER(aConstraintBlockSize);
166 
167  *aConstraintBlockSize = mConstraintBlockSize;
168  return NS_OK;
169 }
170 
171 NS_IMETHODIMP
172 sbGStreamerAudioProcessor::SetConstraintBlockSize(PRUint32 aConstraintBlockSize)
173 {
174  NS_ENSURE_FALSE(mPipeline, NS_ERROR_ALREADY_INITIALIZED);
175 
176  mConstraintBlockSize = aConstraintBlockSize;
177  return NS_OK;
178 }
179 
180 /* attribute unsigned long constraintAudioFormat; */
181 NS_IMETHODIMP
182 sbGStreamerAudioProcessor::GetConstraintAudioFormat(PRUint32 *aConstraintAudioFormat)
183 {
184  NS_ENSURE_ARG_POINTER(aConstraintAudioFormat);
185 
186  *aConstraintAudioFormat = mConstraintAudioFormat;
187  return NS_OK;
188 }
189 
190 NS_IMETHODIMP
191 sbGStreamerAudioProcessor::SetConstraintAudioFormat(PRUint32 aConstraintAudioFormat)
192 {
193  // Ensure it's a valid format constant
194  NS_ENSURE_ARG_RANGE(aConstraintAudioFormat, FORMAT_ANY, FORMAT_FLOAT);
195  NS_ENSURE_FALSE(mPipeline, NS_ERROR_ALREADY_INITIALIZED);
196 
197  mConstraintAudioFormat = aConstraintAudioFormat;
198  return NS_OK;
199 }
200 
201 /* void start (in sbIMediaItem aItem); */
202 NS_IMETHODIMP
203 sbGStreamerAudioProcessor::Start(sbIMediaItem *aItem)
204 {
205  TRACE(("%s[%p]", __FUNCTION__, this));
206 
207  NS_ENSURE_TRUE (NS_IsMainThread(), NS_ERROR_FAILURE);
208  NS_ENSURE_ARG_POINTER (aItem);
209  NS_ENSURE_STATE (mListener);
210  NS_ENSURE_FALSE (mPipeline, NS_ERROR_FAILURE);
211 
212  mMediaItem = aItem;
213 
214  nsresult rv = PlayPipeline();
215  NS_ENSURE_SUCCESS(rv, rv);
216 
217  return NS_OK;
218 }
219 
220 /* void stop (); */
221 NS_IMETHODIMP
222 sbGStreamerAudioProcessor::Stop()
223 {
224  TRACE(("%s[%p]", __FUNCTION__, this));
225 
226  NS_ENSURE_TRUE (NS_IsMainThread(), NS_ERROR_FAILURE);
227 
228  // It's permissible to call stop() at any time; if we don't have a pipeline
229  // then we just don't need to do anything.
230  if (!mPipeline)
231  return NS_OK;
232 
233  nsresult rv = StopPipeline();
234  NS_ENSURE_SUCCESS(rv, rv);
235 
236  rv = DestroyPipeline();
237  NS_ENSURE_SUCCESS(rv, rv);
238 
239  return NS_OK;
240 }
241 
242 /* void suspend (); */
243 NS_IMETHODIMP
244 sbGStreamerAudioProcessor::Suspend()
245 {
246  TRACE(("%s[%p]", __FUNCTION__, this));
247 
248  NS_ENSURE_TRUE (NS_IsMainThread(), NS_ERROR_FAILURE);
249  NS_ENSURE_STATE (mPipeline);
250 
251  nsAutoMonitor mon(mMonitor);
252  mSuspended = PR_TRUE;
253  return NS_OK;
254 }
255 
256 /* void resume (); */
257 NS_IMETHODIMP
258 sbGStreamerAudioProcessor::Resume()
259 {
260  TRACE(("%s[%p]", __FUNCTION__, this));
261 
262  NS_ENSURE_TRUE (NS_IsMainThread(), NS_ERROR_FAILURE);
263  NS_ENSURE_STATE (mPipeline);
264 
265  nsAutoMonitor mon(mMonitor);
266  mSuspended = PR_FALSE;
267 
268  nsresult rv = ScheduleSendDataIfAvailable();
269  NS_ENSURE_SUCCESS(rv, rv);
270 
271  return NS_OK;
272 }
273 
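/*
 * Illustrative caller-side sketch (hypothetical code, not part of this file):
 * driving the processor through sbIMediacoreAudioProcessor, assuming the
 * caller already holds a processor instance, a listener implementing
 * sbIMediacoreAudioProcessorListener, and an sbIMediaItem. Constraints must
 * be set before Start(), since the setters fail once a pipeline exists.
 *
 *   nsresult rv;
 *   rv = processor->Init(listener);
 *   NS_ENSURE_SUCCESS(rv, rv);
 *   rv = processor->SetConstraintAudioFormat(
 *          sbIMediacoreAudioProcessor::FORMAT_INT16);
 *   NS_ENSURE_SUCCESS(rv, rv);
 *   rv = processor->SetConstraintSampleRate(44100);
 *   NS_ENSURE_SUCCESS(rv, rv);
 *   rv = processor->SetConstraintBlockSize(4096);   // samples per callback
 *   NS_ENSURE_SUCCESS(rv, rv);
 *   rv = processor->Start(mediaItem);               // main thread only
 *   NS_ENSURE_SUCCESS(rv, rv);
 */
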
274 nsresult
275 sbGStreamerAudioProcessor::BuildPipeline()
276 {
277  TRACE(("%s[%p]", __FUNCTION__, this));
278 
279  /* Our pipeline is going to look like this - it's pretty simple at this level.
280  *
281  * [uridecodebin]-[audioconvert]-[audioresample]-[capsfilter]-[appsink]
282  *
283  * Most of the complexity is in a) how we configure the capsfilter, and
284  * b) what we do in appsink.
285  *
286  * The capsfilter is configured twice:
287  * 1. Initially, it's set up to match the constraint values set on the
288  * interface.
289  * 2. Once we've got a fixed format (which we send along with the START
290  * event), we re-configure the filter to ONLY accept this format. This
291  * ensures that, if the underlying format changes mid-stream, what we
292  * deliver to the listener does not change.
293  */
294  mPipeline = gst_pipeline_new ("audio-processor");
295  NS_ENSURE_TRUE (mPipeline, NS_ERROR_FAILURE);
296 
297  GstElement *uridecodebin = gst_element_factory_make(
298  "uridecodebin",
299  "audio-processor-decoder");
300  if (!uridecodebin) {
301  g_object_unref (mPipeline);
302  mPipeline = NULL;
303 
304  return NS_ERROR_FAILURE;
305  }
306 
307  // Set the source URI from our media item
308  nsString contentURL;
309  nsresult rv = mMediaItem->GetProperty(
310  NS_LITERAL_STRING(SB_PROPERTY_CONTENTURL),
311  contentURL);
312  NS_ENSURE_SUCCESS(rv, rv);
313 
314  // Use the content URL as the display name for this item (used for
315  // error reporting, etc.)
316  mResourceDisplayName = contentURL;
317 
318  g_object_set (uridecodebin, "uri",
319  NS_ConvertUTF16toUTF8(contentURL).BeginReading(), NULL);
320 
321  // Connect to callbacks for when uridecodebin adds a new pad that we (may)
322  // want to connect up to the rest of the pipeline, and for when we're not
323  // going to get any more pads (so that we can fire an error event if we
324  // haven't received an audio pad)
325  g_signal_connect (uridecodebin, "pad-added",
326  G_CALLBACK (decodebin_pad_added_cb), this);
327  g_signal_connect (uridecodebin, "no-more-pads",
328  G_CALLBACK (decodebin_no_more_pads_cb), this);
329 
330  gst_bin_add (GST_BIN (mPipeline), uridecodebin);
331 
332  mAdapter = gst_adapter_new();
333 
334  return NS_OK;
335 }
336 
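/*
 * For orientation, the graph assembled here (once DecoderPadAdded() has run)
 * is roughly equivalent to the gst-launch-0.10 line below. This is an
 * illustrative sketch only: the real caps come from
 * ConfigureInitialCapsfilter(), and the appsink is driven through its
 * signals rather than by a launch line.
 *
 *   gst-launch-0.10 uridecodebin uri=file:///path/to/track.ogg ! \
 *     audioconvert ! audioresample ! \
 *     capsfilter caps="audio/x-raw-int,width=16,depth=16,channels=(int)[1,2]" ! \
 *     appsink sync=false max-buffers=10 emit-signals=true
 */
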
337 void
338 sbGStreamerAudioProcessor::HandleMessage (GstMessage *message)
339 {
340  // We override the base pipeline message handling - all we want to do here
341  // is deal with errors.
342  nsresult rv;
343  GstMessageType msgtype = GST_MESSAGE_TYPE(message);
344  switch(msgtype) {
345  case GST_MESSAGE_ERROR:
346  {
347  gchar *debug = NULL;
348  GError *gerror = NULL;
349 
350  if (mHasSentError)
351  {
352  LOG(("Ignoring multiple error messages"));
353  return;
354  }
355 
356  gst_message_parse_error(message, &gerror, &debug);
357 
358  nsCOMPtr<sbIMediacoreError> error;
359  rv = GetMediacoreErrorFromGstError(gerror, mResourceDisplayName,
360  GStreamer::OP_UNKNOWN,
361  getter_AddRefs(error));
362  NS_ENSURE_SUCCESS(rv, /* void */);
363 
364  rv = SendEventAsync(sbIMediacoreAudioProcessorListener::EVENT_ERROR,
365  sbNewVariant(error).get());
366  NS_ENSURE_SUCCESS(rv, /* void */);
367 
368  g_error_free (gerror);
369  g_free(debug);
370 
371  mHasSentError = PR_TRUE;
372 
373  break;
374  }
375  default:
376  LOG(("Ignoring message: %s", gst_message_type_get_name(msgtype)));
377  break;
378  }
379 }
380 
381 /* static */ void
382 sbGStreamerAudioProcessor::decodebin_pad_added_cb (
383  GstElement * uridecodebin,
384  GstPad * pad,
385  sbGStreamerAudioProcessor *processor)
386 {
387  nsresult rv = processor->DecoderPadAdded(uridecodebin, pad);
388  NS_ENSURE_SUCCESS (rv, /* void */);
389 }
390 
391 /* static */ void
392 sbGStreamerAudioProcessor::decodebin_no_more_pads_cb (
393  GstElement * uridecodebin,
394  sbGStreamerAudioProcessor *processor)
395 {
396  nsresult rv = processor->DecoderNoMorePads(uridecodebin);
397  NS_ENSURE_SUCCESS (rv, /* void */);
398 }
399 
400 /* static */ void
401 sbGStreamerAudioProcessor::appsink_new_buffer_cb (
402  GstElement * appsink,
403  sbGStreamerAudioProcessor *processor)
404 {
405  nsresult rv = processor->AppsinkNewBuffer(appsink);
406  NS_ENSURE_SUCCESS (rv, /* void */);
407 }
408 
409 /* static */ void
410 sbGStreamerAudioProcessor::appsink_eos_cb (
411  GstElement * appsink,
412  sbGStreamerAudioProcessor *processor)
413 {
414  nsresult rv = processor->AppsinkEOS(appsink);
415  NS_ENSURE_SUCCESS (rv, /* void */);
416 }
417 
418 nsresult
419 sbGStreamerAudioProcessor::DecoderPadAdded (GstElement *uridecodebin,
420  GstPad *pad)
421 {
422  TRACE(("%s[%p]", __FUNCTION__, this));
423  nsresult rv;
424 
425  // A new decoded pad has been added from the decodebin. If it's the first
426  // audio stream, we use it. Otherwise, we ignore it.
427  GstCaps *caps = gst_pad_get_caps (pad);
428  GstStructure *structure = gst_caps_get_structure (caps, 0);
429  const gchar *name = gst_structure_get_name (structure);
430  bool isAudio = g_str_has_prefix (name, "audio/");
431 
432  gst_caps_unref (caps);
433 
434  if (!isAudio) {
435  LOG(("Ignoring non audio pad"));
436  return NS_OK;
437  }
438 
439  if (mFoundAudioPad) {
440  LOG(("Ignoring additional audio pad"));
441  return NS_OK;
442  }
443 
444  mFoundAudioPad = PR_TRUE;
445 
446  // Now we can build the rest of the pipeline
447  GstElement *audioconvert = NULL;
448  GstElement *audioresample = NULL;
449  GstElement *capsfilter = NULL;
450  GstElement *appsink = NULL;
451  GstPad *sinkpad;
452 
453  audioconvert = gst_element_factory_make("audioconvert", NULL);
454  audioresample = gst_element_factory_make("audioresample", NULL);
455  capsfilter = gst_element_factory_make("capsfilter", NULL);
456  appsink = gst_element_factory_make("appsink", NULL);
457 
458  if (!audioconvert || !audioresample || !capsfilter || !appsink) {
459  LOG(("Missing base elements, corrupt GStreamer install"));
460  goto failed;
461  }
462 
463  rv = ConfigureInitialCapsfilter(capsfilter);
464  NS_ENSURE_SUCCESS(rv, rv);
465 
466  // Disable sync (we're not realtime) and enable signal emission so that
467  // the new-buffer signal is emitted.
468  // Set max-buffers to 10 (to allow some buffering) - we don't want to buffer
469  // the entire file, decoded, internally.
470  g_object_set (appsink,
471  "emit-signals", TRUE,
472  "sync", FALSE,
473  "max-buffers", 10,
474  NULL);
475  g_signal_connect (appsink, "new-buffer",
476  G_CALLBACK (appsink_new_buffer_cb), this);
477  g_signal_connect (appsink, "eos",
478  G_CALLBACK (appsink_eos_cb), this);
479 
480  gst_bin_add_many (GST_BIN (mPipeline),
481  audioconvert, audioresample, capsfilter, appsink, NULL);
482  gst_element_link_many (audioconvert, audioresample, capsfilter, appsink, NULL);
483 
484  sinkpad = gst_element_get_static_pad (audioconvert, "sink");
485  gst_pad_link (pad, sinkpad);
486  gst_object_unref (sinkpad);
487 
488  gst_element_set_state (audioconvert, GST_STATE_PLAYING);
489  gst_element_set_state (audioresample, GST_STATE_PLAYING);
490  gst_element_set_state (capsfilter, GST_STATE_PLAYING);
491  gst_element_set_state (appsink, GST_STATE_PLAYING);
492 
493  mAppSink = (GstAppSink *)gst_object_ref (appsink);
494  mCapsFilter = (GstElement *)gst_object_ref (capsfilter);
495 
496  return NS_OK;
497 
498 failed:
499  if (audioconvert)
500  g_object_unref (audioconvert);
501  if (audioresample)
502  g_object_unref (audioresample);
503  if (capsfilter)
504  g_object_unref (capsfilter);
505  if (appsink)
506  g_object_unref (appsink);
507 
508  return NS_ERROR_FAILURE;
509 }
510 
511 nsresult
512 sbGStreamerAudioProcessor::OnDestroyPipeline(GstElement *pipeline)
513 {
514  if (mAppSink) {
515  gst_object_unref (mAppSink);
516  mAppSink = NULL;
517  }
518  if (mCapsFilter) {
519  gst_object_unref (mCapsFilter);
520  mCapsFilter = NULL;
521  }
522 
523  if (mAdapter) {
524  g_object_unref (mAdapter);
525  mAdapter = NULL;
526  }
527 
528  if (mPendingBuffer) {
529  gst_buffer_unref (mPendingBuffer);
530  mPendingBuffer = NULL;
531  }
532 
533  // And... reset all our state tracking variables.
534  mSuspended = PR_FALSE;
535  mFoundAudioPad = PR_FALSE;
536  mHasStarted = PR_FALSE;
537  mIsEOS = PR_FALSE;
538  mIsEndOfSection = PR_FALSE;
539  mHasSentError = PR_FALSE;
540  mSendGap = PR_FALSE;
541  mSampleNumber = 0;
542  mExpectedNextSampleNumber = 0;
543  mAudioFormat = FORMAT_ANY;
544  mSampleRate = 0;
545  mChannels = 0;
546  mBuffersAvailable = 0;
547 
548  return NS_OK;
549 }
550 
551 nsresult
552 sbGStreamerAudioProcessor::ConfigureInitialCapsfilter(GstElement *capsfilter)
553 {
554  TRACE(("%s[%p]", __FUNCTION__, this));
555 
556  GstCaps *caps;
557  GstStructure *structure;
558 
559  caps = gst_caps_new_empty ();
560  if (mConstraintAudioFormat == sbIMediacoreAudioProcessor::FORMAT_ANY ||
561  mConstraintAudioFormat == sbIMediacoreAudioProcessor::FORMAT_INT16)
562  {
563  structure = gst_structure_new ("audio/x-raw-int",
564  "endianness", G_TYPE_INT, G_BYTE_ORDER,
565  "width", G_TYPE_INT, 16,
566  "depth", G_TYPE_INT, 16,
567  NULL);
568  if (mConstraintSampleRate) {
569  gst_structure_set(structure,
570  "rate", G_TYPE_INT, mConstraintSampleRate, NULL);
571  }
572 
573  if (mConstraintChannelCount) {
574  gst_structure_set(structure,
575  "channels", G_TYPE_INT, mConstraintChannelCount, NULL);
576  }
577  else {
578  // We don't currently support > 2 channels, so even if no constraint is
579  // set explicitly, we limit to 1 or 2 channels.
580  gst_structure_set(structure, "channels", GST_TYPE_INT_RANGE, 1, 2, NULL);
581  }
582  gst_caps_append_structure (caps, structure);
583  }
584 
585  if (mConstraintAudioFormat == sbIMediacoreAudioProcessor::FORMAT_ANY ||
586  mConstraintAudioFormat == sbIMediacoreAudioProcessor::FORMAT_FLOAT)
587  {
588  structure = gst_structure_new ("audio/x-raw-float",
589  "endianness", G_TYPE_INT, G_BYTE_ORDER,
590  "width", G_TYPE_INT, 32,
591  NULL);
592  if (mConstraintSampleRate) {
593  gst_structure_set(structure,
594  "rate", G_TYPE_INT, mConstraintSampleRate, NULL);
595  }
596 
597  if (mConstraintChannelCount) {
598  gst_structure_set(structure,
599  "channels", G_TYPE_INT, mConstraintChannelCount, NULL);
600  }
601  else {
602  // We don't currently support > 2 channels, so even if no constraint is
603  // set explicitly, we limit to 1 or 2 channels.
604  gst_structure_set(structure, "channels", GST_TYPE_INT_RANGE, 1, 2, NULL);
605  }
606  gst_caps_append_structure (caps, structure);
607  }
608 
609  g_object_set (capsfilter, "caps", caps, NULL);
610 
611  return NS_OK;
612 }
613 
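/*
 * Example of the initial filter caps built above when no constraints are set
 * (an illustrative rendering of the caps string):
 *
 *   audio/x-raw-int, endianness=(int)G_BYTE_ORDER, width=(int)16,
 *       depth=(int)16, channels=(int)[ 1, 2 ];
 *   audio/x-raw-float, endianness=(int)G_BYTE_ORDER, width=(int)32,
 *       channels=(int)[ 1, 2 ]
 *
 * A sample-rate or channel-count constraint simply adds a fixed "rate" or
 * "channels" field to both structures.
 */
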
614 nsresult
615 sbGStreamerAudioProcessor::ReconfigureCapsfilter()
616 {
617  TRACE(("%s[%p]", __FUNCTION__, this));
618  GstCaps *caps;
619 
620  if (mAudioFormat == sbIMediacoreAudioProcessor::FORMAT_INT16)
621  {
622  caps = gst_caps_new_simple ("audio/x-raw-int",
623  "endianness", G_TYPE_INT, G_BYTE_ORDER,
624  "width", G_TYPE_INT, 16,
625  "depth", G_TYPE_INT, 16,
626  "rate", G_TYPE_INT, mSampleRate,
627  "channels", G_TYPE_INT, mChannels,
628  NULL);
629  }
630  else {
631  caps = gst_caps_new_simple ("audio/x-raw-float",
632  "endianness", G_TYPE_INT, G_BYTE_ORDER,
633  "width", G_TYPE_INT, 32,
634  "rate", G_TYPE_INT, mSampleRate,
635  "channels", G_TYPE_INT, mChannels,
636  NULL);
637  }
638 
639  g_object_set (mCapsFilter, "caps", caps, NULL);
640 
641  return NS_OK;
642 }
643 
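/*
 * Example of the re-fixed caps for a stream that negotiated as 16-bit,
 * 44.1 kHz stereo (illustrative values):
 *
 *   audio/x-raw-int, endianness=(int)G_BYTE_ORDER, width=(int)16,
 *       depth=(int)16, rate=(int)44100, channels=(int)2
 *
 * Because the filter is now fully fixed, a mid-stream format change upstream
 * is converted by audioconvert/audioresample (or fails) rather than changing
 * what the listener sees.
 */
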
644 nsresult
645 sbGStreamerAudioProcessor::DecoderNoMorePads (GstElement *uridecodebin)
646 {
647  TRACE(("%s[%p]", __FUNCTION__, this));
648 
649  if (!mFoundAudioPad) {
650  // Looks like we didn't find any audio pads at all. Fire off an error.
651  nsresult rv = SendErrorEvent(sbIMediacoreError::SB_STREAM_WRONG_TYPE,
652  "mediacore.error.wrong_type");
653  NS_ENSURE_SUCCESS(rv, rv);
654  }
655 
656  return NS_OK;
657 }
658 
659 PRBool
660 sbGStreamerAudioProcessor::HasEnoughData()
661 {
662  TRACE(("%s[%p]", __FUNCTION__, this));
663 
664  nsAutoMonitor mon(mMonitor);
665 
666  guint available = gst_adapter_available (mAdapter);
667 
668  // In all cases we must have at least one byte of data. Beyond that, we
669  // need one of the following:
670  // - at least a full constraint block's worth of bytes (so we can send a block)
671  // - whatever we have if mIsEndOfSection is set, since this is going to
672  // be followed by a GAP event
673  // - OR, if we're at EOS with no more buffers available - this ensures
674  // that we don't send a partial block after getting EOS just because we
675  // haven't actually pulled remaining data from the appsink.
676  return (available > 0 &&
677  ((mIsEOS && !mBuffersAvailable) ||
678  mIsEndOfSection ||
679  available >= mConstraintBlockSizeBytes));
680 }
681 
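/*
 * Worked example, assuming FORMAT_INT16 and a constraint block size of 4096
 * samples (so mConstraintBlockSizeBytes == 8192):
 *
 *   adapter: 5000 bytes, !mIsEOS, !mIsEndOfSection       -> not enough yet
 *   adapter: 5000 bytes, mIsEndOfSection                 -> enough (flush before GAP)
 *   adapter: 5000 bytes, mIsEOS, mBuffersAvailable == 0  -> enough (final partial block)
 *   adapter: 9000 bytes                                  -> enough (one full block)
 */
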
682 PRUint64
683 sbGStreamerAudioProcessor::GetSampleNumberFromBuffer(GstBuffer *buf)
684 {
685  GstClockTime timestamp = GST_BUFFER_TIMESTAMP (buf);
686  if (timestamp == GST_CLOCK_TIME_NONE) {
687  // We have to assume it's contiguous with the previous buffer in this case
688  return mExpectedNextSampleNumber;
689  }
690 
691  return gst_util_uint64_scale_int_round (timestamp, mSampleRate, GST_SECOND) *
692  mChannels;
693 }
694 
695 PRUint32
696 sbGStreamerAudioProcessor::GetDurationFromBuffer(GstBuffer *buf)
697 {
698  int size;
699  if (mAudioFormat == FORMAT_FLOAT)
700  size = sizeof(float);
701  else
702  size = sizeof(short);
703 
704  return GST_BUFFER_SIZE (buf) / size;
705 }
706 
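/*
 * Worked example for the two helpers above, assuming a 44100 Hz stereo
 * stream: a buffer stamped at 0.5 s maps to sample number
 * round(0.5 * 44100) * 2 = 44100 (sample numbers count interleaved samples
 * across both channels), and a 4096-byte FORMAT_FLOAT buffer contributes
 * 4096 / sizeof(float) = 1024 samples to mExpectedNextSampleNumber.
 */
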
707 void
708 sbGStreamerAudioProcessor::GetMoreData()
709 {
710  TRACE(("%s[%p]", __FUNCTION__, this));
711 
712  nsAutoMonitor mon(mMonitor);
713 
714  NS_ENSURE_TRUE (mBuffersAvailable > 0, /* void */);
715 
716  if (mPendingBuffer) {
717  // A pending buffer is only used when it follows a gap/discontinuity, so
718  // we won't get here until the data before the gap has all been sent.
719  NS_ASSERTION(gst_adapter_available(mAdapter) == 0,
720  "Had pending buffer but asked to get more data when adapter not empty");
721 
722  mSampleNumber = GetSampleNumberFromBuffer(mPendingBuffer);
723  gst_adapter_push(mAdapter, mPendingBuffer);
724  mSendGap = TRUE;
725  mExpectedNextSampleNumber = mSampleNumber +
726  GetDurationFromBuffer(mPendingBuffer);
727 
728  mBuffersAvailable--;
729  mPendingBuffer = NULL;
730  mIsEndOfSection = PR_FALSE;
731  }
732  else {
733  GstBuffer *buf = gst_app_sink_pull_buffer(mAppSink);
734  NS_ASSERTION(buf, "pulled buffer when asked to get more but got no buffer");
735 
736  // Consider this a discontinuity if the sample numbers are discontinuous,
737  // or we get a DISCONT buffer _other than_ at the very start of the stream.
738  PRUint64 nextSampleNumber = GetSampleNumberFromBuffer(buf);
739  if (nextSampleNumber != mExpectedNextSampleNumber ||
740  (GST_BUFFER_IS_DISCONT (buf) && mExpectedNextSampleNumber != 0))
741  {
742  LOG(("Discontinuity found"));
743  // We include mPendingBuffer in mBuffersAvailable, so don't need to
744  // decrement in this case.
745  mPendingBuffer = buf;
746  mIsEndOfSection = PR_TRUE;
747  }
748  else {
749  mBuffersAvailable--;
750 
751  if (gst_adapter_available(mAdapter) == 0)
752  mSampleNumber = nextSampleNumber;
753  gst_adapter_push(mAdapter, buf);
754  mExpectedNextSampleNumber += GetDurationFromBuffer(buf);
755  }
756  }
757 }
758 
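/*
 * Sketch of the discontinuity path above, with illustrative numbers: if the
 * adapter's data ends at sample 88200 but the next buffer pulled from the
 * appsink starts at sample 132300, that buffer is parked in mPendingBuffer
 * and mIsEndOfSection is set. The data already in the adapter is delivered
 * first; once the adapter drains, the pending buffer is pushed and mSendGap
 * is set, so the next delivery is preceded by EVENT_GAP and restarts at
 * sample 132300.
 */
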
759 nsresult
760 sbGStreamerAudioProcessor::AppsinkNewBuffer(GstElement *appsink)
761 {
762  nsresult rv;
763  nsAutoMonitor mon(mMonitor);
764 
765  // Once we get the first chunk of data, we can determine what format we will
766  // send to consumers.
767  if (mAudioFormat == FORMAT_ANY) {
768  rv = DetermineFormat();
769  NS_ENSURE_SUCCESS(rv, rv);
770  }
771 
772  mBuffersAvailable++;
773 
774  if (!HasEnoughData()) {
775  GetMoreData();
776 
777  if (HasEnoughData()) {
778  rv = ScheduleSendData();
779  NS_ENSURE_SUCCESS(rv, rv);
780  }
781  }
782 
783  return NS_OK;
784 }
785 
786 nsresult
787 sbGStreamerAudioProcessor::ScheduleSendDataIfAvailable()
788 {
789  TRACE(("%s[%p]", __FUNCTION__, this));
790 
791  nsresult rv;
792 
793  nsAutoMonitor mon(mMonitor);
794 
795  if (HasEnoughData()) {
796  rv = ScheduleSendData();
797  NS_ENSURE_SUCCESS(rv, rv);
798  return NS_OK;
799  }
800  else {
801  while(mBuffersAvailable) {
802  GetMoreData();
803 
804  if (HasEnoughData()) {
805  rv = ScheduleSendData();
806  NS_ENSURE_SUCCESS(rv, rv);
807  return NS_OK;
808  }
809  }
810  }
811 
812  if (mIsEOS) {
813  rv = SendEventAsync(sbIMediacoreAudioProcessorListener::EVENT_EOS, nsnull);
814  NS_ENSURE_SUCCESS(rv, rv);
815  }
816 
817  return NS_OK;
818 }
819 
820 nsresult
821 sbGStreamerAudioProcessor::ScheduleSendData()
822 {
823  TRACE(("%s[%p]", __FUNCTION__, this));
824 
825  nsCOMPtr<nsIThread> mainThread;
826  nsresult rv = NS_GetMainThread(getter_AddRefs(mainThread));
827  NS_ENSURE_SUCCESS(rv, rv);
828 
829  nsCOMPtr<nsIRunnable> runnable =
830  NS_NEW_RUNNABLE_METHOD(sbGStreamerAudioProcessor, this, SendDataToListener);
831  NS_ENSURE_TRUE(runnable, NS_ERROR_FAILURE);
832 
833  rv = mainThread->Dispatch(runnable, NS_DISPATCH_NORMAL);
834  NS_ENSURE_SUCCESS(rv, rv);
835 
836  return NS_OK;
837 }
838 
839 nsresult
840 sbGStreamerAudioProcessor::DetermineFormat()
841 {
842  GstPad *appsinkSinkPad = gst_element_get_static_pad (GST_ELEMENT (mAppSink),
843  "sink");
844  GstCaps *caps = gst_pad_get_negotiated_caps(appsinkSinkPad);
845  if (!caps)
846  return NS_ERROR_FAILURE;
847 
848  GstStructure *structure = gst_caps_get_structure(caps, 0);
849  const gchar *capsName = gst_structure_get_name (structure);
850 
851  if (g_str_equal(capsName, "audio/x-raw-float")) {
852  mAudioFormat = FORMAT_FLOAT;
853  mConstraintBlockSizeBytes = mConstraintBlockSize * sizeof(float);
854  }
855  else {
856  mAudioFormat = FORMAT_INT16;
857  mConstraintBlockSizeBytes = mConstraintBlockSize * sizeof(short);
858  }
859 
860  gst_structure_get_int (structure, "rate", &mSampleRate);
861  gst_structure_get_int (structure, "channels", &mChannels);
862 
863  gst_caps_unref (caps);
864 
865  // Now we fix the capsfilter to these settings, so that if the underlying
866  // stream changes, our output does not.
867  nsresult rv = ReconfigureCapsfilter();
868  NS_ENSURE_SUCCESS(rv, rv);
869 
870  return NS_OK;
871 }
872 
873 nsresult
874 sbGStreamerAudioProcessor::DoStreamStart()
875 {
876  nsresult rv;
877  nsCOMPtr<sbIMediaFormatAudioMutable> audioFormat =
878  do_CreateInstance(SB_MEDIAFORMATAUDIO_CONTRACTID, &rv);
879  NS_ENSURE_SUCCESS(rv, rv);
880 
881  rv = audioFormat->SetAudioType(NS_LITERAL_STRING ("audio/x-raw"));
882  NS_ENSURE_SUCCESS(rv, rv);
883  rv = audioFormat->SetSampleRate(mSampleRate);
884  NS_ENSURE_SUCCESS(rv, rv);
885  rv = audioFormat->SetChannels(mChannels);
886  NS_ENSURE_SUCCESS(rv, rv);
887 
888  nsCOMPtr<nsISupports> audioFormatISupports = do_QueryInterface(audioFormat,
889  &rv);
890  NS_ENSURE_SUCCESS(rv, rv);
891 
892  rv = SendEventSync(sbIMediacoreAudioProcessorListener::EVENT_START,
893  sbNewVariant(audioFormatISupports).get());
894  NS_ENSURE_SUCCESS(rv, rv);
895 
896  return NS_OK;
897 }
898 
899 void
900 sbGStreamerAudioProcessor::SendDataToListener()
901 {
902  nsresult rv;
903  const guint8 *data;
904  guint bytesRead = 0;
905 
906  nsAutoMonitor mon(mMonitor);
907 
908  // It's possible that the pipeline was stopped (on the main thread) before
909  // this queued event was run; in that case we just return.
910  if (!mPipeline)
911  return;
912 
913  NS_ASSERTION(HasEnoughData(), "Asked to send data, but cannot");
914 
915  if (mSuspended)
916  return;
917 
918  if (!mHasStarted) {
919  mHasStarted = PR_TRUE;
920  // Drop monitor to send event to the listener.
921  mon.Exit();
922 
923  rv = DoStreamStart();
924  NS_ENSURE_SUCCESS(rv, /*void*/);
925 
926  mon.Enter();
927 
928  if (!mHasStarted || mSuspended) {
929  // Got stopped or suspended from the start event; nothing to do now.
930  return;
931  }
932  }
933 
934  guint available = gst_adapter_available (mAdapter);
935  if (mConstraintBlockSize == 0)
936  bytesRead = available;
937  else if (available >= mConstraintBlockSizeBytes)
938  bytesRead = mConstraintBlockSizeBytes;
939  else if (mIsEOS || mIsEndOfSection)
940  bytesRead = available;
941  else
942  NS_NOTREACHED("not enough data here");
943 
944  data = gst_adapter_peek(mAdapter, bytesRead);
945 
946  PRUint32 sampleNumber = mSampleNumber;
947  PRUint32 numSamples;
948 
949  PRBool sendGap = mSendGap;
950  mSendGap = PR_FALSE;
951 
952  // Call listener with the monitor released.
953  mon.Exit();
954 
955  if (sendGap) {
956  rv = SendEventSync(sbIMediacoreAudioProcessorListener::EVENT_GAP, nsnull);
957  NS_ENSURE_SUCCESS(rv, /* void */);
958  }
959 
960  if (mAudioFormat == FORMAT_INT16) {
961  numSamples = bytesRead / sizeof(PRInt16);
962  PRInt16 *sampleData = (PRInt16 *)data;
963 
964  rv = mListener->OnIntegerAudioDecoded(sampleNumber, numSamples, sampleData);
965  }
966  else {
967  numSamples = bytesRead / sizeof(float);
968  float *sampleData = (float *)data;
969 
970  rv = mListener->OnFloatAudioDecoded(sampleNumber, numSamples, sampleData);
971  }
972 
973  if (NS_FAILED(rv)) {
974  NS_WARNING("Listener failed to receive data");
975  }
976 
977  mon.Enter();
978 
979  // If we're no longer started, that means that we got stopped while the
980  // monitor was released. Don't try to touch any further state!
981  if (!mHasStarted)
982  return;
983 
984  mSampleNumber += numSamples;
985 
986  gst_adapter_flush(mAdapter, bytesRead);
987 
988  // Listener might have paused or stopped us; in that case we don't want to
989  // schedule another send.
990  if (mSuspended)
991  return;
992 
993  rv = ScheduleSendDataIfAvailable();
994  NS_ENSURE_SUCCESS(rv, /* void */);
995 }
996 
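/*
 * Minimal listener sketch (hypothetical class and member names; see
 * sbIMediacoreAudioProcessorListener for the authoritative signatures). It
 * just tracks the peak level of each integer block it receives:
 *
 *   NS_IMETHODIMP
 *   MyPeakListener::OnIntegerAudioDecoded(PRUint32 aSampleNumber,
 *                                         PRUint32 aSampleCount,
 *                                         PRInt16 *aData)
 *   {
 *     PRInt32 peak = 0;
 *     for (PRUint32 i = 0; i < aSampleCount; i++) {
 *       PRInt32 v = aData[i];
 *       if (v < 0)
 *         v = -v;
 *       if (v > peak)
 *         peak = v;
 *     }
 *     mPeak = peak;  // aSampleNumber / channels gives the frame offset
 *     return NS_OK;
 *   }
 *
 *   NS_IMETHODIMP
 *   MyPeakListener::OnEvent(PRUint32 aEventType, nsIVariant *aDetails)
 *   {
 *     // EVENT_START carries the negotiated sbIMediaFormatAudio in aDetails;
 *     // EVENT_GAP, EVENT_EOS and EVENT_ERROR are sent as described above.
 *     return NS_OK;
 *   }
 */
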
997 nsresult
998 sbGStreamerAudioProcessor::AppsinkEOS(GstElement *appsink)
999 {
1000  TRACE(("%s[%p]", __FUNCTION__, this));
1001 
1002  nsresult rv;
1003 
1004  nsAutoMonitor mon(mMonitor);
1005 
1006  // If we have enough data already, then processing is in-progress; we don't
1007  // need to do anything specific.
1008  if (!HasEnoughData()) {
1009  mIsEOS = PR_TRUE;
1010 
1011  // Otherwise: either setting EOS will make us send the final partial chunk
1012  // of data, followed by an EOS, or we've already finished with ALL data,
1013  // so we should just immediately send EOS.
1014  if (HasEnoughData()) {
1015  rv = ScheduleSendData();
1016  NS_ENSURE_SUCCESS(rv, rv);
1017  }
1018  else {
1019  if (mHasStarted) {
1020  rv = SendEventAsync(sbIMediacoreAudioProcessorListener::EVENT_EOS,
1021  nsnull);
1022  NS_ENSURE_SUCCESS(rv, rv);
1023  }
1024  else if (!mHasSentError) {
1025  // Send an error - it's an error to reach EOS without having received
1026  // any audio samples at all.
1027  rv = SendErrorEvent(sbIMediacoreError::SB_STREAM_DECODE,
1028  "mediacore.error.decode_failed");
1029  NS_ENSURE_SUCCESS(rv, rv);
1030  }
1031  }
1032  }
1033  else {
1034  mIsEOS = PR_TRUE;
1035  }
1036 
1037  return NS_OK;
1038 }
1039 
1040 nsresult
1041 sbGStreamerAudioProcessor::SendErrorEvent(PRUint32 errorCode,
1042  const char *errorName)
1043 {
1044  TRACE(("%s[%p]", __FUNCTION__, this));
1045 
1046  sbStringBundle bundle;
1047  nsString errorMessage = bundle.Get(errorName);
1048 
1049  nsRefPtr<sbMediacoreError> error;
1050  NS_NEWXPCOM(error, sbMediacoreError);
1051  error->Init(errorCode, errorMessage);
1052 
1053  nsresult rv = SendEventAsync(sbIMediacoreAudioProcessorListener::EVENT_ERROR,
1054  sbNewVariant(error).get());
1055  NS_ENSURE_SUCCESS(rv, rv);
1056 
1057  return NS_OK;
1058 }
1059 
1060 nsresult
1061 sbGStreamerAudioProcessor::SendEventInternal(PRUint32 eventType,
1062  nsCOMPtr<nsIVariant> eventDetails)
1063 {
1064  nsresult rv;
1065 
1066  LOG(("Sending event of type %d", eventType));
1067  rv = mListener->OnEvent(eventType, eventDetails);
1068  if (NS_FAILED(rv)) {
1069  LOG(("Listener returned error from OnEvent: %x", rv));
1070  }
1071 
1072  return NS_OK;
1073 }
1074 
1075 nsresult
1076 sbGStreamerAudioProcessor::SendEventAsync(PRUint32 eventType,
1077  nsIVariant *eventDetails)
1078 {
1079  nsresult rv;
1080 
1081  LOG(("Scheduling sending event of type %d", eventType));
1082 
1083  // Hold on to this so that the object doesn't get unreffed and go away before
1084  // the call completes.
1085  nsCOMPtr<nsIVariant> details(eventDetails);
1086  rv = sbInvokeOnMainThread2Async(*this,
1087  &sbGStreamerAudioProcessor::SendEventInternal,
1088  NS_ERROR_FAILURE,
1089  eventType,
1090  details);
1091  NS_ENSURE_SUCCESS(rv, rv);
1092  return NS_OK;
1093 }
1094 
1095 nsresult
1096 sbGStreamerAudioProcessor::SendEventSync(PRUint32 eventType,
1097  nsIVariant *eventDetails)
1098 {
1099  NS_ASSERTION(NS_IsMainThread(),
1100  "SendEventSync() must be called from the main thread");
1101  nsCOMPtr<nsIVariant> details(eventDetails);
1102  nsresult rv = SendEventInternal(eventType, details);
1103  NS_ENSURE_SUCCESS(rv, rv);
1104 
1105  return NS_OK;
1106 }