FFmpeg
pulse_audio_enc.c
1 /*
2  * Copyright (c) 2013 Lukasz Marek <lukasz.m.luki@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <math.h>
22 #include <pulse/pulseaudio.h>
23 #include <pulse/error.h>
24 #include "libavformat/avformat.h"
25 #include "libavformat/internal.h"
26 #include "libavutil/internal.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/time.h"
29 #include "libavutil/log.h"
30 #include "libavutil/attributes.h"
31 #include "pulse_audio_common.h"
32 
33 typedef struct PulseData {
34  AVClass *class;
35  const char *server;
36  const char *name;
37  const char *stream_name;
38  const char *device;
39  int64_t timestamp;
40  int buffer_size; /**< Buffer size in bytes */
41  int buffer_duration; /**< Buffer size in ms, recalculated to buffer_size */
42  int prebuf;
43  int minreq;
44  int last_result;
45  pa_threaded_mainloop *mainloop;
46  pa_context *ctx;
47  pa_stream *stream;
48  int nonblocking;
49  int mute;
50  pa_volume_t base_volume;
51  pa_volume_t last_volume;
52 } PulseData;
53 
54 static void pulse_audio_sink_device_cb(pa_context *ctx, const pa_sink_info *dev,
55  int eol, void *userdata)
56 {
57  PulseData *s = userdata;
58 
59  if (s->ctx != ctx)
60  return;
61 
62  if (eol) {
63  pa_threaded_mainloop_signal(s->mainloop, 0);
64  } else {
65  if (dev->flags & PA_SINK_FLAT_VOLUME)
66  s->base_volume = dev->base_volume;
67  else
68  s->base_volume = PA_VOLUME_NORM;
69  av_log(s, AV_LOG_DEBUG, "base volume: %u\n", s->base_volume);
70  }
71 }
72 
73 /* Mainloop must be locked before calling this function as it uses pa_threaded_mainloop_wait. */
74 static int pulse_update_sink_info(AVFormatContext *h)
75 {
76  PulseData *s = h->priv_data;
77  pa_operation *op;
78  if (!(op = pa_context_get_sink_info_by_name(s->ctx, s->device,
79  pulse_audio_sink_device_cb, s))) {
80  av_log(s, AV_LOG_ERROR, "pa_context_get_sink_info_by_name failed.\n");
81  return AVERROR_EXTERNAL;
82  }
83  while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
84  pa_threaded_mainloop_wait(s->mainloop);
85  pa_operation_unref(op);
86  return 0;
87 }
88 
89 static void pulse_audio_sink_input_cb(pa_context *ctx, const pa_sink_input_info *i,
90  int eol, void *userdata)
91 {
92  AVFormatContext *h = userdata;
93  PulseData *s = h->priv_data;
94 
95  if (s->ctx != ctx)
96  return;
97 
98  if (!eol) {
99  double val;
100  pa_volume_t vol = pa_cvolume_avg(&i->volume);
101  if (s->mute < 0 || (s->mute && !i->mute) || (!s->mute && i->mute)) {
102  s->mute = i->mute;
103  avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_MUTE_STATE_CHANGED, &s->mute, sizeof(s->mute));
104  }
105 
106  vol = pa_sw_volume_divide(vol, s->base_volume);
107  if (s->last_volume != vol) {
108  val = (double)vol / PA_VOLUME_NORM;
109  avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED, &val, sizeof(val));
110  s->last_volume = vol;
111  }
112  }
113 }
114 
115 /* This function creates new loop so may be called from PA callbacks.
116  Mainloop must be locked before calling this function as it operates on streams. */
117 static int pulse_update_sink_input_info(AVFormatContext *h)
118 {
119  PulseData *s = h->priv_data;
120  pa_operation *op;
121  enum pa_operation_state op_state;
122  pa_mainloop *ml = NULL;
123  pa_context *ctx = NULL;
124  int ret = 0;
125 
126  if ((ret = ff_pulse_audio_connect_context(&ml, &ctx, s->server, "Update sink input information")) < 0)
127  return ret;
128 
129  if (!(op = pa_context_get_sink_input_info(ctx, pa_stream_get_index(s->stream),
130  pulse_audio_sink_input_cb, h))) {
131  ret = AVERROR_EXTERNAL;
132  goto fail;
133  }
134 
135  while ((op_state = pa_operation_get_state(op)) == PA_OPERATION_RUNNING)
136  pa_mainloop_iterate(ml, 1, NULL);
137  pa_operation_unref(op);
138  if (op_state != PA_OPERATION_DONE) {
139  ret = AVERROR_EXTERNAL;
140  goto fail;
141  }
142 
143  fail:
144  ff_pulse_audio_disconnect_context(&ml, &ctx);
145  if (ret)
146  av_log(s, AV_LOG_ERROR, "pa_context_get_sink_input_info failed.\n");
147  return ret;
148 }
149 
150 static void pulse_event(pa_context *ctx, pa_subscription_event_type_t t,
151  uint32_t idx, void *userdata)
152 {
153  AVFormatContext *h = userdata;
154  PulseData *s = h->priv_data;
155 
156  if (s->ctx != ctx)
157  return;
158 
159  if ((t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SINK_INPUT) {
160  if ((t & PA_SUBSCRIPTION_EVENT_TYPE_MASK) == PA_SUBSCRIPTION_EVENT_CHANGE)
161  // Calling from mainloop callback. No need to lock mainloop.
162  pulse_update_sink_input_info(h);
163  }
164 }
165 
166 static void pulse_stream_writable(pa_stream *stream, size_t nbytes, void *userdata)
167 {
168  AVFormatContext *h = userdata;
169  PulseData *s = h->priv_data;
170  int64_t val = nbytes;
171 
172  if (stream != s->stream)
173  return;
174 
175  avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_WRITABLE, &val, sizeof(val));
176  pa_threaded_mainloop_signal(s->mainloop, 0);
177 }
178 
179 static void pulse_overflow(pa_stream *stream, void *userdata)
180 {
181  AVFormatContext *h = userdata;
182  avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_OVERFLOW, NULL, 0);
183 }
184 
185 static void pulse_underflow(pa_stream *stream, void *userdata)
186 {
187  AVFormatContext *h = userdata;
188  avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_UNDERFLOW, NULL, 0);
189 }
190 
191 static void pulse_stream_state(pa_stream *stream, void *userdata)
192 {
193  PulseData *s = userdata;
194 
195  if (stream != s->stream)
196  return;
197 
198  switch (pa_stream_get_state(s->stream)) {
199  case PA_STREAM_READY:
200  case PA_STREAM_FAILED:
201  case PA_STREAM_TERMINATED:
202  pa_threaded_mainloop_signal(s->mainloop, 0);
203  default:
204  break;
205  }
206 }
207 
208 static int pulse_stream_wait(PulseData *s)
209 {
210  pa_stream_state_t state;
211 
212  while ((state = pa_stream_get_state(s->stream)) != PA_STREAM_READY) {
213  if (state == PA_STREAM_FAILED || state == PA_STREAM_TERMINATED)
214  return AVERROR_EXTERNAL;
215  pa_threaded_mainloop_wait(s->mainloop);
216  }
217  return 0;
218 }
219 
220 static void pulse_context_state(pa_context *ctx, void *userdata)
221 {
222  PulseData *s = userdata;
223 
224  if (s->ctx != ctx)
225  return;
226 
227  switch (pa_context_get_state(ctx)) {
228  case PA_CONTEXT_READY:
229  case PA_CONTEXT_FAILED:
230  case PA_CONTEXT_TERMINATED:
231  pa_threaded_mainloop_signal(s->mainloop, 0);
232  default:
233  break;
234  }
235 }
236 
237 static int pulse_context_wait(PulseData *s)
238 {
239  pa_context_state_t state;
240 
241  while ((state = pa_context_get_state(s->ctx)) != PA_CONTEXT_READY) {
242  if (state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED)
243  return AVERROR_EXTERNAL;
244  pa_threaded_mainloop_wait(s->mainloop);
245  }
246  return 0;
247 }
248 
249 static void pulse_stream_result(pa_stream *stream, int success, void *userdata)
250 {
251  PulseData *s = userdata;
252 
253  if (stream != s->stream)
254  return;
255 
256  s->last_result = success ? 0 : AVERROR_EXTERNAL;
257  pa_threaded_mainloop_signal(s->mainloop, 0);
258 }
259 
260 static int pulse_finish_stream_operation(PulseData *s, pa_operation *op, const char *name)
261 {
262  if (!op) {
263  pa_threaded_mainloop_unlock(s->mainloop);
264  av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
265  return AVERROR_EXTERNAL;
266  }
267  s->last_result = 2;
268  while (s->last_result == 2)
269  pa_threaded_mainloop_wait(s->mainloop);
270  pa_operation_unref(op);
271  pa_threaded_mainloop_unlock(s->mainloop);
272  if (s->last_result != 0)
273  av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
274  return s->last_result;
275 }
276 
277 static int pulse_set_pause(PulseData *s, int pause)
278 {
279  pa_operation *op;
280  pa_threaded_mainloop_lock(s->mainloop);
281  op = pa_stream_cork(s->stream, pause, pulse_stream_result, s);
282  return pulse_finish_stream_operation(s, op, "pa_stream_cork");
283 }
284 
285 static int pulse_flash_stream(PulseData *s)
286 {
287  pa_operation *op;
288  pa_threaded_mainloop_lock(s->mainloop);
289  op = pa_stream_flush(s->stream, pulse_stream_result, s);
290  return pulse_finish_stream_operation(s, op, "pa_stream_flush");
291 }
292 
293 static void pulse_context_result(pa_context *ctx, int success, void *userdata)
294 {
295  PulseData *s = userdata;
296 
297  if (s->ctx != ctx)
298  return;
299 
300  s->last_result = success ? 0 : AVERROR_EXTERNAL;
301  pa_threaded_mainloop_signal(s->mainloop, 0);
302 }
303 
304 static int pulse_finish_context_operation(PulseData *s, pa_operation *op, const char *name)
305 {
306  if (!op) {
307  pa_threaded_mainloop_unlock(s->mainloop);
308  av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
309  return AVERROR_EXTERNAL;
310  }
311  s->last_result = 2;
312  while (s->last_result == 2)
313  pa_threaded_mainloop_wait(s->mainloop);
314  pa_operation_unref(op);
315  pa_threaded_mainloop_unlock(s->mainloop);
316  if (s->last_result != 0)
317  av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
318  return s->last_result;
319 }
320 
321 static int pulse_set_mute(PulseData *s)
322 {
323  pa_operation *op;
324  pa_threaded_mainloop_lock(s->mainloop);
325  op = pa_context_set_sink_input_mute(s->ctx, pa_stream_get_index(s->stream),
326  s->mute, pulse_context_result, s);
327  return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_mute");
328 }
329 
330 static int pulse_set_volume(PulseData *s, double volume)
331 {
332  pa_operation *op;
333  pa_cvolume cvol;
334  pa_volume_t vol;
335  const pa_sample_spec *ss = pa_stream_get_sample_spec(s->stream);
336 
337  vol = pa_sw_volume_multiply(lrint(volume * PA_VOLUME_NORM), s->base_volume);
338  pa_cvolume_set(&cvol, ss->channels, PA_VOLUME_NORM);
339  pa_sw_cvolume_multiply_scalar(&cvol, &cvol, vol);
340  pa_threaded_mainloop_lock(s->mainloop);
341  op = pa_context_set_sink_input_volume(s->ctx, pa_stream_get_index(s->stream),
342  &cvol, pulse_context_result, s);
343  return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_volume");
344 }
345 
346 static int pulse_subscribe_events(PulseData *s)
347 {
348  pa_operation *op;
349 
350  pa_threaded_mainloop_lock(s->mainloop);
351  op = pa_context_subscribe(s->ctx, PA_SUBSCRIPTION_MASK_SINK_INPUT, pulse_context_result, s);
352  return pulse_finish_context_operation(s, op, "pa_context_subscribe");
353 }
354 
355 static void pulse_map_channels_to_pulse(int64_t channel_layout, pa_channel_map *channel_map)
356 {
357  channel_map->channels = 0;
358  if (channel_layout & AV_CH_FRONT_LEFT)
359  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT;
360  if (channel_layout & AV_CH_FRONT_RIGHT)
361  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT;
362  if (channel_layout & AV_CH_FRONT_CENTER)
363  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_CENTER;
364  if (channel_layout & AV_CH_LOW_FREQUENCY)
365  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_LFE;
366  if (channel_layout & AV_CH_BACK_LEFT)
367  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_LEFT;
368  if (channel_layout & AV_CH_BACK_RIGHT)
369  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_RIGHT;
370  if (channel_layout & AV_CH_FRONT_LEFT_OF_CENTER)
371  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER;
372  if (channel_layout & AV_CH_FRONT_RIGHT_OF_CENTER)
373  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER;
374  if (channel_layout & AV_CH_BACK_CENTER)
375  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_CENTER;
376  if (channel_layout & AV_CH_SIDE_LEFT)
377  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_SIDE_LEFT;
378  if (channel_layout & AV_CH_SIDE_RIGHT)
379  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_SIDE_RIGHT;
380  if (channel_layout & AV_CH_TOP_CENTER)
381  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_CENTER;
382  if (channel_layout & AV_CH_TOP_FRONT_LEFT)
383  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_LEFT;
384  if (channel_layout & AV_CH_TOP_FRONT_CENTER)
385  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_CENTER;
386  if (channel_layout & AV_CH_TOP_FRONT_RIGHT)
387  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_RIGHT;
388  if (channel_layout & AV_CH_TOP_BACK_LEFT)
389  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_LEFT;
390  if (channel_layout & AV_CH_TOP_BACK_CENTER)
391  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_CENTER;
392  if (channel_layout & AV_CH_TOP_BACK_RIGHT)
393  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_RIGHT;
394  if (channel_layout & AV_CH_STEREO_LEFT)
395  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT;
396  if (channel_layout & AV_CH_STEREO_RIGHT)
397  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT;
398  if (channel_layout & AV_CH_WIDE_LEFT)
399  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX0;
400  if (channel_layout & AV_CH_WIDE_RIGHT)
401  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX1;
402  if (channel_layout & AV_CH_SURROUND_DIRECT_LEFT)
403  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX2;
404  if (channel_layout & AV_CH_SURROUND_DIRECT_RIGHT)
405  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX3;
406  if (channel_layout & AV_CH_LOW_FREQUENCY_2)
407  channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_LFE;
408 }
409 
410 static av_cold int pulse_write_trailer(AVFormatContext *h)
411 {
412  PulseData *s = h->priv_data;
413 
414  if (s->mainloop) {
415  pa_threaded_mainloop_lock(s->mainloop);
416  if (s->stream) {
417  pa_stream_disconnect(s->stream);
418  pa_stream_set_state_callback(s->stream, NULL, NULL);
419  pa_stream_set_write_callback(s->stream, NULL, NULL);
420  pa_stream_set_overflow_callback(s->stream, NULL, NULL);
421  pa_stream_set_underflow_callback(s->stream, NULL, NULL);
422  pa_stream_unref(s->stream);
423  s->stream = NULL;
424  }
425  if (s->ctx) {
426  pa_context_disconnect(s->ctx);
427  pa_context_set_state_callback(s->ctx, NULL, NULL);
428  pa_context_set_subscribe_callback(s->ctx, NULL, NULL);
429  pa_context_unref(s->ctx);
430  s->ctx = NULL;
431  }
432  pa_threaded_mainloop_unlock(s->mainloop);
433  pa_threaded_mainloop_stop(s->mainloop);
434  pa_threaded_mainloop_free(s->mainloop);
435  s->mainloop = NULL;
436  }
437 
438  return 0;
439 }
440 
441 static av_cold int pulse_write_header(AVFormatContext *h)
442 {
443  PulseData *s = h->priv_data;
444  AVStream *st = NULL;
445  int ret;
446  pa_sample_spec sample_spec;
447  pa_buffer_attr buffer_attributes = { -1, -1, -1, -1, -1 };
448  pa_channel_map channel_map;
449  pa_mainloop_api *mainloop_api;
450  const char *stream_name = s->stream_name;
451  static const pa_stream_flags_t stream_flags = PA_STREAM_INTERPOLATE_TIMING |
452  PA_STREAM_AUTO_TIMING_UPDATE |
453  PA_STREAM_NOT_MONOTONIC;
454 
455  if (h->nb_streams != 1 || h->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
456  av_log(s, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
457  return AVERROR(EINVAL);
458  }
459  st = h->streams[0];
460 
461  if (!stream_name) {
462  if (h->url[0])
463  stream_name = h->url;
464  else
465  stream_name = "Playback";
466  }
467  s->nonblocking = (h->flags & AVFMT_FLAG_NONBLOCK);
468 
469  if (s->buffer_duration) {
470  int64_t bytes = s->buffer_duration;
471  bytes *= st->codecpar->channels * st->codecpar->sample_rate *
472  av_get_bytes_per_sample(st->codecpar->format);
473  bytes /= 1000;
474  buffer_attributes.tlength = FFMAX(s->buffer_size, av_clip64(bytes, 0, UINT32_MAX - 1));
475  av_log(s, AV_LOG_DEBUG,
476  "Buffer duration: %ums recalculated into %"PRId64" bytes buffer.\n",
477  s->buffer_duration, bytes);
478  av_log(s, AV_LOG_DEBUG, "Real buffer length is %u bytes\n", buffer_attributes.tlength);
479  } else if (s->buffer_size)
480  buffer_attributes.tlength = s->buffer_size;
481  if (s->prebuf)
482  buffer_attributes.prebuf = s->prebuf;
483  if (s->minreq)
484  buffer_attributes.minreq = s->minreq;
485 
486  sample_spec.format = ff_codec_id_to_pulse_format(st->codecpar->codec_id);
487  sample_spec.rate = st->codecpar->sample_rate;
488  sample_spec.channels = st->codecpar->channels;
489  if (!pa_sample_spec_valid(&sample_spec)) {
490  av_log(s, AV_LOG_ERROR, "Invalid sample spec.\n");
491  return AVERROR(EINVAL);
492  }
493 
494  if (sample_spec.channels == 1) {
495  channel_map.channels = 1;
496  channel_map.map[0] = PA_CHANNEL_POSITION_MONO;
497  } else if (st->codecpar->channel_layout) {
498  if (av_get_channel_layout_nb_channels(st->codecpar->channel_layout) != st->codecpar->channels)
499  return AVERROR(EINVAL);
500  pulse_map_channels_to_pulse(st->codecpar->channel_layout, &channel_map);
501  /* Unknown channel is present in channel_layout, let PulseAudio use its default. */
502  if (channel_map.channels != sample_spec.channels) {
503  av_log(s, AV_LOG_WARNING, "Unknown channel. Using default channel map.\n");
504  channel_map.channels = 0;
505  }
506  } else
507  channel_map.channels = 0;
508 
509  if (!channel_map.channels)
510  av_log(s, AV_LOG_WARNING, "Using PulseAudio's default channel map.\n");
511  else if (!pa_channel_map_valid(&channel_map)) {
512  av_log(s, AV_LOG_ERROR, "Invalid channel map.\n");
513  return AVERROR(EINVAL);
514  }
515 
516  /* start main loop */
517  s->mainloop = pa_threaded_mainloop_new();
518  if (!s->mainloop) {
519  av_log(s, AV_LOG_ERROR, "Cannot create threaded mainloop.\n");
520  return AVERROR(ENOMEM);
521  }
522  if ((ret = pa_threaded_mainloop_start(s->mainloop)) < 0) {
523  av_log(s, AV_LOG_ERROR, "Cannot start threaded mainloop: %s.\n", pa_strerror(ret));
524  pa_threaded_mainloop_free(s->mainloop);
525  s->mainloop = NULL;
526  return AVERROR_EXTERNAL;
527  }
528 
529  pa_threaded_mainloop_lock(s->mainloop);
530 
531  mainloop_api = pa_threaded_mainloop_get_api(s->mainloop);
532  if (!mainloop_api) {
533  av_log(s, AV_LOG_ERROR, "Cannot get mainloop API.\n");
534  ret = AVERROR_EXTERNAL;
535  goto fail;
536  }
537 
538  s->ctx = pa_context_new(mainloop_api, s->name);
539  if (!s->ctx) {
540  av_log(s, AV_LOG_ERROR, "Cannot create context.\n");
541  ret = AVERROR(ENOMEM);
542  goto fail;
543  }
544  pa_context_set_state_callback(s->ctx, pulse_context_state, s);
545  pa_context_set_subscribe_callback(s->ctx, pulse_event, h);
546 
547  if ((ret = pa_context_connect(s->ctx, s->server, 0, NULL)) < 0) {
548  av_log(s, AV_LOG_ERROR, "Cannot connect context: %s.\n", pa_strerror(ret));
549  ret = AVERROR_EXTERNAL;
550  goto fail;
551  }
552 
553  if ((ret = pulse_context_wait(s)) < 0) {
554  av_log(s, AV_LOG_ERROR, "Context failed.\n");
555  goto fail;
556  }
557 
558  s->stream = pa_stream_new(s->ctx, stream_name, &sample_spec,
559  channel_map.channels ? &channel_map : NULL);
560 
561  if ((ret = pulse_update_sink_info(h)) < 0) {
562  av_log(s, AV_LOG_ERROR, "Updating sink info failed.\n");
563  goto fail;
564  }
565 
566  if (!s->stream) {
567  av_log(s, AV_LOG_ERROR, "Cannot create stream.\n");
568  ret = AVERROR(ENOMEM);
569  goto fail;
570  }
571  pa_stream_set_state_callback(s->stream, pulse_stream_state, s);
572  pa_stream_set_write_callback(s->stream, pulse_stream_writable, h);
573  pa_stream_set_overflow_callback(s->stream, pulse_overflow, h);
574  pa_stream_set_underflow_callback(s->stream, pulse_underflow, h);
575 
576  if ((ret = pa_stream_connect_playback(s->stream, s->device, &buffer_attributes,
577  stream_flags, NULL, NULL)) < 0) {
578  av_log(s, AV_LOG_ERROR, "pa_stream_connect_playback failed: %s.\n", pa_strerror(ret));
579  ret = AVERROR_EXTERNAL;
580  goto fail;
581  }
582 
583  if ((ret = pulse_stream_wait(s)) < 0) {
584  av_log(s, AV_LOG_ERROR, "Stream failed.\n");
585  goto fail;
586  }
587 
588  /* read back buffer attributes for future use */
589  buffer_attributes = *pa_stream_get_buffer_attr(s->stream);
590  s->buffer_size = buffer_attributes.tlength;
591  s->prebuf = buffer_attributes.prebuf;
592  s->minreq = buffer_attributes.minreq;
593  av_log(s, AV_LOG_DEBUG, "Real buffer attributes: size: %d, prebuf: %d, minreq: %d\n",
594  s->buffer_size, s->prebuf, s->minreq);
595 
596  pa_threaded_mainloop_unlock(s->mainloop);
597 
598  if ((ret = pulse_subscribe_events(s)) < 0) {
599  av_log(s, AV_LOG_ERROR, "Event subscription failed.\n");
600  /* a bit ugly but the simplest way to lock here */
601  pa_threaded_mainloop_lock(s->mainloop);
602  goto fail;
603  }
604 
605  /* force control messages */
606  s->mute = -1;
607  s->last_volume = PA_VOLUME_INVALID;
608  pa_threaded_mainloop_lock(s->mainloop);
609  if ((ret = pulse_update_sink_input_info(h)) < 0) {
610  av_log(s, AV_LOG_ERROR, "Updating sink input info failed.\n");
611  goto fail;
612  }
613  pa_threaded_mainloop_unlock(s->mainloop);
614 
615  avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
616 
617  return 0;
618  fail:
619  pa_threaded_mainloop_unlock(s->mainloop);
620  pulse_write_trailer(h);
621  return ret;
622 }
623 
624 static int pulse_write_packet(AVFormatContext *h, AVPacket *pkt)
625 {
626  PulseData *s = h->priv_data;
627  int ret;
628  int64_t writable_size;
629 
630  if (!pkt)
631  return pulse_flash_stream(s);
632 
633  if (pkt->dts != AV_NOPTS_VALUE)
634  s->timestamp = pkt->dts;
635 
636  if (pkt->duration) {
637  s->timestamp += pkt->duration;
638  } else {
639  AVStream *st = h->streams[0];
640  AVRational r = { 1, st->codecpar->sample_rate };
641  int64_t samples = pkt->size / (av_get_bytes_per_sample(st->codecpar->format) * st->codecpar->channels);
642  s->timestamp += av_rescale_q(samples, r, st->time_base);
643  }
644 
645  pa_threaded_mainloop_lock(s->mainloop);
646  if (!PA_STREAM_IS_GOOD(pa_stream_get_state(s->stream))) {
647  av_log(s, AV_LOG_ERROR, "PulseAudio stream is in invalid state.\n");
648  goto fail;
649  }
650  while (pa_stream_writable_size(s->stream) < s->minreq) {
651  if (s->nonblocking) {
652  pa_threaded_mainloop_unlock(s->mainloop);
653  return AVERROR(EAGAIN);
654  } else
655  pa_threaded_mainloop_wait(s->mainloop);
656  }
657 
658  if ((ret = pa_stream_write(s->stream, pkt->data, pkt->size, NULL, 0, PA_SEEK_RELATIVE)) < 0) {
659  av_log(s, AV_LOG_ERROR, "pa_stream_write failed: %s\n", pa_strerror(ret));
660  goto fail;
661  }
662  if ((writable_size = pa_stream_writable_size(s->stream)) >= s->minreq)
663  avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_WRITABLE, &writable_size, sizeof(writable_size));
664 
665  pa_threaded_mainloop_unlock(s->mainloop);
666 
667  return 0;
668  fail:
669  pa_threaded_mainloop_unlock(s->mainloop);
670  return AVERROR_EXTERNAL;
671 }
672 
673 static int pulse_write_frame(AVFormatContext *h, int stream_index,
674  AVFrame **frame, unsigned flags)
675 {
676  AVPacket pkt;
677 
678  /* Planar formats are not supported yet. */
679  if (flags & AV_WRITE_UNCODED_FRAME_QUERY)
680  return av_sample_fmt_is_planar(h->streams[stream_index]->codecpar->format) ?
681  AVERROR(EINVAL) : 0;
682 
683  pkt.data = (*frame)->data[0];
684  pkt.size = (*frame)->nb_samples * av_get_bytes_per_sample((*frame)->format) * (*frame)->channels;
685  pkt.dts = (*frame)->pkt_dts;
686  pkt.duration = (*frame)->pkt_duration;
687  return pulse_write_packet(h, &pkt);
688 }
689 
690 
691 static void pulse_get_output_timestamp(AVFormatContext *h, int stream, int64_t *dts, int64_t *wall)
692 {
693  PulseData *s = h->priv_data;
694  pa_usec_t latency;
695  int neg;
696  pa_threaded_mainloop_lock(s->mainloop);
697  pa_stream_get_latency(s->stream, &latency, &neg);
698  pa_threaded_mainloop_unlock(s->mainloop);
699  if (wall)
700  *wall = av_gettime();
701  if (dts)
702  *dts = s->timestamp - (neg ? -latency : latency);
703 }
704 
705 static int pulse_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
706 {
707  PulseData *s = h->priv_data;
708  return ff_pulse_audio_get_devices(device_list, s->server, 1);
709 }
710 
711 static int pulse_control_message(AVFormatContext *h, int type,
712  void *data, size_t data_size)
713 {
714  PulseData *s = h->priv_data;
715  int ret;
716 
717  switch(type) {
718  case AV_APP_TO_DEV_PAUSE:
719  return pulse_set_pause(s, 1);
720  case AV_APP_TO_DEV_PLAY:
721  return pulse_set_pause(s, 0);
722  case AV_APP_TO_DEV_TOGGLE_PAUSE:
723  return pulse_set_pause(s, !pa_stream_is_corked(s->stream));
724  case AV_APP_TO_DEV_MUTE:
725  if (!s->mute) {
726  s->mute = 1;
727  return pulse_set_mute(s);
728  }
729  return 0;
730  case AV_APP_TO_DEV_UNMUTE:
731  if (s->mute) {
732  s->mute = 0;
733  return pulse_set_mute(s);
734  }
735  return 0;
736  case AV_APP_TO_DEV_TOGGLE_MUTE:
737  s->mute = !s->mute;
738  return pulse_set_mute(s);
739  case AV_APP_TO_DEV_SET_VOLUME:
740  return pulse_set_volume(s, *(double *)data);
741  case AV_APP_TO_DEV_GET_VOLUME:
742  s->last_volume = PA_VOLUME_INVALID;
743  pa_threaded_mainloop_lock(s->mainloop);
744  ret = pulse_update_sink_input_info(h);
745  pa_threaded_mainloop_unlock(s->mainloop);
746  return ret;
747  case AV_APP_TO_DEV_GET_MUTE:
748  s->mute = -1;
749  pa_threaded_mainloop_lock(s->mainloop);
750  ret = pulse_update_sink_input_info(h);
751  pa_threaded_mainloop_unlock(s->mainloop);
752  return ret;
753  default:
754  break;
755  }
756  return AVERROR(ENOSYS);
757 }
758 
759 #define OFFSET(a) offsetof(PulseData, a)
760 #define E AV_OPT_FLAG_ENCODING_PARAM
761 static const AVOption options[] = {
762  { "server", "set PulseAudio server", OFFSET(server), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
763  { "name", "set application name", OFFSET(name), AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0, E },
764  { "stream_name", "set stream description", OFFSET(stream_name), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
765  { "device", "set device name", OFFSET(device), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
766  { "buffer_size", "set buffer size in bytes", OFFSET(buffer_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
767  { "buffer_duration", "set buffer duration in millisecs", OFFSET(buffer_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
768  { "prebuf", "set pre-buffering size", OFFSET(prebuf), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
769  { "minreq", "set minimum request size", OFFSET(minreq), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
770  { NULL }
771 };
772 
773 static const AVClass pulse_muxer_class = {
774  .class_name = "PulseAudio outdev",
775  .item_name = av_default_item_name,
776  .option = options,
777  .version = LIBAVUTIL_VERSION_INT,
778  .category = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
779 };
780 
781 AVOutputFormat ff_pulse_muxer = {
782  .name = "pulse",
783  .long_name = NULL_IF_CONFIG_SMALL("Pulse audio output"),
784  .priv_data_size = sizeof(PulseData),
785  .audio_codec = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
786  .video_codec = AV_CODEC_ID_NONE,
787  .write_header = pulse_write_header,
788  .write_packet = pulse_write_packet,
789  .write_uncoded_frame = pulse_write_frame,
790  .write_trailer = pulse_write_trailer,
791  .get_output_timestamp = pulse_get_output_timestamp,
792  .get_device_list = pulse_get_device_list,
793  .control_message = pulse_control_message,
794  .flags = AVFMT_NOFILE | AVFMT_ALLOW_FLUSH,
795  .priv_class = &pulse_muxer_class,
796 };
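
As a minimal usage sketch, an application might drive this muxer through the control-message interface implemented by pulse_control_message() above. The format name "pulse", the private options (name, buffer_duration) and the AV_APP_TO_DEV_* message types are taken from the listing; the helper names open_pulse_sink() and set_playback_volume(), the option values and the overall structure are illustrative only, with error handling abbreviated.

/*
 * Minimal sketch: open the "pulse" output device, set two of its private
 * options, and change playback volume via the app-to-dev control-message
 * interface handled by pulse_control_message(). Helper names are
 * hypothetical; error handling is abbreviated.
 */
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>

static int open_pulse_sink(AVFormatContext **octx)
{
    int ret;

    avdevice_register_all();

    /* Select the muxer by its name "pulse"; the URL ("Playback" here)
       becomes the default stream description in pulse_write_header(). */
    ret = avformat_alloc_output_context2(octx, NULL, "pulse", "Playback");
    if (ret < 0)
        return ret;

    /* Private options from the options[] table above. */
    av_opt_set((*octx)->priv_data, "name", "my-app", 0);
    av_opt_set_int((*octx)->priv_data, "buffer_duration", 50, 0); /* ms */

    /* The caller still has to add a single packed-PCM audio stream and
       call avformat_write_header() before sending packets. */
    return 0;
}

static void set_playback_volume(AVFormatContext *octx, double volume)
{
    /* Routed to pulse_set_volume() via pulse_control_message(). */
    avdevice_app_to_dev_control_message(octx, AV_APP_TO_DEV_SET_VOLUME,
                                        &volume, sizeof(volume));
}

From the shell, the same muxer is typically reached with something like "ffmpeg -i input.wav -f pulse <stream name>", which goes through the pulse_write_header()/pulse_write_packet() path shown in the listing.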