You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

761 lines
27KB

  1. /*
  2. * Copyright (c) 2013 Lukasz Marek <lukasz.m.luki@gmail.com>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <math.h>
  21. #include <pulse/pulseaudio.h>
  22. #include <pulse/error.h>
  23. #include "libavformat/avformat.h"
  24. #include "libavformat/internal.h"
  25. #include "libavutil/opt.h"
  26. #include "libavutil/time.h"
  27. #include "libavutil/log.h"
  28. #include "libavutil/attributes.h"
  29. #include "pulse_audio_common.h"
/**
 * Private state of the PulseAudio output device.
 */
typedef struct PulseData {
    AVClass *class;                  /**< class for AVOptions; must be first */
    const char *server;              /**< PulseAudio server address, NULL = default */
    const char *name;                /**< application name reported to the server */
    const char *stream_name;         /**< stream description; falls back to filename or "Playback" */
    const char *device;              /**< sink (device) name, NULL = server default */
    int64_t timestamp;               /**< playback position in stream time_base units (us, see avpriv_set_pts_info) */
    int buffer_size; /**< Buffer size in bytes */
    int buffer_duration; /**< Buffer size in ms, recalculated to buffer_size */
    int last_result;                 /**< result of last PA operation; 2 is the "still pending" sentinel */
    pa_threaded_mainloop *mainloop;  /**< threaded mainloop driving all PA callbacks */
    pa_context *ctx;                 /**< connection context to the PA server */
    pa_stream *stream;               /**< playback stream */
    int nonblocking;                 /**< nonzero when AVFMT_FLAG_NONBLOCK was set */
    int mute;                        /**< mute state: -1 = unknown/force notification, 0 = unmuted, 1 = muted */
    pa_volume_t base_volume;         /**< sink base volume, used to scale software volume */
    pa_volume_t last_volume;         /**< last volume reported to the application */
} PulseData;
  48. static void pulse_audio_sink_device_cb(pa_context *ctx, const pa_sink_info *dev,
  49. int eol, void *userdata)
  50. {
  51. PulseData *s = userdata;
  52. if (s->ctx != ctx)
  53. return;
  54. if (eol) {
  55. pa_threaded_mainloop_signal(s->mainloop, 0);
  56. } else {
  57. if (dev->flags & PA_SINK_FLAT_VOLUME)
  58. s->base_volume = dev->base_volume;
  59. else
  60. s->base_volume = PA_VOLUME_NORM;
  61. av_log(s, AV_LOG_DEBUG, "base volume: %u\n", s->base_volume);
  62. }
  63. }
  64. /* Mainloop must be locked before calling this function as it uses pa_threaded_mainloop_wait. */
  65. static int pulse_update_sink_info(AVFormatContext *h)
  66. {
  67. PulseData *s = h->priv_data;
  68. pa_operation *op;
  69. if (!(op = pa_context_get_sink_info_by_name(s->ctx, s->device,
  70. pulse_audio_sink_device_cb, s))) {
  71. av_log(s, AV_LOG_ERROR, "pa_context_get_sink_info_by_name failed.\n");
  72. return AVERROR_EXTERNAL;
  73. }
  74. while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
  75. pa_threaded_mainloop_wait(s->mainloop);
  76. pa_operation_unref(op);
  77. return 0;
  78. }
/* Sink input info callback: forwards mute and volume changes of our stream
 * to the application as device-to-app control messages. */
static void pulse_audio_sink_input_cb(pa_context *ctx, const pa_sink_input_info *i,
                                      int eol, void *userdata)
{
    AVFormatContext *h = userdata;
    PulseData *s = h->priv_data;

    if (s->ctx != ctx)
        return;

    if (!eol) {
        double val;
        pa_volume_t vol = pa_cvolume_avg(&i->volume);
        /* s->mute < 0 means "unknown, force a notification"; otherwise only
           notify when the (truthiness of the) mute state actually changed. */
        if (s->mute < 0 || (s->mute && !i->mute) || (!s->mute && i->mute)) {
            s->mute = i->mute;
            avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_MUTE_STATE_CHANGED, &s->mute, sizeof(s->mute));
        }
        /* Report volume relative to the sink's base volume, scaled to [0,1]
           at PA_VOLUME_NORM. */
        vol = pa_sw_volume_divide(vol, s->base_volume);
        if (s->last_volume != vol) {
            val = (double)vol / PA_VOLUME_NORM;
            avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED, &val, sizeof(val));
            s->last_volume = vol;
        }
    }
}
/* Query our sink input's state (mute/volume) and report changes via
 * pulse_audio_sink_input_cb.
 * This function creates a new mainloop+context so it may be called from PA
 * callbacks of the main context without deadlocking.
 * Mainloop must be locked before calling this function as it operates on streams. */
static int pulse_update_sink_input_info(AVFormatContext *h)
{
    PulseData *s = h->priv_data;
    pa_operation *op;
    enum pa_operation_state op_state;
    pa_mainloop *ml = NULL;
    pa_context *ctx = NULL;
    int ret = 0;

    if ((ret = ff_pulse_audio_connect_context(&ml, &ctx, s->server, "Update sink input information")) < 0)
        return ret;

    if (!(op = pa_context_get_sink_input_info(ctx, pa_stream_get_index(s->stream),
                                              pulse_audio_sink_input_cb, h))) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

    /* Drive the private mainloop until the query completes. */
    while ((op_state = pa_operation_get_state(op)) == PA_OPERATION_RUNNING)
        pa_mainloop_iterate(ml, 1, NULL);
    pa_operation_unref(op);

    if (op_state != PA_OPERATION_DONE) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

  fail:
    /* Success also falls through here; ret == 0 skips the error log. */
    ff_pulse_audio_disconnect_context(&ml, &ctx);
    if (ret)
        av_log(s, AV_LOG_ERROR, "pa_context_get_sink_input_info failed.\n");
    return ret;
}
  131. static void pulse_event(pa_context *ctx, pa_subscription_event_type_t t,
  132. uint32_t idx, void *userdata)
  133. {
  134. AVFormatContext *h = userdata;
  135. PulseData *s = h->priv_data;
  136. if (s->ctx != ctx)
  137. return;
  138. if ((t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SINK_INPUT) {
  139. if ((t & PA_SUBSCRIPTION_EVENT_TYPE_MASK) == PA_SUBSCRIPTION_EVENT_CHANGE)
  140. // Calling from mainloop callback. No need to lock mainloop.
  141. pulse_update_sink_input_info(h);
  142. }
  143. }
  144. static void pulse_stream_writable(pa_stream *stream, size_t nbytes, void *userdata)
  145. {
  146. AVFormatContext *h = userdata;
  147. PulseData *s = h->priv_data;
  148. int64_t val = nbytes;
  149. if (stream != s->stream)
  150. return;
  151. avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_WRITABLE, &val, sizeof(val));
  152. pa_threaded_mainloop_signal(s->mainloop, 0);
  153. }
  154. static void pulse_overflow(pa_stream *stream, void *userdata)
  155. {
  156. AVFormatContext *h = userdata;
  157. avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_OVERFLOW, NULL, 0);
  158. }
  159. static void pulse_underflow(pa_stream *stream, void *userdata)
  160. {
  161. AVFormatContext *h = userdata;
  162. avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_UNDERFLOW, NULL, 0);
  163. }
  164. static void pulse_stream_state(pa_stream *stream, void *userdata)
  165. {
  166. PulseData *s = userdata;
  167. if (stream != s->stream)
  168. return;
  169. switch (pa_stream_get_state(s->stream)) {
  170. case PA_STREAM_READY:
  171. case PA_STREAM_FAILED:
  172. case PA_STREAM_TERMINATED:
  173. pa_threaded_mainloop_signal(s->mainloop, 0);
  174. default:
  175. break;
  176. }
  177. }
  178. static int pulse_stream_wait(PulseData *s)
  179. {
  180. pa_stream_state_t state;
  181. while ((state = pa_stream_get_state(s->stream)) != PA_STREAM_READY) {
  182. if (state == PA_STREAM_FAILED || state == PA_STREAM_TERMINATED)
  183. return AVERROR_EXTERNAL;
  184. pa_threaded_mainloop_wait(s->mainloop);
  185. }
  186. return 0;
  187. }
  188. static void pulse_context_state(pa_context *ctx, void *userdata)
  189. {
  190. PulseData *s = userdata;
  191. if (s->ctx != ctx)
  192. return;
  193. switch (pa_context_get_state(ctx)) {
  194. case PA_CONTEXT_READY:
  195. case PA_CONTEXT_FAILED:
  196. case PA_CONTEXT_TERMINATED:
  197. pa_threaded_mainloop_signal(s->mainloop, 0);
  198. default:
  199. break;
  200. }
  201. }
  202. static int pulse_context_wait(PulseData *s)
  203. {
  204. pa_context_state_t state;
  205. while ((state = pa_context_get_state(s->ctx)) != PA_CONTEXT_READY) {
  206. if (state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED)
  207. return AVERROR_EXTERNAL;
  208. pa_threaded_mainloop_wait(s->mainloop);
  209. }
  210. return 0;
  211. }
  212. static void pulse_stream_result(pa_stream *stream, int success, void *userdata)
  213. {
  214. PulseData *s = userdata;
  215. if (stream != s->stream)
  216. return;
  217. s->last_result = success ? 0 : AVERROR_EXTERNAL;
  218. pa_threaded_mainloop_signal(s->mainloop, 0);
  219. }
/* Wait for a stream operation whose callback is pulse_stream_result and
 * release the mainloop lock, which the caller must hold on entry (the lock
 * is released on every path, including op == NULL).
 * Returns 0 on success, AVERROR_EXTERNAL on failure. */
static int pulse_finish_stream_operation(PulseData *s, pa_operation *op, const char *name)
{
    if (!op) {
        pa_threaded_mainloop_unlock(s->mainloop);
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
        return AVERROR_EXTERNAL;
    }
    /* 2 is a sentinel meaning "pending"; the callback overwrites it with
       0 or AVERROR_EXTERNAL and signals the mainloop. */
    s->last_result = 2;
    while (s->last_result == 2)
        pa_threaded_mainloop_wait(s->mainloop);
    pa_operation_unref(op);
    pa_threaded_mainloop_unlock(s->mainloop);
    if (s->last_result != 0)
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
    return s->last_result;
}
/* Flush all buffered audio from the playback stream ("flash" is a
 * historical misspelling, kept to avoid churn). Takes the mainloop lock;
 * pulse_finish_stream_operation() releases it. */
static int pulse_flash_stream(PulseData *s)
{
    pa_operation *op;

    pa_threaded_mainloop_lock(s->mainloop);
    op = pa_stream_flush(s->stream, pulse_stream_result, s);
    return pulse_finish_stream_operation(s, op, "pa_stream_flush");
}
  243. static void pulse_context_result(pa_context *ctx, int success, void *userdata)
  244. {
  245. PulseData *s = userdata;
  246. if (s->ctx != ctx)
  247. return;
  248. s->last_result = success ? 0 : AVERROR_EXTERNAL;
  249. pa_threaded_mainloop_signal(s->mainloop, 0);
  250. }
/* Wait for a context operation whose callback is pulse_context_result and
 * release the mainloop lock, which the caller must hold on entry (the lock
 * is released on every path, including op == NULL).
 * Returns 0 on success, AVERROR_EXTERNAL on failure. */
static int pulse_finish_context_operation(PulseData *s, pa_operation *op, const char *name)
{
    if (!op) {
        pa_threaded_mainloop_unlock(s->mainloop);
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
        return AVERROR_EXTERNAL;
    }
    /* 2 is a sentinel meaning "pending"; the callback overwrites it with
       0 or AVERROR_EXTERNAL and signals the mainloop. */
    s->last_result = 2;
    while (s->last_result == 2)
        pa_threaded_mainloop_wait(s->mainloop);
    pa_operation_unref(op);
    pa_threaded_mainloop_unlock(s->mainloop);
    if (s->last_result != 0)
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
    return s->last_result;
}
  267. static int pulse_set_mute(PulseData *s)
  268. {
  269. pa_operation *op;
  270. pa_threaded_mainloop_lock(s->mainloop);
  271. op = pa_context_set_sink_input_mute(s->ctx, pa_stream_get_index(s->stream),
  272. s->mute, pulse_context_result, s);
  273. return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_mute");
  274. }
  275. static int pulse_set_volume(PulseData *s, double volume)
  276. {
  277. pa_operation *op;
  278. pa_cvolume cvol;
  279. pa_volume_t vol;
  280. const pa_sample_spec *ss = pa_stream_get_sample_spec(s->stream);
  281. vol = pa_sw_volume_multiply(lround(volume * PA_VOLUME_NORM), s->base_volume);
  282. pa_cvolume_set(&cvol, ss->channels, PA_VOLUME_NORM);
  283. pa_sw_cvolume_multiply_scalar(&cvol, &cvol, vol);
  284. pa_threaded_mainloop_lock(s->mainloop);
  285. op = pa_context_set_sink_input_volume(s->ctx, pa_stream_get_index(s->stream),
  286. &cvol, pulse_context_result, s);
  287. return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_volume");
  288. }
  289. static int pulse_subscribe_events(PulseData *s)
  290. {
  291. pa_operation *op;
  292. pa_threaded_mainloop_lock(s->mainloop);
  293. op = pa_context_subscribe(s->ctx, PA_SUBSCRIPTION_MASK_SINK_INPUT, pulse_context_result, s);
  294. return pulse_finish_context_operation(s, op, "pa_context_subscribe");
  295. }
  296. static void pulse_map_channels_to_pulse(int64_t channel_layout, pa_channel_map *channel_map)
  297. {
  298. channel_map->channels = 0;
  299. if (channel_layout & AV_CH_FRONT_LEFT)
  300. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT;
  301. if (channel_layout & AV_CH_FRONT_RIGHT)
  302. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT;
  303. if (channel_layout & AV_CH_FRONT_CENTER)
  304. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_CENTER;
  305. if (channel_layout & AV_CH_LOW_FREQUENCY)
  306. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_LFE;
  307. if (channel_layout & AV_CH_BACK_LEFT)
  308. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_LEFT;
  309. if (channel_layout & AV_CH_BACK_RIGHT)
  310. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_RIGHT;
  311. if (channel_layout & AV_CH_FRONT_LEFT_OF_CENTER)
  312. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER;
  313. if (channel_layout & AV_CH_FRONT_RIGHT_OF_CENTER)
  314. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER;
  315. if (channel_layout & AV_CH_BACK_CENTER)
  316. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_CENTER;
  317. if (channel_layout & AV_CH_SIDE_LEFT)
  318. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_SIDE_LEFT;
  319. if (channel_layout & AV_CH_SIDE_RIGHT)
  320. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_SIDE_RIGHT;
  321. if (channel_layout & AV_CH_TOP_CENTER)
  322. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_CENTER;
  323. if (channel_layout & AV_CH_TOP_FRONT_LEFT)
  324. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_LEFT;
  325. if (channel_layout & AV_CH_TOP_FRONT_CENTER)
  326. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_CENTER;
  327. if (channel_layout & AV_CH_TOP_FRONT_RIGHT)
  328. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_RIGHT;
  329. if (channel_layout & AV_CH_TOP_BACK_LEFT)
  330. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_LEFT;
  331. if (channel_layout & AV_CH_TOP_BACK_CENTER)
  332. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_CENTER;
  333. if (channel_layout & AV_CH_TOP_BACK_RIGHT)
  334. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_RIGHT;
  335. if (channel_layout & AV_CH_STEREO_LEFT)
  336. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT;
  337. if (channel_layout & AV_CH_STEREO_RIGHT)
  338. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT;
  339. if (channel_layout & AV_CH_WIDE_LEFT)
  340. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX0;
  341. if (channel_layout & AV_CH_WIDE_RIGHT)
  342. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX1;
  343. if (channel_layout & AV_CH_SURROUND_DIRECT_LEFT)
  344. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX2;
  345. if (channel_layout & AV_CH_SURROUND_DIRECT_RIGHT)
  346. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX3;
  347. if (channel_layout & AV_CH_LOW_FREQUENCY_2)
  348. channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_LFE;
  349. }
/* Tear down stream, context and threaded mainloop, in that order.
 * Safe to call on a partially initialized state and more than once
 * (everything is NULL-checked and NULLed) — it doubles as the cleanup
 * path of pulse_write_header(). Callbacks are detached before unref so
 * they cannot fire on freed state. */
static av_cold int pulse_write_trailer(AVFormatContext *h)
{
    PulseData *s = h->priv_data;

    if (s->mainloop) {
        pa_threaded_mainloop_lock(s->mainloop);
        if (s->stream) {
            pa_stream_disconnect(s->stream);
            pa_stream_set_state_callback(s->stream, NULL, NULL);
            pa_stream_set_write_callback(s->stream, NULL, NULL);
            pa_stream_set_overflow_callback(s->stream, NULL, NULL);
            pa_stream_set_underflow_callback(s->stream, NULL, NULL);
            pa_stream_unref(s->stream);
            s->stream = NULL;
        }
        if (s->ctx) {
            pa_context_disconnect(s->ctx);
            pa_context_set_state_callback(s->ctx, NULL, NULL);
            pa_context_set_subscribe_callback(s->ctx, NULL, NULL);
            pa_context_unref(s->ctx);
            s->ctx = NULL;
        }
        pa_threaded_mainloop_unlock(s->mainloop);
        pa_threaded_mainloop_stop(s->mainloop);
        pa_threaded_mainloop_free(s->mainloop);
        s->mainloop = NULL;
    }
    return 0;
}
  378. static av_cold int pulse_write_header(AVFormatContext *h)
  379. {
  380. PulseData *s = h->priv_data;
  381. AVStream *st = NULL;
  382. int ret;
  383. pa_sample_spec sample_spec;
  384. pa_buffer_attr buffer_attributes = { -1, -1, -1, -1, -1 };
  385. pa_channel_map channel_map;
  386. pa_mainloop_api *mainloop_api;
  387. const char *stream_name = s->stream_name;
  388. static const pa_stream_flags_t stream_flags = PA_STREAM_INTERPOLATE_TIMING |
  389. PA_STREAM_AUTO_TIMING_UPDATE |
  390. PA_STREAM_NOT_MONOTONIC;
  391. if (h->nb_streams != 1 || h->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
  392. av_log(s, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
  393. return AVERROR(EINVAL);
  394. }
  395. st = h->streams[0];
  396. if (!stream_name) {
  397. if (h->filename[0])
  398. stream_name = h->filename;
  399. else
  400. stream_name = "Playback";
  401. }
  402. s->nonblocking = (h->flags & AVFMT_FLAG_NONBLOCK);
  403. if (s->buffer_duration) {
  404. int64_t bytes = s->buffer_duration;
  405. bytes *= st->codec->channels * st->codec->sample_rate *
  406. av_get_bytes_per_sample(st->codec->sample_fmt);
  407. bytes /= 1000;
  408. buffer_attributes.tlength = FFMAX(s->buffer_size, av_clip64(bytes, 0, UINT32_MAX - 1));
  409. av_log(s, AV_LOG_DEBUG,
  410. "Buffer duration: %ums recalculated into %"PRId64" bytes buffer.\n",
  411. s->buffer_duration, bytes);
  412. av_log(s, AV_LOG_DEBUG, "Real buffer length is %u bytes\n", buffer_attributes.tlength);
  413. } else if (s->buffer_size)
  414. buffer_attributes.tlength = s->buffer_size;
  415. sample_spec.format = ff_codec_id_to_pulse_format(st->codec->codec_id);
  416. sample_spec.rate = st->codec->sample_rate;
  417. sample_spec.channels = st->codec->channels;
  418. if (!pa_sample_spec_valid(&sample_spec)) {
  419. av_log(s, AV_LOG_ERROR, "Invalid sample spec.\n");
  420. return AVERROR(EINVAL);
  421. }
  422. if (sample_spec.channels == 1) {
  423. channel_map.channels = 1;
  424. channel_map.map[0] = PA_CHANNEL_POSITION_MONO;
  425. } else if (st->codec->channel_layout) {
  426. if (av_get_channel_layout_nb_channels(st->codec->channel_layout) != st->codec->channels)
  427. return AVERROR(EINVAL);
  428. pulse_map_channels_to_pulse(st->codec->channel_layout, &channel_map);
  429. /* Unknown channel is present in channel_layout, let PulseAudio use its default. */
  430. if (channel_map.channels != sample_spec.channels) {
  431. av_log(s, AV_LOG_WARNING, "Unknown channel. Using defaul channel map.\n");
  432. channel_map.channels = 0;
  433. }
  434. } else
  435. channel_map.channels = 0;
  436. if (!channel_map.channels)
  437. av_log(s, AV_LOG_WARNING, "Using PulseAudio's default channel map.\n");
  438. else if (!pa_channel_map_valid(&channel_map)) {
  439. av_log(s, AV_LOG_ERROR, "Invalid channel map.\n");
  440. return AVERROR(EINVAL);
  441. }
  442. /* start main loop */
  443. s->mainloop = pa_threaded_mainloop_new();
  444. if (!s->mainloop) {
  445. av_log(s, AV_LOG_ERROR, "Cannot create threaded mainloop.\n");
  446. return AVERROR(ENOMEM);
  447. }
  448. if ((ret = pa_threaded_mainloop_start(s->mainloop)) < 0) {
  449. av_log(s, AV_LOG_ERROR, "Cannot start threaded mainloop: %s.\n", pa_strerror(ret));
  450. pa_threaded_mainloop_free(s->mainloop);
  451. s->mainloop = NULL;
  452. return AVERROR_EXTERNAL;
  453. }
  454. pa_threaded_mainloop_lock(s->mainloop);
  455. mainloop_api = pa_threaded_mainloop_get_api(s->mainloop);
  456. if (!mainloop_api) {
  457. av_log(s, AV_LOG_ERROR, "Cannot get mainloop API.\n");
  458. ret = AVERROR_EXTERNAL;
  459. goto fail;
  460. }
  461. s->ctx = pa_context_new(mainloop_api, s->name);
  462. if (!s->ctx) {
  463. av_log(s, AV_LOG_ERROR, "Cannot create context.\n");
  464. ret = AVERROR(ENOMEM);
  465. goto fail;
  466. }
  467. pa_context_set_state_callback(s->ctx, pulse_context_state, s);
  468. pa_context_set_subscribe_callback(s->ctx, pulse_event, h);
  469. if ((ret = pa_context_connect(s->ctx, s->server, 0, NULL)) < 0) {
  470. av_log(s, AV_LOG_ERROR, "Cannot connect context: %s.\n", pa_strerror(ret));
  471. ret = AVERROR_EXTERNAL;
  472. goto fail;
  473. }
  474. if ((ret = pulse_context_wait(s)) < 0) {
  475. av_log(s, AV_LOG_ERROR, "Context failed.\n");
  476. goto fail;
  477. }
  478. s->stream = pa_stream_new(s->ctx, stream_name, &sample_spec,
  479. channel_map.channels ? &channel_map : NULL);
  480. if ((ret = pulse_update_sink_info(h)) < 0) {
  481. av_log(s, AV_LOG_ERROR, "Updating sink info failed.\n");
  482. goto fail;
  483. }
  484. if (!s->stream) {
  485. av_log(s, AV_LOG_ERROR, "Cannot create stream.\n");
  486. ret = AVERROR(ENOMEM);
  487. goto fail;
  488. }
  489. pa_stream_set_state_callback(s->stream, pulse_stream_state, s);
  490. pa_stream_set_write_callback(s->stream, pulse_stream_writable, h);
  491. pa_stream_set_overflow_callback(s->stream, pulse_overflow, h);
  492. pa_stream_set_underflow_callback(s->stream, pulse_underflow, h);
  493. if ((ret = pa_stream_connect_playback(s->stream, s->device, &buffer_attributes,
  494. stream_flags, NULL, NULL)) < 0) {
  495. av_log(s, AV_LOG_ERROR, "pa_stream_connect_playback failed: %s.\n", pa_strerror(ret));
  496. ret = AVERROR_EXTERNAL;
  497. goto fail;
  498. }
  499. if ((ret = pulse_stream_wait(s)) < 0) {
  500. av_log(s, AV_LOG_ERROR, "Stream failed.\n");
  501. goto fail;
  502. }
  503. pa_threaded_mainloop_unlock(s->mainloop);
  504. if ((ret = pulse_subscribe_events(s)) < 0) {
  505. av_log(s, AV_LOG_ERROR, "Event subscription failed.\n");
  506. /* a bit ugly but the simplest to lock here*/
  507. pa_threaded_mainloop_lock(s->mainloop);
  508. goto fail;
  509. }
  510. /* force control messages */
  511. s->mute = -1;
  512. s->last_volume = PA_VOLUME_INVALID;
  513. pa_threaded_mainloop_lock(s->mainloop);
  514. if ((ret = pulse_update_sink_input_info(h)) < 0) {
  515. av_log(s, AV_LOG_ERROR, "Updating sink input info failed.\n");
  516. goto fail;
  517. }
  518. pa_threaded_mainloop_unlock(s->mainloop);
  519. avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
  520. return 0;
  521. fail:
  522. pa_threaded_mainloop_unlock(s->mainloop);
  523. pulse_write_trailer(h);
  524. return ret;
  525. }
/* Write one packet of interleaved PCM to the playback stream.
 * A NULL packet flushes the stream (AVFMT_ALLOW_FLUSH contract).
 * Advances s->timestamp by the packet duration, deriving it from the
 * sample count when the packet carries none.
 * Returns 0, AVERROR(EAGAIN) in non-blocking mode when the server buffer
 * is full, or AVERROR_EXTERNAL on stream errors. */
static int pulse_write_packet(AVFormatContext *h, AVPacket *pkt)
{
    PulseData *s = h->priv_data;
    int ret;

    if (!pkt)
        return pulse_flash_stream(s);

    if (pkt->dts != AV_NOPTS_VALUE)
        s->timestamp = pkt->dts;

    if (pkt->duration) {
        s->timestamp += pkt->duration;
    } else {
        /* No duration given: compute it from the number of samples. */
        AVStream *st = h->streams[0];
        AVCodecContext *codec_ctx = st->codec;
        AVRational r = { 1, codec_ctx->sample_rate };
        int64_t samples = pkt->size / (av_get_bytes_per_sample(codec_ctx->sample_fmt) * codec_ctx->channels);
        s->timestamp += av_rescale_q(samples, r, st->time_base);
    }

    pa_threaded_mainloop_lock(s->mainloop);
    if (!PA_STREAM_IS_GOOD(pa_stream_get_state(s->stream))) {
        av_log(s, AV_LOG_ERROR, "PulseAudio stream is in invalid state.\n");
        goto fail;
    }
    /* Wait until the server accepts data; pulse_stream_writable() signals
       the mainloop when space becomes available. */
    while (!pa_stream_writable_size(s->stream)) {
        if (s->nonblocking) {
            pa_threaded_mainloop_unlock(s->mainloop);
            return AVERROR(EAGAIN);
        } else
            pa_threaded_mainloop_wait(s->mainloop);
    }

    if ((ret = pa_stream_write(s->stream, pkt->data, pkt->size, NULL, 0, PA_SEEK_RELATIVE)) < 0) {
        av_log(s, AV_LOG_ERROR, "pa_stream_write failed: %s\n", pa_strerror(ret));
        goto fail;
    }
    pa_threaded_mainloop_unlock(s->mainloop);

    return 0;
  fail:
    pa_threaded_mainloop_unlock(s->mainloop);
    return AVERROR_EXTERNAL;
}
/* Write an uncoded frame by wrapping its interleaved data buffer in a
 * temporary AVPacket and delegating to pulse_write_packet().
 * With AV_WRITE_UNCODED_FRAME_QUERY set, only reports whether the sample
 * format is supported (planar formats are rejected). */
static int pulse_write_frame(AVFormatContext *h, int stream_index,
                             AVFrame **frame, unsigned flags)
{
    AVPacket pkt;

    /* Planar formats are not supported yet. */
    if (flags & AV_WRITE_UNCODED_FRAME_QUERY)
        return av_sample_fmt_is_planar(h->streams[stream_index]->codec->sample_fmt) ?
               AVERROR(EINVAL) : 0;

    /* Only the fields pulse_write_packet() reads are initialized. */
    pkt.data     = (*frame)->data[0];
    pkt.size     = (*frame)->nb_samples * av_get_bytes_per_sample((*frame)->format) * (*frame)->channels;
    pkt.dts      = (*frame)->pkt_dts;
    pkt.duration = av_frame_get_pkt_duration(*frame);
    return pulse_write_packet(h, &pkt);
}
  579. static void pulse_get_output_timestamp(AVFormatContext *h, int stream, int64_t *dts, int64_t *wall)
  580. {
  581. PulseData *s = h->priv_data;
  582. pa_usec_t latency;
  583. int neg;
  584. pa_threaded_mainloop_lock(s->mainloop);
  585. pa_stream_get_latency(s->stream, &latency, &neg);
  586. pa_threaded_mainloop_unlock(s->mainloop);
  587. *wall = av_gettime();
  588. *dts = s->timestamp - (neg ? -latency : latency);
  589. }
  590. static int pulse_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
  591. {
  592. PulseData *s = h->priv_data;
  593. return ff_pulse_audio_get_devices(device_list, s->server, 1);
  594. }
  595. static int pulse_control_message(AVFormatContext *h, int type,
  596. void *data, size_t data_size)
  597. {
  598. PulseData *s = h->priv_data;
  599. int ret;
  600. switch(type) {
  601. case AV_APP_TO_DEV_MUTE:
  602. if (!s->mute) {
  603. s->mute = 1;
  604. return pulse_set_mute(s);
  605. }
  606. return 0;
  607. case AV_APP_TO_DEV_UNMUTE:
  608. if (s->mute) {
  609. s->mute = 0;
  610. return pulse_set_mute(s);
  611. }
  612. return 0;
  613. case AV_APP_TO_DEV_TOGGLE_MUTE:
  614. s->mute = !s->mute;
  615. return pulse_set_mute(s);
  616. case AV_APP_TO_DEV_SET_VOLUME:
  617. return pulse_set_volume(s, *(double *)data);
  618. case AV_APP_TO_DEV_GET_VOLUME:
  619. s->last_volume = PA_VOLUME_INVALID;
  620. pa_threaded_mainloop_lock(s->mainloop);
  621. ret = pulse_update_sink_input_info(h);
  622. pa_threaded_mainloop_unlock(s->mainloop);
  623. return ret;
  624. case AV_APP_TO_DEV_GET_MUTE:
  625. s->mute = -1;
  626. pa_threaded_mainloop_lock(s->mainloop);
  627. ret = pulse_update_sink_input_info(h);
  628. pa_threaded_mainloop_unlock(s->mainloop);
  629. return ret;
  630. default:
  631. break;
  632. }
  633. return AVERROR(ENOSYS);
  634. }
#define OFFSET(a) offsetof(PulseData, a)
#define E AV_OPT_FLAG_ENCODING_PARAM
/* User-settable AVOptions; defaults leave everything to PulseAudio. */
static const AVOption options[] = {
    { "server",          "set PulseAudio server",             OFFSET(server),          AV_OPT_TYPE_STRING, {.str = NULL},              0, 0,       E },
    { "name",            "set application name",              OFFSET(name),            AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0,       E },
    { "stream_name",     "set stream description",            OFFSET(stream_name),     AV_OPT_TYPE_STRING, {.str = NULL},              0, 0,       E },
    { "device",          "set device name",                   OFFSET(device),          AV_OPT_TYPE_STRING, {.str = NULL},              0, 0,       E },
    { "buffer_size",     "set buffer size in bytes",          OFFSET(buffer_size),     AV_OPT_TYPE_INT,    {.i64 = 0},                 0, INT_MAX, E },
    { "buffer_duration", "set buffer duration in millisecs",  OFFSET(buffer_duration), AV_OPT_TYPE_INT,    {.i64 = 0},                 0, INT_MAX, E },
    { NULL }
};
/* AVClass tying the options above to PulseData::class. */
static const AVClass pulse_muxer_class = {
    .class_name = "PulseAudio muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
};
/* Muxer definition: audio-only, native-endian s16 PCM by default,
 * no file output (AVFMT_NOFILE) and flushable via NULL packets. */
AVOutputFormat ff_pulse_muxer = {
    .name                 = "pulse",
    .long_name            = NULL_IF_CONFIG_SMALL("Pulse audio output"),
    .priv_data_size       = sizeof(PulseData),
    .audio_codec          = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
    .video_codec          = AV_CODEC_ID_NONE,
    .write_header         = pulse_write_header,
    .write_packet         = pulse_write_packet,
    .write_uncoded_frame  = pulse_write_frame,
    .write_trailer        = pulse_write_trailer,
    .get_output_timestamp = pulse_get_output_timestamp,
    .get_device_list      = pulse_get_device_list,
    .control_message      = pulse_control_message,
    .flags                = AVFMT_NOFILE | AVFMT_ALLOW_FLUSH,
    .priv_class           = &pulse_muxer_class,
};