/*
 * Copyright (c) 2013 Lukasz Marek <lukasz.m.luki@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <math.h>
#include <pulse/pulseaudio.h>
#include <pulse/error.h>
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/log.h"
#include "libavutil/attributes.h"
#include "pulse_audio_common.h"

typedef struct PulseData {
    AVClass *class;
    const char *server;
    const char *name;
    const char *stream_name;
    const char *device;
    int64_t timestamp;
    int buffer_size;                  /**< Buffer size in bytes */
    int buffer_duration;              /**< Buffer size in ms, recalculated to buffer_size */
    int last_result;
    pa_threaded_mainloop *mainloop;
    pa_context *ctx;
    pa_stream *stream;
    int nonblocking;
    int mute;
    pa_volume_t base_volume;
    pa_volume_t last_volume;
} PulseData;
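
/* Sink info callback: remembers the sink's base volume (or PA_VOLUME_NORM for
 * sinks without flat volumes) and signals the mainloop once enumeration ends. */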
static void pulse_audio_sink_device_cb(pa_context *ctx, const pa_sink_info *dev,
                                       int eol, void *userdata)
{
    PulseData *s = userdata;

    if (s->ctx != ctx)
        return;

    if (eol) {
        pa_threaded_mainloop_signal(s->mainloop, 0);
    } else {
        if (dev->flags & PA_SINK_FLAT_VOLUME)
            s->base_volume = dev->base_volume;
        else
            s->base_volume = PA_VOLUME_NORM;
        av_log(s, AV_LOG_DEBUG, "base volume: %u\n", s->base_volume);
    }
}

/* Mainloop must be locked before calling this function as it uses pa_threaded_mainloop_wait. */
static int pulse_update_sink_info(AVFormatContext *h)
{
    PulseData *s = h->priv_data;
    pa_operation *op;

    if (!(op = pa_context_get_sink_info_by_name(s->ctx, s->device,
                                                pulse_audio_sink_device_cb, s))) {
        av_log(s, AV_LOG_ERROR, "pa_context_get_sink_info_by_name failed.\n");
        return AVERROR_EXTERNAL;
    }
    while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
        pa_threaded_mainloop_wait(s->mainloop);
    pa_operation_unref(op);
    return 0;
}
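
/* Sink input info callback: compares the server-side mute and volume with the
 * cached values and forwards any change to the application as a device-to-app
 * control message. */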
static void pulse_audio_sink_input_cb(pa_context *ctx, const pa_sink_input_info *i,
                                      int eol, void *userdata)
{
    AVFormatContext *h = userdata;
    PulseData *s = h->priv_data;

    if (s->ctx != ctx)
        return;

    if (!eol) {
        double val;
        pa_volume_t vol = pa_cvolume_avg(&i->volume);
        if (s->mute < 0 || (s->mute && !i->mute) || (!s->mute && i->mute)) {
            s->mute = i->mute;
            avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_MUTE_STATE_CHANGED, &s->mute, sizeof(s->mute));
        }

        vol = pa_sw_volume_divide(vol, s->base_volume);
        if (s->last_volume != vol) {
            val = (double)vol / PA_VOLUME_NORM;
            avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED, &val, sizeof(val));
            s->last_volume = vol;
        }
    }
}

/* This function creates a new loop, so it may be called from PA callbacks.
   Mainloop must be locked before calling this function as it operates on streams. */
static int pulse_update_sink_input_info(AVFormatContext *h)
{
    PulseData *s = h->priv_data;
    pa_operation *op;
    enum pa_operation_state op_state;
    pa_mainloop *ml = NULL;
    pa_context *ctx = NULL;
    int ret = 0;

    if ((ret = ff_pulse_audio_connect_context(&ml, &ctx, s->server, "Update sink input information")) < 0)
        return ret;

    if (!(op = pa_context_get_sink_input_info(ctx, pa_stream_get_index(s->stream),
                                              pulse_audio_sink_input_cb, h))) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

    while ((op_state = pa_operation_get_state(op)) == PA_OPERATION_RUNNING)
        pa_mainloop_iterate(ml, 1, NULL);
    pa_operation_unref(op);
    if (op_state != PA_OPERATION_DONE) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

  fail:
    ff_pulse_audio_disconnect_context(&ml, &ctx);
    if (ret)
        av_log(s, AV_LOG_ERROR, "pa_context_get_sink_input_info failed.\n");

    return ret;
}
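
/* Context subscription callback: a change event on a sink input (for example an
 * external volume or mute change) triggers a refresh of the sink input info. */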
static void pulse_event(pa_context *ctx, pa_subscription_event_type_t t,
                        uint32_t idx, void *userdata)
{
    AVFormatContext *h = userdata;
    PulseData *s = h->priv_data;

    if (s->ctx != ctx)
        return;

    if ((t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SINK_INPUT) {
        if ((t & PA_SUBSCRIPTION_EVENT_TYPE_MASK) == PA_SUBSCRIPTION_EVENT_CHANGE)
            // Calling from mainloop callback. No need to lock mainloop.
            pulse_update_sink_input_info(h);
    }
}
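
/* Stream write callback: reports the writable byte count to the application
 * and wakes up a writer waiting in pulse_write_packet(). */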
static void pulse_stream_writable(pa_stream *stream, size_t nbytes, void *userdata)
{
    AVFormatContext *h = userdata;
    PulseData *s = h->priv_data;
    int64_t val = nbytes;

    if (stream != s->stream)
        return;

    avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_WRITABLE, &val, sizeof(val));
    pa_threaded_mainloop_signal(s->mainloop, 0);
}

static void pulse_overflow(pa_stream *stream, void *userdata)
{
    AVFormatContext *h = userdata;
    avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_OVERFLOW, NULL, 0);
}

static void pulse_underflow(pa_stream *stream, void *userdata)
{
    AVFormatContext *h = userdata;
    avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_UNDERFLOW, NULL, 0);
}
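
/* The stream and context state callbacks below only wake the mainloop; the
 * matching *_wait() helpers loop until READY or fail on FAILED/TERMINATED. */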
static void pulse_stream_state(pa_stream *stream, void *userdata)
{
    PulseData *s = userdata;

    if (stream != s->stream)
        return;

    switch (pa_stream_get_state(s->stream)) {
        case PA_STREAM_READY:
        case PA_STREAM_FAILED:
        case PA_STREAM_TERMINATED:
            pa_threaded_mainloop_signal(s->mainloop, 0);
        default:
            break;
    }
}

static int pulse_stream_wait(PulseData *s)
{
    pa_stream_state_t state;

    while ((state = pa_stream_get_state(s->stream)) != PA_STREAM_READY) {
        if (state == PA_STREAM_FAILED || state == PA_STREAM_TERMINATED)
            return AVERROR_EXTERNAL;
        pa_threaded_mainloop_wait(s->mainloop);
    }
    return 0;
}

static void pulse_context_state(pa_context *ctx, void *userdata)
{
    PulseData *s = userdata;

    if (s->ctx != ctx)
        return;

    switch (pa_context_get_state(ctx)) {
        case PA_CONTEXT_READY:
        case PA_CONTEXT_FAILED:
        case PA_CONTEXT_TERMINATED:
            pa_threaded_mainloop_signal(s->mainloop, 0);
        default:
            break;
    }
}

static int pulse_context_wait(PulseData *s)
{
    pa_context_state_t state;

    while ((state = pa_context_get_state(s->ctx)) != PA_CONTEXT_READY) {
        if (state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED)
            return AVERROR_EXTERNAL;
        pa_threaded_mainloop_wait(s->mainloop);
    }
    return 0;
}
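
/* Operation result callbacks store the outcome in last_result; the
 * pulse_finish_*_operation() helpers wait for it and then release the
 * mainloop lock taken by their callers. */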
static void pulse_stream_result(pa_stream *stream, int success, void *userdata)
{
    PulseData *s = userdata;

    if (stream != s->stream)
        return;

    s->last_result = success ? 0 : AVERROR_EXTERNAL;
    pa_threaded_mainloop_signal(s->mainloop, 0);
}

static int pulse_finish_stream_operation(PulseData *s, pa_operation *op, const char *name)
{
    if (!op) {
        pa_threaded_mainloop_unlock(s->mainloop);
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
        return AVERROR_EXTERNAL;
    }
    s->last_result = 2;
    while (s->last_result == 2)
        pa_threaded_mainloop_wait(s->mainloop);
    pa_operation_unref(op);
    pa_threaded_mainloop_unlock(s->mainloop);
    if (s->last_result != 0)
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
    return s->last_result;
}

static int pulse_set_pause(PulseData *s, int pause)
{
    pa_operation *op;
    pa_threaded_mainloop_lock(s->mainloop);
    op = pa_stream_cork(s->stream, pause, pulse_stream_result, s);
    return pulse_finish_stream_operation(s, op, "pa_stream_cork");
}

static int pulse_flash_stream(PulseData *s)
{
    pa_operation *op;
    pa_threaded_mainloop_lock(s->mainloop);
    op = pa_stream_flush(s->stream, pulse_stream_result, s);
    return pulse_finish_stream_operation(s, op, "pa_stream_flush");
}

static void pulse_context_result(pa_context *ctx, int success, void *userdata)
{
    PulseData *s = userdata;

    if (s->ctx != ctx)
        return;

    s->last_result = success ? 0 : AVERROR_EXTERNAL;
    pa_threaded_mainloop_signal(s->mainloop, 0);
}

static int pulse_finish_context_operation(PulseData *s, pa_operation *op, const char *name)
{
    if (!op) {
        pa_threaded_mainloop_unlock(s->mainloop);
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
        return AVERROR_EXTERNAL;
    }
    s->last_result = 2;
    while (s->last_result == 2)
        pa_threaded_mainloop_wait(s->mainloop);
    pa_operation_unref(op);
    pa_threaded_mainloop_unlock(s->mainloop);
    if (s->last_result != 0)
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
    return s->last_result;
}
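
/* Apply the cached mute flag or a new volume to this stream's sink input.
 * Volumes are software volumes scaled by base_volume, so 1.0 corresponds to
 * the sink's base level. */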
static int pulse_set_mute(PulseData *s)
{
    pa_operation *op;
    pa_threaded_mainloop_lock(s->mainloop);
    op = pa_context_set_sink_input_mute(s->ctx, pa_stream_get_index(s->stream),
                                        s->mute, pulse_context_result, s);
    return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_mute");
}

static int pulse_set_volume(PulseData *s, double volume)
{
    pa_operation *op;
    pa_cvolume cvol;
    pa_volume_t vol;
    const pa_sample_spec *ss = pa_stream_get_sample_spec(s->stream);

    vol = pa_sw_volume_multiply(lround(volume * PA_VOLUME_NORM), s->base_volume);
    pa_cvolume_set(&cvol, ss->channels, PA_VOLUME_NORM);
    pa_sw_cvolume_multiply_scalar(&cvol, &cvol, vol);
    pa_threaded_mainloop_lock(s->mainloop);
    op = pa_context_set_sink_input_volume(s->ctx, pa_stream_get_index(s->stream),
                                          &cvol, pulse_context_result, s);
    return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_volume");
}

static int pulse_subscribe_events(PulseData *s)
{
    pa_operation *op;
    pa_threaded_mainloop_lock(s->mainloop);
    op = pa_context_subscribe(s->ctx, PA_SUBSCRIPTION_MASK_SINK_INPUT, pulse_context_result, s);
    return pulse_finish_context_operation(s, op, "pa_context_subscribe");
}
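
/* Translate an FFmpeg channel layout bitmask into a PulseAudio channel map,
 * appending one position per set bit in the order tested below. */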
static void pulse_map_channels_to_pulse(int64_t channel_layout, pa_channel_map *channel_map)
{
    channel_map->channels = 0;
    if (channel_layout & AV_CH_FRONT_LEFT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT;
    if (channel_layout & AV_CH_FRONT_RIGHT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT;
    if (channel_layout & AV_CH_FRONT_CENTER)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_CENTER;
    if (channel_layout & AV_CH_LOW_FREQUENCY)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_LFE;
    if (channel_layout & AV_CH_BACK_LEFT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_LEFT;
    if (channel_layout & AV_CH_BACK_RIGHT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_RIGHT;
    if (channel_layout & AV_CH_FRONT_LEFT_OF_CENTER)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER;
    if (channel_layout & AV_CH_FRONT_RIGHT_OF_CENTER)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER;
    if (channel_layout & AV_CH_BACK_CENTER)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_CENTER;
    if (channel_layout & AV_CH_SIDE_LEFT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_SIDE_LEFT;
    if (channel_layout & AV_CH_SIDE_RIGHT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_SIDE_RIGHT;
    if (channel_layout & AV_CH_TOP_CENTER)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_CENTER;
    if (channel_layout & AV_CH_TOP_FRONT_LEFT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_LEFT;
    if (channel_layout & AV_CH_TOP_FRONT_CENTER)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_CENTER;
    if (channel_layout & AV_CH_TOP_FRONT_RIGHT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_RIGHT;
    if (channel_layout & AV_CH_TOP_BACK_LEFT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_LEFT;
    if (channel_layout & AV_CH_TOP_BACK_CENTER)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_CENTER;
    if (channel_layout & AV_CH_TOP_BACK_RIGHT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_RIGHT;
    if (channel_layout & AV_CH_STEREO_LEFT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT;
    if (channel_layout & AV_CH_STEREO_RIGHT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT;
    if (channel_layout & AV_CH_WIDE_LEFT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX0;
    if (channel_layout & AV_CH_WIDE_RIGHT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX1;
    if (channel_layout & AV_CH_SURROUND_DIRECT_LEFT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX2;
    if (channel_layout & AV_CH_SURROUND_DIRECT_RIGHT)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX3;
    if (channel_layout & AV_CH_LOW_FREQUENCY_2)
        channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_LFE;
}
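
/* Tear down in reverse order of creation: stream, context, then the threaded
 * mainloop. Also used as the error path of pulse_write_header(). */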
static av_cold int pulse_write_trailer(AVFormatContext *h)
{
    PulseData *s = h->priv_data;

    if (s->mainloop) {
        pa_threaded_mainloop_lock(s->mainloop);
        if (s->stream) {
            pa_stream_disconnect(s->stream);
            pa_stream_set_state_callback(s->stream, NULL, NULL);
            pa_stream_set_write_callback(s->stream, NULL, NULL);
            pa_stream_set_overflow_callback(s->stream, NULL, NULL);
            pa_stream_set_underflow_callback(s->stream, NULL, NULL);
            pa_stream_unref(s->stream);
            s->stream = NULL;
        }
        if (s->ctx) {
            pa_context_disconnect(s->ctx);
            pa_context_set_state_callback(s->ctx, NULL, NULL);
            pa_context_set_subscribe_callback(s->ctx, NULL, NULL);
            pa_context_unref(s->ctx);
            s->ctx = NULL;
        }
        pa_threaded_mainloop_unlock(s->mainloop);
        pa_threaded_mainloop_stop(s->mainloop);
        pa_threaded_mainloop_free(s->mainloop);
        s->mainloop = NULL;
    }

    return 0;
}
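
/* Validate the single audio stream, build the sample spec and channel map,
 * start the threaded mainloop, connect the context and the playback stream,
 * and subscribe to sink input events before declaring the muxer ready. */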
static av_cold int pulse_write_header(AVFormatContext *h)
{
    PulseData *s = h->priv_data;
    AVStream *st = NULL;
    int ret;
    pa_sample_spec sample_spec;
    pa_buffer_attr buffer_attributes = { -1, -1, -1, -1, -1 };
    pa_channel_map channel_map;
    pa_mainloop_api *mainloop_api;
    const char *stream_name = s->stream_name;
    static const pa_stream_flags_t stream_flags = PA_STREAM_INTERPOLATE_TIMING |
                                                  PA_STREAM_AUTO_TIMING_UPDATE |
                                                  PA_STREAM_NOT_MONOTONIC;

    if (h->nb_streams != 1 || h->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
        av_log(s, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
        return AVERROR(EINVAL);
    }
    st = h->streams[0];

    if (!stream_name) {
        if (h->filename[0])
            stream_name = h->filename;
        else
            stream_name = "Playback";
    }
    s->nonblocking = (h->flags & AVFMT_FLAG_NONBLOCK);

    if (s->buffer_duration) {
        int64_t bytes = s->buffer_duration;
        bytes *= st->codec->channels * st->codec->sample_rate *
                 av_get_bytes_per_sample(st->codec->sample_fmt);
        bytes /= 1000;
        buffer_attributes.tlength = FFMAX(s->buffer_size, av_clip64(bytes, 0, UINT32_MAX - 1));
        av_log(s, AV_LOG_DEBUG,
               "Buffer duration: %ums recalculated into %"PRId64" bytes buffer.\n",
               s->buffer_duration, bytes);
        av_log(s, AV_LOG_DEBUG, "Real buffer length is %u bytes\n", buffer_attributes.tlength);
    } else if (s->buffer_size)
        buffer_attributes.tlength = s->buffer_size;

    sample_spec.format = ff_codec_id_to_pulse_format(st->codec->codec_id);
    sample_spec.rate = st->codec->sample_rate;
    sample_spec.channels = st->codec->channels;
    if (!pa_sample_spec_valid(&sample_spec)) {
        av_log(s, AV_LOG_ERROR, "Invalid sample spec.\n");
        return AVERROR(EINVAL);
    }

    if (sample_spec.channels == 1) {
        channel_map.channels = 1;
        channel_map.map[0] = PA_CHANNEL_POSITION_MONO;
    } else if (st->codec->channel_layout) {
        if (av_get_channel_layout_nb_channels(st->codec->channel_layout) != st->codec->channels)
            return AVERROR(EINVAL);
        pulse_map_channels_to_pulse(st->codec->channel_layout, &channel_map);
        /* Unknown channel is present in channel_layout, let PulseAudio use its default. */
        if (channel_map.channels != sample_spec.channels) {
            av_log(s, AV_LOG_WARNING, "Unknown channel. Using default channel map.\n");
            channel_map.channels = 0;
        }
    } else
        channel_map.channels = 0;

    if (!channel_map.channels)
        av_log(s, AV_LOG_WARNING, "Using PulseAudio's default channel map.\n");
    else if (!pa_channel_map_valid(&channel_map)) {
        av_log(s, AV_LOG_ERROR, "Invalid channel map.\n");
        return AVERROR(EINVAL);
    }

    /* start main loop */
    s->mainloop = pa_threaded_mainloop_new();
    if (!s->mainloop) {
        av_log(s, AV_LOG_ERROR, "Cannot create threaded mainloop.\n");
        return AVERROR(ENOMEM);
    }
    if ((ret = pa_threaded_mainloop_start(s->mainloop)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot start threaded mainloop: %s.\n", pa_strerror(ret));
        pa_threaded_mainloop_free(s->mainloop);
        s->mainloop = NULL;
        return AVERROR_EXTERNAL;
    }

    pa_threaded_mainloop_lock(s->mainloop);

    mainloop_api = pa_threaded_mainloop_get_api(s->mainloop);
    if (!mainloop_api) {
        av_log(s, AV_LOG_ERROR, "Cannot get mainloop API.\n");
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

    s->ctx = pa_context_new(mainloop_api, s->name);
    if (!s->ctx) {
        av_log(s, AV_LOG_ERROR, "Cannot create context.\n");
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    pa_context_set_state_callback(s->ctx, pulse_context_state, s);
    pa_context_set_subscribe_callback(s->ctx, pulse_event, h);

    if ((ret = pa_context_connect(s->ctx, s->server, 0, NULL)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot connect context: %s.\n", pa_strerror(ret));
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

    if ((ret = pulse_context_wait(s)) < 0) {
        av_log(s, AV_LOG_ERROR, "Context failed.\n");
        goto fail;
    }

    s->stream = pa_stream_new(s->ctx, stream_name, &sample_spec,
                              channel_map.channels ? &channel_map : NULL);

    if ((ret = pulse_update_sink_info(h)) < 0) {
        av_log(s, AV_LOG_ERROR, "Updating sink info failed.\n");
        goto fail;
    }

    if (!s->stream) {
        av_log(s, AV_LOG_ERROR, "Cannot create stream.\n");
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    pa_stream_set_state_callback(s->stream, pulse_stream_state, s);
    pa_stream_set_write_callback(s->stream, pulse_stream_writable, h);
    pa_stream_set_overflow_callback(s->stream, pulse_overflow, h);
    pa_stream_set_underflow_callback(s->stream, pulse_underflow, h);

    if ((ret = pa_stream_connect_playback(s->stream, s->device, &buffer_attributes,
                                          stream_flags, NULL, NULL)) < 0) {
        av_log(s, AV_LOG_ERROR, "pa_stream_connect_playback failed: %s.\n", pa_strerror(ret));
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

    if ((ret = pulse_stream_wait(s)) < 0) {
        av_log(s, AV_LOG_ERROR, "Stream failed.\n");
        goto fail;
    }

    pa_threaded_mainloop_unlock(s->mainloop);
    if ((ret = pulse_subscribe_events(s)) < 0) {
        av_log(s, AV_LOG_ERROR, "Event subscription failed.\n");
        /* a bit ugly but the simplest to lock here */
        pa_threaded_mainloop_lock(s->mainloop);
        goto fail;
    }

    /* force control messages */
    s->mute = -1;
    s->last_volume = PA_VOLUME_INVALID;
    pa_threaded_mainloop_lock(s->mainloop);
    if ((ret = pulse_update_sink_input_info(h)) < 0) {
        av_log(s, AV_LOG_ERROR, "Updating sink input info failed.\n");
        goto fail;
    }
    pa_threaded_mainloop_unlock(s->mainloop);

    avpriv_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */

    return 0;
  fail:
    pa_threaded_mainloop_unlock(s->mainloop);
    pulse_write_trailer(h);
    return ret;
}
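
/* Write one packet of interleaved samples; a NULL packet flushes the stream.
 * The output timestamp is advanced by the packet duration (or by a duration
 * derived from the sample count) and the call blocks until the stream is
 * writable unless AVFMT_FLAG_NONBLOCK was requested. */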
static int pulse_write_packet(AVFormatContext *h, AVPacket *pkt)
{
    PulseData *s = h->priv_data;
    int ret;

    if (!pkt)
        return pulse_flash_stream(s);

    if (pkt->dts != AV_NOPTS_VALUE)
        s->timestamp = pkt->dts;

    if (pkt->duration) {
        s->timestamp += pkt->duration;
    } else {
        AVStream *st = h->streams[0];
        AVCodecContext *codec_ctx = st->codec;
        AVRational r = { 1, codec_ctx->sample_rate };
        int64_t samples = pkt->size / (av_get_bytes_per_sample(codec_ctx->sample_fmt) * codec_ctx->channels);
        s->timestamp += av_rescale_q(samples, r, st->time_base);
    }

    pa_threaded_mainloop_lock(s->mainloop);
    if (!PA_STREAM_IS_GOOD(pa_stream_get_state(s->stream))) {
        av_log(s, AV_LOG_ERROR, "PulseAudio stream is in invalid state.\n");
        goto fail;
    }
    while (!pa_stream_writable_size(s->stream)) {
        if (s->nonblocking) {
            pa_threaded_mainloop_unlock(s->mainloop);
            return AVERROR(EAGAIN);
        } else
            pa_threaded_mainloop_wait(s->mainloop);
    }

    if ((ret = pa_stream_write(s->stream, pkt->data, pkt->size, NULL, 0, PA_SEEK_RELATIVE)) < 0) {
        av_log(s, AV_LOG_ERROR, "pa_stream_write failed: %s\n", pa_strerror(ret));
        goto fail;
    }
    pa_threaded_mainloop_unlock(s->mainloop);
    return 0;
  fail:
    pa_threaded_mainloop_unlock(s->mainloop);
    return AVERROR_EXTERNAL;
}
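
/* Uncoded frame callback: wraps the frame's interleaved data buffer in a
 * temporary AVPacket and reuses pulse_write_packet(). Planar formats are
 * rejected at query time. */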
static int pulse_write_frame(AVFormatContext *h, int stream_index,
                             AVFrame **frame, unsigned flags)
{
    AVPacket pkt;

    /* Planar formats are not supported yet. */
    if (flags & AV_WRITE_UNCODED_FRAME_QUERY)
        return av_sample_fmt_is_planar(h->streams[stream_index]->codec->sample_fmt) ?
               AVERROR(EINVAL) : 0;

    pkt.data     = (*frame)->data[0];
    pkt.size     = (*frame)->nb_samples * av_get_bytes_per_sample((*frame)->format) * (*frame)->channels;
    pkt.dts      = (*frame)->pkt_dts;
    pkt.duration = av_frame_get_pkt_duration(*frame);
    return pulse_write_packet(h, &pkt);
}
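
/* Report the timestamp of the sample currently being played: the last written
 * timestamp corrected by the (possibly negative) stream latency. */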
static void pulse_get_output_timestamp(AVFormatContext *h, int stream, int64_t *dts, int64_t *wall)
{
    PulseData *s = h->priv_data;
    pa_usec_t latency;
    int neg;
    pa_threaded_mainloop_lock(s->mainloop);
    pa_stream_get_latency(s->stream, &latency, &neg);
    pa_threaded_mainloop_unlock(s->mainloop);
    *wall = av_gettime();
    *dts = s->timestamp - (neg ? -latency : latency);
}

static int pulse_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
{
    PulseData *s = h->priv_data;
    return ff_pulse_audio_get_devices(device_list, s->server, 1);
}
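
/* Dispatch application-to-device control messages: pause/play/cork toggling,
 * mute and volume updates, and GET queries that force a sink input refresh. */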
static int pulse_control_message(AVFormatContext *h, int type,
                                 void *data, size_t data_size)
{
    PulseData *s = h->priv_data;
    int ret;

    switch(type) {
    case AV_APP_TO_DEV_PAUSE:
        return pulse_set_pause(s, 1);
    case AV_APP_TO_DEV_PLAY:
        return pulse_set_pause(s, 0);
    case AV_APP_TO_DEV_TOGGLE_PAUSE:
        return pulse_set_pause(s, !pa_stream_is_corked(s->stream));
    case AV_APP_TO_DEV_MUTE:
        if (!s->mute) {
            s->mute = 1;
            return pulse_set_mute(s);
        }
        return 0;
    case AV_APP_TO_DEV_UNMUTE:
        if (s->mute) {
            s->mute = 0;
            return pulse_set_mute(s);
        }
        return 0;
    case AV_APP_TO_DEV_TOGGLE_MUTE:
        s->mute = !s->mute;
        return pulse_set_mute(s);
    case AV_APP_TO_DEV_SET_VOLUME:
        return pulse_set_volume(s, *(double *)data);
    case AV_APP_TO_DEV_GET_VOLUME:
        s->last_volume = PA_VOLUME_INVALID;
        pa_threaded_mainloop_lock(s->mainloop);
        ret = pulse_update_sink_input_info(h);
        pa_threaded_mainloop_unlock(s->mainloop);
        return ret;
    case AV_APP_TO_DEV_GET_MUTE:
        s->mute = -1;
        pa_threaded_mainloop_lock(s->mainloop);
        ret = pulse_update_sink_input_info(h);
        pa_threaded_mainloop_unlock(s->mainloop);
        return ret;
    default:
        break;
    }
    return AVERROR(ENOSYS);
}

#define OFFSET(a) offsetof(PulseData, a)
#define E AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "server",          "set PulseAudio server",             OFFSET(server),          AV_OPT_TYPE_STRING, {.str = NULL},              0, 0,       E },
    { "name",            "set application name",              OFFSET(name),            AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0,       E },
    { "stream_name",     "set stream description",            OFFSET(stream_name),     AV_OPT_TYPE_STRING, {.str = NULL},              0, 0,       E },
    { "device",          "set device name",                   OFFSET(device),          AV_OPT_TYPE_STRING, {.str = NULL},              0, 0,       E },
    { "buffer_size",     "set buffer size in bytes",          OFFSET(buffer_size),     AV_OPT_TYPE_INT,    {.i64 = 0},                 0, INT_MAX, E },
    { "buffer_duration", "set buffer duration in millisecs",  OFFSET(buffer_duration), AV_OPT_TYPE_INT,    {.i64 = 0},                 0, INT_MAX, E },
    { NULL }
};
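
/* A minimal usage sketch (assumed typical invocation, not taken from this file):
 * the private options above are passed as output options, e.g.
 *
 *   ffmpeg -i input.wav -f pulse -device <sink_name> "My Stream"
 *
 * where the output "filename" becomes the stream description unless
 * -stream_name is given, and -device selects a specific PulseAudio sink. */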

static const AVClass pulse_muxer_class = {
    .class_name     = "PulseAudio muxer",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
    .category       = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
};

AVOutputFormat ff_pulse_muxer = {
    .name                 = "pulse",
    .long_name            = NULL_IF_CONFIG_SMALL("Pulse audio output"),
    .priv_data_size       = sizeof(PulseData),
    .audio_codec          = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
    .video_codec          = AV_CODEC_ID_NONE,
    .write_header         = pulse_write_header,
    .write_packet         = pulse_write_packet,
    .write_uncoded_frame  = pulse_write_frame,
    .write_trailer        = pulse_write_trailer,
    .get_output_timestamp = pulse_get_output_timestamp,
    .get_device_list      = pulse_get_device_list,
    .control_message      = pulse_control_message,
    .flags                = AVFMT_NOFILE | AVFMT_ALLOW_FLUSH,
    .priv_class           = &pulse_muxer_class,
};