You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

884 lines
27KB

  1. /*
  2. * AviSynth(+) support
  3. * Copyright (c) 2012 AvxSynth Team
  4. *
  5. * This file is part of FFmpeg
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavutil/attributes.h"
  22. #include "libavutil/internal.h"
  23. #include "libavcodec/internal.h"
  24. #include "avformat.h"
  25. #include "internal.h"
  26. #include "config.h"
  27. /* Enable function pointer definitions for runtime loading. */
  28. #define AVSC_NO_DECLSPEC
  29. /* Platform-specific directives. */
  30. #ifdef _WIN32
  31. #include "compat/w32dlfcn.h"
  32. #undef EXTERN_C
  33. #define AVISYNTH_LIB "avisynth"
  34. #else
  35. #include <dlfcn.h>
  36. #define AVISYNTH_NAME "libavisynth"
  37. #define AVISYNTH_LIB AVISYNTH_NAME SLIBSUF
  38. #endif
  39. #include <avisynth/avisynth_c.h>
/* Table of dynamically resolved AviSynth C API entry points.
 * Because AVSC_NO_DECLSPEC is defined above, avisynth_c.h declares each
 * avs_* entry point as a function-pointer typedef (name##_func) instead of
 * an import; avisynth_load_library() fills this table via dlsym(). */
typedef struct AviSynthLibrary {
    void *library;  /* handle returned by dlopen(); closed in the atexit handler */
#define AVSC_DECLARE_FUNC(name) name ## _func name
    AVSC_DECLARE_FUNC(avs_bit_blt);
    AVSC_DECLARE_FUNC(avs_clip_get_error);
    AVSC_DECLARE_FUNC(avs_create_script_environment);
    AVSC_DECLARE_FUNC(avs_delete_script_environment);
    AVSC_DECLARE_FUNC(avs_get_audio);
    AVSC_DECLARE_FUNC(avs_get_error);
    AVSC_DECLARE_FUNC(avs_get_frame);
    AVSC_DECLARE_FUNC(avs_get_version);
    AVSC_DECLARE_FUNC(avs_get_video_info);
    AVSC_DECLARE_FUNC(avs_invoke);
    AVSC_DECLARE_FUNC(avs_is_color_space);
    AVSC_DECLARE_FUNC(avs_release_clip);
    AVSC_DECLARE_FUNC(avs_release_value);
    AVSC_DECLARE_FUNC(avs_release_video_frame);
    AVSC_DECLARE_FUNC(avs_take_clip);
    AVSC_DECLARE_FUNC(avs_bits_per_pixel);
    AVSC_DECLARE_FUNC(avs_get_height_p);
    AVSC_DECLARE_FUNC(avs_get_pitch_p);
    AVSC_DECLARE_FUNC(avs_get_read_ptr_p);
    AVSC_DECLARE_FUNC(avs_get_row_size_p);
    AVSC_DECLARE_FUNC(avs_is_planar_rgb);
    AVSC_DECLARE_FUNC(avs_is_planar_rgba);
#undef AVSC_DECLARE_FUNC
} AviSynthLibrary;
/* Per-demuxer state; allocated as priv_data (zero-initialized) by
 * libavformat. */
typedef struct AviSynthContext {
    AVS_ScriptEnvironment *env;   /* scripting environment this clip runs in */
    AVS_Clip *clip;               /* clip returned by the Import() invocation */
    const AVS_VideoInfo *vi;      /* clip properties (owned by the clip) */

    /* avisynth_read_packet_video() iterates over this. */
    int n_planes;                 /* number of entries in planes[] */
    const int *planes;            /* one of the avs_planes_* tables below */

    int curr_stream;              /* index of the stream to read next (round-robin) */
    int curr_frame;               /* next video frame to fetch */
    int64_t curr_sample;          /* next audio sample to fetch */

    int error;                    /* sticky error flag; set once AviSynth reports a failure */

    /* Linked list pointers. */
    struct AviSynthContext *next;
} AviSynthContext;
/* Plane orderings used when copying frame data out of a clip; index 0 is
 * copied first. Packed formats use a single dummy "plane". */
static const int avs_planes_packed[1] = { 0 };
static const int avs_planes_grey[1] = { AVS_PLANAR_Y };
static const int avs_planes_yuv[3] = { AVS_PLANAR_Y, AVS_PLANAR_U,
AVS_PLANAR_V };
static const int avs_planes_rgb[3] = { AVS_PLANAR_G, AVS_PLANAR_B,
AVS_PLANAR_R };
static const int avs_planes_yuva[4] = { AVS_PLANAR_Y, AVS_PLANAR_U,
AVS_PLANAR_V, AVS_PLANAR_A };
static const int avs_planes_rgba[4] = { AVS_PLANAR_G, AVS_PLANAR_B,
AVS_PLANAR_R, AVS_PLANAR_A };
/* A conflict between C++ global objects, atexit, and dynamic loading requires
 * us to register our own atexit handler to prevent double freeing. */
static AviSynthLibrary avs_library;       /* resolved API; valid once library != NULL */
static int avs_atexit_called = 0;         /* set by the handler; destroy becomes a no-op after */

/* Linked list of AviSynthContexts. An atexit handler destroys this list. */
static AviSynthContext *avs_ctx_list = NULL;

static av_cold void avisynth_atexit_handler(void);
/* Load the AviSynth shared library and resolve every entry point in
 * avs_library. Returns 0 on success, AVERROR_UNKNOWN if the library or a
 * mandatory symbol cannot be found. On success an atexit handler is
 * registered to tear down remaining contexts and unload the library. */
static av_cold int avisynth_load_library(void)
{
    avs_library.library = dlopen(AVISYNTH_LIB, RTLD_NOW | RTLD_LOCAL);
    if (!avs_library.library)
        return AVERROR_UNKNOWN;

    /* Resolve one symbol. Symbols loaded with continue_on_fail = 1 are
     * allowed to be missing (newer-API additions); callers must NULL-check
     * them before use. */
#define LOAD_AVS_FUNC(name, continue_on_fail)                          \
        avs_library.name = (name ## _func)                             \
                           dlsym(avs_library.library, #name);          \
        if (!continue_on_fail && !avs_library.name)                    \
            goto fail;

    LOAD_AVS_FUNC(avs_bit_blt, 0);
    LOAD_AVS_FUNC(avs_clip_get_error, 0);
    LOAD_AVS_FUNC(avs_create_script_environment, 0);
    LOAD_AVS_FUNC(avs_delete_script_environment, 0);
    LOAD_AVS_FUNC(avs_get_audio, 0);
    LOAD_AVS_FUNC(avs_get_error, 1); // New to AviSynth 2.6
    LOAD_AVS_FUNC(avs_get_frame, 0);
    LOAD_AVS_FUNC(avs_get_version, 0);
    LOAD_AVS_FUNC(avs_get_video_info, 0);
    LOAD_AVS_FUNC(avs_invoke, 0);
    LOAD_AVS_FUNC(avs_is_color_space, 1);
    LOAD_AVS_FUNC(avs_release_clip, 0);
    LOAD_AVS_FUNC(avs_release_value, 0);
    LOAD_AVS_FUNC(avs_release_video_frame, 0);
    LOAD_AVS_FUNC(avs_take_clip, 0);
    LOAD_AVS_FUNC(avs_bits_per_pixel, 1);
    LOAD_AVS_FUNC(avs_get_height_p, 1);
    LOAD_AVS_FUNC(avs_get_pitch_p, 1);
    LOAD_AVS_FUNC(avs_get_read_ptr_p, 1);
    LOAD_AVS_FUNC(avs_get_row_size_p, 1);
    LOAD_AVS_FUNC(avs_is_planar_rgb, 1);
    LOAD_AVS_FUNC(avs_is_planar_rgba, 1);
#undef LOAD_AVS_FUNC

    atexit(avisynth_atexit_handler);
    return 0;

fail:
    dlclose(avs_library.library);
    return AVERROR_UNKNOWN;
}
  137. /* Note that avisynth_context_create and avisynth_context_destroy
  138. * do not allocate or free the actual context! That is taken care of
  139. * by libavformat. */
  140. static av_cold int avisynth_context_create(AVFormatContext *s)
  141. {
  142. AviSynthContext *avs = s->priv_data;
  143. int ret;
  144. if (!avs_library.library)
  145. if (ret = avisynth_load_library())
  146. return ret;
  147. avs->env = avs_library.avs_create_script_environment(3);
  148. if (avs_library.avs_get_error) {
  149. const char *error = avs_library.avs_get_error(avs->env);
  150. if (error) {
  151. av_log(s, AV_LOG_ERROR, "%s\n", error);
  152. return AVERROR_UNKNOWN;
  153. }
  154. }
  155. if (!avs_ctx_list) {
  156. avs_ctx_list = avs;
  157. } else {
  158. avs->next = avs_ctx_list;
  159. avs_ctx_list = avs;
  160. }
  161. return 0;
  162. }
/* Unlink a context from the global list and release its AviSynth resources.
 * Does nothing once the atexit handler has run (the C++ runtime may already
 * have destroyed AviSynth's global state at that point). The context memory
 * itself belongs to libavformat. */
static av_cold void avisynth_context_destroy(AviSynthContext *avs)
{
    if (avs_atexit_called)
        return;

    /* Unlink: either head removal or a walk to the predecessor.
     * NOTE(review): assumes avs is always present in the list — guaranteed
     * by avisynth_context_create() having been called first. */
    if (avs == avs_ctx_list) {
        avs_ctx_list = avs->next;
    } else {
        AviSynthContext *prev = avs_ctx_list;

        while (prev->next != avs)
            prev = prev->next;
        prev->next = avs->next;
    }

    if (avs->clip) {
        avs_library.avs_release_clip(avs->clip);
        avs->clip = NULL;
    }
    if (avs->env) {
        avs_library.avs_delete_script_environment(avs->env);
        avs->env = NULL;
    }
}
  184. static av_cold void avisynth_atexit_handler(void)
  185. {
  186. AviSynthContext *avs = avs_ctx_list;
  187. while (avs) {
  188. AviSynthContext *next = avs->next;
  189. avisynth_context_destroy(avs);
  190. avs = next;
  191. }
  192. dlclose(avs_library.library);
  193. avs_atexit_called = 1;
  194. }
/* Create AVStream from audio and video data. */

/* Fill in the video stream's codec parameters, timing, field order, and
 * pixel format from the clip's AVS_VideoInfo, and select the plane table
 * that avisynth_read_packet_video() will iterate over.
 * Returns 0 on success, AVERROR_UNKNOWN (and sets avs->error) for an
 * unrecognized AviSynth colorspace. */
static int avisynth_create_stream_video(AVFormatContext *s, AVStream *st)
{
    AviSynthContext *avs = s->priv_data;
    int planar = 0; // 0: packed, 1: YUV, 2: Y8, 3: Planar RGB, 4: YUVA, 5: Planar RGBA

    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->width      = avs->vi->width;
    st->codecpar->height     = avs->vi->height;

    st->avg_frame_rate = (AVRational) { avs->vi->fps_numerator,
                                        avs->vi->fps_denominator };
    st->start_time     = 0;
    st->duration       = avs->vi->num_frames;
    st->nb_frames      = avs->vi->num_frames;
    /* One tick per frame: timestamps are frame numbers. */
    avpriv_set_pts_info(st, 32, avs->vi->fps_denominator, avs->vi->fps_numerator);

    av_log(s, AV_LOG_TRACE, "avs_is_field_based: %d\n", avs_is_field_based(avs->vi));
    av_log(s, AV_LOG_TRACE, "avs_is_parity_known: %d\n", avs_is_parity_known(avs->vi));

    /* The following typically only works when assumetff (-bff) and
     * assumefieldbased is used in-script. Additional
     * logic using GetParity() could deliver more accurate results
     * but also decodes a frame which we want to avoid. */
    st->codecpar->field_order = AV_FIELD_UNKNOWN;
    if (avs_is_field_based(avs->vi)) {
        if (avs_is_tff(avs->vi)) {
            st->codecpar->field_order = AV_FIELD_TT;
        }
        else if (avs_is_bff(avs->vi)) {
            st->codecpar->field_order = AV_FIELD_BB;
        }
    }

    /* Map the AviSynth colorspace to an FFmpeg pixel format and record the
     * plane layout class in `planar` for the switch below. */
    switch (avs->vi->pixel_type) {
    /* 10~16-bit YUV pix_fmts (AviSynth+) */
    case AVS_CS_YUV444P10:
        st->codecpar->format = AV_PIX_FMT_YUV444P10;
        planar               = 1;
        break;
    case AVS_CS_YUV422P10:
        st->codecpar->format = AV_PIX_FMT_YUV422P10;
        planar               = 1;
        break;
    case AVS_CS_YUV420P10:
        st->codecpar->format = AV_PIX_FMT_YUV420P10;
        planar               = 1;
        break;
    case AVS_CS_YUV444P12:
        st->codecpar->format = AV_PIX_FMT_YUV444P12;
        planar               = 1;
        break;
    case AVS_CS_YUV422P12:
        st->codecpar->format = AV_PIX_FMT_YUV422P12;
        planar               = 1;
        break;
    case AVS_CS_YUV420P12:
        st->codecpar->format = AV_PIX_FMT_YUV420P12;
        planar               = 1;
        break;
    case AVS_CS_YUV444P14:
        st->codecpar->format = AV_PIX_FMT_YUV444P14;
        planar               = 1;
        break;
    case AVS_CS_YUV422P14:
        st->codecpar->format = AV_PIX_FMT_YUV422P14;
        planar               = 1;
        break;
    case AVS_CS_YUV420P14:
        st->codecpar->format = AV_PIX_FMT_YUV420P14;
        planar               = 1;
        break;
    case AVS_CS_YUV444P16:
        st->codecpar->format = AV_PIX_FMT_YUV444P16;
        planar               = 1;
        break;
    case AVS_CS_YUV422P16:
        st->codecpar->format = AV_PIX_FMT_YUV422P16;
        planar               = 1;
        break;
    case AVS_CS_YUV420P16:
        st->codecpar->format = AV_PIX_FMT_YUV420P16;
        planar               = 1;
        break;
    /* 8~16-bit YUV pix_fmts with Alpha (AviSynth+) */
    case AVS_CS_YUVA444:
        st->codecpar->format = AV_PIX_FMT_YUVA444P;
        planar               = 4;
        break;
    case AVS_CS_YUVA422:
        st->codecpar->format = AV_PIX_FMT_YUVA422P;
        planar               = 4;
        break;
    case AVS_CS_YUVA420:
        st->codecpar->format = AV_PIX_FMT_YUVA420P;
        planar               = 4;
        break;
    case AVS_CS_YUVA444P10:
        st->codecpar->format = AV_PIX_FMT_YUVA444P10;
        planar               = 4;
        break;
    case AVS_CS_YUVA422P10:
        st->codecpar->format = AV_PIX_FMT_YUVA422P10;
        planar               = 4;
        break;
    case AVS_CS_YUVA420P10:
        st->codecpar->format = AV_PIX_FMT_YUVA420P10;
        planar               = 4;
        break;
    case AVS_CS_YUVA422P12:
        st->codecpar->format = AV_PIX_FMT_YUVA422P12;
        planar               = 4;
        break;
    case AVS_CS_YUVA444P16:
        st->codecpar->format = AV_PIX_FMT_YUVA444P16;
        planar               = 4;
        break;
    case AVS_CS_YUVA422P16:
        st->codecpar->format = AV_PIX_FMT_YUVA422P16;
        planar               = 4;
        break;
    case AVS_CS_YUVA420P16:
        st->codecpar->format = AV_PIX_FMT_YUVA420P16;
        planar               = 4;
        break;
    /* Planar RGB pix_fmts (AviSynth+) */
    case AVS_CS_RGBP:
        st->codecpar->format = AV_PIX_FMT_GBRP;
        planar               = 3;
        break;
    case AVS_CS_RGBP10:
        st->codecpar->format = AV_PIX_FMT_GBRP10;
        planar               = 3;
        break;
    case AVS_CS_RGBP12:
        st->codecpar->format = AV_PIX_FMT_GBRP12;
        planar               = 3;
        break;
    case AVS_CS_RGBP14:
        st->codecpar->format = AV_PIX_FMT_GBRP14;
        planar               = 3;
        break;
    case AVS_CS_RGBP16:
        st->codecpar->format = AV_PIX_FMT_GBRP16;
        planar               = 3;
        break;
    /* Single precision floating point Planar RGB (AviSynth+) */
    case AVS_CS_RGBPS:
        st->codecpar->format = AV_PIX_FMT_GBRPF32;
        planar               = 3;
        break;
    /* Planar RGB pix_fmts with Alpha (AviSynth+) */
    case AVS_CS_RGBAP:
        st->codecpar->format = AV_PIX_FMT_GBRAP;
        planar               = 5;
        break;
    case AVS_CS_RGBAP10:
        st->codecpar->format = AV_PIX_FMT_GBRAP10;
        planar               = 5;
        break;
    case AVS_CS_RGBAP12:
        st->codecpar->format = AV_PIX_FMT_GBRAP12;
        planar               = 5;
        break;
    case AVS_CS_RGBAP16:
        st->codecpar->format = AV_PIX_FMT_GBRAP16;
        planar               = 5;
        break;
    /* Single precision floating point Planar RGB with Alpha (AviSynth+) */
    case AVS_CS_RGBAPS:
        st->codecpar->format = AV_PIX_FMT_GBRAPF32;
        planar               = 5;
        break;
    /* 10~16-bit gray pix_fmts (AviSynth+) */
    case AVS_CS_Y10:
        st->codecpar->format = AV_PIX_FMT_GRAY10;
        planar               = 2;
        break;
    case AVS_CS_Y12:
        st->codecpar->format = AV_PIX_FMT_GRAY12;
        planar               = 2;
        break;
    case AVS_CS_Y14:
        st->codecpar->format = AV_PIX_FMT_GRAY14;
        planar               = 2;
        break;
    case AVS_CS_Y16:
        st->codecpar->format = AV_PIX_FMT_GRAY16;
        planar               = 2;
        break;
    /* Single precision floating point gray (AviSynth+) */
    case AVS_CS_Y32:
        st->codecpar->format = AV_PIX_FMT_GRAYF32;
        planar               = 2;
        break;
    /* pix_fmts added in AviSynth 2.6 */
    case AVS_CS_YV24:
        st->codecpar->format = AV_PIX_FMT_YUV444P;
        planar               = 1;
        break;
    case AVS_CS_YV16:
        st->codecpar->format = AV_PIX_FMT_YUV422P;
        planar               = 1;
        break;
    case AVS_CS_YV411:
        st->codecpar->format = AV_PIX_FMT_YUV411P;
        planar               = 1;
        break;
    case AVS_CS_Y8:
        st->codecpar->format = AV_PIX_FMT_GRAY8;
        planar               = 2;
        break;
    /* 16-bit packed RGB pix_fmts (AviSynth+) */
    case AVS_CS_BGR48:
        st->codecpar->format = AV_PIX_FMT_BGR48;
        break;
    case AVS_CS_BGR64:
        st->codecpar->format = AV_PIX_FMT_BGRA64;
        break;
    /* AviSynth 2.5 pix_fmts */
    case AVS_CS_BGR24:
        st->codecpar->format = AV_PIX_FMT_BGR24;
        break;
    case AVS_CS_BGR32:
        st->codecpar->format = AV_PIX_FMT_RGB32;
        break;
    case AVS_CS_YUY2:
        st->codecpar->format = AV_PIX_FMT_YUYV422;
        break;
    case AVS_CS_YV12:
        st->codecpar->format = AV_PIX_FMT_YUV420P;
        planar               = 1;
        break;
    case AVS_CS_I420: // Is this even used anywhere?
        st->codecpar->format = AV_PIX_FMT_YUV420P;
        planar               = 1;
        break;
    default:
        av_log(s, AV_LOG_ERROR,
               "unknown AviSynth colorspace %d\n", avs->vi->pixel_type);
        avs->error = 1;
        return AVERROR_UNKNOWN;
    }

    /* Pick the plane ordering the packet reader will follow. */
    switch (planar) {
    case 5: // Planar RGB + Alpha
        avs->n_planes = 4;
        avs->planes   = avs_planes_rgba;
        break;
    case 4: // YUV + Alpha
        avs->n_planes = 4;
        avs->planes   = avs_planes_yuva;
        break;
    case 3: // Planar RGB
        avs->n_planes = 3;
        avs->planes   = avs_planes_rgb;
        break;
    case 2: // Y8
        avs->n_planes = 1;
        avs->planes   = avs_planes_grey;
        break;
    case 1: // YUV
        avs->n_planes = 3;
        avs->planes   = avs_planes_yuv;
        break;
    default:
        avs->n_planes = 1;
        avs->planes   = avs_planes_packed;
    }
    return 0;
}
  461. static int avisynth_create_stream_audio(AVFormatContext *s, AVStream *st)
  462. {
  463. AviSynthContext *avs = s->priv_data;
  464. st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
  465. st->codecpar->sample_rate = avs->vi->audio_samples_per_second;
  466. st->codecpar->channels = avs->vi->nchannels;
  467. st->duration = avs->vi->num_audio_samples;
  468. avpriv_set_pts_info(st, 64, 1, avs->vi->audio_samples_per_second);
  469. switch (avs->vi->sample_type) {
  470. case AVS_SAMPLE_INT8:
  471. st->codecpar->codec_id = AV_CODEC_ID_PCM_U8;
  472. break;
  473. case AVS_SAMPLE_INT16:
  474. st->codecpar->codec_id = AV_CODEC_ID_PCM_S16LE;
  475. break;
  476. case AVS_SAMPLE_INT24:
  477. st->codecpar->codec_id = AV_CODEC_ID_PCM_S24LE;
  478. break;
  479. case AVS_SAMPLE_INT32:
  480. st->codecpar->codec_id = AV_CODEC_ID_PCM_S32LE;
  481. break;
  482. case AVS_SAMPLE_FLOAT:
  483. st->codecpar->codec_id = AV_CODEC_ID_PCM_F32LE;
  484. break;
  485. default:
  486. av_log(s, AV_LOG_ERROR,
  487. "unknown AviSynth sample type %d\n", avs->vi->sample_type);
  488. avs->error = 1;
  489. return AVERROR_UNKNOWN;
  490. }
  491. return 0;
  492. }
  493. static int avisynth_create_stream(AVFormatContext *s)
  494. {
  495. AviSynthContext *avs = s->priv_data;
  496. AVStream *st;
  497. int ret;
  498. int id = 0;
  499. if (avs_has_video(avs->vi)) {
  500. st = avformat_new_stream(s, NULL);
  501. if (!st)
  502. return AVERROR_UNKNOWN;
  503. st->id = id++;
  504. if (ret = avisynth_create_stream_video(s, st))
  505. return ret;
  506. }
  507. if (avs_has_audio(avs->vi)) {
  508. st = avformat_new_stream(s, NULL);
  509. if (!st)
  510. return AVERROR_UNKNOWN;
  511. st->id = id++;
  512. if (ret = avisynth_create_stream_audio(s, st))
  513. return ret;
  514. }
  515. return 0;
  516. }
/* Run the .avs script through AviSynth's Import() and bind the resulting
 * clip to this context. On any failure the context is torn down via
 * avisynth_context_destroy() before returning. */
static int avisynth_open_file(AVFormatContext *s)
{
    AviSynthContext *avs = s->priv_data;
    AVS_Value arg, val;
    int ret;
#ifdef _WIN32
    char filename_ansi[MAX_PATH * 4];
    wchar_t filename_wc[MAX_PATH * 4];
#endif

    if (ret = avisynth_context_create(s))
        return ret;

#ifdef _WIN32
    /* Convert UTF-8 to ANSI code page */
    /* AviSynth's Import() takes a narrow string in the thread's ANSI code
     * page, so round-trip the UTF-8 url through UTF-16. Characters not
     * representable in the ACP are lossy here. */
    MultiByteToWideChar(CP_UTF8, 0, s->url, -1, filename_wc, MAX_PATH * 4);
    WideCharToMultiByte(CP_THREAD_ACP, 0, filename_wc, -1, filename_ansi,
                        MAX_PATH * 4, NULL, NULL);
    arg = avs_new_value_string(filename_ansi);
#else
    arg = avs_new_value_string(s->url);
#endif
    val = avs_library.avs_invoke(avs->env, "Import", arg, 0);
    if (avs_is_error(val)) {
        av_log(s, AV_LOG_ERROR, "%s\n", avs_as_error(val));
        ret = AVERROR_UNKNOWN;
        goto fail;
    }
    if (!avs_is_clip(val)) {
        av_log(s, AV_LOG_ERROR, "AviSynth script did not return a clip\n");
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    avs->clip = avs_library.avs_take_clip(val, avs->env);
    avs->vi   = avs_library.avs_get_video_info(avs->clip);

    /* On Windows, FFmpeg supports AviSynth interface version 6 or higher.
     * This includes AviSynth 2.6 RC1 or higher, and AviSynth+ r1718 or higher,
     * and excludes 2.5 and the 2.6 alphas. */
    if (avs_library.avs_get_version(avs->clip) < 6) {
        av_log(s, AV_LOG_ERROR,
               "AviSynth version is too old. Please upgrade to either AviSynth 2.6 >= RC1 or AviSynth+ >= r1718.\n");
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    /* Release the AVS_Value as it will go out of scope. */
    avs_library.avs_release_value(val);

    if (ret = avisynth_create_stream(s))
        goto fail;

    return 0;

fail:
    avisynth_context_destroy(avs);
    return ret;
}
  568. static void avisynth_next_stream(AVFormatContext *s, AVStream **st,
  569. AVPacket *pkt, int *discard)
  570. {
  571. AviSynthContext *avs = s->priv_data;
  572. avs->curr_stream++;
  573. avs->curr_stream %= s->nb_streams;
  574. *st = s->streams[avs->curr_stream];
  575. if ((*st)->discard == AVDISCARD_ALL)
  576. *discard = 1;
  577. else
  578. *discard = 0;
  579. return;
  580. }
/* Copy AviSynth clip data into an AVPacket. */

/* Fetch the next video frame and copy its planes, in avs->planes order,
 * into a single tightly-packed packet. BGR-family packed formats are
 * stored bottom-up by AviSynth and are flipped during the copy.
 * Returns 0 on success (also when the frame is discarded), AVERROR_EOF at
 * end of clip, a negative error on allocation failure, or AVERROR_UNKNOWN
 * (setting avs->error) when AviSynth reports a clip error. */
static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt,
                                      int discard)
{
    AviSynthContext *avs = s->priv_data;
    AVS_VideoFrame *frame;
    unsigned char *dst_p;
    const unsigned char *src_p;
    int n, i, plane, rowsize, planeheight, pitch, bits, ret;
    const char *error;

    if (avs->curr_frame >= avs->vi->num_frames)
        return AVERROR_EOF;

    /* This must happen even if the stream is discarded to prevent desync. */
    n = avs->curr_frame++;
    if (discard)
        return 0;

    bits = avs_library.avs_bits_per_pixel(avs->vi);

    /* Without the cast to int64_t, calculation overflows at about 9k x 9k
     * resolution. */
    pkt->size = (((int64_t)avs->vi->width *
                  (int64_t)avs->vi->height) * bits) / 8;
    if (!pkt->size)
        return AVERROR_UNKNOWN;

    if ((ret = av_new_packet(pkt, pkt->size)) < 0)
        return ret;

    /* Timestamps are in frames (see avpriv_set_pts_info in
     * avisynth_create_stream_video). */
    pkt->pts          = n;
    pkt->dts          = n;
    pkt->duration     = 1;
    pkt->stream_index = avs->curr_stream;

    frame = avs_library.avs_get_frame(avs->clip, n);
    error = avs_library.avs_clip_get_error(avs->clip);
    if (error) {
        av_log(s, AV_LOG_ERROR, "%s\n", error);
        avs->error = 1;
        av_packet_unref(pkt);
        return AVERROR_UNKNOWN;
    }

    dst_p = pkt->data;
    for (i = 0; i < avs->n_planes; i++) {
        plane = avs->planes[i];
        src_p = avs_library.avs_get_read_ptr_p(frame, plane);
        pitch = avs_library.avs_get_pitch_p(frame, plane);

        rowsize     = avs_library.avs_get_row_size_p(frame, plane);
        planeheight = avs_library.avs_get_height_p(frame, plane);

        /* Flip RGB video. */
        /* Start at the last row and walk upwards with a negative pitch. */
        if (avs_library.avs_is_color_space(avs->vi, AVS_CS_BGR) ||
            avs_library.avs_is_color_space(avs->vi, AVS_CS_BGR48) ||
            avs_library.avs_is_color_space(avs->vi, AVS_CS_BGR64)) {
            src_p = src_p + (planeheight - 1) * pitch;
            pitch = -pitch;
        }

        /* Copy the plane row by row, dropping the source pitch padding. */
        avs_library.avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
                                rowsize, planeheight);
        dst_p += rowsize * planeheight;
    }

    avs_library.avs_release_video_frame(frame);
    return 0;
}
/* Read a chunk of audio into a packet. When video is present, the chunk
 * size is chosen to keep audio in step with the current video frame;
 * otherwise a fixed 1000 samples per packet is used.
 * Returns 0 on success (including zero-size packets and discarded reads),
 * AVERROR_EOF at end of stream, a negative error on allocation failure, or
 * AVERROR_UNKNOWN (setting avs->error) on an AviSynth clip error. */
static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt,
                                      int discard)
{
    AviSynthContext *avs = s->priv_data;
    AVRational fps, samplerate;
    int samples, ret;
    int64_t n;
    const char *error;

    if (avs->curr_sample >= avs->vi->num_audio_samples)
        return AVERROR_EOF;

    fps.num        = avs->vi->fps_numerator;
    fps.den        = avs->vi->fps_denominator;
    samplerate.num = avs->vi->audio_samples_per_second;
    samplerate.den = 1;

    if (avs_has_video(avs->vi)) {
        /* Read up to the sample position matching the current video frame;
         * after the last frame, read one frame's worth at a time. */
        if (avs->curr_frame < avs->vi->num_frames)
            samples = av_rescale_q(avs->curr_frame, samplerate, fps) -
                      avs->curr_sample;
        else
            samples = av_rescale_q(1, samplerate, fps);
    } else {
        samples = 1000;
    }

    /* After seeking, audio may catch up with video. */
    if (samples <= 0) {
        pkt->size = 0;
        pkt->data = NULL;
        return 0;
    }

    /* Clamp the final chunk to the end of the audio stream. */
    if (avs->curr_sample + samples > avs->vi->num_audio_samples)
        samples = avs->vi->num_audio_samples - avs->curr_sample;

    /* This must happen even if the stream is discarded to prevent desync. */
    n                 = avs->curr_sample;
    avs->curr_sample += samples;
    if (discard)
        return 0;

    pkt->size = avs_bytes_per_channel_sample(avs->vi) *
                samples * avs->vi->nchannels;
    if (!pkt->size)
        return AVERROR_UNKNOWN;

    if ((ret = av_new_packet(pkt, pkt->size)) < 0)
        return ret;

    /* Timestamps are in samples (see avpriv_set_pts_info in
     * avisynth_create_stream_audio). */
    pkt->pts          = n;
    pkt->dts          = n;
    pkt->duration     = samples;
    pkt->stream_index = avs->curr_stream;

    avs_library.avs_get_audio(avs->clip, pkt->data, n, samples);
    error = avs_library.avs_clip_get_error(avs->clip);
    if (error) {
        av_log(s, AV_LOG_ERROR, "%s\n", error);
        avs->error = 1;
        av_packet_unref(pkt);
        return AVERROR_UNKNOWN;
    }
    return 0;
}
  695. static av_cold int avisynth_read_header(AVFormatContext *s)
  696. {
  697. int ret;
  698. // Calling library must implement a lock for thread-safe opens.
  699. if (ret = ff_lock_avformat())
  700. return ret;
  701. if (ret = avisynth_open_file(s)) {
  702. ff_unlock_avformat();
  703. return ret;
  704. }
  705. ff_unlock_avformat();
  706. return 0;
  707. }
/* Demuxer read_packet callback: alternate between the video and audio
 * streams; when one side hits EOF, keep draining the other. */
static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AviSynthContext *avs = s->priv_data;
    AVStream *st;
    int discard = 0;
    int ret;

    /* A previous AviSynth error is sticky; refuse further reads. */
    if (avs->error)
        return AVERROR_UNKNOWN;

    /* If either stream reaches EOF, try to read the other one before
     * giving up. */
    avisynth_next_stream(s, &st, pkt, &discard);
    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = avisynth_read_packet_video(s, pkt, discard);
        if (ret == AVERROR_EOF && avs_has_audio(avs->vi)) {
            avisynth_next_stream(s, &st, pkt, &discard);
            return avisynth_read_packet_audio(s, pkt, discard);
        }
    } else {
        ret = avisynth_read_packet_audio(s, pkt, discard);
        if (ret == AVERROR_EOF && avs_has_video(avs->vi)) {
            avisynth_next_stream(s, &st, pkt, &discard);
            return avisynth_read_packet_video(s, pkt, discard);
        }
    }

    return ret;
}
/* Demuxer read_close callback: tear down the AviSynth state under the
 * global avformat lock. The context memory itself is freed by libavformat. */
static av_cold int avisynth_read_close(AVFormatContext *s)
{
    if (ff_lock_avformat())
        return AVERROR_UNKNOWN;

    avisynth_context_destroy(s->priv_data);
    ff_unlock_avformat();
    return 0;
}
/* Demuxer read_seek callback. Video seeks land on the exact frame; audio
 * seeks are snapped to frame granularity when video is present so both
 * cursors stay in step. Timestamps are in stream time base units
 * (frames for video, samples for audio). */
static int avisynth_read_seek(AVFormatContext *s, int stream_index,
                              int64_t timestamp, int flags)
{
    AviSynthContext *avs = s->priv_data;
    AVStream *st;
    AVRational fps, samplerate;

    if (avs->error)
        return AVERROR_UNKNOWN;

    fps        = (AVRational) { avs->vi->fps_numerator,
                                avs->vi->fps_denominator };
    samplerate = (AVRational) { avs->vi->audio_samples_per_second, 1 };

    st = s->streams[stream_index];
    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* AviSynth frame counts are signed int. */
        if ((timestamp >= avs->vi->num_frames) ||
            (timestamp > INT_MAX)              ||
            (timestamp < 0))
            return AVERROR_EOF;
        avs->curr_frame = timestamp;
        /* Move the audio cursor to the matching sample position. */
        if (avs_has_audio(avs->vi))
            avs->curr_sample = av_rescale_q(timestamp, samplerate, fps);
    } else {
        if ((timestamp >= avs->vi->num_audio_samples) || (timestamp < 0))
            return AVERROR_EOF;
        /* Force frame granularity for seeking. */
        if (avs_has_video(avs->vi)) {
            avs->curr_frame  = av_rescale_q(timestamp, fps, samplerate);
            avs->curr_sample = av_rescale_q(avs->curr_frame, samplerate, fps);
        } else {
            avs->curr_sample = timestamp;
        }
    }

    return 0;
}
/* Demuxer registration: matched by the .avs extension only (AviSynth
 * scripts are plain text, so there is no probe function). */
AVInputFormat ff_avisynth_demuxer = {
    .name           = "avisynth",
    .long_name      = NULL_IF_CONFIG_SMALL("AviSynth script"),
    .priv_data_size = sizeof(AviSynthContext),
    .read_header    = avisynth_read_header,
    .read_packet    = avisynth_read_packet,
    .read_close     = avisynth_read_close,
    .read_seek      = avisynth_read_seek,
    .extensions     = "avs",
};