You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

711 lines
21KB

  1. /*
  2. * AviSynth/AvxSynth support
  3. * Copyright (c) 2012 AvxSynth Team.
  4. *
  5. * This file is part of FFmpeg
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/internal.h"
  21. #include "libavcodec/internal.h"
  22. #include "avformat.h"
  23. #include "internal.h"
  24. /* Enable function pointer definitions for runtime loading. */
  25. #define AVSC_NO_DECLSPEC
  26. /* Platform-specific directives for AviSynth vs AvxSynth. */
  27. #ifdef _WIN32
  28. #include <windows.h>
  29. #undef EXTERN_C
  30. #include "compat/avisynth/avisynth_c.h"
  31. #include "compat/avisynth/avisynth_c_25.h"
  32. #define AVISYNTH_LIB "avisynth"
  33. #define USING_AVISYNTH
  34. #else
  35. #include <dlfcn.h>
  36. #include "compat/avisynth/avxsynth_c.h"
  37. #if defined (__APPLE__)
  38. #define AVISYNTH_LIB "libavxsynth.dylib"
  39. #else
  40. #define AVISYNTH_LIB "libavxsynth.so"
  41. #endif
  42. #define LoadLibrary(x) dlopen(x, RTLD_NOW | RTLD_LOCAL)
  43. #define GetProcAddress dlsym
  44. #define FreeLibrary dlclose
  45. #endif
/* Table of function pointers into the dynamically loaded AviSynth/AvxSynth
 * library.  AVSC_NO_DECLSPEC (defined above) makes the avisynth_c.h headers
 * emit `name##_func` pointer typedefs instead of prototypes; each pointer is
 * resolved at runtime by avisynth_load_library(). */
typedef struct AviSynthLibrary {
    /* Handle from LoadLibrary()/dlopen(); NULL until the library is loaded. */
    void *library;
#define AVSC_DECLARE_FUNC(name) name ## _func name
    AVSC_DECLARE_FUNC(avs_bit_blt);
    AVSC_DECLARE_FUNC(avs_clip_get_error);
    AVSC_DECLARE_FUNC(avs_create_script_environment);
    AVSC_DECLARE_FUNC(avs_delete_script_environment);
    AVSC_DECLARE_FUNC(avs_get_audio);
    AVSC_DECLARE_FUNC(avs_get_error);
    AVSC_DECLARE_FUNC(avs_get_frame);
    AVSC_DECLARE_FUNC(avs_get_version);
    AVSC_DECLARE_FUNC(avs_get_video_info);
    AVSC_DECLARE_FUNC(avs_invoke);
    AVSC_DECLARE_FUNC(avs_release_clip);
    AVSC_DECLARE_FUNC(avs_release_value);
    AVSC_DECLARE_FUNC(avs_release_video_frame);
    AVSC_DECLARE_FUNC(avs_take_clip);
#ifdef USING_AVISYNTH
    /* AviSynth 2.6 additions; AvxSynth does not provide these (see the
     * special-casing in avisynth_read_packet_video()). */
    AVSC_DECLARE_FUNC(avs_bits_per_pixel);
    AVSC_DECLARE_FUNC(avs_get_height_p);
    AVSC_DECLARE_FUNC(avs_get_pitch_p);
    AVSC_DECLARE_FUNC(avs_get_read_ptr_p);
    AVSC_DECLARE_FUNC(avs_get_row_size_p);
    AVSC_DECLARE_FUNC(avs_is_yv24);
    AVSC_DECLARE_FUNC(avs_is_yv16);
    AVSC_DECLARE_FUNC(avs_is_yv411);
    AVSC_DECLARE_FUNC(avs_is_y8);
#endif
#undef AVSC_DECLARE_FUNC
} AviSynthLibrary;
/* Per-file demuxer state.  Allocated and freed by libavformat as priv_data;
 * avisynth_context_create()/avisynth_context_destroy() only initialize and
 * release its contents. */
typedef struct AviSynthContext {
    AVS_ScriptEnvironment *env;  /* script environment owning the clip */
    AVS_Clip *clip;              /* clip returned by the script's Import() */
    const AVS_VideoInfo *vi;     /* stream properties queried from the clip */

    /* avisynth_read_packet_video() iterates over this. */
    int n_planes;
    const int *planes;

    int curr_stream;             /* stream read last (round-robin cursor) */
    int curr_frame;              /* next video frame to fetch */
    int64_t curr_sample;         /* next audio sample to fetch */

    int error;                   /* sticky flag: set once a clip error occurs */

    /* Linked list pointers. */
    struct AviSynthContext *next;
} AviSynthContext;
/* Plane index lists selected by avisynth_create_stream_video() and iterated
 * by avisynth_read_packet_video(). */
static const int avs_planes_packed[1] = { 0 };
static const int avs_planes_grey[1]   = { AVS_PLANAR_Y };
static const int avs_planes_yuv[3]    = { AVS_PLANAR_Y, AVS_PLANAR_U,
                                          AVS_PLANAR_V };

/* A conflict between C++ global objects, atexit, and dynamic loading requires
 * us to register our own atexit handler to prevent double freeing. */
static AviSynthLibrary avs_library;
static int avs_atexit_called = 0;

/* Linked list of AviSynthContexts. An atexit handler destroys this list. */
static AviSynthContext *avs_ctx_list = NULL;

static av_cold void avisynth_atexit_handler(void);
  101. static av_cold int avisynth_load_library(void)
  102. {
  103. avs_library.library = LoadLibrary(AVISYNTH_LIB);
  104. if (!avs_library.library)
  105. return AVERROR_UNKNOWN;
  106. #define LOAD_AVS_FUNC(name, continue_on_fail) \
  107. avs_library.name = \
  108. (void *)GetProcAddress(avs_library.library, #name); \
  109. if (!continue_on_fail && !avs_library.name) \
  110. goto fail;
  111. LOAD_AVS_FUNC(avs_bit_blt, 0);
  112. LOAD_AVS_FUNC(avs_clip_get_error, 0);
  113. LOAD_AVS_FUNC(avs_create_script_environment, 0);
  114. LOAD_AVS_FUNC(avs_delete_script_environment, 0);
  115. LOAD_AVS_FUNC(avs_get_audio, 0);
  116. LOAD_AVS_FUNC(avs_get_error, 1); // New to AviSynth 2.6
  117. LOAD_AVS_FUNC(avs_get_frame, 0);
  118. LOAD_AVS_FUNC(avs_get_version, 0);
  119. LOAD_AVS_FUNC(avs_get_video_info, 0);
  120. LOAD_AVS_FUNC(avs_invoke, 0);
  121. LOAD_AVS_FUNC(avs_release_clip, 0);
  122. LOAD_AVS_FUNC(avs_release_value, 0);
  123. LOAD_AVS_FUNC(avs_release_video_frame, 0);
  124. LOAD_AVS_FUNC(avs_take_clip, 0);
  125. #ifdef USING_AVISYNTH
  126. LOAD_AVS_FUNC(avs_bits_per_pixel, 0);
  127. LOAD_AVS_FUNC(avs_get_height_p, 0);
  128. LOAD_AVS_FUNC(avs_get_pitch_p, 0);
  129. LOAD_AVS_FUNC(avs_get_read_ptr_p, 0);
  130. LOAD_AVS_FUNC(avs_get_row_size_p, 0);
  131. LOAD_AVS_FUNC(avs_is_yv24, 0);
  132. LOAD_AVS_FUNC(avs_is_yv16, 0);
  133. LOAD_AVS_FUNC(avs_is_yv411, 0);
  134. LOAD_AVS_FUNC(avs_is_y8, 0);
  135. #endif
  136. #undef LOAD_AVS_FUNC
  137. atexit(avisynth_atexit_handler);
  138. return 0;
  139. fail:
  140. FreeLibrary(avs_library.library);
  141. return AVERROR_UNKNOWN;
  142. }
  143. /* Note that avisynth_context_create and avisynth_context_destroy
  144. * do not allocate or free the actual context! That is taken care of
  145. * by libavformat. */
  146. static av_cold int avisynth_context_create(AVFormatContext *s)
  147. {
  148. AviSynthContext *avs = s->priv_data;
  149. int ret;
  150. if (!avs_library.library)
  151. if (ret = avisynth_load_library())
  152. return ret;
  153. avs->env = avs_library.avs_create_script_environment(3);
  154. if (avs_library.avs_get_error) {
  155. const char *error = avs_library.avs_get_error(avs->env);
  156. if (error) {
  157. av_log(s, AV_LOG_ERROR, "%s\n", error);
  158. return AVERROR_UNKNOWN;
  159. }
  160. }
  161. if (!avs_ctx_list) {
  162. avs_ctx_list = avs;
  163. } else {
  164. avs->next = avs_ctx_list;
  165. avs_ctx_list = avs;
  166. }
  167. return 0;
  168. }
  169. static av_cold void avisynth_context_destroy(AviSynthContext *avs)
  170. {
  171. if (avs_atexit_called)
  172. return;
  173. if (avs == avs_ctx_list) {
  174. avs_ctx_list = avs->next;
  175. } else {
  176. AviSynthContext *prev = avs_ctx_list;
  177. while (prev->next != avs)
  178. prev = prev->next;
  179. prev->next = avs->next;
  180. }
  181. if (avs->clip) {
  182. avs_library.avs_release_clip(avs->clip);
  183. avs->clip = NULL;
  184. }
  185. if (avs->env) {
  186. avs_library.avs_delete_script_environment(avs->env);
  187. avs->env = NULL;
  188. }
  189. }
  190. static av_cold void avisynth_atexit_handler(void)
  191. {
  192. AviSynthContext *avs = avs_ctx_list;
  193. while (avs) {
  194. AviSynthContext *next = avs->next;
  195. avisynth_context_destroy(avs);
  196. avs = next;
  197. }
  198. FreeLibrary(avs_library.library);
  199. avs_atexit_called = 1;
  200. }
  201. /* Create AVStream from audio and video data. */
/* Fill in the video AVStream from the clip's AVS_VideoInfo and select the
 * plane list avisynth_read_packet_video() will iterate over.
 * Returns 0 on success; on an unknown colorspace sets avs->error and
 * returns AVERROR_UNKNOWN. */
static int avisynth_create_stream_video(AVFormatContext *s, AVStream *st)
{
    AviSynthContext *avs = s->priv_data;
    int planar = 0; // 0: packed, 1: YUV, 2: Y8

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codec->width      = avs->vi->width;
    st->codec->height     = avs->vi->height;

    /* One tick per frame: the time base is the inverse of the frame rate. */
    st->time_base         = (AVRational) { avs->vi->fps_denominator,
                                           avs->vi->fps_numerator };
    st->avg_frame_rate    = (AVRational) { avs->vi->fps_numerator,
                                           avs->vi->fps_denominator };
    st->start_time        = 0;
    st->duration          = avs->vi->num_frames;
    st->nb_frames         = avs->vi->num_frames;

    /* Map the AviSynth colorspace to a lavu pixel format. */
    switch (avs->vi->pixel_type) {
#ifdef USING_AVISYNTH
    /* AviSynth 2.6 colorspaces; AvxSynth does not define these. */
    case AVS_CS_YV24:
        st->codec->pix_fmt = AV_PIX_FMT_YUV444P;
        planar             = 1;
        break;
    case AVS_CS_YV16:
        st->codec->pix_fmt = AV_PIX_FMT_YUV422P;
        planar             = 1;
        break;
    case AVS_CS_YV411:
        st->codec->pix_fmt = AV_PIX_FMT_YUV411P;
        planar             = 1;
        break;
    case AVS_CS_Y8:
        st->codec->pix_fmt = AV_PIX_FMT_GRAY8;
        planar             = 2;
        break;
#endif
    case AVS_CS_BGR24:
        st->codec->pix_fmt = AV_PIX_FMT_BGR24;
        break;
    case AVS_CS_BGR32:
        st->codec->pix_fmt = AV_PIX_FMT_RGB32;
        break;
    case AVS_CS_YUY2:
        st->codec->pix_fmt = AV_PIX_FMT_YUYV422;
        break;
    case AVS_CS_YV12:
        st->codec->pix_fmt = AV_PIX_FMT_YUV420P;
        planar             = 1;
        break;
    case AVS_CS_I420: // Is this even used anywhere?
        st->codec->pix_fmt = AV_PIX_FMT_YUV420P;
        planar             = 1;
        break;
    default:
        av_log(s, AV_LOG_ERROR,
               "unknown AviSynth colorspace %d\n", avs->vi->pixel_type);
        avs->error = 1;
        return AVERROR_UNKNOWN;
    }

    /* Choose which planes to copy per frame, based on the layout. */
    switch (planar) {
    case 2: // Y8
        avs->n_planes = 1;
        avs->planes   = avs_planes_grey;
        break;
    case 1: // YUV
        avs->n_planes = 3;
        avs->planes   = avs_planes_yuv;
        break;
    default: // packed (RGB/YUY2): copied as a single "plane"
        avs->n_planes = 1;
        avs->planes   = avs_planes_packed;
    }
    return 0;
}
/* Fill in the audio AVStream from the clip's AVS_VideoInfo.  Each AviSynth
 * sample type maps to a little-endian PCM codec ID.
 * Returns 0 on success; on an unknown sample type sets avs->error and
 * returns AVERROR_UNKNOWN. */
static int avisynth_create_stream_audio(AVFormatContext *s, AVStream *st)
{
    AviSynthContext *avs = s->priv_data;

    st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codec->sample_rate = avs->vi->audio_samples_per_second;
    st->codec->channels    = avs->vi->nchannels;
    /* One tick per audio sample. */
    st->time_base          = (AVRational) { 1,
                                            avs->vi->audio_samples_per_second };
    st->duration           = avs->vi->num_audio_samples;

    switch (avs->vi->sample_type) {
    case AVS_SAMPLE_INT8:
        st->codec->codec_id = AV_CODEC_ID_PCM_U8;
        break;
    case AVS_SAMPLE_INT16:
        st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
        break;
    case AVS_SAMPLE_INT24:
        st->codec->codec_id = AV_CODEC_ID_PCM_S24LE;
        break;
    case AVS_SAMPLE_INT32:
        st->codec->codec_id = AV_CODEC_ID_PCM_S32LE;
        break;
    case AVS_SAMPLE_FLOAT:
        st->codec->codec_id = AV_CODEC_ID_PCM_F32LE;
        break;
    default:
        av_log(s, AV_LOG_ERROR,
               "unknown AviSynth sample type %d\n", avs->vi->sample_type);
        avs->error = 1;
        return AVERROR_UNKNOWN;
    }
    return 0;
}
  307. static int avisynth_create_stream(AVFormatContext *s)
  308. {
  309. AviSynthContext *avs = s->priv_data;
  310. AVStream *st;
  311. int ret;
  312. int id = 0;
  313. if (avs_has_video(avs->vi)) {
  314. st = avformat_new_stream(s, NULL);
  315. if (!st)
  316. return AVERROR_UNKNOWN;
  317. st->id = id++;
  318. if (ret = avisynth_create_stream_video(s, st))
  319. return ret;
  320. }
  321. if (avs_has_audio(avs->vi)) {
  322. st = avformat_new_stream(s, NULL);
  323. if (!st)
  324. return AVERROR_UNKNOWN;
  325. st->id = id++;
  326. if (ret = avisynth_create_stream_audio(s, st))
  327. return ret;
  328. }
  329. return 0;
  330. }
/* Run the script through AviSynth's Import() and bind the resulting clip.
 * On any failure after context creation, the partially initialized context
 * is destroyed before returning. */
static int avisynth_open_file(AVFormatContext *s)
{
    AviSynthContext *avs = s->priv_data;
    AVS_Value arg, val;
    int ret;
#ifdef USING_AVISYNTH
    char filename_ansi[MAX_PATH * 4];
    wchar_t filename_wc[MAX_PATH * 4];
#endif

    if (ret = avisynth_context_create(s))
        return ret;

#ifdef USING_AVISYNTH
    /* Convert UTF-8 to ANSI code page: lavf filenames are UTF-8, but
     * AviSynth's Import() takes an ANSI string.  Goes via UTF-16.
     * NOTE(review): both conversion results are unchecked; filenames with
     * characters outside the thread's ANSI code page may be mangled —
     * confirm this is acceptable. */
    MultiByteToWideChar(CP_UTF8, 0, s->filename, -1, filename_wc, MAX_PATH * 4);
    WideCharToMultiByte(CP_THREAD_ACP, 0, filename_wc, -1, filename_ansi,
                        MAX_PATH * 4, NULL, NULL);
    arg = avs_new_value_string(filename_ansi);
#else
    arg = avs_new_value_string(s->filename);
#endif
    val = avs_library.avs_invoke(avs->env, "Import", arg, 0);
    if (avs_is_error(val)) {
        av_log(s, AV_LOG_ERROR, "%s\n", avs_as_error(val));
        ret = AVERROR_UNKNOWN;
        goto fail;
    }
    if (!avs_is_clip(val)) {
        av_log(s, AV_LOG_ERROR, "AviSynth script did not return a clip\n");
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    avs->clip = avs_library.avs_take_clip(val, avs->env);
    avs->vi   = avs_library.avs_get_video_info(avs->clip);

    /* Release the AVS_Value as it will go out of scope. */
    avs_library.avs_release_value(val);

    if (ret = avisynth_create_stream(s))
        goto fail;

    return 0;

fail:
    avisynth_context_destroy(avs);
    return ret;
}
  373. static void avisynth_next_stream(AVFormatContext *s, AVStream **st,
  374. AVPacket *pkt, int *discard)
  375. {
  376. AviSynthContext *avs = s->priv_data;
  377. avs->curr_stream++;
  378. avs->curr_stream %= s->nb_streams;
  379. *st = s->streams[avs->curr_stream];
  380. if ((*st)->discard == AVDISCARD_ALL)
  381. *discard = 1;
  382. else
  383. *discard = 0;
  384. return;
  385. }
  386. /* Copy AviSynth clip data into an AVPacket. */
/* Fetch one AviSynth video frame and copy it, plane by plane with no
 * padding between rows, into pkt.  The frame counter advances even for
 * discarded streams so audio/video stay in sync.
 * Returns 0, AVERROR_EOF at end of clip, or a negative error code. */
static int avisynth_read_packet_video(AVFormatContext *s, AVPacket *pkt,
                                      int discard)
{
    AviSynthContext *avs = s->priv_data;
    AVS_VideoFrame *frame;
    unsigned char *dst_p;
    const unsigned char *src_p;
    int n, i, plane, rowsize, planeheight, pitch, bits;
    const char *error;

    if (avs->curr_frame >= avs->vi->num_frames)
        return AVERROR_EOF;

    /* This must happen even if the stream is discarded to prevent desync. */
    n = avs->curr_frame++;
    if (discard)
        return 0;

#ifdef USING_AVISYNTH
    /* Define the bpp values for the new AviSynth 2.6 colorspaces.
     * Since AvxSynth doesn't have these functions, special-case
     * it in order to avoid implicit declaration errors. */
    if (avs_library.avs_is_yv24(avs->vi))
        bits = 24;
    else if (avs_library.avs_is_yv16(avs->vi))
        bits = 16;
    else if (avs_library.avs_is_yv411(avs->vi))
        bits = 12;
    else if (avs_library.avs_is_y8(avs->vi))
        bits = 8;
    else
        bits = avs_library.avs_bits_per_pixel(avs->vi);
#else
    bits = avs_bits_per_pixel(avs->vi);
#endif

    /* Without the cast to int64_t, calculation overflows at about 9k x 9k
     * resolution. */
    pkt->size = (((int64_t)avs->vi->width *
                  (int64_t)avs->vi->height) * bits) / 8;
    if (!pkt->size)
        return AVERROR_UNKNOWN;

    if (av_new_packet(pkt, pkt->size) < 0)
        return AVERROR(ENOMEM);

    /* Raw video: PTS == DTS == frame number, one tick per frame. */
    pkt->pts          = n;
    pkt->dts          = n;
    pkt->duration     = 1;
    pkt->stream_index = avs->curr_stream;

    frame = avs_library.avs_get_frame(avs->clip, n);
    error = avs_library.avs_clip_get_error(avs->clip);
    if (error) {
        av_log(s, AV_LOG_ERROR, "%s\n", error);
        avs->error = 1;
        av_packet_unref(pkt);
        return AVERROR_UNKNOWN;
    }

    dst_p = pkt->data;
    for (i = 0; i < avs->n_planes; i++) {
        plane = avs->planes[i];
#ifdef USING_AVISYNTH
        src_p = avs_library.avs_get_read_ptr_p(frame, plane);
        pitch = avs_library.avs_get_pitch_p(frame, plane);

        /* Interface version 3 is AviSynth 2.5, whose AVS_VideoFrame layout
         * differs; use the compat accessors from avisynth_c_25.h there. */
        if (avs_library.avs_get_version(avs->clip) == 3) {
            rowsize     = avs_get_row_size_p_25(frame, plane);
            planeheight = avs_get_height_p_25(frame, plane);
        } else {
            rowsize     = avs_library.avs_get_row_size_p(frame, plane);
            planeheight = avs_library.avs_get_height_p(frame, plane);
        }
#else
        src_p       = avs_get_read_ptr_p(frame, plane);
        pitch       = avs_get_pitch_p(frame, plane);
        rowsize     = avs_get_row_size_p(frame, plane);
        planeheight = avs_get_height_p(frame, plane);
#endif

        /* Flip RGB video: start from the last row and negate the pitch so
         * avs_bit_blt walks the source bottom-up. */
        if (avs_is_rgb24(avs->vi) || avs_is_rgb(avs->vi)) {
            src_p = src_p + (planeheight - 1) * pitch;
            pitch = -pitch;
        }

        avs_library.avs_bit_blt(avs->env, dst_p, rowsize, src_p, pitch,
                                rowsize, planeheight);
        dst_p += rowsize * planeheight;
    }

    avs_library.avs_release_video_frame(frame);
    return 0;
}
/* Fetch a chunk of AviSynth audio into pkt.  With video present, each
 * packet covers the samples up to the current video frame so the streams
 * stay interleaved; without video, a fixed 1000-sample chunk is used.
 * The sample counter advances even for discarded streams to prevent
 * desync.  Returns 0, AVERROR_EOF at end of stream, or a negative error. */
static int avisynth_read_packet_audio(AVFormatContext *s, AVPacket *pkt,
                                      int discard)
{
    AviSynthContext *avs = s->priv_data;
    AVRational fps, samplerate;
    int samples;
    int64_t n;
    const char *error;

    if (avs->curr_sample >= avs->vi->num_audio_samples)
        return AVERROR_EOF;

    fps.num        = avs->vi->fps_numerator;
    fps.den        = avs->vi->fps_denominator;
    samplerate.num = avs->vi->audio_samples_per_second;
    samplerate.den = 1;

    if (avs_has_video(avs->vi)) {
        if (avs->curr_frame < avs->vi->num_frames)
            /* Catch audio up to the current video position. */
            samples = av_rescale_q(avs->curr_frame, samplerate, fps) -
                      avs->curr_sample;
        else
            /* Video is finished: emit one frame's worth at a time. */
            samples = av_rescale_q(1, samplerate, fps);
    } else {
        samples = 1000;
    }

    /* After seeking, audio may catch up with video. */
    if (samples <= 0) {
        pkt->size = 0;
        pkt->data = NULL;
        return 0;
    }

    /* Clamp the final packet to the end of the stream. */
    if (avs->curr_sample + samples > avs->vi->num_audio_samples)
        samples = avs->vi->num_audio_samples - avs->curr_sample;

    /* This must happen even if the stream is discarded to prevent desync. */
    n = avs->curr_sample;
    avs->curr_sample += samples;
    if (discard)
        return 0;

    pkt->size = avs_bytes_per_channel_sample(avs->vi) *
                samples * avs->vi->nchannels;
    if (!pkt->size)
        return AVERROR_UNKNOWN;

    if (av_new_packet(pkt, pkt->size) < 0)
        return AVERROR(ENOMEM);

    /* PTS/DTS are in samples (time_base is 1/sample_rate). */
    pkt->pts          = n;
    pkt->dts          = n;
    pkt->duration     = samples;
    pkt->stream_index = avs->curr_stream;

    avs_library.avs_get_audio(avs->clip, pkt->data, n, samples);
    error = avs_library.avs_clip_get_error(avs->clip);
    if (error) {
        av_log(s, AV_LOG_ERROR, "%s\n", error);
        avs->error = 1;
        av_packet_unref(pkt);
        return AVERROR_UNKNOWN;
    }
    return 0;
}
  526. static av_cold int avisynth_read_header(AVFormatContext *s)
  527. {
  528. int ret;
  529. // Calling library must implement a lock for thread-safe opens.
  530. if (ret = avpriv_lock_avformat())
  531. return ret;
  532. if (ret = avisynth_open_file(s)) {
  533. avpriv_unlock_avformat();
  534. return ret;
  535. }
  536. avpriv_unlock_avformat();
  537. return 0;
  538. }
  539. static int avisynth_read_packet(AVFormatContext *s, AVPacket *pkt)
  540. {
  541. AviSynthContext *avs = s->priv_data;
  542. AVStream *st;
  543. int discard = 0;
  544. int ret;
  545. if (avs->error)
  546. return AVERROR_UNKNOWN;
  547. /* If either stream reaches EOF, try to read the other one before
  548. * giving up. */
  549. avisynth_next_stream(s, &st, pkt, &discard);
  550. if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
  551. ret = avisynth_read_packet_video(s, pkt, discard);
  552. if (ret == AVERROR_EOF && avs_has_audio(avs->vi)) {
  553. avisynth_next_stream(s, &st, pkt, &discard);
  554. return avisynth_read_packet_audio(s, pkt, discard);
  555. }
  556. } else {
  557. ret = avisynth_read_packet_audio(s, pkt, discard);
  558. if (ret == AVERROR_EOF && avs_has_video(avs->vi)) {
  559. avisynth_next_stream(s, &st, pkt, &discard);
  560. return avisynth_read_packet_video(s, pkt, discard);
  561. }
  562. }
  563. return ret;
  564. }
  565. static av_cold int avisynth_read_close(AVFormatContext *s)
  566. {
  567. if (avpriv_lock_avformat())
  568. return AVERROR_UNKNOWN;
  569. avisynth_context_destroy(s->priv_data);
  570. avpriv_unlock_avformat();
  571. return 0;
  572. }
/* Demuxer read_seek callback.  Seeking is just resetting the frame/sample
 * cursors; frames and samples are then fetched by index.  The timestamp is
 * in frames or samples depending on stream_index; the other cursor is
 * derived by rescaling so audio and video stay in sync. */
static int avisynth_read_seek(AVFormatContext *s, int stream_index,
                              int64_t timestamp, int flags)
{
    AviSynthContext *avs = s->priv_data;
    AVStream *st;
    AVRational fps, samplerate;

    if (avs->error)
        return AVERROR_UNKNOWN;

    fps        = (AVRational) { avs->vi->fps_numerator,
                                avs->vi->fps_denominator };
    samplerate = (AVRational) { avs->vi->audio_samples_per_second, 1 };

    st = s->streams[stream_index];
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* AviSynth frame counts are signed int. */
        if ((timestamp >= avs->vi->num_frames) ||
            (timestamp > INT_MAX)              ||
            (timestamp < 0))
            return AVERROR_EOF;
        avs->curr_frame = timestamp;
        if (avs_has_audio(avs->vi))
            avs->curr_sample = av_rescale_q(timestamp, samplerate, fps);
    } else {
        if ((timestamp >= avs->vi->num_audio_samples) || (timestamp < 0))
            return AVERROR_EOF;
        /* Force frame granularity for seeking. */
        if (avs_has_video(avs->vi)) {
            avs->curr_frame  = av_rescale_q(timestamp, fps, samplerate);
            avs->curr_sample = av_rescale_q(avs->curr_frame, samplerate, fps);
        } else {
            avs->curr_sample = timestamp;
        }
    }
    return 0;
}
/* Demuxer definition: exposes AviSynth scripts (*.avs) as lavf input. */
AVInputFormat ff_avisynth_demuxer = {
    .name           = "avisynth",
    .long_name      = NULL_IF_CONFIG_SMALL("AviSynth script"),
    .priv_data_size = sizeof(AviSynthContext),
    .read_header    = avisynth_read_header,
    .read_packet    = avisynth_read_packet,
    .read_close     = avisynth_read_close,
    .read_seek      = avisynth_read_seek,
    .extensions     = "avs",
};