/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>
#include <math.h>

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"

typedef struct TrimContext {
    const AVClass *class;

    /*
     * AVOptions
     */
    int64_t duration;
    int64_t start_time, end_time;
    int64_t start_frame, end_frame;

    double duration_dbl;
    double start_time_dbl, end_time_dbl;

    /*
     * in the link timebase for video,
     * in 1/samplerate for audio
     */
    int64_t start_pts, end_pts;
    int64_t start_sample, end_sample;

    /*
     * number of video frames that arrived on this filter so far
     */
    int64_t nb_frames;
    /*
     * number of audio samples that arrived on this filter so far
     */
    int64_t nb_samples;
    /*
     * timestamp of the first frame in the output, in the timebase units
     */
    int64_t first_pts;
    /*
     * duration in the timebase units
     */
    int64_t duration_tb;

    int64_t next_pts;

    int eof;
} TrimContext;

static av_cold int init(AVFilterContext *ctx)
{
    TrimContext *s = ctx->priv;

    s->first_pts = AV_NOPTS_VALUE;

    return 0;
}
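
/*
 * Convert the user-supplied start/end/duration options into the input
 * link's units: the link time base for video, 1/sample_rate for audio.
 * The compatibility double-valued options are given in seconds and are
 * first scaled to microseconds (AV_TIME_BASE units).
 */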
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };

    if (s->start_time_dbl != DBL_MAX)
        s->start_time = s->start_time_dbl * 1e6;
    if (s->end_time_dbl != DBL_MAX)
        s->end_time = s->end_time_dbl * 1e6;
    if (s->duration_dbl != 0)
        s->duration = s->duration_dbl * 1e6;

    if (s->start_time != INT64_MAX) {
        int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
        if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
            s->start_pts = start_pts;
    }
    if (s->end_time != INT64_MAX) {
        int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
        if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
            s->end_pts = end_pts;
    }
    if (s->duration)
        s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    return 0;
}
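
/*
 * COMMON_OPTS defines the options shared by trim and atrim: the
 * AV_OPT_TYPE_DURATION variants and the raw start_pts/end_pts values in
 * the link timebase.  COMPAT_OPTS keeps the older double-valued options
 * that take seconds; config_input() converts them to microseconds.
 */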
#define OFFSET(x) offsetof(TrimContext, x)
#define COMMON_OPTS \
    { "starti",      "Timestamp of the first frame that " \
        "should be passed",        OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "endi",        "Timestamp of the first frame that " \
        "should be dropped again", OFFSET(end_time),   AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "start_pts",   "Timestamp of the first frame that should be " \
        " passed",                 OFFSET(start_pts),  AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "end_pts",     "Timestamp of the first frame that should be " \
        "dropped again",           OFFSET(end_pts),    AV_OPT_TYPE_INT64,    { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
    { "durationi",   "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },

#define COMPAT_OPTS \
    { "start",       "Timestamp in seconds of the first frame that " \
        "should be passed",        OFFSET(start_time_dbl), AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
    { "end",         "Timestamp in seconds of the first frame that " \
        "should be dropped again", OFFSET(end_time_dbl),   AV_OPT_TYPE_DOUBLE, { .dbl = DBL_MAX }, -DBL_MAX, DBL_MAX, FLAGS }, \
    { "duration",    "Maximum duration of the output in seconds", OFFSET(duration_dbl), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, DBL_MAX, FLAGS },

#if CONFIG_TRIM_FILTER
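/*
 * Video path: pass a frame through when its frame number or pts falls
 * inside the configured range and drop it otherwise; once the end of the
 * range is reached, mark the input link closed and set the EOF flag.
 */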
static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
        drop = 1;
        if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
            drop = 0;
        if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts >= s->start_pts)
            drop = 0;
        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
        s->first_pts = frame->pts;

    if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
        drop = 1;

        if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
            drop = 0;
        if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
            frame->pts < s->end_pts)
            drop = 0;
        if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
            frame->pts - s->first_pts < s->duration_tb)
            drop = 0;

        if (drop) {
            s->eof = inlink->closed = 1;
            goto drop;
        }
    }

    s->nb_frames++;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_frames++;
    av_frame_free(&frame);
    return 0;
}

#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption trim_options[] = {
    COMMON_OPTS
    { "start_frame", "Number of the first frame that should be passed "
        "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
    { "end_frame",   "Number of the first frame that should be dropped "
        "again",         OFFSET(end_frame),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
    COMPAT_OPTS
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(trim);

static const AVFilterPad trim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = trim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad trim_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_trim = {
    .name        = "trim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &trim_class,
    .inputs      = trim_inputs,
    .outputs     = trim_outputs,
};
#endif // CONFIG_TRIM_FILTER

#if CONFIG_ATRIM_FILTER
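/*
 * Audio path: same idea as trim_filter_frame(), but at sample granularity.
 * Frames that straddle a range boundary are cropped: the kept samples are
 * copied into a new buffer and the pts is shifted accordingly.
 */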
static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    TrimContext       *s = ctx->priv;
    int64_t start_sample, end_sample = frame->nb_samples;
    int64_t pts;
    int drop;

    /* drop everything if EOF has already been returned */
    if (s->eof) {
        av_frame_free(&frame);
        return 0;
    }

    if (frame->pts != AV_NOPTS_VALUE)
        pts = av_rescale_q(frame->pts, inlink->time_base,
                           (AVRational){ 1, inlink->sample_rate });
    else
        pts = s->next_pts;
    s->next_pts = pts + frame->nb_samples;

    /* check if at least a part of the frame is after the start time */
    if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
        start_sample = 0;
    } else {
        drop = 1;
        start_sample = frame->nb_samples;

        if (s->start_sample >= 0 &&
            s->nb_samples + frame->nb_samples > s->start_sample) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
        }

        if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts + frame->nb_samples > s->start_pts) {
            drop         = 0;
            start_sample = FFMIN(start_sample, s->start_pts - pts);
        }

        if (drop)
            goto drop;
    }

    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = pts + start_sample;

    /* check if at least a part of the frame is before the end time */
    if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
        end_sample = frame->nb_samples;
    } else {
        drop = 1;
        end_sample = 0;

        if (s->end_sample != INT64_MAX &&
            s->nb_samples < s->end_sample) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
        }

        if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
            pts < s->end_pts) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->end_pts - pts);
        }

        if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
            drop       = 0;
            end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
        }

        if (drop) {
            s->eof = inlink->closed = 1;
            goto drop;
        }
    }

    s->nb_samples += frame->nb_samples;
    start_sample   = FFMAX(0, start_sample);
    end_sample     = FFMIN(frame->nb_samples, end_sample);
    av_assert0(start_sample < end_sample || (start_sample == end_sample && !frame->nb_samples));

    if (start_sample) {
        AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, frame);
        av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
                        out->nb_samples, inlink->channels,
                        frame->format);
        if (out->pts != AV_NOPTS_VALUE)
            out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
                                     inlink->time_base);

        av_frame_free(&frame);
        frame = out;
    } else
        frame->nb_samples = end_sample;

    return ff_filter_frame(ctx->outputs[0], frame);

drop:
    s->nb_samples += frame->nb_samples;
    av_frame_free(&frame);
    return 0;
}

#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption atrim_options[] = {
    COMMON_OPTS
    { "start_sample", "Number of the first audio sample that should be "
        "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
    { "end_sample",   "Number of the first audio sample that should be "
        "dropped again",        OFFSET(end_sample),   AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
    COMPAT_OPTS
    { NULL }
};
#undef FLAGS

AVFILTER_DEFINE_CLASS(atrim);

static const AVFilterPad atrim_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = atrim_filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad atrim_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_af_atrim = {
    .name        = "atrim",
    .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
    .init        = init,
    .priv_size   = sizeof(TrimContext),
    .priv_class  = &atrim_class,
    .inputs      = atrim_inputs,
    .outputs     = atrim_outputs,
};
#endif // CONFIG_ATRIM_FILTER