/*
 * FITS image decoder
 * Copyright (c) 2017 Paras Chadha
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FITS image decoder
 *
 * Specification: https://fits.gsfc.nasa.gov/fits_standard.html Version 3.0
 *
 * Supports all 2-D images along with the BZERO, BSCALE and BLANK keywords.
 * RGB(A) images are supported when NAXIS3 = 3 or 4, i.e. planes stored in RGBA order;
 * CTYPE = 'RGB ' should also be present.
 * To interpret the data, values are linearly scaled using min-max scaling,
 * except for RGB images.
 */
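
/*
 * Illustration (not part of the decoder): for a non-RGB image each raw sample
 * value t is mapped onto the output range roughly as
 *
 *     out = lrint((t - data_min) / (data_max - data_min) * ((1 << bits) - 1))
 *
 * where bits is 8 or 16 depending on the output pixel format; see the
 * CASE_GRAY macro in fits_decode_frame() below.
 */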

#include "avcodec.h"
#include "internal.h"
#include <float.h>
#include <math.h>
#include "libavutil/intreadwrite.h"
#include "libavutil/intfloat.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "fits.h"

typedef struct FITSContext {
    const AVClass *class;
    int blank_val;
} FITSContext;

/**
 * Calculate the data_min and data_max values from the data.
 * This is called if the values are not present in the header.
 *
 * @param ptr8 pointer to the data
 * @param header pointer to the header
 * @param end pointer to end of packet
 * @return 0 on success, otherwise AVERROR_INVALIDDATA
 */
static int fill_data_min_max(const uint8_t *ptr8, FITSHeader *header, const uint8_t *end)
{
    uint8_t t8;
    int16_t t16;
    int32_t t32;
    int64_t t64;
    float   tflt;
    double  tdbl;
    int i, j;

    header->data_min = DBL_MAX;
    header->data_max = -DBL_MAX; /* most negative finite double, so any sample raises it */
    switch (header->bitpix) {
#define CASE_N(a, t, rd) \
    case a: \
        for (i = 0; i < header->naxisn[1]; i++) { \
            for (j = 0; j < header->naxisn[0]; j++) { \
                t = rd; \
                if (!header->blank_found || t != header->blank) { \
                    if (t > header->data_max) \
                        header->data_max = t; \
                    if (t < header->data_min) \
                        header->data_min = t; \
                } \
                ptr8 += abs(a) >> 3; \
            } \
        } \
        break

    CASE_N(-64, tdbl, av_int2double(AV_RB64(ptr8)));
    CASE_N(-32, tflt, av_int2float(AV_RB32(ptr8)));
    CASE_N(8, t8, ptr8[0]);
    CASE_N(16, t16, AV_RB16(ptr8));
    CASE_N(32, t32, AV_RB32(ptr8));
    CASE_N(64, t64, AV_RB64(ptr8));
    default:
        return AVERROR_INVALIDDATA;
    }
    return 0;
}

/**
 * Read the FITS header and store the values in the FITSHeader pointed to by header.
 *
 * @param avctx AVCodec context
 * @param ptr pointer to pointer to the data
 * @param header pointer to the FITSHeader
 * @param end pointer to end of packet
 * @param metadata pointer to pointer to AVDictionary to store metadata
 * @return 0 on success, otherwise AVERROR_INVALIDDATA
 */
static int fits_read_header(AVCodecContext *avctx, const uint8_t **ptr, FITSHeader *header,
                            const uint8_t *end, AVDictionary **metadata)
{
    const uint8_t *ptr8 = *ptr;
    int lines_read, bytes_left, i, ret;
    size_t size;

    lines_read = 1; // accounts for the first header line, SIMPLE or XTENSION, which is not included in the packet
    avpriv_fits_header_init(header, STATE_BITPIX);
    do {
        if (end - ptr8 < 80)
            return AVERROR_INVALIDDATA;
        ret = avpriv_fits_header_parse_line(avctx, header, ptr8, &metadata);
        ptr8 += 80;
        lines_read++;
    } while (!ret);
    if (ret < 0)
        return ret;
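
    /*
     * FITS headers are padded to logical records of 2880 bytes, i.e. 36
     * 80-byte lines, so skip the remainder of the current record.
     */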
    bytes_left = (((lines_read + 35) / 36) * 36 - lines_read) * 80;
    if (end - ptr8 < bytes_left)
        return AVERROR_INVALIDDATA;
    ptr8 += bytes_left;

    if (header->rgb && (header->naxis != 3 || (header->naxisn[2] != 3 && header->naxisn[2] != 4))) {
        av_log(avctx, AV_LOG_ERROR, "File contains RGB image but NAXIS = %d and NAXIS3 = %d\n",
               header->naxis, header->naxisn[2]);
        return AVERROR_INVALIDDATA;
    }

    if (!header->rgb && header->naxis != 2) {
        av_log(avctx, AV_LOG_ERROR, "unsupported number of dimensions, NAXIS = %d\n", header->naxis);
        return AVERROR_INVALIDDATA;
    }

    if (header->blank_found && (header->bitpix == -32 || header->bitpix == -64)) {
        av_log(avctx, AV_LOG_WARNING, "BLANK keyword found but BITPIX = %d. Ignoring BLANK.\n", header->bitpix);
        header->blank_found = 0;
    }

    size = abs(header->bitpix) >> 3;
    for (i = 0; i < header->naxis; i++) {
        if (size == 0 || header->naxisn[i] > SIZE_MAX / size) {
            av_log(avctx, AV_LOG_ERROR, "unsupported size of FITS image\n");
            return AVERROR_INVALIDDATA;
        }
        size *= header->naxisn[i];
    }

    if (end - ptr8 < size)
        return AVERROR_INVALIDDATA;
    *ptr = ptr8;

    if (!header->rgb && (!header->data_min_found || !header->data_max_found)) {
        ret = fill_data_min_max(ptr8, header, end);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "invalid BITPIX, %d\n", header->bitpix);
            return ret;
        }
    } else {
        /*
         * Instead of applying bscale and bzero to every element,
         * we can apply the inverse transformation to data_min and data_max.
         */
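        /*
         * FITS defines physical = raw * BSCALE + BZERO, so the header's
         * physical data_min/data_max are converted back to raw values here.
         */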
        header->data_min = (header->data_min - header->bzero) / header->bscale;
        header->data_max = (header->data_max - header->bzero) / header->bscale;
    }

    if (!header->rgb && header->data_min >= header->data_max) {
        if (header->data_min > header->data_max) {
            av_log(avctx, AV_LOG_ERROR, "data min/max (%g %g) is invalid\n",
                   header->data_min, header->data_max);
            return AVERROR_INVALIDDATA;
        }
        av_log(avctx, AV_LOG_WARNING, "data min/max indicates a blank image\n");
        header->data_max++;
    }
    return 0;
}

static int fits_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    AVFrame *p = data;
    const uint8_t *ptr8 = avpkt->data, *end;
    uint8_t t8;
    int16_t t16;
    int32_t t32;
    int64_t t64;
    float   tflt;
    double  tdbl;
    int ret, i, j, k;
    const int map[] = {2, 0, 1, 3}; // map the RGBA plane order used in the FITS file to FFmpeg's GBRA plane indices
    uint8_t *dst8;
    uint16_t *dst16;
    uint64_t t;
    FITSHeader header;
    FITSContext *fitsctx = avctx->priv_data;

    end = ptr8 + avpkt->size;
    p->metadata = NULL;
    ret = fits_read_header(avctx, &ptr8, &header, end, &p->metadata);
    if (ret < 0)
        return ret;

    if (header.rgb) {
        if (header.bitpix == 8) {
            if (header.naxisn[2] == 3) {
                avctx->pix_fmt = AV_PIX_FMT_GBRP;
            } else {
                avctx->pix_fmt = AV_PIX_FMT_GBRAP;
            }
        } else if (header.bitpix == 16) {
            if (header.naxisn[2] == 3) {
                avctx->pix_fmt = AV_PIX_FMT_GBRP16;
            } else {
                avctx->pix_fmt = AV_PIX_FMT_GBRAP16;
            }
        } else {
            av_log(avctx, AV_LOG_ERROR, "unsupported BITPIX = %d\n", header.bitpix);
            return AVERROR_INVALIDDATA;
        }
    } else {
        if (header.bitpix == 8) {
            avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        } else {
            avctx->pix_fmt = AV_PIX_FMT_GRAY16;
        }
    }

    if ((ret = ff_set_dimensions(avctx, header.naxisn[0], header.naxisn[1])) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;

    /*
     * FITS stores images with the bottom row first. Therefore we have
     * to fill the image from bottom to top.
     */
    if (header.rgb) {
        switch (header.bitpix) {
#define CASE_RGB(cas, dst, type, dref) \
        case cas: \
            for (k = 0; k < header.naxisn[2]; k++) { \
                for (i = 0; i < avctx->height; i++) { \
                    dst = (type *) (p->data[map[k]] + (avctx->height - i - 1) * p->linesize[map[k]]); \
                    for (j = 0; j < avctx->width; j++) { \
                        t32 = dref(ptr8); \
                        if (!header.blank_found || t32 != header.blank) { \
                            t = t32 * header.bscale + header.bzero; \
                        } else { \
                            t = fitsctx->blank_val; \
                        } \
                        *dst++ = (type) t; \
                        ptr8 += cas >> 3; \
                    } \
                } \
            } \
            break

        CASE_RGB(8, dst8, uint8_t, *);
        CASE_RGB(16, dst16, uint16_t, AV_RB16);
        }
    } else {
        double scale = header.data_max - header.data_min;

        if (scale <= 0 || !isfinite(scale)) {
            scale = 1;
        }
        scale = 1 / scale;
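
        /* linearly map [data_min, data_max] onto the full range of the output pixel type */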
        switch (header.bitpix) {
#define CASE_GRAY(cas, dst, type, t, rd) \
        case cas: \
            for (i = 0; i < avctx->height; i++) { \
                dst = (type *) (p->data[0] + (avctx->height - i - 1) * p->linesize[0]); \
                for (j = 0; j < avctx->width; j++) { \
                    t = rd; \
                    if (!header.blank_found || t != header.blank) { \
                        *dst++ = lrint(((t - header.data_min) * ((1 << (sizeof(type) * 8)) - 1)) * scale); \
                    } else { \
                        *dst++ = fitsctx->blank_val; \
                    } \
                    ptr8 += abs(cas) >> 3; \
                } \
            } \
            break

        CASE_GRAY(-64, dst16, uint16_t, tdbl, av_int2double(AV_RB64(ptr8)));
        CASE_GRAY(-32, dst16, uint16_t, tflt, av_int2float(AV_RB32(ptr8)));
        CASE_GRAY(8, dst8, uint8_t, t8, ptr8[0]);
        CASE_GRAY(16, dst16, uint16_t, t16, AV_RB16(ptr8));
        CASE_GRAY(32, dst16, uint16_t, t32, AV_RB32(ptr8));
        CASE_GRAY(64, dst16, uint16_t, t64, AV_RB64(ptr8));
        default:
            av_log(avctx, AV_LOG_ERROR, "invalid BITPIX, %d\n", header.bitpix);
            return AVERROR_INVALIDDATA;
        }
    }

    p->key_frame = 1;
    p->pict_type = AV_PICTURE_TYPE_I;

    *got_frame = 1;

    return avpkt->size;
}

static const AVOption fits_options[] = {
    { "blank_value", "value that is used to replace BLANK pixels in data array", offsetof(FITSContext, blank_val), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 65535, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
    { NULL },
};
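
/*
 * Example (for illustration only, assuming the FITS decoder is auto-selected
 * for the input): the private option above can be passed as an input option
 * on the ffmpeg command line, e.g.
 *
 *     ffmpeg -blank_value 65535 -i input.fits output.png
 *
 * which renders BLANK samples as white in 16-bit grayscale output.
 */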

static const AVClass fits_decoder_class = {
    .class_name = "FITS decoder",
    .item_name  = av_default_item_name,
    .option     = fits_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_fits_decoder = {
    .name           = "fits",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FITS,
    .priv_data_size = sizeof(FITSContext),
    .decode         = fits_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Flexible Image Transport System"),
    .priv_class     = &fits_decoder_class,
};