You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

319 lines
11KB

  1. /*
  2. * FITS image decoder
  3. * Copyright (c) 2017 Paras Chadha
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * FITS image decoder
  24. *
  25. * Specification: https://fits.gsfc.nasa.gov/fits_standard.html Version 3.0
  26. *
  27. * Supports all 2-D images along with the BZERO, BSCALE and BLANK keywords.
  28. * RGBA images are supported as NAXIS3 = 3 or 4, i.e. planes in RGBA order. CTYPE = 'RGB ' should also be present.
  29. * To interpret the data, values are linearly scaled using min-max scaling, except for RGB images.
  30. */
  31. #include "avcodec.h"
  32. #include "internal.h"
  33. #include <float.h>
  34. #include "libavutil/intreadwrite.h"
  35. #include "libavutil/intfloat.h"
  36. #include "libavutil/dict.h"
  37. #include "libavutil/opt.h"
  38. #include "fits.h"
/**
 * Decoder private context, exposed through the AVOptions below.
 */
typedef struct FITSContext {
    const AVClass *class; ///< required first member for AVOptions
    int blank_val;        ///< user-selected value substituted for pixels equal to the BLANK keyword
} FITSContext;
  43. /**
  44. * Calculate the data_min and data_max values from the data.
  45. * This is called if the values are not present in the header.
  46. * @param ptr8 pointer to the data
  47. * @param header pointer to the header
  48. * @param end pointer to end of packet
  49. * @return 0 if calculated successfully otherwise AVERROR_INVALIDDATA
  50. */
  51. static int fill_data_min_max(const uint8_t *ptr8, FITSHeader *header, const uint8_t *end)
  52. {
  53. uint8_t t8;
  54. int16_t t16;
  55. int32_t t32;
  56. int64_t t64;
  57. float tflt;
  58. double tdbl;
  59. int i, j;
  60. header->data_min = DBL_MAX;
  61. header->data_max = DBL_MIN;
  62. switch (header->bitpix) {
  63. #define CASE_N(a, t, rd) \
  64. case a: \
  65. for (i = 0; i < header->naxisn[1]; i++) { \
  66. for (j = 0; j < header->naxisn[0]; j++) { \
  67. t = rd; \
  68. if (!header->blank_found || t != header->blank) { \
  69. if (t > header->data_max) \
  70. header->data_max = t; \
  71. if (t < header->data_min) \
  72. header->data_min = t; \
  73. } \
  74. ptr8 += abs(a) >> 3; \
  75. } \
  76. } \
  77. break
  78. CASE_N(-64, tdbl, av_int2double(AV_RB64(ptr8)));
  79. CASE_N(-32, tflt, av_int2float(AV_RB32(ptr8)));
  80. CASE_N(8, t8, ptr8[0]);
  81. CASE_N(16, t16, AV_RB16(ptr8));
  82. CASE_N(32, t32, AV_RB32(ptr8));
  83. CASE_N(64, t64, AV_RB64(ptr8));
  84. default:
  85. return AVERROR_INVALIDDATA;
  86. }
  87. return 0;
  88. }
/**
 * Read the fits header and store the values in FITSHeader pointed by header
 * @param avctx AVCodec context
 * @param ptr pointer to pointer to the data
 * @param header pointer to the FITSHeader
 * @param end pointer to end of packet
 * @param metadata pointer to pointer to AVDictionary to store metadata
 * @return 0 if calculated successfully otherwise AVERROR_INVALIDDATA
 */
static int fits_read_header(AVCodecContext *avctx, const uint8_t **ptr, FITSHeader *header,
                            const uint8_t *end, AVDictionary **metadata)
{
    const uint8_t *ptr8 = *ptr;
    int lines_read, bytes_left, i, ret;
    size_t size;

    lines_read = 1; // to account for first header line, SIMPLE or XTENSION which is not included in packet...
    avpriv_fits_header_init(header, STATE_BITPIX);
    do {
        // every FITS header card is exactly 80 bytes
        if (end - ptr8 < 80)
            return AVERROR_INVALIDDATA;
        ret = avpriv_fits_header_parse_line(avctx, header, ptr8, &metadata);
        ptr8 += 80;
        lines_read++;
    } while (!ret); // loop ends on END card (ret > 0) or parse error (ret < 0)
    if (ret < 0)
        return ret;

    // skip padding: a FITS header occupies whole 2880-byte blocks (36 cards of 80 bytes)
    bytes_left = (((lines_read + 35) / 36) * 36 - lines_read) * 80;
    if (end - ptr8 < bytes_left)
        return AVERROR_INVALIDDATA;
    ptr8 += bytes_left;

    // RGB(A) images must be 3-dimensional with 3 or 4 planes in NAXIS3
    if (header->rgb && (header->naxis != 3 || (header->naxisn[2] != 3 && header->naxisn[2] != 4))) {
        av_log(avctx, AV_LOG_ERROR, "File contains RGB image but NAXIS = %d and NAXIS3 = %d\n", header->naxis, header->naxisn[2]);
        return AVERROR_INVALIDDATA;
    }

    // grayscale images must be exactly 2-dimensional
    if (!header->rgb && header->naxis != 2) {
        av_log(avctx, AV_LOG_ERROR, "unsupported number of dimensions, NAXIS = %d\n", header->naxis);
        return AVERROR_INVALIDDATA;
    }

    // BLANK is only meaningful for integer data; drop it for float BITPIX
    if (header->blank_found && (header->bitpix == -32 || header->bitpix == -64)) {
        av_log(avctx, AV_LOG_WARNING, "BLANK keyword found but BITPIX = %d\n. Ignoring BLANK", header->bitpix);
        header->blank_found = 0;
    }

    // total payload size = bytes-per-sample * product of all axis lengths,
    // with overflow guarded against before each multiplication
    size = abs(header->bitpix) >> 3;
    for (i = 0; i < header->naxis; i++) {
        if (header->naxisn[i] > SIZE_MAX / size) {
            av_log(avctx, AV_LOG_ERROR, "unsupported size of FITS image");
            return AVERROR_INVALIDDATA;
        }
        size *= header->naxisn[i];
    }

    if (end - ptr8 < size)
        return AVERROR_INVALIDDATA;
    *ptr = ptr8; // advance caller's pointer to the start of the data array

    if (!header->rgb && (!header->data_min_found || !header->data_max_found)) {
        // header lacked DATAMIN/DATAMAX; derive them by scanning the data
        ret = fill_data_min_max(ptr8, header, end);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "invalid BITPIX, %d\n", header->bitpix);
            return ret;
        }
    } else {
        /*
         * instead of applying bscale and bzero to every element,
         * we can do inverse transformation on data_min and data_max
         */
        header->data_min = (header->data_min - header->bzero) / header->bscale;
        header->data_max = (header->data_max - header->bzero) / header->bscale;
    }
    return 0;
}
/**
 * Decode one FITS image packet into an AVFrame.
 * Parses the header, picks a pixel format from BITPIX/NAXIS3, then unpacks
 * the big-endian data array bottom-to-top into the frame.
 */
static int fits_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    AVFrame *p = data;
    const uint8_t *ptr8 = avpkt->data, *end;
    uint8_t t8;
    int16_t t16;
    int32_t t32;
    int64_t t64;
    float tflt;
    double tdbl;
    int ret, i, j, k;
    const int map[] = {2, 0, 1, 3}; // mapping from GBRA -> RGBA as RGBA is to be stored in FITS file..
    uint8_t *dst8;
    uint16_t *dst16;
    uint64_t t;
    FITSHeader header;
    FITSContext *fitsctx = avctx->priv_data;

    end = ptr8 + avpkt->size;
    p->metadata = NULL;
    ret = fits_read_header(avctx, &ptr8, &header, end, &p->metadata);
    if (ret < 0)
        return ret;

    // choose output pixel format: planar GBR(A) for RGB files, grayscale otherwise
    if (header.rgb) {
        if (header.bitpix == 8) {
            if (header.naxisn[2] == 3) {
                avctx->pix_fmt = AV_PIX_FMT_GBRP;
            } else {
                avctx->pix_fmt = AV_PIX_FMT_GBRAP;
            }
        } else if (header.bitpix == 16) {
            if (header.naxisn[2] == 3) {
                avctx->pix_fmt = AV_PIX_FMT_GBRP16;
            } else {
                avctx->pix_fmt = AV_PIX_FMT_GBRAP16;
            }
        } else {
            av_log(avctx, AV_LOG_ERROR, "unsupported BITPIX = %d\n", header.bitpix);
            return AVERROR_INVALIDDATA;
        }
    } else {
        if (header.bitpix == 8) {
            avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        } else {
            // all wider integer and float BITPIX values are rescaled to 16-bit gray
            avctx->pix_fmt = AV_PIX_FMT_GRAY16;
        }
    }

    if ((ret = ff_set_dimensions(avctx, header.naxisn[0], header.naxisn[1])) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
        return ret;

    /*
     * FITS stores images with bottom row first. Therefore we have
     * to fill the image from bottom to top.
     */
    if (header.rgb) {
        switch (header.bitpix) {
/* Unpack one plane at a time; map[] routes FITS plane order (RGBA) into the
 * frame's GBRA plane layout. BLANK samples are replaced by the user option;
 * all others get BSCALE/BZERO applied. */
#define CASE_RGB(cas, dst, type, dref) \
    case cas: \
        for (k = 0; k < header.naxisn[2]; k++) { \
            for (i = 0; i < avctx->height; i++) { \
                dst = (type *) (p->data[map[k]] + (avctx->height - i - 1) * p->linesize[map[k]]); \
                for (j = 0; j < avctx->width; j++) { \
                    t32 = dref(ptr8); \
                    if (!header.blank_found || t32 != header.blank) { \
                        t = t32 * header.bscale + header.bzero; \
                    } else { \
                        t = fitsctx->blank_val; \
                    } \
                    *dst++ = (type) t; \
                    ptr8 += cas >> 3; \
                } \
            } \
        } \
        break

            CASE_RGB(8, dst8, uint8_t, *);
            CASE_RGB(16, dst16, uint16_t, AV_RB16);
        }
    } else {
        switch (header.bitpix) {
/* Grayscale path: each sample is min-max rescaled into the full range of the
 * destination type; BLANK samples become the user-selected replacement. */
#define CASE_GRAY(cas, dst, type, t, rd) \
    case cas: \
        for (i = 0; i < avctx->height; i++) { \
            dst = (type *) (p->data[0] + (avctx->height - i - 1) * p->linesize[0]); \
            for (j = 0; j < avctx->width; j++) { \
                t = rd; \
                if (!header.blank_found || t != header.blank) { \
                    t = ((t - header.data_min) * ((1 << (sizeof(type) * 8)) - 1)) / (header.data_max - header.data_min); \
                } else { \
                    t = fitsctx->blank_val; \
                } \
                *dst++ = (type) t; \
                ptr8 += abs(cas) >> 3; \
            } \
        } \
        break

            CASE_GRAY(-64, dst16, uint16_t, tdbl, av_int2double(AV_RB64(ptr8)));
            CASE_GRAY(-32, dst16, uint16_t, tflt, av_int2float(AV_RB32(ptr8)));
            CASE_GRAY(8, dst8, uint8_t, t8, ptr8[0]);
            CASE_GRAY(16, dst16, uint16_t, t16, AV_RB16(ptr8));
            CASE_GRAY(32, dst16, uint16_t, t32, AV_RB32(ptr8));
            CASE_GRAY(64, dst16, uint16_t, t64, AV_RB64(ptr8));
            default:
                av_log(avctx, AV_LOG_ERROR, "invalid BITPIX, %d\n", header.bitpix);
                return AVERROR_INVALIDDATA;
        }
    }

    p->key_frame = 1;
    p->pict_type = AV_PICTURE_TYPE_I;
    *got_frame = 1;
    return avpkt->size;
}
/* Private decoder options: lets the user choose the value written for pixels
 * that match the BLANK header keyword. */
static const AVOption fits_options[] = {
    { "blank_value", "value that is used to replace BLANK pixels in data array", offsetof(FITSContext, blank_val), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 65535, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM},
    { NULL },
};
/* AVClass exposing fits_options through the AVOptions API. */
static const AVClass fits_decoder_class = {
    .class_name = "FITS decoder",
    .item_name  = av_default_item_name,
    .option     = fits_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
/* Codec registration entry for the FITS image decoder. */
AVCodec ff_fits_decoder = {
    .name           = "fits",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FITS,
    .priv_data_size = sizeof(FITSContext),
    .decode         = fits_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Flexible Image Transport System"),
    .priv_class     = &fits_decoder_class
};