@ NOTE(review): repository web-UI text and per-line numbering artifacts from
@ the page this file was scraped from have been removed; the content below is
@ the ARMv5TE-optimized simple IDCT.
/*
 * Simple IDCT
 *
 * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2006 Mans Rullgard <mans@mansr.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
  23. #include "libavutil/arm/asm.S"
  24. #define W1 22725 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  25. #define W2 21407 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  26. #define W3 19266 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  27. #define W4 16383 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  28. #define W5 12873 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  29. #define W6 8867 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  30. #define W7 4520 /* cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
  31. #define ROW_SHIFT 11
  32. #define COL_SHIFT 20
  33. #define W13 (W1 | (W3 << 16))
  34. #define W26 (W2 | (W6 << 16))
  35. #define W57 (W5 | (W7 << 16))
  36. function idct_row_armv5te
  37. str lr, [sp, #-4]!
  38. ldrd v1, v2, [a1, #8]
  39. ldrd a3, a4, [a1] /* a3 = row[1:0], a4 = row[3:2] */
  40. orrs v1, v1, v2
  41. itt eq
  42. cmpeq v1, a4
  43. cmpeq v1, a3, lsr #16
  44. beq row_dc_only
  45. mov v1, #(1<<(ROW_SHIFT-1))
  46. mov ip, #16384
  47. sub ip, ip, #1 /* ip = W4 */
  48. smlabb v1, ip, a3, v1 /* v1 = W4*row[0]+(1<<(RS-1)) */
  49. ldr ip, =W26 /* ip = W2 | (W6 << 16) */
  50. smultb a2, ip, a4
  51. smulbb lr, ip, a4
  52. add v2, v1, a2
  53. sub v3, v1, a2
  54. sub v4, v1, lr
  55. add v1, v1, lr
  56. ldr ip, =W13 /* ip = W1 | (W3 << 16) */
  57. ldr lr, =W57 /* lr = W5 | (W7 << 16) */
  58. smulbt v5, ip, a3
  59. smultt v6, lr, a4
  60. smlatt v5, ip, a4, v5
  61. smultt a2, ip, a3
  62. smulbt v7, lr, a3
  63. sub v6, v6, a2
  64. smulbt a2, ip, a4
  65. smultt fp, lr, a3
  66. sub v7, v7, a2
  67. smulbt a2, lr, a4
  68. ldrd a3, a4, [a1, #8] /* a3=row[5:4] a4=row[7:6] */
  69. sub fp, fp, a2
  70. orrs a2, a3, a4
  71. beq 1f
  72. smlabt v5, lr, a3, v5
  73. smlabt v6, ip, a3, v6
  74. smlatt v5, lr, a4, v5
  75. smlabt v6, lr, a4, v6
  76. smlatt v7, lr, a3, v7
  77. smlatt fp, ip, a3, fp
  78. smulbt a2, ip, a4
  79. smlatt v7, ip, a4, v7
  80. sub fp, fp, a2
  81. ldr ip, =W26 /* ip = W2 | (W6 << 16) */
  82. mov a2, #16384
  83. sub a2, a2, #1 /* a2 = W4 */
  84. smulbb a2, a2, a3 /* a2 = W4*row[4] */
  85. smultb lr, ip, a4 /* lr = W6*row[6] */
  86. add v1, v1, a2 /* v1 += W4*row[4] */
  87. add v1, v1, lr /* v1 += W6*row[6] */
  88. add v4, v4, a2 /* v4 += W4*row[4] */
  89. sub v4, v4, lr /* v4 -= W6*row[6] */
  90. smulbb lr, ip, a4 /* lr = W2*row[6] */
  91. sub v2, v2, a2 /* v2 -= W4*row[4] */
  92. sub v2, v2, lr /* v2 -= W2*row[6] */
  93. sub v3, v3, a2 /* v3 -= W4*row[4] */
  94. add v3, v3, lr /* v3 += W2*row[6] */
  95. 1: add a2, v1, v5
  96. mov a3, a2, lsr #11
  97. bic a3, a3, #0x1f0000
  98. sub a2, v2, v6
  99. mov a2, a2, lsr #11
  100. add a3, a3, a2, lsl #16
  101. add a2, v3, v7
  102. mov a4, a2, lsr #11
  103. bic a4, a4, #0x1f0000
  104. add a2, v4, fp
  105. mov a2, a2, lsr #11
  106. add a4, a4, a2, lsl #16
  107. strd a3, a4, [a1]
  108. sub a2, v4, fp
  109. mov a3, a2, lsr #11
  110. bic a3, a3, #0x1f0000
  111. sub a2, v3, v7
  112. mov a2, a2, lsr #11
  113. add a3, a3, a2, lsl #16
  114. add a2, v2, v6
  115. mov a4, a2, lsr #11
  116. bic a4, a4, #0x1f0000
  117. sub a2, v1, v5
  118. mov a2, a2, lsr #11
  119. add a4, a4, a2, lsl #16
  120. strd a3, a4, [a1, #8]
  121. ldr pc, [sp], #4
  122. row_dc_only:
  123. orr a3, a3, a3, lsl #16
  124. bic a3, a3, #0xe000
  125. mov a3, a3, lsl #3
  126. mov a4, a3
  127. strd a3, a4, [a1]
  128. strd a3, a4, [a1, #8]
  129. ldr pc, [sp], #4
  130. endfunc
  131. .macro idct_col
  132. ldr a4, [a1] /* a4 = col[1:0] */
  133. mov ip, #16384
  134. sub ip, ip, #1 /* ip = W4 */
  135. #if 0
  136. mov v1, #(1<<(COL_SHIFT-1))
  137. smlabt v2, ip, a4, v1 /* v2 = W4*col[1] + (1<<(COL_SHIFT-1)) */
  138. smlabb v1, ip, a4, v1 /* v1 = W4*col[0] + (1<<(COL_SHIFT-1)) */
  139. ldr a4, [a1, #(16*4)]
  140. #else
  141. mov v1, #((1<<(COL_SHIFT-1))/W4) /* this matches the C version */
  142. add v2, v1, a4, asr #16
  143. rsb v2, v2, v2, lsl #14
  144. mov a4, a4, lsl #16
  145. add v1, v1, a4, asr #16
  146. ldr a4, [a1, #(16*4)]
  147. rsb v1, v1, v1, lsl #14
  148. #endif
  149. smulbb lr, ip, a4
  150. smulbt a3, ip, a4
  151. sub v3, v1, lr
  152. sub v5, v1, lr
  153. add v7, v1, lr
  154. add v1, v1, lr
  155. sub v4, v2, a3
  156. sub v6, v2, a3
  157. add fp, v2, a3
  158. ldr ip, =W26
  159. ldr a4, [a1, #(16*2)]
  160. add v2, v2, a3
  161. smulbb lr, ip, a4
  162. smultb a3, ip, a4
  163. add v1, v1, lr
  164. sub v7, v7, lr
  165. add v3, v3, a3
  166. sub v5, v5, a3
  167. smulbt lr, ip, a4
  168. smultt a3, ip, a4
  169. add v2, v2, lr
  170. sub fp, fp, lr
  171. add v4, v4, a3
  172. ldr a4, [a1, #(16*6)]
  173. sub v6, v6, a3
  174. smultb lr, ip, a4
  175. smulbb a3, ip, a4
  176. add v1, v1, lr
  177. sub v7, v7, lr
  178. sub v3, v3, a3
  179. add v5, v5, a3
  180. smultt lr, ip, a4
  181. smulbt a3, ip, a4
  182. add v2, v2, lr
  183. sub fp, fp, lr
  184. sub v4, v4, a3
  185. add v6, v6, a3
  186. stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp}
  187. ldr ip, =W13
  188. ldr a4, [a1, #(16*1)]
  189. ldr lr, =W57
  190. smulbb v1, ip, a4
  191. smultb v3, ip, a4
  192. smulbb v5, lr, a4
  193. smultb v7, lr, a4
  194. smulbt v2, ip, a4
  195. smultt v4, ip, a4
  196. smulbt v6, lr, a4
  197. smultt fp, lr, a4
  198. rsb v4, v4, #0
  199. ldr a4, [a1, #(16*3)]
  200. rsb v3, v3, #0
  201. smlatb v1, ip, a4, v1
  202. smlatb v3, lr, a4, v3
  203. smulbb a3, ip, a4
  204. smulbb a2, lr, a4
  205. sub v5, v5, a3
  206. sub v7, v7, a2
  207. smlatt v2, ip, a4, v2
  208. smlatt v4, lr, a4, v4
  209. smulbt a3, ip, a4
  210. smulbt a2, lr, a4
  211. sub v6, v6, a3
  212. ldr a4, [a1, #(16*5)]
  213. sub fp, fp, a2
  214. smlabb v1, lr, a4, v1
  215. smlabb v3, ip, a4, v3
  216. smlatb v5, lr, a4, v5
  217. smlatb v7, ip, a4, v7
  218. smlabt v2, lr, a4, v2
  219. smlabt v4, ip, a4, v4
  220. smlatt v6, lr, a4, v6
  221. ldr a3, [a1, #(16*7)]
  222. smlatt fp, ip, a4, fp
  223. smlatb v1, lr, a3, v1
  224. smlabb v3, lr, a3, v3
  225. smlatb v5, ip, a3, v5
  226. smulbb a4, ip, a3
  227. smlatt v2, lr, a3, v2
  228. sub v7, v7, a4
  229. smlabt v4, lr, a3, v4
  230. smulbt a4, ip, a3
  231. smlatt v6, ip, a3, v6
  232. sub fp, fp, a4
  233. .endm
  234. function idct_col_armv5te
  235. str lr, [sp, #-4]!
  236. idct_col
  237. ldmfd sp!, {a3, a4}
  238. adds a2, a3, v1
  239. mov a2, a2, lsr #20
  240. it mi
  241. orrmi a2, a2, #0xf000
  242. add ip, a4, v2
  243. mov ip, ip, asr #20
  244. orr a2, a2, ip, lsl #16
  245. str a2, [a1]
  246. subs a3, a3, v1
  247. mov a2, a3, lsr #20
  248. it mi
  249. orrmi a2, a2, #0xf000
  250. sub a4, a4, v2
  251. mov a4, a4, asr #20
  252. orr a2, a2, a4, lsl #16
  253. ldmfd sp!, {a3, a4}
  254. str a2, [a1, #(16*7)]
  255. subs a2, a3, v3
  256. mov a2, a2, lsr #20
  257. it mi
  258. orrmi a2, a2, #0xf000
  259. sub ip, a4, v4
  260. mov ip, ip, asr #20
  261. orr a2, a2, ip, lsl #16
  262. str a2, [a1, #(16*1)]
  263. adds a3, a3, v3
  264. mov a2, a3, lsr #20
  265. it mi
  266. orrmi a2, a2, #0xf000
  267. add a4, a4, v4
  268. mov a4, a4, asr #20
  269. orr a2, a2, a4, lsl #16
  270. ldmfd sp!, {a3, a4}
  271. str a2, [a1, #(16*6)]
  272. adds a2, a3, v5
  273. mov a2, a2, lsr #20
  274. it mi
  275. orrmi a2, a2, #0xf000
  276. add ip, a4, v6
  277. mov ip, ip, asr #20
  278. orr a2, a2, ip, lsl #16
  279. str a2, [a1, #(16*2)]
  280. subs a3, a3, v5
  281. mov a2, a3, lsr #20
  282. it mi
  283. orrmi a2, a2, #0xf000
  284. sub a4, a4, v6
  285. mov a4, a4, asr #20
  286. orr a2, a2, a4, lsl #16
  287. ldmfd sp!, {a3, a4}
  288. str a2, [a1, #(16*5)]
  289. adds a2, a3, v7
  290. mov a2, a2, lsr #20
  291. it mi
  292. orrmi a2, a2, #0xf000
  293. add ip, a4, fp
  294. mov ip, ip, asr #20
  295. orr a2, a2, ip, lsl #16
  296. str a2, [a1, #(16*3)]
  297. subs a3, a3, v7
  298. mov a2, a3, lsr #20
  299. it mi
  300. orrmi a2, a2, #0xf000
  301. sub a4, a4, fp
  302. mov a4, a4, asr #20
  303. orr a2, a2, a4, lsl #16
  304. str a2, [a1, #(16*4)]
  305. ldr pc, [sp], #4
  306. endfunc
  307. .macro clip dst, src:vararg
  308. movs \dst, \src
  309. it mi
  310. movmi \dst, #0
  311. cmp \dst, #255
  312. it gt
  313. movgt \dst, #255
  314. .endm
  315. .macro aclip dst, src:vararg
  316. adds \dst, \src
  317. it mi
  318. movmi \dst, #0
  319. cmp \dst, #255
  320. it gt
  321. movgt \dst, #255
  322. .endm
  323. function idct_col_put_armv5te
  324. str lr, [sp, #-4]!
  325. idct_col
  326. ldmfd sp!, {a3, a4}
  327. ldr lr, [sp, #32]
  328. add a2, a3, v1
  329. clip a2, a2, asr #20
  330. add ip, a4, v2
  331. clip ip, ip, asr #20
  332. orr a2, a2, ip, lsl #8
  333. sub a3, a3, v1
  334. clip a3, a3, asr #20
  335. sub a4, a4, v2
  336. clip a4, a4, asr #20
  337. ldr v1, [sp, #28]
  338. strh a2, [v1]
  339. add a2, v1, #2
  340. str a2, [sp, #28]
  341. orr a2, a3, a4, lsl #8
  342. rsb v2, lr, lr, lsl #3
  343. ldmfd sp!, {a3, a4}
  344. strh_pre a2, v2, v1
  345. sub a2, a3, v3
  346. clip a2, a2, asr #20
  347. sub ip, a4, v4
  348. clip ip, ip, asr #20
  349. orr a2, a2, ip, lsl #8
  350. strh_pre a2, v1, lr
  351. add a3, a3, v3
  352. clip a2, a3, asr #20
  353. add a4, a4, v4
  354. clip a4, a4, asr #20
  355. orr a2, a2, a4, lsl #8
  356. ldmfd sp!, {a3, a4}
  357. strh_dpre a2, v2, lr
  358. add a2, a3, v5
  359. clip a2, a2, asr #20
  360. add ip, a4, v6
  361. clip ip, ip, asr #20
  362. orr a2, a2, ip, lsl #8
  363. strh_pre a2, v1, lr
  364. sub a3, a3, v5
  365. clip a2, a3, asr #20
  366. sub a4, a4, v6
  367. clip a4, a4, asr #20
  368. orr a2, a2, a4, lsl #8
  369. ldmfd sp!, {a3, a4}
  370. strh_dpre a2, v2, lr
  371. add a2, a3, v7
  372. clip a2, a2, asr #20
  373. add ip, a4, fp
  374. clip ip, ip, asr #20
  375. orr a2, a2, ip, lsl #8
  376. strh a2, [v1, lr]
  377. sub a3, a3, v7
  378. clip a2, a3, asr #20
  379. sub a4, a4, fp
  380. clip a4, a4, asr #20
  381. orr a2, a2, a4, lsl #8
  382. strh_dpre a2, v2, lr
  383. ldr pc, [sp], #4
  384. endfunc
  385. function idct_col_add_armv5te
  386. str lr, [sp, #-4]!
  387. idct_col
  388. ldr lr, [sp, #36]
  389. ldmfd sp!, {a3, a4}
  390. ldrh ip, [lr]
  391. add a2, a3, v1
  392. sub a3, a3, v1
  393. and v1, ip, #255
  394. aclip a2, v1, a2, asr #20
  395. add v1, a4, v2
  396. mov v1, v1, asr #20
  397. aclip v1, v1, ip, lsr #8
  398. orr a2, a2, v1, lsl #8
  399. ldr v1, [sp, #32]
  400. sub a4, a4, v2
  401. rsb v2, v1, v1, lsl #3
  402. ldrh_pre ip, v2, lr
  403. strh a2, [lr]
  404. and a2, ip, #255
  405. aclip a3, a2, a3, asr #20
  406. mov a4, a4, asr #20
  407. aclip a4, a4, ip, lsr #8
  408. add a2, lr, #2
  409. str a2, [sp, #28]
  410. orr a2, a3, a4, lsl #8
  411. strh a2, [v2]
  412. ldmfd sp!, {a3, a4}
  413. ldrh_pre ip, lr, v1
  414. sub a2, a3, v3
  415. add a3, a3, v3
  416. and v3, ip, #255
  417. aclip a2, v3, a2, asr #20
  418. sub v3, a4, v4
  419. mov v3, v3, asr #20
  420. aclip v3, v3, ip, lsr #8
  421. orr a2, a2, v3, lsl #8
  422. add a4, a4, v4
  423. ldrh_dpre ip, v2, v1
  424. strh a2, [lr]
  425. and a2, ip, #255
  426. aclip a3, a2, a3, asr #20
  427. mov a4, a4, asr #20
  428. aclip a4, a4, ip, lsr #8
  429. orr a2, a3, a4, lsl #8
  430. strh a2, [v2]
  431. ldmfd sp!, {a3, a4}
  432. ldrh_pre ip, lr, v1
  433. add a2, a3, v5
  434. sub a3, a3, v5
  435. and v3, ip, #255
  436. aclip a2, v3, a2, asr #20
  437. add v3, a4, v6
  438. mov v3, v3, asr #20
  439. aclip v3, v3, ip, lsr #8
  440. orr a2, a2, v3, lsl #8
  441. sub a4, a4, v6
  442. ldrh_dpre ip, v2, v1
  443. strh a2, [lr]
  444. and a2, ip, #255
  445. aclip a3, a2, a3, asr #20
  446. mov a4, a4, asr #20
  447. aclip a4, a4, ip, lsr #8
  448. orr a2, a3, a4, lsl #8
  449. strh a2, [v2]
  450. ldmfd sp!, {a3, a4}
  451. ldrh_pre ip, lr, v1
  452. add a2, a3, v7
  453. sub a3, a3, v7
  454. and v3, ip, #255
  455. aclip a2, v3, a2, asr #20
  456. add v3, a4, fp
  457. mov v3, v3, asr #20
  458. aclip v3, v3, ip, lsr #8
  459. orr a2, a2, v3, lsl #8
  460. sub a4, a4, fp
  461. ldrh_dpre ip, v2, v1
  462. strh a2, [lr]
  463. and a2, ip, #255
  464. aclip a3, a2, a3, asr #20
  465. mov a4, a4, asr #20
  466. aclip a4, a4, ip, lsr #8
  467. orr a2, a3, a4, lsl #8
  468. strh a2, [v2]
  469. ldr pc, [sp], #4
  470. endfunc
  471. function ff_simple_idct_armv5te, export=1
  472. stmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}
  473. bl idct_row_armv5te
  474. add a1, a1, #16
  475. bl idct_row_armv5te
  476. add a1, a1, #16
  477. bl idct_row_armv5te
  478. add a1, a1, #16
  479. bl idct_row_armv5te
  480. add a1, a1, #16
  481. bl idct_row_armv5te
  482. add a1, a1, #16
  483. bl idct_row_armv5te
  484. add a1, a1, #16
  485. bl idct_row_armv5te
  486. add a1, a1, #16
  487. bl idct_row_armv5te
  488. sub a1, a1, #(16*7)
  489. bl idct_col_armv5te
  490. add a1, a1, #4
  491. bl idct_col_armv5te
  492. add a1, a1, #4
  493. bl idct_col_armv5te
  494. add a1, a1, #4
  495. bl idct_col_armv5te
  496. ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
  497. endfunc
  498. function ff_simple_idct_add_armv5te, export=1
  499. stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
  500. mov a1, a3
  501. bl idct_row_armv5te
  502. add a1, a1, #16
  503. bl idct_row_armv5te
  504. add a1, a1, #16
  505. bl idct_row_armv5te
  506. add a1, a1, #16
  507. bl idct_row_armv5te
  508. add a1, a1, #16
  509. bl idct_row_armv5te
  510. add a1, a1, #16
  511. bl idct_row_armv5te
  512. add a1, a1, #16
  513. bl idct_row_armv5te
  514. add a1, a1, #16
  515. bl idct_row_armv5te
  516. sub a1, a1, #(16*7)
  517. bl idct_col_add_armv5te
  518. add a1, a1, #4
  519. bl idct_col_add_armv5te
  520. add a1, a1, #4
  521. bl idct_col_add_armv5te
  522. add a1, a1, #4
  523. bl idct_col_add_armv5te
  524. add sp, sp, #8
  525. ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
  526. endfunc
  527. function ff_simple_idct_put_armv5te, export=1
  528. stmfd sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
  529. mov a1, a3
  530. bl idct_row_armv5te
  531. add a1, a1, #16
  532. bl idct_row_armv5te
  533. add a1, a1, #16
  534. bl idct_row_armv5te
  535. add a1, a1, #16
  536. bl idct_row_armv5te
  537. add a1, a1, #16
  538. bl idct_row_armv5te
  539. add a1, a1, #16
  540. bl idct_row_armv5te
  541. add a1, a1, #16
  542. bl idct_row_armv5te
  543. add a1, a1, #16
  544. bl idct_row_armv5te
  545. sub a1, a1, #(16*7)
  546. bl idct_col_put_armv5te
  547. add a1, a1, #4
  548. bl idct_col_put_armv5te
  549. add a1, a1, #4
  550. bl idct_col_put_armv5te
  551. add a1, a1, #4
  552. bl idct_col_put_armv5te
  553. add sp, sp, #8
  554. ldmfd sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
  555. endfunc