/*
 * Simple IDCT
 *
 * Copyright (c) 2001 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2006 Mans Rullgard <mru@inprovide.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define W1 22725 /* cos(1*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W2 21407 /* cos(2*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W3 19266 /* cos(3*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W4 16383 /* cos(4*M_PI/16)*sqrt(2)*(1<<14) + 0.5, minus 1 */
#define W5 12873 /* cos(5*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W6  8867 /* cos(6*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define W7  4520 /* cos(7*M_PI/16)*sqrt(2)*(1<<14) + 0.5 */
#define ROW_SHIFT 11
#define COL_SHIFT 20

#define W13 (W1 | (W3 << 16))
#define W26 (W2 | (W6 << 16))
#define W57 (W5 | (W7 << 16))
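
/*
 * The W* constants are the DCT basis cosines in 2.14 fixed point,
 * e.g. W1 = round(cos(1*M_PI/16) * sqrt(2) * (1 << 14)) = 22725.
 * W13, W26 and W57 pack two coefficients into one word so that the
 * ARMv5TE SMULxy/SMLAxy instructions can pick either 16-bit half as
 * a multiplier, keeping a butterfly's pair of coefficients in a
 * single register.
 */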
        .text
        .align
w13:    .long W13
w26:    .long W26
w57:    .long W57

        .align
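/*
 * idct_row_armv5te: 1-D IDCT of one row of eight 16-bit coefficients,
 * in place, with a1 pointing at the row.  Results are scaled down by
 * ROW_SHIFT and repacked as halfwords.  A row whose seven AC terms
 * are all zero takes the fast row_dc_only path below.
 */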
        .func idct_row_armv5te
idct_row_armv5te:
        str    lr, [sp, #-4]!

        ldrd   v1, [a1, #8]
        ldrd   a3, [a1]              /* a3 = row[1:0], a4 = row[3:2] */
        orrs   v1, v1, v2
        cmpeq  v1, a4
        cmpeq  v1, a3, lsr #16
        beq    row_dc_only

        mov    v1, #(1<<(ROW_SHIFT-1))
        mov    ip, #16384
        sub    ip, ip, #1            /* ip = W4 */
        smlabb v1, ip, a3, v1        /* v1 = W4*row[0]+(1<<(RS-1)) */
        ldr    ip, [pc, #(w26-.-8)]  /* ip = W2 | (W6 << 16) */
        smultb a2, ip, a4
        smulbb lr, ip, a4
        add    v2, v1, a2
        sub    v3, v1, a2
        sub    v4, v1, lr
        add    v1, v1, lr

        ldr    ip, [pc, #(w13-.-8)]  /* ip = W1 | (W3 << 16) */
        ldr    lr, [pc, #(w57-.-8)]  /* lr = W5 | (W7 << 16) */
        smulbt v5, ip, a3
        smultt v6, lr, a4
        smlatt v5, ip, a4, v5
        smultt a2, ip, a3
        smulbt v7, lr, a3
        sub    v6, v6, a2
        smulbt a2, ip, a4
        smultt fp, lr, a3
        sub    v7, v7, a2
        smulbt a2, lr, a4
        ldrd   a3, [a1, #8]          /* a3=row[5:4] a4=row[7:6] */
        sub    fp, fp, a2

        orrs   a2, a3, a4
        beq    1f

        smlabt v5, lr, a3, v5
        smlabt v6, ip, a3, v6
        smlatt v5, lr, a4, v5
        smlabt v6, lr, a4, v6
        smlatt v7, lr, a3, v7
        smlatt fp, ip, a3, fp
        smulbt a2, ip, a4
        smlatt v7, ip, a4, v7
        sub    fp, fp, a2

        ldr    ip, [pc, #(w26-.-8)]  /* ip = W2 | (W6 << 16) */
        mov    a2, #16384
        sub    a2, a2, #1            /* a2 = W4 */
        smulbb a2, a2, a3            /* a2 = W4*row[4] */
        smultb lr, ip, a4            /* lr = W6*row[6] */
        add    v1, v1, a2            /* v1 += W4*row[4] */
        add    v1, v1, lr            /* v1 += W6*row[6] */
        add    v4, v4, a2            /* v4 += W4*row[4] */
        sub    v4, v4, lr            /* v4 -= W6*row[6] */
        smulbb lr, ip, a4            /* lr = W2*row[6] */
        sub    v2, v2, a2            /* v2 -= W4*row[4] */
        sub    v2, v2, lr            /* v2 -= W2*row[6] */
        sub    v3, v3, a2            /* v3 -= W4*row[4] */
        add    v3, v3, lr            /* v3 += W2*row[6] */
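
/*
 * Pack the eight 32-bit sums back into four words of halfwords.
 * After the LSR by ROW_SHIFT, bits [20:16] are cleared with BIC so
 * that adding the second value shifted left by 16 cannot collide
 * with them.
 */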
1:      add    a2, v1, v5
        mov    a3, a2, lsr #11
        bic    a3, a3, #0x1f0000
        sub    a2, v2, v6
        mov    a2, a2, lsr #11
        add    a3, a3, a2, lsl #16
        add    a2, v3, v7
        mov    a4, a2, lsr #11
        bic    a4, a4, #0x1f0000
        add    a2, v4, fp
        mov    a2, a2, lsr #11
        add    a4, a4, a2, lsl #16
        strd   a3, [a1]

        sub    a2, v4, fp
        mov    a3, a2, lsr #11
        bic    a3, a3, #0x1f0000
        sub    a2, v3, v7
        mov    a2, a2, lsr #11
        add    a3, a3, a2, lsl #16
        add    a2, v2, v6
        mov    a4, a2, lsr #11
        bic    a4, a4, #0x1f0000
        sub    a2, v1, v5
        mov    a2, a2, lsr #11
        add    a4, a4, a2, lsl #16
        strd   a3, [a1, #8]

        ldr    pc, [sp], #4

row_dc_only:
        orr    a3, a3, a3, lsl #16   /* duplicate DC into both halves */
        bic    a3, a3, #0xe000       /* keep the shift from spilling over */
        mov    a3, a3, lsl #3        /* DC << 3 in each halfword */
        mov    a4, a3
        strd   a3, [a1]
        strd   a3, [a1, #8]

        ldr    pc, [sp], #4
        .endfunc
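
/*
 * idct_col: 1-D IDCT of two adjacent columns at once.  Each load at
 * [a1 + 16*i] fetches element i of both columns as packed halfwords;
 * v1/v3/v5/v7 accumulate one column and v2/v4/v6/fp the other.  The
 * even-coefficient sums are computed first and spilled with STMFD,
 * then the odd sums are built in the same registers; the callers pop
 * the even sums pairwise and combine them.
 */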
        .macro idct_col
        ldr    a4, [a1]              /* a4 = col[1:0] */
        mov    ip, #16384
        sub    ip, ip, #1            /* ip = W4 */
#if 0
        mov    v1, #(1<<(COL_SHIFT-1))
        smlabt v2, ip, a4, v1        /* v2 = W4*col[1] + (1<<(COL_SHIFT-1)) */
        smlabb v1, ip, a4, v1        /* v1 = W4*col[0] + (1<<(COL_SHIFT-1)) */
        ldr    a4, [a1, #(16*4)]
#else
        mov    v1, #((1<<(COL_SHIFT-1))/W4) /* this matches the C version */
        add    v2, v1, a4, asr #16
        rsb    v2, v2, v2, lsl #14
        mov    a4, a4, lsl #16
        add    v1, v1, a4, asr #16
        ldr    a4, [a1, #(16*4)]
        rsb    v1, v1, v1, lsl #14
#endif

        smulbb lr, ip, a4
        smulbt a3, ip, a4
        sub    v3, v1, lr
        sub    v5, v1, lr
        add    v7, v1, lr
        add    v1, v1, lr
        sub    v4, v2, a3
        sub    v6, v2, a3
        add    fp, v2, a3
        ldr    ip, [pc, #(w26-.-8)]
        ldr    a4, [a1, #(16*2)]
        add    v2, v2, a3

        smulbb lr, ip, a4
        smultb a3, ip, a4
        add    v1, v1, lr
        sub    v7, v7, lr
        add    v3, v3, a3
        sub    v5, v5, a3
        smulbt lr, ip, a4
        smultt a3, ip, a4
        add    v2, v2, lr
        sub    fp, fp, lr
        add    v4, v4, a3
        ldr    a4, [a1, #(16*6)]
        sub    v6, v6, a3

        smultb lr, ip, a4
        smulbb a3, ip, a4
        add    v1, v1, lr
        sub    v7, v7, lr
        sub    v3, v3, a3
        add    v5, v5, a3
        smultt lr, ip, a4
        smulbt a3, ip, a4
        add    v2, v2, lr
        sub    fp, fp, lr
        sub    v4, v4, a3
        add    v6, v6, a3

        stmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp}

        ldr    ip, [pc, #(w13-.-8)]
        ldr    a4, [a1, #(16*1)]
        ldr    lr, [pc, #(w57-.-8)]
        smulbb v1, ip, a4
        smultb v3, ip, a4
        smulbb v5, lr, a4
        smultb v7, lr, a4
        smulbt v2, ip, a4
        smultt v4, ip, a4
        smulbt v6, lr, a4
        smultt fp, lr, a4
        rsb    v4, v4, #0
        ldr    a4, [a1, #(16*3)]
        rsb    v3, v3, #0

        smlatb v1, ip, a4, v1
        smlatb v3, lr, a4, v3
        smulbb a3, ip, a4
        smulbb a2, lr, a4
        sub    v5, v5, a3
        sub    v7, v7, a2
        smlatt v2, ip, a4, v2
        smlatt v4, lr, a4, v4
        smulbt a3, ip, a4
        smulbt a2, lr, a4
        sub    v6, v6, a3
        ldr    a4, [a1, #(16*5)]
        sub    fp, fp, a2

        smlabb v1, lr, a4, v1
        smlabb v3, ip, a4, v3
        smlatb v5, lr, a4, v5
        smlatb v7, ip, a4, v7
        smlabt v2, lr, a4, v2
        smlabt v4, ip, a4, v4
        smlatt v6, lr, a4, v6
        ldr    a3, [a1, #(16*7)]
        smlatt fp, ip, a4, fp

        smlatb v1, lr, a3, v1
        smlabb v3, lr, a3, v3
        smlatb v5, ip, a3, v5
        smulbb a4, ip, a3
        smlatt v2, lr, a3, v2
        sub    v7, v7, a4
        smlabt v4, lr, a3, v4
        smulbt a4, ip, a3
        smlatt v6, ip, a3, v6
        sub    fp, fp, a4
        .endm
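
/*
 * idct_col_armv5te: column pass that stores 16-bit results back into
 * the block.  Each result is scaled down by COL_SHIFT; the LSR #20
 * followed by a conditional ORR of #0xf000 rebuilds a sign-extended
 * 16-bit value in the low halfword without touching the high one.
 */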
        .align
        .func idct_col_armv5te
idct_col_armv5te:
        str    lr, [sp, #-4]!

        idct_col

        ldmfd  sp!, {a3, a4}
        adds   a2, a3, v1
        mov    a2, a2, lsr #20
        orrmi  a2, a2, #0xf000
        add    ip, a4, v2
        mov    ip, ip, asr #20
        orr    a2, a2, ip, lsl #16
        str    a2, [a1]
        subs   a3, a3, v1
        mov    a2, a3, lsr #20
        orrmi  a2, a2, #0xf000
        sub    a4, a4, v2
        mov    a4, a4, asr #20
        orr    a2, a2, a4, lsl #16
        ldmfd  sp!, {a3, a4}
        str    a2, [a1, #(16*7)]

        subs   a2, a3, v3
        mov    a2, a2, lsr #20
        orrmi  a2, a2, #0xf000
        sub    ip, a4, v4
        mov    ip, ip, asr #20
        orr    a2, a2, ip, lsl #16
        str    a2, [a1, #(16*1)]
        adds   a3, a3, v3
        mov    a2, a3, lsr #20
        orrmi  a2, a2, #0xf000
        add    a4, a4, v4
        mov    a4, a4, asr #20
        orr    a2, a2, a4, lsl #16
        ldmfd  sp!, {a3, a4}
        str    a2, [a1, #(16*6)]

        adds   a2, a3, v5
        mov    a2, a2, lsr #20
        orrmi  a2, a2, #0xf000
        add    ip, a4, v6
        mov    ip, ip, asr #20
        orr    a2, a2, ip, lsl #16
        str    a2, [a1, #(16*2)]
        subs   a3, a3, v5
        mov    a2, a3, lsr #20
        orrmi  a2, a2, #0xf000
        sub    a4, a4, v6
        mov    a4, a4, asr #20
        orr    a2, a2, a4, lsl #16
        ldmfd  sp!, {a3, a4}
        str    a2, [a1, #(16*5)]

        adds   a2, a3, v7
        mov    a2, a2, lsr #20
        orrmi  a2, a2, #0xf000
        add    ip, a4, fp
        mov    ip, ip, asr #20
        orr    a2, a2, ip, lsl #16
        str    a2, [a1, #(16*3)]
        subs   a3, a3, v7
        mov    a2, a3, lsr #20
        orrmi  a2, a2, #0xf000
        sub    a4, a4, fp
        mov    a4, a4, asr #20
        orr    a2, a2, a4, lsl #16
        str    a2, [a1, #(16*4)]

        ldr    pc, [sp], #4
        .endfunc
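
/*
 * idct_col_put_armv5te: column pass for simple_idct_put.  Results are
 * clamped to 0..255 and stored as byte pairs.  The destination
 * pointer and line size pushed by the caller are read from the stack
 * ([sp, #28] and [sp, #32] at this depth), and the saved pointer is
 * advanced by two for the next column pair.
 */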
        .align
        .func idct_col_put_armv5te
idct_col_put_armv5te:
        str    lr, [sp, #-4]!

        idct_col

        ldmfd  sp!, {a3, a4}
        ldr    lr, [sp, #32]         /* lr = line_size */
        add    a2, a3, v1
        movs   a2, a2, asr #20
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        add    ip, a4, v2
        movs   ip, ip, asr #20
        movmi  ip, #0
        cmp    ip, #255
        movgt  ip, #255
        orr    a2, a2, ip, lsl #8
        sub    a3, a3, v1
        movs   a3, a3, asr #20
        movmi  a3, #0
        cmp    a3, #255
        movgt  a3, #255
        sub    a4, a4, v2
        movs   a4, a4, asr #20
        movmi  a4, #0
        cmp    a4, #255
        ldr    v1, [sp, #28]         /* v1 = dest */
        movgt  a4, #255
        strh   a2, [v1]
        add    a2, v1, #2
        str    a2, [sp, #28]         /* advance saved dest to the next pair */
        orr    a2, a3, a4, lsl #8
        rsb    v2, lr, lr, lsl #3    /* v2 = 7*line_size */
        ldmfd  sp!, {a3, a4}
        strh   a2, [v2, v1]!

        sub    a2, a3, v3
        movs   a2, a2, asr #20
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        sub    ip, a4, v4
        movs   ip, ip, asr #20
        movmi  ip, #0
        cmp    ip, #255
        movgt  ip, #255
        orr    a2, a2, ip, lsl #8
        strh   a2, [v1, lr]!
        add    a3, a3, v3
        movs   a2, a3, asr #20
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        add    a4, a4, v4
        movs   a4, a4, asr #20
        movmi  a4, #0
        cmp    a4, #255
        movgt  a4, #255
        orr    a2, a2, a4, lsl #8
        ldmfd  sp!, {a3, a4}
        strh   a2, [v2, -lr]!

        add    a2, a3, v5
        movs   a2, a2, asr #20
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        add    ip, a4, v6
        movs   ip, ip, asr #20
        movmi  ip, #0
        cmp    ip, #255
        movgt  ip, #255
        orr    a2, a2, ip, lsl #8
        strh   a2, [v1, lr]!
        sub    a3, a3, v5
        movs   a2, a3, asr #20
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        sub    a4, a4, v6
        movs   a4, a4, asr #20
        movmi  a4, #0
        cmp    a4, #255
        movgt  a4, #255
        orr    a2, a2, a4, lsl #8
        ldmfd  sp!, {a3, a4}
        strh   a2, [v2, -lr]!

        add    a2, a3, v7
        movs   a2, a2, asr #20
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        add    ip, a4, fp
        movs   ip, ip, asr #20
        movmi  ip, #0
        cmp    ip, #255
        movgt  ip, #255
        orr    a2, a2, ip, lsl #8
        strh   a2, [v1, lr]
        sub    a3, a3, v7
        movs   a2, a3, asr #20
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        sub    a4, a4, fp
        movs   a4, a4, asr #20
        movmi  a4, #0
        cmp    a4, #255
        movgt  a4, #255
        orr    a2, a2, a4, lsl #8
        strh   a2, [v2, -lr]

        ldr    pc, [sp], #4
        .endfunc
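
/*
 * idct_col_add_armv5te: column pass for simple_idct_add.  The
 * destination pixels are loaded as halfword pairs, the IDCT results
 * are added to them, and the sums are saturated to 0..255 before
 * being stored back.
 */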
        .align
        .func idct_col_add_armv5te
idct_col_add_armv5te:
        str    lr, [sp, #-4]!

        idct_col

        ldr    lr, [sp, #36]         /* lr = dest */
        ldmfd  sp!, {a3, a4}
        ldrh   ip, [lr]
        add    a2, a3, v1
        mov    a2, a2, asr #20
        sub    a3, a3, v1
        and    v1, ip, #255
        adds   a2, a2, v1
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        add    v1, a4, v2
        mov    v1, v1, asr #20
        adds   v1, v1, ip, lsr #8
        movmi  v1, #0
        cmp    v1, #255
        movgt  v1, #255
        orr    a2, a2, v1, lsl #8
        ldr    v1, [sp, #32]         /* v1 = line_size */
        sub    a4, a4, v2
        rsb    v2, v1, v1, lsl #3    /* v2 = 7*line_size */
        ldrh   ip, [v2, lr]!
        strh   a2, [lr]
        mov    a3, a3, asr #20
        and    a2, ip, #255
        adds   a3, a3, a2
        movmi  a3, #0
        cmp    a3, #255
        movgt  a3, #255
        mov    a4, a4, asr #20
        adds   a4, a4, ip, lsr #8
        movmi  a4, #0
        cmp    a4, #255
        movgt  a4, #255
        add    a2, lr, #2
        str    a2, [sp, #28]         /* advance saved dest to the next pair */
        orr    a2, a3, a4, lsl #8
        strh   a2, [v2]

        ldmfd  sp!, {a3, a4}
        ldrh   ip, [lr, v1]!
        sub    a2, a3, v3
        mov    a2, a2, asr #20
        add    a3, a3, v3
        and    v3, ip, #255
        adds   a2, a2, v3
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        sub    v3, a4, v4
        mov    v3, v3, asr #20
        adds   v3, v3, ip, lsr #8
        movmi  v3, #0
        cmp    v3, #255
        movgt  v3, #255
        orr    a2, a2, v3, lsl #8
        add    a4, a4, v4
        ldrh   ip, [v2, -v1]!
        strh   a2, [lr]
        mov    a3, a3, asr #20
        and    a2, ip, #255
        adds   a3, a3, a2
        movmi  a3, #0
        cmp    a3, #255
        movgt  a3, #255
        mov    a4, a4, asr #20
        adds   a4, a4, ip, lsr #8
        movmi  a4, #0
        cmp    a4, #255
        movgt  a4, #255
        orr    a2, a3, a4, lsl #8
        strh   a2, [v2]

        ldmfd  sp!, {a3, a4}
        ldrh   ip, [lr, v1]!
        add    a2, a3, v5
        mov    a2, a2, asr #20
        sub    a3, a3, v5
        and    v3, ip, #255
        adds   a2, a2, v3
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        add    v3, a4, v6
        mov    v3, v3, asr #20
        adds   v3, v3, ip, lsr #8
        movmi  v3, #0
        cmp    v3, #255
        movgt  v3, #255
        orr    a2, a2, v3, lsl #8
        sub    a4, a4, v6
        ldrh   ip, [v2, -v1]!
        strh   a2, [lr]
        mov    a3, a3, asr #20
        and    a2, ip, #255
        adds   a3, a3, a2
        movmi  a3, #0
        cmp    a3, #255
        movgt  a3, #255
        mov    a4, a4, asr #20
        adds   a4, a4, ip, lsr #8
        movmi  a4, #0
        cmp    a4, #255
        movgt  a4, #255
        orr    a2, a3, a4, lsl #8
        strh   a2, [v2]

        ldmfd  sp!, {a3, a4}
        ldrh   ip, [lr, v1]!
        add    a2, a3, v7
        mov    a2, a2, asr #20
        sub    a3, a3, v7
        and    v3, ip, #255
        adds   a2, a2, v3
        movmi  a2, #0
        cmp    a2, #255
        movgt  a2, #255
        add    v3, a4, fp
        mov    v3, v3, asr #20
        adds   v3, v3, ip, lsr #8
        movmi  v3, #0
        cmp    v3, #255
        movgt  v3, #255
        orr    a2, a2, v3, lsl #8
        sub    a4, a4, fp
        ldrh   ip, [v2, -v1]!
        strh   a2, [lr]
        mov    a3, a3, asr #20
        and    a2, ip, #255
        adds   a3, a3, a2
        movmi  a3, #0
        cmp    a3, #255
        movgt  a3, #255
        mov    a4, a4, asr #20
        adds   a4, a4, ip, lsr #8
        movmi  a4, #0
        cmp    a4, #255
        movgt  a4, #255
        orr    a2, a3, a4, lsl #8
        strh   a2, [v2]

        ldr    pc, [sp], #4
        .endfunc
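
/*
 * simple_idct_armv5te: full 2-D IDCT of the 8x8 block of 16-bit
 * coefficients at a1, in place: eight row passes followed by four
 * column passes, each covering two columns.
 */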
        .align
        .global simple_idct_armv5te
        .func simple_idct_armv5te
simple_idct_armv5te:
        stmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, lr}

        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        sub    a1, a1, #(16*7)

        bl     idct_col_armv5te
        add    a1, a1, #4
        bl     idct_col_armv5te
        add    a1, a1, #4
        bl     idct_col_armv5te
        add    a1, a1, #4
        bl     idct_col_armv5te

        ldmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc
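
/*
 * The _add and _put variants appear to take (dest, line_size, block)
 * in a1-a3: dest and line_size are kept on the stack for the column
 * routines, which read them at fixed offsets, hence the final
 * "add sp, sp, #8" before the callee-saved registers are restored.
 */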
        .align
        .global simple_idct_add_armv5te
        .func simple_idct_add_armv5te
simple_idct_add_armv5te:
        stmfd  sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
        mov    a1, a3

        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        sub    a1, a1, #(16*7)

        bl     idct_col_add_armv5te
        add    a1, a1, #4
        bl     idct_col_add_armv5te
        add    a1, a1, #4
        bl     idct_col_add_armv5te
        add    a1, a1, #4
        bl     idct_col_add_armv5te

        add    sp, sp, #8
        ldmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc

        .align
        .global simple_idct_put_armv5te
        .func simple_idct_put_armv5te
simple_idct_put_armv5te:
        stmfd  sp!, {a1, a2, v1, v2, v3, v4, v5, v6, v7, fp, lr}
        mov    a1, a3

        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        add    a1, a1, #16
        bl     idct_row_armv5te
        sub    a1, a1, #(16*7)

        bl     idct_col_put_armv5te
        add    a1, a1, #4
        bl     idct_col_put_armv5te
        add    a1, a1, #4
        bl     idct_col_put_armv5te
        add    a1, a1, #4
        bl     idct_col_put_armv5te

        add    sp, sp, #8
        ldmfd  sp!, {v1, v2, v3, v4, v5, v6, v7, fp, pc}
        .endfunc