You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

531 lines
15KB

  1. // Software scaling and colorspace conversion routines for MPlayer
// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
  3. // current version mostly by Michael Niedermayer (michaelni@gmx.at)
  4. // the parts written by michael are under GNU GPL
  5. #include <inttypes.h>
  6. #include <string.h>
  7. #include <math.h>
  8. #include <stdio.h>
  9. #include "../config.h"
  10. #include "../mangle.h"
  11. #ifdef HAVE_MALLOC_H
  12. #include <malloc.h>
  13. #endif
  14. #include "swscale.h"
  15. #include "../cpudetect.h"
  16. #undef MOVNTQ
  17. #undef PAVGB
  18. //#undef HAVE_MMX2
  19. //#undef HAVE_MMX
  20. //#undef ARCH_X86
  21. #define DITHER1XBPP
  22. int fullUVIpol=0;
  23. //disables the unscaled height version
  24. int allwaysIpol=0;
  25. #define RET 0xC3 //near return opcode
  26. //#define ASSERT(x) if(!(x)) { printf("ASSERT " #x " failed\n"); *((int*)0)=0; }
  27. #define ASSERT(x) ;
  28. extern int verbose; // defined in mplayer.c
  29. /*
  30. NOTES
  31. known BUGS with known cause (no bugreports please!, but patches are welcome :) )
  32. horizontal fast_bilinear MMX2 scaler reads 1-7 samples too much (might cause a sig11)
  33. Supported output formats BGR15 BGR16 BGR24 BGR32 YV12
BGR15 & BGR16 MMX versions support dithering
  35. Special versions: fast Y 1:1 scaling (no interpolation in y direction)
  36. TODO
more intelligent misalignment avoidance for the horizontal scaler
  38. dither in C
  39. change the distance of the u & v buffer
  40. Move static / global vars into a struct so multiple scalers can be used
  41. write special vertical cubic upscale version
  42. Optimize C code (yv12 / minmax)
  43. dstStride[3]
  44. */
  45. #define ABS(a) ((a) > 0 ? (a) : (-(a)))
  46. #define MIN(a,b) ((a) > (b) ? (b) : (a))
  47. #define MAX(a,b) ((a) < (b) ? (b) : (a))
  48. #ifdef ARCH_X86
  49. #define CAN_COMPILE_X86_ASM
  50. #endif
  51. #ifdef CAN_COMPILE_X86_ASM
  52. static uint64_t __attribute__((aligned(8))) yCoeff= 0x2568256825682568LL;
  53. static uint64_t __attribute__((aligned(8))) vrCoeff= 0x3343334333433343LL;
  54. static uint64_t __attribute__((aligned(8))) ubCoeff= 0x40cf40cf40cf40cfLL;
  55. static uint64_t __attribute__((aligned(8))) vgCoeff= 0xE5E2E5E2E5E2E5E2LL;
  56. static uint64_t __attribute__((aligned(8))) ugCoeff= 0xF36EF36EF36EF36ELL;
  57. static uint64_t __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
  58. static uint64_t __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
  59. static uint64_t __attribute__((aligned(8))) w400= 0x0400040004000400LL;
  60. static uint64_t __attribute__((aligned(8))) w80= 0x0080008000800080LL;
  61. static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
  62. static uint64_t __attribute__((aligned(8))) w02= 0x0002000200020002LL;
  63. static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
  64. static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
  65. static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;
  66. static volatile uint64_t __attribute__((aligned(8))) b5Dither;
  67. static volatile uint64_t __attribute__((aligned(8))) g5Dither;
  68. static volatile uint64_t __attribute__((aligned(8))) g6Dither;
  69. static volatile uint64_t __attribute__((aligned(8))) r5Dither;
  70. static uint64_t __attribute__((aligned(8))) dither4[2]={
  71. 0x0103010301030103LL,
  72. 0x0200020002000200LL,};
  73. static uint64_t __attribute__((aligned(8))) dither8[2]={
  74. 0x0602060206020602LL,
  75. 0x0004000400040004LL,};
  76. static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
  77. static uint64_t __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
  78. static uint64_t __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
  79. static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
  80. static uint64_t __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
  81. static uint64_t __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
  82. static uint64_t __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
  83. static uint64_t __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
  84. static uint64_t __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;
  85. static uint64_t __attribute__((aligned(8))) temp0;
  86. static uint64_t __attribute__((aligned(8))) asm_yalpha1;
  87. static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
  88. static int16_t __attribute__((aligned(8))) *lumPixBuf[2000];
  89. static int16_t __attribute__((aligned(8))) *chrPixBuf[2000];
  90. static int16_t __attribute__((aligned(8))) hLumFilter[8000];
  91. static int16_t __attribute__((aligned(8))) hLumFilterPos[2000];
  92. static int16_t __attribute__((aligned(8))) hChrFilter[8000];
  93. static int16_t __attribute__((aligned(8))) hChrFilterPos[2000];
  94. static int16_t __attribute__((aligned(8))) vLumFilter[8000];
  95. static int16_t __attribute__((aligned(8))) vLumFilterPos[2000];
  96. static int16_t __attribute__((aligned(8))) vChrFilter[8000];
  97. static int16_t __attribute__((aligned(8))) vChrFilterPos[2000];
  98. // Contain simply the values from v(Lum|Chr)Filter just nicely packed for mmx
  99. //FIXME these are very likely too small / 8000 caused problems with 480x480
  100. static int16_t __attribute__((aligned(8))) lumMmxFilter[16000];
  101. static int16_t __attribute__((aligned(8))) chrMmxFilter[16000];
  102. #else
  103. static int16_t *lumPixBuf[2000];
  104. static int16_t *chrPixBuf[2000];
  105. static int16_t hLumFilter[8000];
  106. static int16_t hLumFilterPos[2000];
  107. static int16_t hChrFilter[8000];
  108. static int16_t hChrFilterPos[2000];
  109. static int16_t vLumFilter[8000];
  110. static int16_t vLumFilterPos[2000];
  111. static int16_t vChrFilter[8000];
  112. static int16_t vChrFilterPos[2000];
  113. //FIXME just dummy vars
  114. static int16_t lumMmxFilter[1];
  115. static int16_t chrMmxFilter[1];
  116. #endif
  117. // clipping helper table for C implementations:
  118. static unsigned char clip_table[768];
  119. static unsigned short clip_table16b[768];
  120. static unsigned short clip_table16g[768];
  121. static unsigned short clip_table16r[768];
  122. static unsigned short clip_table15b[768];
  123. static unsigned short clip_table15g[768];
  124. static unsigned short clip_table15r[768];
  125. // yuv->rgb conversion tables:
  126. static int yuvtab_2568[256];
  127. static int yuvtab_3343[256];
  128. static int yuvtab_0c92[256];
  129. static int yuvtab_1a1e[256];
  130. static int yuvtab_40cf[256];
  131. // Needed for cubic scaler to catch overflows
  132. static int clip_yuvtab_2568[768];
  133. static int clip_yuvtab_3343[768];
  134. static int clip_yuvtab_0c92[768];
  135. static int clip_yuvtab_1a1e[768];
  136. static int clip_yuvtab_40cf[768];
  137. static int hLumFilterSize=0;
  138. static int hChrFilterSize=0;
  139. static int vLumFilterSize=0;
  140. static int vChrFilterSize=0;
  141. static int vLumBufSize=0;
  142. static int vChrBufSize=0;
  143. int sws_flags=0;
  144. #ifdef CAN_COMPILE_X86_ASM
  145. static uint8_t funnyYCode[10000];
  146. static uint8_t funnyUVCode[10000];
  147. #endif
  148. static int canMMX2BeUsed=0;
  149. #ifdef CAN_COMPILE_X86_ASM
  150. void in_asm_used_var_warning_killer()
  151. {
  152. volatile int i= yCoeff+vrCoeff+ubCoeff+vgCoeff+ugCoeff+bF8+bFC+w400+w80+w10+
  153. bm00001111+bm00000111+bm11111000+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+temp0+asm_yalpha1+ asm_uvalpha1+
  154. M24A+M24B+M24C+w02 + funnyYCode[0]+ funnyUVCode[0]+b5Dither+g5Dither+r5Dither+g6Dither+dither4[0]+dither8[0];
  155. if(i) i=0;
  156. }
  157. #endif
  158. static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  159. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  160. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW)
  161. {
  162. //FIXME Optimize (just quickly writen not opti..)
  163. int i;
  164. for(i=0; i<dstW; i++)
  165. {
  166. int val=0;
  167. int j;
  168. for(j=0; j<lumFilterSize; j++)
  169. val += lumSrc[j][i] * lumFilter[j];
  170. dest[i]= MIN(MAX(val>>19, 0), 255);
  171. }
  172. if(uDest != NULL)
  173. for(i=0; i<(dstW>>1); i++)
  174. {
  175. int u=0;
  176. int v=0;
  177. int j;
  178. for(j=0; j<chrFilterSize; j++)
  179. {
  180. u += chrSrc[j][i] * chrFilter[j];
  181. v += chrSrc[j][i + 2048] * chrFilter[j];
  182. }
  183. uDest[i]= MIN(MAX(u>>19, 0), 255);
  184. vDest[i]= MIN(MAX(v>>19, 0), 255);
  185. }
  186. }
  187. static inline void yuv2rgbXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  188. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  189. uint8_t *dest, int dstW, int dstbpp)
  190. {
  191. if(dstbpp==32)
  192. {
  193. int i;
  194. for(i=0; i<(dstW>>1); i++){
  195. int j;
  196. int Y1=0;
  197. int Y2=0;
  198. int U=0;
  199. int V=0;
  200. int Cb, Cr, Cg;
  201. for(j=0; j<lumFilterSize; j++)
  202. {
  203. Y1 += lumSrc[j][2*i] * lumFilter[j];
  204. Y2 += lumSrc[j][2*i+1] * lumFilter[j];
  205. }
  206. for(j=0; j<chrFilterSize; j++)
  207. {
  208. U += chrSrc[j][i] * chrFilter[j];
  209. V += chrSrc[j][i+2048] * chrFilter[j];
  210. }
  211. Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
  212. Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
  213. U >>= 19;
  214. V >>= 19;
  215. Cb= clip_yuvtab_40cf[U+ 256];
  216. Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
  217. Cr= clip_yuvtab_3343[V+ 256];
  218. dest[8*i+0]=clip_table[((Y1 + Cb) >>13)];
  219. dest[8*i+1]=clip_table[((Y1 + Cg) >>13)];
  220. dest[8*i+2]=clip_table[((Y1 + Cr) >>13)];
  221. dest[8*i+4]=clip_table[((Y2 + Cb) >>13)];
  222. dest[8*i+5]=clip_table[((Y2 + Cg) >>13)];
  223. dest[8*i+6]=clip_table[((Y2 + Cr) >>13)];
  224. }
  225. }
  226. else if(dstbpp==24)
  227. {
  228. int i;
  229. for(i=0; i<(dstW>>1); i++){
  230. int j;
  231. int Y1=0;
  232. int Y2=0;
  233. int U=0;
  234. int V=0;
  235. int Cb, Cr, Cg;
  236. for(j=0; j<lumFilterSize; j++)
  237. {
  238. Y1 += lumSrc[j][2*i] * lumFilter[j];
  239. Y2 += lumSrc[j][2*i+1] * lumFilter[j];
  240. }
  241. for(j=0; j<chrFilterSize; j++)
  242. {
  243. U += chrSrc[j][i] * chrFilter[j];
  244. V += chrSrc[j][i+2048] * chrFilter[j];
  245. }
  246. Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
  247. Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
  248. U >>= 19;
  249. V >>= 19;
  250. Cb= clip_yuvtab_40cf[U+ 256];
  251. Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
  252. Cr= clip_yuvtab_3343[V+ 256];
  253. dest[0]=clip_table[((Y1 + Cb) >>13)];
  254. dest[1]=clip_table[((Y1 + Cg) >>13)];
  255. dest[2]=clip_table[((Y1 + Cr) >>13)];
  256. dest[3]=clip_table[((Y2 + Cb) >>13)];
  257. dest[4]=clip_table[((Y2 + Cg) >>13)];
  258. dest[5]=clip_table[((Y2 + Cr) >>13)];
  259. dest+=6;
  260. }
  261. }
  262. else if(dstbpp==16)
  263. {
  264. int i;
  265. for(i=0; i<(dstW>>1); i++){
  266. int j;
  267. int Y1=0;
  268. int Y2=0;
  269. int U=0;
  270. int V=0;
  271. int Cb, Cr, Cg;
  272. for(j=0; j<lumFilterSize; j++)
  273. {
  274. Y1 += lumSrc[j][2*i] * lumFilter[j];
  275. Y2 += lumSrc[j][2*i+1] * lumFilter[j];
  276. }
  277. for(j=0; j<chrFilterSize; j++)
  278. {
  279. U += chrSrc[j][i] * chrFilter[j];
  280. V += chrSrc[j][i+2048] * chrFilter[j];
  281. }
  282. Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
  283. Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
  284. U >>= 19;
  285. V >>= 19;
  286. Cb= clip_yuvtab_40cf[U+ 256];
  287. Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
  288. Cr= clip_yuvtab_3343[V+ 256];
  289. ((uint16_t*)dest)[2*i] =
  290. clip_table16b[(Y1 + Cb) >>13] |
  291. clip_table16g[(Y1 + Cg) >>13] |
  292. clip_table16r[(Y1 + Cr) >>13];
  293. ((uint16_t*)dest)[2*i+1] =
  294. clip_table16b[(Y2 + Cb) >>13] |
  295. clip_table16g[(Y2 + Cg) >>13] |
  296. clip_table16r[(Y2 + Cr) >>13];
  297. }
  298. }
  299. else if(dstbpp==15)
  300. {
  301. int i;
  302. for(i=0; i<(dstW>>1); i++){
  303. int j;
  304. int Y1=0;
  305. int Y2=0;
  306. int U=0;
  307. int V=0;
  308. int Cb, Cr, Cg;
  309. for(j=0; j<lumFilterSize; j++)
  310. {
  311. Y1 += lumSrc[j][2*i] * lumFilter[j];
  312. Y2 += lumSrc[j][2*i+1] * lumFilter[j];
  313. }
  314. for(j=0; j<chrFilterSize; j++)
  315. {
  316. U += chrSrc[j][i] * chrFilter[j];
  317. V += chrSrc[j][i+2048] * chrFilter[j];
  318. }
  319. Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
  320. Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
  321. U >>= 19;
  322. V >>= 19;
  323. Cb= clip_yuvtab_40cf[U+ 256];
  324. Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
  325. Cr= clip_yuvtab_3343[V+ 256];
  326. ((uint16_t*)dest)[2*i] =
  327. clip_table15b[(Y1 + Cb) >>13] |
  328. clip_table15g[(Y1 + Cg) >>13] |
  329. clip_table15r[(Y1 + Cr) >>13];
  330. ((uint16_t*)dest)[2*i+1] =
  331. clip_table15b[(Y2 + Cb) >>13] |
  332. clip_table15g[(Y2 + Cg) >>13] |
  333. clip_table15r[(Y2 + Cr) >>13];
  334. }
  335. }
  336. }
  337. //Note: we have C, X86, MMX, MMX2, 3DNOW version therse no 3DNOW+MMX2 one
  338. //Plain C versions
  339. #if !defined (HAVE_MMX) || defined (RUNTIME_CPUDETECT)
  340. #define COMPILE_C
  341. #endif
  342. #ifdef CAN_COMPILE_X86_ASM
  343. #if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
  344. #define COMPILE_MMX
  345. #endif
  346. #if defined (HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
  347. #define COMPILE_MMX2
  348. #endif
  349. #if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
  350. #define COMPILE_3DNOW
  351. #endif
  352. #endif //CAN_COMPILE_X86_ASM
  353. #undef HAVE_MMX
  354. #undef HAVE_MMX2
  355. #undef HAVE_3DNOW
  356. #undef ARCH_X86
  357. #ifdef COMPILE_C
  358. #undef HAVE_MMX
  359. #undef HAVE_MMX2
  360. #undef HAVE_3DNOW
  361. #undef ARCH_X86
  362. #define RENAME(a) a ## _C
  363. #include "swscale_template.c"
  364. #endif
  365. #ifdef CAN_COMPILE_X86_ASM
  366. //X86 versions
  367. /*
  368. #undef RENAME
  369. #undef HAVE_MMX
  370. #undef HAVE_MMX2
  371. #undef HAVE_3DNOW
  372. #define ARCH_X86
  373. #define RENAME(a) a ## _X86
  374. #include "swscale_template.c"
  375. */
  376. //MMX versions
  377. #ifdef COMPILE_MMX
  378. #undef RENAME
  379. #define HAVE_MMX
  380. #undef HAVE_MMX2
  381. #undef HAVE_3DNOW
  382. #define ARCH_X86
  383. #define RENAME(a) a ## _MMX
  384. #include "swscale_template.c"
  385. #endif
  386. //MMX2 versions
  387. #ifdef COMPILE_MMX2
  388. #undef RENAME
  389. #define HAVE_MMX
  390. #define HAVE_MMX2
  391. #undef HAVE_3DNOW
  392. #define ARCH_X86
  393. #define RENAME(a) a ## _MMX2
  394. #include "swscale_template.c"
  395. #endif
  396. //3DNOW versions
  397. #ifdef COMPILE_3DNOW
  398. #undef RENAME
  399. #define HAVE_MMX
  400. #undef HAVE_MMX2
  401. #define HAVE_3DNOW
  402. #define ARCH_X86
  403. #define RENAME(a) a ## _3DNow
  404. #include "swscale_template.c"
  405. #endif
  406. #endif //CAN_COMPILE_X86_ASM
  407. // minor note: the HAVE_xyz is messed up after that line so dont use it
  408. // *** bilinear scaling and yuv->rgb or yuv->yuv conversion of yv12 slices:
  409. // *** Note: it's called multiple times while decoding a frame, first time y==0
  410. // switching the cpu type during a sliced drawing can have bad effects, like sig11
  411. void SwScale_YV12slice(unsigned char* srcptr[],int stride[], int srcSliceY ,
  412. int srcSliceH, uint8_t* dstptr[], int dststride, int dstbpp,
  413. int srcW, int srcH, int dstW, int dstH){
  414. #ifdef RUNTIME_CPUDETECT
  415. #ifdef CAN_COMPILE_X86_ASM
  416. // ordered per speed fasterst first
  417. if(gCpuCaps.hasMMX2)
  418. SwScale_YV12slice_MMX2(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
  419. else if(gCpuCaps.has3DNow)
  420. SwScale_YV12slice_3DNow(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
  421. else if(gCpuCaps.hasMMX)
  422. SwScale_YV12slice_MMX(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
  423. else
  424. SwScale_YV12slice_C(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
  425. #else
  426. SwScale_YV12slice_C(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
  427. #endif
  428. #else //RUNTIME_CPUDETECT
  429. #ifdef HAVE_MMX2
  430. SwScale_YV12slice_MMX2(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
  431. #elif defined (HAVE_3DNOW)
  432. SwScale_YV12slice_3DNow(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
  433. #elif defined (HAVE_MMX)
  434. SwScale_YV12slice_MMX(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
  435. #else
  436. SwScale_YV12slice_C(srcptr, stride, srcSliceY, srcSliceH, dstptr, dststride, dstbpp, srcW, srcH, dstW, dstH);
  437. #endif
  438. #endif //!RUNTIME_CPUDETECT
  439. }
  440. void SwScale_Init(){
  441. // generating tables:
  442. int i;
  443. for(i=0; i<768; i++){
  444. int c= MIN(MAX(i-256, 0), 255);
  445. clip_table[i]=c;
  446. yuvtab_2568[c]= clip_yuvtab_2568[i]=(0x2568*(c-16))+(256<<13);
  447. yuvtab_3343[c]= clip_yuvtab_3343[i]=0x3343*(c-128);
  448. yuvtab_0c92[c]= clip_yuvtab_0c92[i]=-0x0c92*(c-128);
  449. yuvtab_1a1e[c]= clip_yuvtab_1a1e[i]=-0x1a1e*(c-128);
  450. yuvtab_40cf[c]= clip_yuvtab_40cf[i]=0x40cf*(c-128);
  451. }
  452. for(i=0; i<768; i++)
  453. {
  454. int v= clip_table[i];
  455. clip_table16b[i]= v>>3;
  456. clip_table16g[i]= (v<<3)&0x07E0;
  457. clip_table16r[i]= (v<<8)&0xF800;
  458. clip_table15b[i]= v>>3;
  459. clip_table15g[i]= (v<<2)&0x03E0;
  460. clip_table15r[i]= (v<<7)&0x7C00;
  461. }
  462. }