  1. //
  2. // Copyright (c) 2009-2013 Mikko Mononen memon@inside.org
  3. //
  4. // This software is provided 'as-is', without any express or implied
  5. // warranty. In no event will the authors be held liable for any damages
  6. // arising from the use of this software.
  7. // Permission is granted to anyone to use this software for any purpose,
  8. // including commercial applications, and to alter it and redistribute it
  9. // freely, subject to the following restrictions:
  10. // 1. The origin of this software must not be misrepresented; you must not
  11. // claim that you wrote the original software. If you use this software
  12. // in a product, an acknowledgment in the product documentation would be
  13. // appreciated but is not required.
  14. // 2. Altered source versions must be plainly marked as such, and must not be
  15. // misrepresented as being the original software.
  16. // 3. This notice may not be removed or altered from any source distribution.
  17. //
  18. #ifndef NANOVG_GL_H
  19. #define NANOVG_GL_H
  20. #include <stdio.h>
  21. #ifdef __cplusplus
  22. extern "C" {
  23. #endif
  24. // Create flags
  25. enum NVGcreateFlags {
  26. // Flag indicating if geometry-based anti-aliasing is used (may not be needed when using MSAA).
  27. NVG_ANTIALIAS = 1<<0,
  28. // Flag indicating if strokes should be drawn using the stencil buffer. The rendering will be a little
  29. // slower, but path overlaps (i.e. self-intersecting or sharp turns) will be drawn just once.
  30. NVG_STENCIL_STROKES = 1<<1,
  31. // Flag indicating that additional debug checks are done.
  32. NVG_DEBUG = 1<<2,
  33. };
  34. #if defined NANOVG_GL2_IMPLEMENTATION
  35. # define NANOVG_GL2 1
  36. # define NANOVG_GL_IMPLEMENTATION 1
  37. #elif defined NANOVG_GL3_IMPLEMENTATION
  38. # define NANOVG_GL3 1
  39. # define NANOVG_GL_IMPLEMENTATION 1
  40. # define NANOVG_GL_USE_UNIFORMBUFFER 1
  41. #elif defined NANOVG_GLES2_IMPLEMENTATION
  42. # define NANOVG_GLES2 1
  43. # define NANOVG_GL_IMPLEMENTATION 1
  44. #elif defined NANOVG_GLES3_IMPLEMENTATION
  45. # define NANOVG_GLES3 1
  46. # define NANOVG_GL_IMPLEMENTATION 1
  47. #endif
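// A minimal usage sketch for the implementation defines above (a GL3 build is assumed
// here; glad is just one example of a GL loader, any loader that provides the core GL3
// entry points will do):
//
//   /* in exactly one .c/.cpp file of the project: */
//   #include <glad/glad.h>
//   #include "nanovg.h"
//   #define NANOVG_GL3_IMPLEMENTATION
//   #include "nanovg_gl.h"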
  48. #define NANOVG_GL_USE_STATE_FILTER (1)
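// When NANOVG_GL_USE_STATE_FILTER is enabled, the glnvg__bindTexture / glnvg__stencilMask /
// glnvg__stencilFunc / glnvg__blendFuncSeparate helpers below compare against the state
// cached in GLNVGcontext and skip redundant GL calls.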
  49. // Creates NanoVG contexts for different OpenGL (ES) versions.
  50. // Flags should be a combination of the create flags above.
  51. #if defined NANOVG_GL2
  52. NVGcontext* nvgCreateGL2(int flags);
  53. void nvgDeleteGL2(NVGcontext* ctx);
  54. int nvglCreateImageFromHandleGL2(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
  55. GLuint nvglImageHandleGL2(NVGcontext* ctx, int image);
  56. #endif
  57. #if defined NANOVG_GL3
  58. NVGcontext* nvgCreateGL3(int flags);
  59. void nvgDeleteGL3(NVGcontext* ctx);
  60. int nvglCreateImageFromHandleGL3(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
  61. GLuint nvglImageHandleGL3(NVGcontext* ctx, int image);
  62. #endif
  63. #if defined NANOVG_GLES2
  64. NVGcontext* nvgCreateGLES2(int flags);
  65. void nvgDeleteGLES2(NVGcontext* ctx);
  66. int nvglCreateImageFromHandleGLES2(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
  67. GLuint nvglImageHandleGLES2(NVGcontext* ctx, int image);
  68. #endif
  69. #if defined NANOVG_GLES3
  70. NVGcontext* nvgCreateGLES3(int flags);
  71. void nvgDeleteGLES3(NVGcontext* ctx);
  72. int nvglCreateImageFromHandleGLES3(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
  73. GLuint nvglImageHandleGLES3(NVGcontext* ctx, int image);
  74. #endif
  75. // These are additional flags on top of NVGimageFlags.
  76. enum NVGimageFlagsGL {
  77. NVG_IMAGE_NODELETE = 1<<16, // Do not delete GL texture handle.
  78. };
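// Usage sketch for the API above, assuming a GL3 build; 'myTex', 'texW' and 'texH' are
// placeholders for an existing OpenGL texture handle and its size:
//
//   NVGcontext* vg = nvgCreateGL3(NVG_ANTIALIAS | NVG_STENCIL_STROKES);
//   /* wrap an existing GL texture; NVG_IMAGE_NODELETE keeps NanoVG from deleting it */
//   int img = nvglCreateImageFromHandleGL3(vg, myTex, texW, texH, NVG_IMAGE_NODELETE);
//   ...
//   nvgDeleteGL3(vg);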
  79. #ifdef __cplusplus
  80. }
  81. #endif
  82. #endif /* NANOVG_GL_H */
  83. #ifdef NANOVG_GL_IMPLEMENTATION
  84. #include <stdlib.h>
  85. #include <stdio.h>
  86. #include <string.h>
  87. #include <math.h>
  88. #include "nanovg.h"
  89. enum GLNVGuniformLoc {
  90. GLNVG_LOC_VIEWSIZE,
  91. GLNVG_LOC_TEX,
  92. GLNVG_LOC_FRAG,
  93. GLNVG_MAX_LOCS
  94. };
  95. enum GLNVGshaderType {
  96. NSVG_SHADER_FILLGRAD,
  97. NSVG_SHADER_FILLIMG,
  98. NSVG_SHADER_SIMPLE,
  99. NSVG_SHADER_IMG
  100. };
  101. #if NANOVG_GL_USE_UNIFORMBUFFER
  102. enum GLNVGuniformBindings {
  103. GLNVG_FRAG_BINDING = 0,
  104. };
  105. #endif
  106. struct GLNVGshader {
  107. GLuint prog;
  108. GLuint frag;
  109. GLuint vert;
  110. GLint loc[GLNVG_MAX_LOCS];
  111. };
  112. typedef struct GLNVGshader GLNVGshader;
  113. struct GLNVGtexture {
  114. int id;
  115. GLuint tex;
  116. int width, height;
  117. int type;
  118. int flags;
  119. };
  120. typedef struct GLNVGtexture GLNVGtexture;
  121. struct GLNVGblend
  122. {
  123. GLenum srcRGB;
  124. GLenum dstRGB;
  125. GLenum srcAlpha;
  126. GLenum dstAlpha;
  127. };
  128. typedef struct GLNVGblend GLNVGblend;
  129. enum GLNVGcallType {
  130. GLNVG_NONE = 0,
  131. GLNVG_FILL,
  132. GLNVG_CONVEXFILL,
  133. GLNVG_STROKE,
  134. GLNVG_TRIANGLES,
  135. };
  136. struct GLNVGcall {
  137. int type;
  138. int image;
  139. int pathOffset;
  140. int pathCount;
  141. int triangleOffset;
  142. int triangleCount;
  143. int uniformOffset;
  144. GLNVGblend blendFunc;
  145. };
  146. typedef struct GLNVGcall GLNVGcall;
  147. struct GLNVGpath {
  148. int fillOffset;
  149. int fillCount;
  150. int strokeOffset;
  151. int strokeCount;
  152. };
  153. typedef struct GLNVGpath GLNVGpath;
  154. struct GLNVGfragUniforms {
  155. #if NANOVG_GL_USE_UNIFORMBUFFER
  156. float scissorMat[12]; // matrices are actually 3 vec4s
  157. float paintMat[12];
  158. struct NVGcolor innerCol;
  159. struct NVGcolor outerCol;
  160. float scissorExt[2];
  161. float scissorScale[2];
  162. float extent[2];
  163. float radius;
  164. float feather;
  165. float strokeMult;
  166. float strokeThr;
  167. int texType;
  168. int type;
  169. #else
  170. // note: after modifying layout or size of uniform array,
  171. // don't forget to also update the fragment shader source!
  172. #define NANOVG_GL_UNIFORMARRAY_SIZE 11
  173. union {
  174. struct {
  175. float scissorMat[12]; // matrices are actually 3 vec4s
  176. float paintMat[12];
  177. struct NVGcolor innerCol;
  178. struct NVGcolor outerCol;
  179. float scissorExt[2];
  180. float scissorScale[2];
  181. float extent[2];
  182. float radius;
  183. float feather;
  184. float strokeMult;
  185. float strokeThr;
  186. float texType;
  187. float type;
  188. };
  189. float uniformArray[NANOVG_GL_UNIFORMARRAY_SIZE][4];
  190. };
  191. #endif
  192. };
  193. typedef struct GLNVGfragUniforms GLNVGfragUniforms;
  194. struct GLNVGcontext {
  195. GLNVGshader shader;
  196. GLNVGtexture* textures;
  197. float view[2];
  198. int ntextures;
  199. int ctextures;
  200. int textureId;
  201. GLuint vertBuf;
  202. #if defined NANOVG_GL3
  203. GLuint vertArr;
  204. #endif
  205. #if NANOVG_GL_USE_UNIFORMBUFFER
  206. GLuint fragBuf;
  207. #endif
  208. int fragSize;
  209. int flags;
  210. // Per frame buffers
  211. GLNVGcall* calls;
  212. int ccalls;
  213. int ncalls;
  214. GLNVGpath* paths;
  215. int cpaths;
  216. int npaths;
  217. struct NVGvertex* verts;
  218. int cverts;
  219. int nverts;
  220. unsigned char* uniforms;
  221. int cuniforms;
  222. int nuniforms;
  223. // cached state
  224. #if NANOVG_GL_USE_STATE_FILTER
  225. GLuint boundTexture;
  226. GLuint stencilMask;
  227. GLenum stencilFunc;
  228. GLint stencilFuncRef;
  229. GLuint stencilFuncMask;
  230. GLNVGblend blendFunc;
  231. #endif
  232. };
  233. typedef struct GLNVGcontext GLNVGcontext;
  234. static int glnvg__maxi(int a, int b) { return a > b ? a : b; }
  235. #ifdef NANOVG_GLES2
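// Returns the smallest power of two >= num (e.g. 600 -> 1024, 256 -> 256); used below to
// detect non-power-of-two textures, which on GLES2 cannot use REPEAT wrapping or mipmaps.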
  236. static unsigned int glnvg__nearestPow2(unsigned int num)
  237. {
  238. unsigned n = num > 0 ? num - 1 : 0;
  239. n |= n >> 1;
  240. n |= n >> 2;
  241. n |= n >> 4;
  242. n |= n >> 8;
  243. n |= n >> 16;
  244. n++;
  245. return n;
  246. }
  247. #endif
  248. static void glnvg__bindTexture(GLNVGcontext* gl, GLuint tex)
  249. {
  250. #if NANOVG_GL_USE_STATE_FILTER
  251. if (gl->boundTexture != tex) {
  252. gl->boundTexture = tex;
  253. glBindTexture(GL_TEXTURE_2D, tex);
  254. }
  255. #else
  256. glBindTexture(GL_TEXTURE_2D, tex);
  257. #endif
  258. }
  259. static void glnvg__stencilMask(GLNVGcontext* gl, GLuint mask)
  260. {
  261. #if NANOVG_GL_USE_STATE_FILTER
  262. if (gl->stencilMask != mask) {
  263. gl->stencilMask = mask;
  264. glStencilMask(mask);
  265. }
  266. #else
  267. glStencilMask(mask);
  268. #endif
  269. }
  270. static void glnvg__stencilFunc(GLNVGcontext* gl, GLenum func, GLint ref, GLuint mask)
  271. {
  272. #if NANOVG_GL_USE_STATE_FILTER
  273. if ((gl->stencilFunc != func) ||
  274. (gl->stencilFuncRef != ref) ||
  275. (gl->stencilFuncMask != mask)) {
  276. gl->stencilFunc = func;
  277. gl->stencilFuncRef = ref;
  278. gl->stencilFuncMask = mask;
  279. glStencilFunc(func, ref, mask);
  280. }
  281. #else
  282. glStencilFunc(func, ref, mask);
  283. #endif
  284. }
  285. static void glnvg__blendFuncSeparate(GLNVGcontext* gl, const GLNVGblend* blend)
  286. {
  287. #if NANOVG_GL_USE_STATE_FILTER
  288. if ((gl->blendFunc.srcRGB != blend->srcRGB) ||
  289. (gl->blendFunc.dstRGB != blend->dstRGB) ||
  290. (gl->blendFunc.srcAlpha != blend->srcAlpha) ||
  291. (gl->blendFunc.dstAlpha != blend->dstAlpha)) {
  292. gl->blendFunc = *blend;
  293. glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha,blend->dstAlpha);
  294. }
  295. #else
  296. glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha,blend->dstAlpha);
  297. #endif
  298. }
  299. static GLNVGtexture* glnvg__allocTexture(GLNVGcontext* gl)
  300. {
  301. GLNVGtexture* tex = NULL;
  302. int i;
  303. for (i = 0; i < gl->ntextures; i++) {
  304. if (gl->textures[i].id == 0) {
  305. tex = &gl->textures[i];
  306. break;
  307. }
  308. }
  309. if (tex == NULL) {
  310. if (gl->ntextures+1 > gl->ctextures) {
  311. GLNVGtexture* textures;
  312. int ctextures = glnvg__maxi(gl->ntextures+1, 4) + gl->ctextures/2; // 1.5x Overallocate
  313. textures = (GLNVGtexture*)realloc(gl->textures, sizeof(GLNVGtexture)*ctextures);
  314. if (textures == NULL) return NULL;
  315. gl->textures = textures;
  316. gl->ctextures = ctextures;
  317. }
  318. tex = &gl->textures[gl->ntextures++];
  319. }
  320. memset(tex, 0, sizeof(*tex));
  321. tex->id = ++gl->textureId;
  322. return tex;
  323. }
  324. static GLNVGtexture* glnvg__findTexture(GLNVGcontext* gl, int id)
  325. {
  326. int i;
  327. for (i = 0; i < gl->ntextures; i++)
  328. if (gl->textures[i].id == id)
  329. return &gl->textures[i];
  330. return NULL;
  331. }
  332. static int glnvg__deleteTexture(GLNVGcontext* gl, int id)
  333. {
  334. int i;
  335. for (i = 0; i < gl->ntextures; i++) {
  336. if (gl->textures[i].id == id) {
  337. if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0)
  338. glDeleteTextures(1, &gl->textures[i].tex);
  339. memset(&gl->textures[i], 0, sizeof(gl->textures[i]));
  340. return 1;
  341. }
  342. }
  343. return 0;
  344. }
  345. static void glnvg__dumpShaderError(GLuint shader, const char* name, const char* type)
  346. {
  347. GLchar str[512+1];
  348. GLsizei len = 0;
  349. glGetShaderInfoLog(shader, 512, &len, str);
  350. if (len > 512) len = 512;
  351. str[len] = '\0';
  352. printf("Shader %s/%s error:\n%s\n", name, type, str);
  353. }
  354. static void glnvg__dumpProgramError(GLuint prog, const char* name)
  355. {
  356. GLchar str[512+1];
  357. GLsizei len = 0;
  358. glGetProgramInfoLog(prog, 512, &len, str);
  359. if (len > 512) len = 512;
  360. str[len] = '\0';
  361. printf("Program %s error:\n%s\n", name, str);
  362. }
  363. static void glnvg__checkError(GLNVGcontext* gl, const char* str)
  364. {
  365. GLenum err;
  366. if ((gl->flags & NVG_DEBUG) == 0) return;
  367. err = glGetError();
  368. if (err != GL_NO_ERROR) {
  369. printf("Error %08x after %s\n", err, str);
  370. return;
  371. }
  372. }
  373. /* static void loc_print_shader(char**lines, int numlines) { */
  374. /* int i; */
  375. /* for(i = 0; i < numlines; i++) */
  376. /* { */
  377. /* printf("xxx loc_print_shader: line[%d]=\"%s\"\n", i, lines[i]); */
  378. /* } */
  379. /* } */
  380. static int glnvg__createShader(GLNVGshader* shader, const char* name, const char* header, const char* opts, const char *vshader, const char *fshader)
  381. {
  382. GLint status;
  383. GLuint prog, vert, frag;
  384. /* const char* str[3]; */
  385. /* str[0] = header; */
  386. /* str[1] = opts != NULL ? opts : ""; */
  387. /* vshader[0] = header; */
  388. /* vshader[1] = opts != NULL ? opts : (char*)""; */
  389. /* fshader[0] = header; */
  390. /* fshader[1] = opts != NULL ? opts : (char*)""; */
  391. memset(shader, 0, sizeof(*shader));
  392. prog = glCreateProgram();
  393. vert = glCreateShader(GL_VERTEX_SHADER);
  394. frag = glCreateShader(GL_FRAGMENT_SHADER);
  395. /* str[2] = vshader; */
  396. /* printf("xxx glnvg__createShader: glShaderSource(vert, ..)\n"); */
  397. /* loc_print_shader(vshader, vshader_numlines); */
  398. glShaderSource(vert, 1, &vshader, 0);
  399. /* str[2] = fshader; */
  400. /* printf("xxx glnvg__createShader: glShaderSource(frag, ..)\n"); */
  401. /* loc_print_shader(fshader, fshader_numlines); */
  402. glShaderSource(frag, 1, &fshader, 0);
  403. /* printf("xxx glnvg__createShader: compileShader(vert)\n"); */
  404. glCompileShader(vert);
  405. glGetShaderiv(vert, GL_COMPILE_STATUS, &status);
  406. if (status != GL_TRUE) {
  407. glnvg__dumpShaderError(vert, name, "vert");
  408. return 0;
  409. }
  410. /* printf("xxx glnvg__createShader: compileShader(frag)\n"); */
  411. glCompileShader(frag);
  412. glGetShaderiv(frag, GL_COMPILE_STATUS, &status);
  413. if (status != GL_TRUE) {
  414. glnvg__dumpShaderError(frag, name, "frag");
  415. return 0;
  416. }
  417. /* printf("xxx glnvg__createShader: attach shaders\n"); */
  418. glAttachShader(prog, vert);
  419. glAttachShader(prog, frag);
  420. glBindAttribLocation(prog, 0, "vertex");
  421. glBindAttribLocation(prog, 1, "tcoord");
  422. /* printf("xxx glnvg__createShader: link program\n"); */
  423. glLinkProgram(prog);
  424. glGetProgramiv(prog, GL_LINK_STATUS, &status);
  425. /* printf("xxx glnvg__createShader: link program status=%u\n", status); */
  426. if (status != GL_TRUE) {
  427. glnvg__dumpProgramError(prog, name);
  428. return 0;
  429. }
  430. shader->prog = prog;
  431. shader->vert = vert;
  432. shader->frag = frag;
  433. return 1;
  434. }
  435. static void glnvg__deleteShader(GLNVGshader* shader)
  436. {
  437. if (shader->prog != 0)
  438. glDeleteProgram(shader->prog);
  439. if (shader->vert != 0)
  440. glDeleteShader(shader->vert);
  441. if (shader->frag != 0)
  442. glDeleteShader(shader->frag);
  443. }
  444. static void glnvg__getUniforms(GLNVGshader* shader)
  445. {
  446. shader->loc[GLNVG_LOC_VIEWSIZE] = glGetUniformLocation(shader->prog, "viewSize");
  447. shader->loc[GLNVG_LOC_TEX] = glGetUniformLocation(shader->prog, "tex");
  448. #if NANOVG_GL_USE_UNIFORMBUFFER
  449. shader->loc[GLNVG_LOC_FRAG] = glGetUniformBlockIndex(shader->prog, "frag");
  450. #else
  451. shader->loc[GLNVG_LOC_FRAG] = glGetUniformLocation(shader->prog, "frag");
  452. #endif
  453. }
  454. static int glnvg__renderCreate(void* uptr)
  455. {
  456. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  457. int align = 4;
  458. // (note) [bsp] 25Oct2018: the VirtualBox GL wrapper/driver does not support shader sources passed as multiple strings to glShaderSource,
  459. //        so the header+opts are therefore 'inlined' now into the single vertex/fragment shader source strings below.
  460. #if 0
  461. // TODO: mediump float may not be enough for GLES2 in iOS.
  462. // see the following discussion: https://github.com/memononen/nanovg/issues/46
  463. static char* shaderHeader =
  464. #if defined NANOVG_GL2
  465. "#define NANOVG_GL2 1\n"
  466. #elif defined NANOVG_GL3
  467. "#version 150 core\n"
  468. "#define NANOVG_GL3 1\n"
  469. #elif defined NANOVG_GLES2
  470. "#version 100\n"
  471. "#define NANOVG_GL2 1\n"
  472. #elif defined NANOVG_GLES3
  473. "#version 300 es\n"
  474. "#define NANOVG_GL3 1\n"
  475. #endif
  476. #if NANOVG_GL_USE_UNIFORMBUFFER
  477. "#define USE_UNIFORMBUFFER 1\n"
  478. #else
  479. "#define UNIFORMARRAY_SIZE 11\n"
  480. #endif
  481. "\n";
  482. #endif
  483. static const char* fillVertShader = {
  484. #if defined NANOVG_GL2
  485. "#define NANOVG_GL2 1\n"
  486. #elif defined NANOVG_GL3
  487. "#version 150 core\n"
  488. "#define NANOVG_GL3 1\n"
  489. #elif defined NANOVG_GLES2
  490. "#version 100\n"
  491. "#define NANOVG_GL2 1\n"
  492. #elif defined NANOVG_GLES3
  493. "#version 300 es\n"
  494. "#define NANOVG_GL3 1\n"
  495. #endif
  496. #if NANOVG_GL_USE_UNIFORMBUFFER
  497. "#define USE_UNIFORMBUFFER 1\n"
  498. #else
  499. "#define UNIFORMARRAY_SIZE 11\n"
  500. #endif
  501. "#define EDGE_AA 1\n" // opts
  502. "#ifdef NANOVG_GL3\n"
  503. " uniform vec2 viewSize;\n"
  504. " in vec2 vertex;\n"
  505. " in vec2 tcoord;\n"
  506. " out vec2 ftcoord;\n"
  507. " out vec2 fpos;\n"
  508. "#else\n"
  509. " uniform vec2 viewSize;\n"
  510. " attribute vec2 vertex;\n"
  511. " attribute vec2 tcoord;\n"
  512. " varying vec2 ftcoord;\n"
  513. " varying vec2 fpos;\n"
  514. "#endif\n"
  515. "void main(void) {\n"
  516. " ftcoord = tcoord;\n"
  517. " fpos = vertex;\n"
  518. " gl_Position = vec4(2.0*vertex.x/viewSize.x - 1.0, 1.0 - 2.0*vertex.y/viewSize.y, 0, 1);\n"
  519. "}\n"
  520. };
  521. static const char* fillFragShader = {
  522. #if defined NANOVG_GL2
  523. "#define NANOVG_GL2 1\n"
  524. #elif defined NANOVG_GL3
  525. "#version 150 core\n"
  526. "#define NANOVG_GL3 1\n"
  527. #elif defined NANOVG_GLES2
  528. "#version 100\n"
  529. "#define NANOVG_GL2 1\n"
  530. #elif defined NANOVG_GLES3
  531. "#version 300 es\n"
  532. "#define NANOVG_GL3 1\n"
  533. #endif
  534. #if NANOVG_GL_USE_UNIFORMBUFFER
  535. "#define USE_UNIFORMBUFFER 1\n"
  536. #else
  537. "#define UNIFORMARRAY_SIZE 11\n"
  538. #endif
  539. "#define EDGE_AA 1\n" // opts
  540. "#ifdef GL_ES\n"
  541. "#if defined(GL_FRAGMENT_PRECISION_HIGH) || defined(NANOVG_GL3)\n"
  542. " precision highp float;\n"
  543. "#else\n"
  544. " precision mediump float;\n"
  545. "#endif\n"
  546. "#endif\n"
  547. "#ifdef NANOVG_GL3\n"
  548. "#ifdef USE_UNIFORMBUFFER\n"
  549. " layout(std140) uniform frag {\n"
  550. " mat3 scissorMat;\n"
  551. " mat3 paintMat;\n"
  552. " vec4 innerCol;\n"
  553. " vec4 outerCol;\n"
  554. " vec2 scissorExt;\n"
  555. " vec2 scissorScale;\n"
  556. " vec2 extent;\n"
  557. " float radius;\n"
  558. " float feather;\n"
  559. " float strokeMult;\n"
  560. " float strokeThr;\n"
  561. " int texType;\n"
  562. " int type;\n"
  563. " };\n"
  564. "#else\n" // NANOVG_GL3 && !USE_UNIFORMBUFFER
  565. " uniform vec4 frag[UNIFORMARRAY_SIZE];\n"
  566. "#endif\n"
  567. " uniform sampler2D tex;\n"
  568. " in vec2 ftcoord;\n"
  569. " in vec2 fpos;\n"
  570. " out vec4 outColor;\n"
  571. "#else\n" // !NANOVG_GL3
  572. " uniform vec4 frag[UNIFORMARRAY_SIZE];\n"
  573. " uniform sampler2D tex;\n"
  574. " varying vec2 ftcoord;\n"
  575. " varying vec2 fpos;\n"
  576. "#endif\n"
  577. "#ifndef USE_UNIFORMBUFFER\n"
  578. " #define scissorMat mat3(frag[0].xyz, frag[1].xyz, frag[2].xyz)\n"
  579. " #define paintMat mat3(frag[3].xyz, frag[4].xyz, frag[5].xyz)\n"
  580. " #define innerCol frag[6]\n"
  581. " #define outerCol frag[7]\n"
  582. " #define scissorExt frag[8].xy\n"
  583. " #define scissorScale frag[8].zw\n"
  584. " #define extent frag[9].xy\n"
  585. " #define radius frag[9].z\n"
  586. " #define feather frag[9].w\n"
  587. " #define strokeMult frag[10].x\n"
  588. " #define strokeThr frag[10].y\n"
  589. " #define texType int(frag[10].z)\n"
  590. " #define type int(frag[10].w)\n"
  591. "#endif\n"
  592. "\n"
  593. "float sdroundrect(vec2 pt, vec2 ext, float rad) {\n"
  594. " vec2 ext2 = ext - vec2(rad,rad);\n"
  595. " vec2 d = abs(pt) - ext2;\n"
  596. " return min(max(d.x,d.y),0.0) + length(max(d,0.0)) - rad;\n"
  597. "}\n"
  598. "\n"
  599. "// Scissoring\n"
  600. "float scissorMask(vec2 p) {\n"
  601. " vec2 sc = (abs((scissorMat * vec3(p,1.0)).xy) - scissorExt);\n"
  602. " sc = vec2(0.5,0.5) - sc * scissorScale;\n"
  603. " return clamp(sc.x,0.0,1.0) * clamp(sc.y,0.0,1.0);\n"
  604. "}\n"
  605. "#ifdef EDGE_AA\n"
  606. "// Stroke - from [0..1] to clipped pyramid, where the slope is 1px.\n"
  607. "float strokeMask() {\n"
  608. " return min(1.0, (1.0-abs(ftcoord.x*2.0-1.0))*strokeMult) * min(1.0, ftcoord.y);\n"
  609. "}\n"
  610. "#endif\n"
  611. "\n"
  612. /* "void main(void) {\n" */
  613. "void main() {\n"
  614. " vec4 result;\n"
  615. " float scissor = scissorMask(fpos);\n"
  616. "#ifdef EDGE_AA\n"
  617. " float strokeAlpha = strokeMask();\n"
  618. " if (strokeAlpha < strokeThr) discard;\n"
  619. "#else\n"
  620. " float strokeAlpha = 1.0;\n"
  621. "#endif\n"
  622. " if (type == 0) { // Gradient\n"
  623. " // Calculate gradient color using box gradient\n"
  624. " vec2 pt = (paintMat * vec3(fpos,1.0)).xy;\n"
  625. " float d = clamp((sdroundrect(pt, extent, radius) + feather*0.5) / feather, 0.0, 1.0);\n"
  626. " vec4 color = mix(innerCol,outerCol,d);\n"
  627. " // Combine alpha\n"
  628. " color *= strokeAlpha * scissor;\n"
  629. " result = color;\n"
  630. " } else if (type == 1) { // Image\n"
  631. " // Calculate color fron texture\n"
  632. " vec2 pt = (paintMat * vec3(fpos,1.0)).xy / extent;\n"
  633. "#ifdef NANOVG_GL3\n"
  634. " vec4 color = texture(tex, pt);\n"
  635. "#else\n"
  636. " vec4 color = texture2D(tex, pt);\n"
  637. "#endif\n"
  638. " if (texType == 1) color = vec4(color.xyz*color.w,color.w);"
  639. " if (texType == 2) color = vec4(color.x);"
  640. " // Apply color tint and alpha.\n"
  641. " color *= innerCol;\n"
  642. " // Combine alpha\n"
  643. " color *= strokeAlpha * scissor;\n"
  644. " result = color;\n"
  645. " } else if (type == 2) { // Stencil fill\n"
  646. " result = vec4(1,1,1,1);\n"
  647. " } else if (type == 3) { // Textured tris\n"
  648. "#ifdef NANOVG_GL3\n"
  649. " vec4 color = texture(tex, ftcoord);\n"
  650. "#else\n"
  651. " vec4 color = texture2D(tex, ftcoord);\n"
  652. "#endif\n"
  653. " if (texType == 1) color = vec4(color.xyz*color.w,color.w);"
  654. " if (texType == 2) color = vec4(color.x);"
  655. " color *= scissor;\n"
  656. " result = color * innerCol;\n"
  657. " }\n"
  658. "#ifdef NANOVG_GL3\n"
  659. " outColor = result;\n"
  660. "#else\n"
  661. " gl_FragColor = result;\n"
  662. "#endif\n"
  663. "}\n"
  664. };
  665. glnvg__checkError(gl, "init");
  666. /* printf("xxx glnvg__renderCreate: 1\n"); */
  667. if (gl->flags & NVG_ANTIALIAS) {
  668. /* printf("xxx glnvg__renderCreate: 2a\n"); */
  669. if (glnvg__createShader(&gl->shader, "shader", NULL/*shaderHeader*/, NULL/*"#define EDGE_AA 1\n"*/, fillVertShader, fillFragShader) == 0)
  670. return 0;
  671. } else {
  672. /* printf("xxx glnvg__renderCreate: 2b\n"); */
  673. if (glnvg__createShader(&gl->shader, "shader", NULL/*shaderHeader*/, NULL, fillVertShader, fillFragShader) == 0)
  674. return 0;
  675. }
  676. /* printf("xxx glnvg__renderCreate: 3\n"); */
  677. glnvg__checkError(gl, "uniform locations");
  678. /* printf("xxx glnvg__renderCreate: 4\n"); */
  679. glnvg__getUniforms(&gl->shader);
  680. /* printf("xxx glnvg__renderCreate: 5\n"); */
  681. // Create dynamic vertex array
  682. #if defined NANOVG_GL3
  683. glGenVertexArrays(1, &gl->vertArr);
  684. #endif
  685. glGenBuffers(1, &gl->vertBuf);
  686. /* printf("xxx glnvg__renderCreate: 6\n"); */
  687. #if NANOVG_GL_USE_UNIFORMBUFFER
  688. // Create UBOs
  689. glUniformBlockBinding(gl->shader.prog, gl->shader.loc[GLNVG_LOC_FRAG], GLNVG_FRAG_BINDING);
  690. glGenBuffers(1, &gl->fragBuf);
  691. glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align);
  692. #endif
  693. gl->fragSize = sizeof(GLNVGfragUniforms) + align - sizeof(GLNVGfragUniforms) % align;
  694. /* printf("xxx glnvg__renderCreate: 7\n"); */
  695. glnvg__checkError(gl, "create done");
  696. /* printf("xxx glnvg__renderCreate: 8\n"); */
  697. glFinish();
  698. /* printf("xxx glnvg__renderCreate: LEAVE\n"); */
  699. return 1;
  700. }
  701. static int glnvg__renderCreateTexture(void* uptr, int type, int w, int h, int imageFlags, const unsigned char* data)
  702. {
  703. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  704. GLNVGtexture* tex = glnvg__allocTexture(gl);
  705. if (tex == NULL) return 0;
  706. #ifdef NANOVG_GLES2
  707. // Check for non-power of 2.
  708. if (glnvg__nearestPow2(w) != (unsigned int)w || glnvg__nearestPow2(h) != (unsigned int)h) {
  709. // No repeat
  710. if ((imageFlags & NVG_IMAGE_REPEATX) != 0 || (imageFlags & NVG_IMAGE_REPEATY) != 0) {
  711. printf("Repeat X/Y is not supported for non power-of-two textures (%d x %d)\n", w, h);
  712. imageFlags &= ~(NVG_IMAGE_REPEATX | NVG_IMAGE_REPEATY);
  713. }
  714. // No mips.
  715. if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
  716. printf("Mip-maps is not support for non power-of-two textures (%d x %d)\n", w, h);
  717. imageFlags &= ~NVG_IMAGE_GENERATE_MIPMAPS;
  718. }
  719. }
  720. #endif
  721. glGenTextures(1, &tex->tex);
  722. tex->width = w;
  723. tex->height = h;
  724. tex->type = type;
  725. tex->flags = imageFlags;
  726. glnvg__bindTexture(gl, tex->tex);
  727. glPixelStorei(GL_UNPACK_ALIGNMENT,1);
  728. #ifndef NANOVG_GLES2
  729. glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width);
  730. glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
  731. glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
  732. #endif
  733. #if defined (NANOVG_GL2)
  734. // GL 1.4 and later has support for generating mipmaps using a tex parameter.
  735. if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
  736. glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
  737. }
  738. #endif
  739. if (type == NVG_TEXTURE_RGBA)
  740. glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
  741. else
  742. #if defined(NANOVG_GLES2)
  743. glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, w, h, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
  744. #elif defined(NANOVG_GLES3)
  745. glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data);
  746. #else
  747. glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data);
  748. #endif
  749. if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
  750. if (imageFlags & NVG_IMAGE_NEAREST) {
  751. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST);
  752. } else {
  753. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
  754. }
  755. } else {
  756. if (imageFlags & NVG_IMAGE_NEAREST) {
  757. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
  758. } else {
  759. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  760. }
  761. }
  762. if (imageFlags & NVG_IMAGE_NEAREST) {
  763. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
  764. } else {
  765. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
  766. }
  767. if (imageFlags & NVG_IMAGE_REPEATX)
  768. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
  769. else
  770. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
  771. if (imageFlags & NVG_IMAGE_REPEATY)
  772. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
  773. else
  774. glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
  775. glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
  776. #ifndef NANOVG_GLES2
  777. glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
  778. glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
  779. glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
  780. #endif
  781. // The new way to build mipmaps on GLES and GL3
  782. #if !defined(NANOVG_GL2)
  783. if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
  784. glGenerateMipmap(GL_TEXTURE_2D);
  785. }
  786. #endif
  787. glnvg__checkError(gl, "create tex");
  788. glnvg__bindTexture(gl, 0);
  789. return tex->id;
  790. }
  791. static int glnvg__renderDeleteTexture(void* uptr, int image)
  792. {
  793. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  794. return glnvg__deleteTexture(gl, image);
  795. }
  796. static int glnvg__renderUpdateTexture(void* uptr, int image, int x, int y, int w, int h, const unsigned char* data)
  797. {
  798. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  799. GLNVGtexture* tex = glnvg__findTexture(gl, image);
  800. if (tex == NULL) return 0;
  801. glnvg__bindTexture(gl, tex->tex);
  802. glPixelStorei(GL_UNPACK_ALIGNMENT,1);
  803. #ifndef NANOVG_GLES2
  804. glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width);
  805. glPixelStorei(GL_UNPACK_SKIP_PIXELS, x);
  806. glPixelStorei(GL_UNPACK_SKIP_ROWS, y);
  807. #else
  808. // GLES2 does not support the UNPACK_ROW_LENGTH/SKIP_PIXELS/SKIP_ROWS parameters, so update a whole row at a time.
  809. if (tex->type == NVG_TEXTURE_RGBA)
  810. data += y*tex->width*4;
  811. else
  812. data += y*tex->width;
  813. x = 0;
  814. w = tex->width;
  815. #endif
  816. if (tex->type == NVG_TEXTURE_RGBA)
  817. glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RGBA, GL_UNSIGNED_BYTE, data);
  818. else
  819. #ifdef NANOVG_GLES2
  820. glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
  821. #else
  822. glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RED, GL_UNSIGNED_BYTE, data);
  823. #endif
  824. glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
  825. #ifndef NANOVG_GLES2
  826. glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
  827. glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
  828. glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
  829. #endif
  830. glnvg__bindTexture(gl, 0);
  831. return 1;
  832. }
  833. static int glnvg__renderGetTextureSize(void* uptr, int image, int* w, int* h)
  834. {
  835. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  836. GLNVGtexture* tex = glnvg__findTexture(gl, image);
  837. if (tex == NULL) return 0;
  838. *w = tex->width;
  839. *h = tex->height;
  840. return 1;
  841. }
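// Packs NanoVG's 2x3 affine transform t = [t0..t5] into three padded vec4 columns
// (12 floats), matching the "matrices are actually 3 vec4s" layout above and the
// mat3(frag[i].xyz, ...) reconstruction in the fragment shader.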
  842. static void glnvg__xformToMat3x4(float* m3, float* t)
  843. {
  844. m3[0] = t[0];
  845. m3[1] = t[1];
  846. m3[2] = 0.0f;
  847. m3[3] = 0.0f;
  848. m3[4] = t[2];
  849. m3[5] = t[3];
  850. m3[6] = 0.0f;
  851. m3[7] = 0.0f;
  852. m3[8] = t[4];
  853. m3[9] = t[5];
  854. m3[10] = 1.0f;
  855. m3[11] = 0.0f;
  856. }
  857. static NVGcolor glnvg__premulColor(NVGcolor c)
  858. {
  859. c.r *= c.a;
  860. c.g *= c.a;
  861. c.b *= c.a;
  862. return c;
  863. }
  864. static int glnvg__convertPaint(GLNVGcontext* gl, GLNVGfragUniforms* frag, NVGpaint* paint,
  865. NVGscissor* scissor, float width, float fringe, float strokeThr)
  866. {
  867. GLNVGtexture* tex = NULL;
  868. float invxform[6];
  869. memset(frag, 0, sizeof(*frag));
  870. frag->innerCol = glnvg__premulColor(paint->innerColor);
  871. frag->outerCol = glnvg__premulColor(paint->outerColor);
  872. if (scissor->extent[0] < -0.5f || scissor->extent[1] < -0.5f) {
  873. memset(frag->scissorMat, 0, sizeof(frag->scissorMat));
  874. frag->scissorExt[0] = 1.0f;
  875. frag->scissorExt[1] = 1.0f;
  876. frag->scissorScale[0] = 1.0f;
  877. frag->scissorScale[1] = 1.0f;
  878. } else {
  879. nvgTransformInverse(invxform, scissor->xform);
  880. glnvg__xformToMat3x4(frag->scissorMat, invxform);
  881. frag->scissorExt[0] = scissor->extent[0];
  882. frag->scissorExt[1] = scissor->extent[1];
  883. frag->scissorScale[0] = sqrtf(scissor->xform[0]*scissor->xform[0] + scissor->xform[2]*scissor->xform[2]) / fringe;
  884. frag->scissorScale[1] = sqrtf(scissor->xform[1]*scissor->xform[1] + scissor->xform[3]*scissor->xform[3]) / fringe;
  885. }
  886. memcpy(frag->extent, paint->extent, sizeof(frag->extent));
  887. frag->strokeMult = (width*0.5f + fringe*0.5f) / fringe;
  888. frag->strokeThr = strokeThr;
  889. if (paint->image != 0) {
  890. tex = glnvg__findTexture(gl, paint->image);
  891. if (tex == NULL) return 0;
  892. if ((tex->flags & NVG_IMAGE_FLIPY) != 0) {
  893. float m1[6], m2[6];
  894. nvgTransformTranslate(m1, 0.0f, frag->extent[1] * 0.5f);
  895. nvgTransformMultiply(m1, paint->xform);
  896. nvgTransformScale(m2, 1.0f, -1.0f);
  897. nvgTransformMultiply(m2, m1);
  898. nvgTransformTranslate(m1, 0.0f, -frag->extent[1] * 0.5f);
  899. nvgTransformMultiply(m1, m2);
  900. nvgTransformInverse(invxform, m1);
  901. } else {
  902. nvgTransformInverse(invxform, paint->xform);
  903. }
  904. frag->type = NSVG_SHADER_FILLIMG;
  905. #if NANOVG_GL_USE_UNIFORMBUFFER
  906. if (tex->type == NVG_TEXTURE_RGBA)
  907. frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0 : 1;
  908. else
  909. frag->texType = 2;
  910. #else
  911. if (tex->type == NVG_TEXTURE_RGBA)
  912. frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0.0f : 1.0f;
  913. else
  914. frag->texType = 2.0f;
  915. #endif
  916. // printf("frag->texType = %d\n", frag->texType);
  917. } else {
  918. frag->type = NSVG_SHADER_FILLGRAD;
  919. frag->radius = paint->radius;
  920. frag->feather = paint->feather;
  921. nvgTransformInverse(invxform, paint->xform);
  922. }
  923. glnvg__xformToMat3x4(frag->paintMat, invxform);
  924. return 1;
  925. }
  926. static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i);
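// 'uniformOffset' is a byte offset into gl->uniforms (stride gl->fragSize). Depending on
// NANOVG_GL_USE_UNIFORMBUFFER it is used either as the bind-range offset into the UBO or
// to locate the vec4 array that is uploaded with glUniform4fv.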
  927. static void glnvg__setUniforms(GLNVGcontext* gl, int uniformOffset, int image)
  928. {
  929. #if NANOVG_GL_USE_UNIFORMBUFFER
  930. glBindBufferRange(GL_UNIFORM_BUFFER, GLNVG_FRAG_BINDING, gl->fragBuf, uniformOffset, sizeof(GLNVGfragUniforms));
  931. #else
  932. GLNVGfragUniforms* frag = nvg__fragUniformPtr(gl, uniformOffset);
  933. glUniform4fv(gl->shader.loc[GLNVG_LOC_FRAG], NANOVG_GL_UNIFORMARRAY_SIZE, &(frag->uniformArray[0][0]));
  934. #endif
  935. if (image != 0) {
  936. GLNVGtexture* tex = glnvg__findTexture(gl, image);
  937. glnvg__bindTexture(gl, tex != NULL ? tex->tex : 0);
  938. glnvg__checkError(gl, "tex paint tex");
  939. } else {
  940. glnvg__bindTexture(gl, 0);
  941. }
  942. }
  943. static void glnvg__renderViewport(void* uptr, int width, int height, float devicePixelRatio)
  944. {
  945. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  946. gl->view[0] = (float)width;
  947. gl->view[1] = (float)height;
  948. }
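// Stencil-then-cover fill: first the path triangle fans are rendered with color writes off,
// incrementing/decrementing the stencil for front/back faces (non-zero fill rule); then,
// with NVG_ANTIALIAS, fringe strips are drawn where stencil == 0; finally the bounding quad
// is drawn where stencil != 0, which also clears the stencil back to zero.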
  949. static void glnvg__fill(GLNVGcontext* gl, GLNVGcall* call)
  950. {
  951. GLNVGpath* paths = &gl->paths[call->pathOffset];
  952. int i, npaths = call->pathCount;
  953. // Draw shapes
  954. glEnable(GL_STENCIL_TEST);
  955. glnvg__stencilMask(gl, 0xff);
  956. glnvg__stencilFunc(gl, GL_ALWAYS, 0, 0xff);
  957. glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
  958. // set bindpoint for solid loc
  959. glnvg__setUniforms(gl, call->uniformOffset, 0);
  960. glnvg__checkError(gl, "fill simple");
  961. glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_INCR_WRAP);
  962. glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_DECR_WRAP);
  963. glDisable(GL_CULL_FACE);
  964. for (i = 0; i < npaths; i++)
  965. glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount);
  966. glEnable(GL_CULL_FACE);
  967. // Draw anti-aliased pixels
  968. glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
  969. glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
  970. glnvg__checkError(gl, "fill fill");
  971. if (gl->flags & NVG_ANTIALIAS) {
  972. glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
  973. glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
  974. // Draw fringes
  975. for (i = 0; i < npaths; i++)
  976. glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
  977. }
  978. // Draw fill
  979. glnvg__stencilFunc(gl, GL_NOTEQUAL, 0x0, 0xff);
  980. glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
  981. glDrawArrays(GL_TRIANGLE_STRIP, call->triangleOffset, call->triangleCount);
  982. glDisable(GL_STENCIL_TEST);
  983. }
  984. static void glnvg__convexFill(GLNVGcontext* gl, GLNVGcall* call)
  985. {
  986. GLNVGpath* paths = &gl->paths[call->pathOffset];
  987. int i, npaths = call->pathCount;
  988. glnvg__setUniforms(gl, call->uniformOffset, call->image);
  989. glnvg__checkError(gl, "convex fill");
  990. for (i = 0; i < npaths; i++)
  991. glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount);
  992. if (gl->flags & NVG_ANTIALIAS) {
  993. // Draw fringes
  994. for (i = 0; i < npaths; i++)
  995. glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
  996. }
  997. }
  998. static void glnvg__stroke(GLNVGcontext* gl, GLNVGcall* call)
  999. {
  1000. GLNVGpath* paths = &gl->paths[call->pathOffset];
  1001. int npaths = call->pathCount, i;
  1002. if (gl->flags & NVG_STENCIL_STROKES) {
  1003. glEnable(GL_STENCIL_TEST);
  1004. glnvg__stencilMask(gl, 0xff);
  1005. // Fill the stroke base without overlap
  1006. glnvg__stencilFunc(gl, GL_EQUAL, 0x0, 0xff);
  1007. glStencilOp(GL_KEEP, GL_KEEP, GL_INCR);
  1008. glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
  1009. glnvg__checkError(gl, "stroke fill 0");
  1010. for (i = 0; i < npaths; i++)
  1011. glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
  1012. // Draw anti-aliased pixels.
  1013. glnvg__setUniforms(gl, call->uniformOffset, call->image);
  1014. glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
  1015. glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
  1016. for (i = 0; i < npaths; i++)
  1017. glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
  1018. // Clear stencil buffer.
  1019. glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
  1020. glnvg__stencilFunc(gl, GL_ALWAYS, 0x0, 0xff);
  1021. glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
  1022. glnvg__checkError(gl, "stroke fill 1");
  1023. for (i = 0; i < npaths; i++)
  1024. glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
  1025. glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
  1026. glDisable(GL_STENCIL_TEST);
  1027. // glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);
  1028. } else {
  1029. glnvg__setUniforms(gl, call->uniformOffset, call->image);
  1030. glnvg__checkError(gl, "stroke fill");
  1031. // Draw Strokes
  1032. for (i = 0; i < npaths; i++)
  1033. glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
  1034. }
  1035. }
  1036. static void glnvg__triangles(GLNVGcontext* gl, GLNVGcall* call)
  1037. {
  1038. glnvg__setUniforms(gl, call->uniformOffset, call->image);
  1039. glnvg__checkError(gl, "triangles fill");
  1040. glDrawArrays(GL_TRIANGLES, call->triangleOffset, call->triangleCount);
  1041. }
  1042. static void glnvg__renderCancel(void* uptr) {
  1043. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  1044. gl->nverts = 0;
  1045. gl->npaths = 0;
  1046. gl->ncalls = 0;
  1047. gl->nuniforms = 0;
  1048. }
  1049. static GLenum glnvg_convertBlendFuncFactor(int factor)
  1050. {
  1051. if (factor == NVG_ZERO)
  1052. return GL_ZERO;
  1053. if (factor == NVG_ONE)
  1054. return GL_ONE;
  1055. if (factor == NVG_SRC_COLOR)
  1056. return GL_SRC_COLOR;
  1057. if (factor == NVG_ONE_MINUS_SRC_COLOR)
  1058. return GL_ONE_MINUS_SRC_COLOR;
  1059. if (factor == NVG_DST_COLOR)
  1060. return GL_DST_COLOR;
  1061. if (factor == NVG_ONE_MINUS_DST_COLOR)
  1062. return GL_ONE_MINUS_DST_COLOR;
  1063. if (factor == NVG_SRC_ALPHA)
  1064. return GL_SRC_ALPHA;
  1065. if (factor == NVG_ONE_MINUS_SRC_ALPHA)
  1066. return GL_ONE_MINUS_SRC_ALPHA;
  1067. if (factor == NVG_DST_ALPHA)
  1068. return GL_DST_ALPHA;
  1069. if (factor == NVG_ONE_MINUS_DST_ALPHA)
  1070. return GL_ONE_MINUS_DST_ALPHA;
  1071. if (factor == NVG_SRC_ALPHA_SATURATE)
  1072. return GL_SRC_ALPHA_SATURATE;
  1073. return GL_INVALID_ENUM;
  1074. }
  1075. static GLNVGblend glnvg__blendCompositeOperation(NVGcompositeOperationState op)
  1076. {
  1077. GLNVGblend blend;
  1078. blend.srcRGB = glnvg_convertBlendFuncFactor(op.srcRGB);
  1079. blend.dstRGB = glnvg_convertBlendFuncFactor(op.dstRGB);
  1080. blend.srcAlpha = glnvg_convertBlendFuncFactor(op.srcAlpha);
  1081. blend.dstAlpha = glnvg_convertBlendFuncFactor(op.dstAlpha);
  1082. if (blend.srcRGB == GL_INVALID_ENUM || blend.dstRGB == GL_INVALID_ENUM || blend.srcAlpha == GL_INVALID_ENUM || blend.dstAlpha == GL_INVALID_ENUM)
  1083. {
  1084. blend.srcRGB = GL_ONE;
  1085. blend.dstRGB = GL_ONE_MINUS_SRC_ALPHA;
  1086. blend.srcAlpha = GL_ONE;
  1087. blend.dstAlpha = GL_ONE_MINUS_SRC_ALPHA;
  1088. }
  1089. return blend;
  1090. }
  1091. static void glnvg__renderFlush(void* uptr)
  1092. {
  1093. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  1094. int i;
  1095. if (gl->ncalls > 0) {
  1096. // Set up required GL state.
  1097. glUseProgram(gl->shader.prog);
  1098. glEnable(GL_CULL_FACE);
  1099. glCullFace(GL_BACK);
  1100. glFrontFace(GL_CCW);
  1101. glEnable(GL_BLEND);
  1102. glDisable(GL_DEPTH_TEST);
  1103. glDisable(GL_SCISSOR_TEST);
  1104. glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
  1105. glStencilMask(0xffffffff);
  1106. glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
  1107. glStencilFunc(GL_ALWAYS, 0, 0xffffffff);
  1108. glActiveTexture(GL_TEXTURE0);
  1109. glBindTexture(GL_TEXTURE_2D, 0);
  1110. #if NANOVG_GL_USE_STATE_FILTER
  1111. gl->boundTexture = 0;
  1112. gl->stencilMask = 0xffffffff;
  1113. gl->stencilFunc = GL_ALWAYS;
  1114. gl->stencilFuncRef = 0;
  1115. gl->stencilFuncMask = 0xffffffff;
  1116. gl->blendFunc.srcRGB = GL_INVALID_ENUM;
  1117. gl->blendFunc.srcAlpha = GL_INVALID_ENUM;
  1118. gl->blendFunc.dstRGB = GL_INVALID_ENUM;
  1119. gl->blendFunc.dstAlpha = GL_INVALID_ENUM;
  1120. #endif
  1121. #if NANOVG_GL_USE_UNIFORMBUFFER
  1122. // Upload ubo for frag shaders
  1123. glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
  1124. glBufferData(GL_UNIFORM_BUFFER, gl->nuniforms * gl->fragSize, gl->uniforms, GL_STREAM_DRAW);
  1125. #endif
  1126. // Upload vertex data
  1127. #if defined NANOVG_GL3
  1128. glBindVertexArray(gl->vertArr);
  1129. #endif
  1130. glBindBuffer(GL_ARRAY_BUFFER, gl->vertBuf);
  1131. glBufferData(GL_ARRAY_BUFFER, gl->nverts * sizeof(NVGvertex), gl->verts, GL_STREAM_DRAW);
  1132. glEnableVertexAttribArray(0);
  1133. glEnableVertexAttribArray(1);
  1134. glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(size_t)0);
  1135. glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(0 + 2*sizeof(float)));
  1136. // Set view and texture just once per frame.
  1137. glUniform1i(gl->shader.loc[GLNVG_LOC_TEX], 0);
  1138. glUniform2fv(gl->shader.loc[GLNVG_LOC_VIEWSIZE], 1, gl->view);
  1139. #if NANOVG_GL_USE_UNIFORMBUFFER
  1140. glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
  1141. #endif
  1142. for (i = 0; i < gl->ncalls; i++) {
  1143. GLNVGcall* call = &gl->calls[i];
  1144. glnvg__blendFuncSeparate(gl,&call->blendFunc);
  1145. if (call->type == GLNVG_FILL)
  1146. glnvg__fill(gl, call);
  1147. else if (call->type == GLNVG_CONVEXFILL)
  1148. glnvg__convexFill(gl, call);
  1149. else if (call->type == GLNVG_STROKE)
  1150. glnvg__stroke(gl, call);
  1151. else if (call->type == GLNVG_TRIANGLES)
  1152. glnvg__triangles(gl, call);
  1153. }
  1154. glDisableVertexAttribArray(0);
  1155. glDisableVertexAttribArray(1);
  1156. #if defined NANOVG_GL3
  1157. glBindVertexArray(0);
  1158. #endif
  1159. glDisable(GL_CULL_FACE);
  1160. glBindBuffer(GL_ARRAY_BUFFER, 0);
  1161. glUseProgram(0);
  1162. glnvg__bindTexture(gl, 0);
  1163. }
  1164. // Reset calls
  1165. gl->nverts = 0;
  1166. gl->npaths = 0;
  1167. gl->ncalls = 0;
  1168. gl->nuniforms = 0;
  1169. }
  1170. static int glnvg__maxVertCount(const NVGpath* paths, int npaths)
  1171. {
  1172. int i, count = 0;
  1173. for (i = 0; i < npaths; i++) {
  1174. count += paths[i].nfill;
  1175. count += paths[i].nstroke;
  1176. }
  1177. return count;
  1178. }
  1179. static GLNVGcall* glnvg__allocCall(GLNVGcontext* gl)
  1180. {
  1181. GLNVGcall* ret = NULL;
  1182. if (gl->ncalls+1 > gl->ccalls) {
  1183. GLNVGcall* calls;
  1184. int ccalls = glnvg__maxi(gl->ncalls+1, 128) + gl->ccalls/2; // 1.5x Overallocate
  1185. calls = (GLNVGcall*)realloc(gl->calls, sizeof(GLNVGcall) * ccalls);
  1186. if (calls == NULL) return NULL;
  1187. gl->calls = calls;
  1188. gl->ccalls = ccalls;
  1189. }
  1190. ret = &gl->calls[gl->ncalls++];
  1191. memset(ret, 0, sizeof(GLNVGcall));
  1192. return ret;
  1193. }
  1194. static int glnvg__allocPaths(GLNVGcontext* gl, int n)
  1195. {
  1196. int ret = 0;
  1197. if (gl->npaths+n > gl->cpaths) {
  1198. GLNVGpath* paths;
  1199. int cpaths = glnvg__maxi(gl->npaths + n, 128) + gl->cpaths/2; // 1.5x Overallocate
  1200. paths = (GLNVGpath*)realloc(gl->paths, sizeof(GLNVGpath) * cpaths);
  1201. if (paths == NULL) return -1;
  1202. gl->paths = paths;
  1203. gl->cpaths = cpaths;
  1204. }
  1205. ret = gl->npaths;
  1206. gl->npaths += n;
  1207. return ret;
  1208. }
  1209. static int glnvg__allocVerts(GLNVGcontext* gl, int n)
  1210. {
  1211. int ret = 0;
  1212. if (gl->nverts+n > gl->cverts) {
  1213. NVGvertex* verts;
  1214. int cverts = glnvg__maxi(gl->nverts + n, 4096) + gl->cverts/2; // 1.5x Overallocate
  1215. verts = (NVGvertex*)realloc(gl->verts, sizeof(NVGvertex) * cverts);
  1216. if (verts == NULL) return -1;
  1217. gl->verts = verts;
  1218. gl->cverts = cverts;
  1219. }
  1220. ret = gl->nverts;
  1221. gl->nverts += n;
  1222. return ret;
  1223. }
  1224. static int glnvg__allocFragUniforms(GLNVGcontext* gl, int n)
  1225. {
  1226. int ret = 0, structSize = gl->fragSize;
  1227. if (gl->nuniforms+n > gl->cuniforms) {
  1228. unsigned char* uniforms;
  1229. int cuniforms = glnvg__maxi(gl->nuniforms+n, 128) + gl->cuniforms/2; // 1.5x Overallocate
  1230. uniforms = (unsigned char*)realloc(gl->uniforms, structSize * cuniforms);
  1231. if (uniforms == NULL) return -1;
  1232. gl->uniforms = uniforms;
  1233. gl->cuniforms = cuniforms;
  1234. }
  1235. ret = gl->nuniforms * structSize;
  1236. gl->nuniforms += n;
  1237. return ret;
  1238. }
  1239. static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i)
  1240. {
  1241. return (GLNVGfragUniforms*)&gl->uniforms[i];
  1242. }
  1243. static void glnvg__vset(NVGvertex* vtx, float x, float y, float u, float v)
  1244. {
  1245. vtx->x = x;
  1246. vtx->y = y;
  1247. vtx->u = u;
  1248. vtx->v = v;
  1249. }
  1250. static void glnvg__renderFill(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe,
  1251. const float* bounds, const NVGpath* paths, int npaths)
  1252. {
  1253. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  1254. GLNVGcall* call = glnvg__allocCall(gl);
  1255. NVGvertex* quad;
  1256. GLNVGfragUniforms* frag;
  1257. int i, maxverts, offset;
  1258. if (call == NULL) return;
  1259. call->type = GLNVG_FILL;
  1260. call->triangleCount = 4;
  1261. call->pathOffset = glnvg__allocPaths(gl, npaths);
  1262. if (call->pathOffset == -1) goto error;
  1263. call->pathCount = npaths;
  1264. call->image = paint->image;
  1265. call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);
  1266. if (npaths == 1 && paths[0].convex)
  1267. {
  1268. call->type = GLNVG_CONVEXFILL;
  1269. call->triangleCount = 0; // Bounding box fill quad not needed for convex fill
  1270. }
  1271. // Allocate vertices for all the paths.
  1272. maxverts = glnvg__maxVertCount(paths, npaths) + call->triangleCount;
  1273. offset = glnvg__allocVerts(gl, maxverts);
  1274. if (offset == -1) goto error;
  1275. for (i = 0; i < npaths; i++) {
  1276. GLNVGpath* copy = &gl->paths[call->pathOffset + i];
  1277. const NVGpath* path = &paths[i];
  1278. memset(copy, 0, sizeof(GLNVGpath));
  1279. if (path->nfill > 0) {
  1280. copy->fillOffset = offset;
  1281. copy->fillCount = path->nfill;
  1282. memcpy(&gl->verts[offset], path->fill, sizeof(NVGvertex) * path->nfill);
  1283. offset += path->nfill;
  1284. }
  1285. if (path->nstroke > 0) {
  1286. copy->strokeOffset = offset;
  1287. copy->strokeCount = path->nstroke;
  1288. memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke);
  1289. offset += path->nstroke;
  1290. }
  1291. }
  1292. // Setup uniforms for draw calls
  1293. if (call->type == GLNVG_FILL) {
  1294. // Quad
  1295. call->triangleOffset = offset;
  1296. quad = &gl->verts[call->triangleOffset];
  1297. glnvg__vset(&quad[0], bounds[2], bounds[3], 0.5f, 1.0f);
  1298. glnvg__vset(&quad[1], bounds[2], bounds[1], 0.5f, 1.0f);
  1299. glnvg__vset(&quad[2], bounds[0], bounds[3], 0.5f, 1.0f);
  1300. glnvg__vset(&quad[3], bounds[0], bounds[1], 0.5f, 1.0f);
  1301. call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
  1302. if (call->uniformOffset == -1) goto error;
  1303. // Simple shader for stencil
  1304. frag = nvg__fragUniformPtr(gl, call->uniformOffset);
  1305. memset(frag, 0, sizeof(*frag));
  1306. frag->strokeThr = -1.0f;
  1307. frag->type = NSVG_SHADER_SIMPLE;
  1308. // Fill shader
  1309. glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, fringe, fringe, -1.0f);
  1310. } else {
  1311. call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
  1312. if (call->uniformOffset == -1) goto error;
  1313. // Fill shader
  1314. glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, fringe, fringe, -1.0f);
  1315. }
  1316. return;
  1317. error:
  1318. // We get here if call alloc was ok, but something else is not.
  1319. // Roll back the last call to prevent drawing it.
  1320. if (gl->ncalls > 0) gl->ncalls--;
  1321. }
  1322. static void glnvg__renderStroke(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe,
  1323. float strokeWidth, const NVGpath* paths, int npaths)
  1324. {
  1325. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  1326. GLNVGcall* call = glnvg__allocCall(gl);
  1327. int i, maxverts, offset;
  1328. if (call == NULL) return;
  1329. call->type = GLNVG_STROKE;
  1330. call->pathOffset = glnvg__allocPaths(gl, npaths);
  1331. if (call->pathOffset == -1) goto error;
  1332. call->pathCount = npaths;
  1333. call->image = paint->image;
  1334. call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);
  1335. // Allocate vertices for all the paths.
  1336. maxverts = glnvg__maxVertCount(paths, npaths);
  1337. offset = glnvg__allocVerts(gl, maxverts);
  1338. if (offset == -1) goto error;
  1339. for (i = 0; i < npaths; i++) {
  1340. GLNVGpath* copy = &gl->paths[call->pathOffset + i];
  1341. const NVGpath* path = &paths[i];
  1342. memset(copy, 0, sizeof(GLNVGpath));
  1343. if (path->nstroke) {
  1344. copy->strokeOffset = offset;
  1345. copy->strokeCount = path->nstroke;
  1346. memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke);
  1347. offset += path->nstroke;
  1348. }
  1349. }
  1350. if (gl->flags & NVG_STENCIL_STROKES) {
  1351. // Fill shader
  1352. call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
  1353. if (call->uniformOffset == -1) goto error;
  1354. glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f);
  1355. glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);
  1356. } else {
  1357. // Fill shader
  1358. call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
  1359. if (call->uniformOffset == -1) goto error;
  1360. glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f);
  1361. }
  1362. return;
  1363. error:
  1364. // We get here if call alloc was ok, but something else is not.
  1365. // Roll back the last call to prevent drawing it.
  1366. if (gl->ncalls > 0) gl->ncalls--;
  1367. }
  1368. static void glnvg__renderTriangles(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor,
  1369. const NVGvertex* verts, int nverts)
  1370. {
  1371. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  1372. GLNVGcall* call = glnvg__allocCall(gl);
  1373. GLNVGfragUniforms* frag;
  1374. if (call == NULL) return;
  1375. call->type = GLNVG_TRIANGLES;
  1376. call->image = paint->image;
  1377. call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);
  1378. // Allocate vertices for all the paths.
  1379. call->triangleOffset = glnvg__allocVerts(gl, nverts);
  1380. if (call->triangleOffset == -1) goto error;
  1381. call->triangleCount = nverts;
  1382. memcpy(&gl->verts[call->triangleOffset], verts, sizeof(NVGvertex) * nverts);
  1383. // Fill shader
  1384. call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
  1385. if (call->uniformOffset == -1) goto error;
  1386. frag = nvg__fragUniformPtr(gl, call->uniformOffset);
  1387. glnvg__convertPaint(gl, frag, paint, scissor, 1.0f, 1.0f, -1.0f);
  1388. frag->type = NSVG_SHADER_IMG;
  1389. return;
  1390. error:
  1391. // We get here if call alloc was ok, but something else is not.
  1392. // Roll back the last call to prevent drawing it.
  1393. if (gl->ncalls > 0) gl->ncalls--;
  1394. }
  1395. static void glnvg__renderDelete(void* uptr)
  1396. {
  1397. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  1398. int i;
  1399. if (gl == NULL) return;
  1400. glnvg__deleteShader(&gl->shader);
  1401. #if NANOVG_GL3
  1402. #if NANOVG_GL_USE_UNIFORMBUFFER
  1403. if (gl->fragBuf != 0)
  1404. glDeleteBuffers(1, &gl->fragBuf);
  1405. #endif
  1406. if (gl->vertArr != 0)
  1407. glDeleteVertexArrays(1, &gl->vertArr);
  1408. #endif
  1409. if (gl->vertBuf != 0)
  1410. glDeleteBuffers(1, &gl->vertBuf);
  1411. for (i = 0; i < gl->ntextures; i++) {
  1412. if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0)
  1413. glDeleteTextures(1, &gl->textures[i].tex);
  1414. }
  1415. free(gl->textures);
  1416. free(gl->paths);
  1417. free(gl->verts);
  1418. free(gl->uniforms);
  1419. free(gl->calls);
  1420. free(gl);
  1421. }
  1422. #if defined NANOVG_GL2
  1423. NVGcontext* nvgCreateGL2(int flags)
  1424. #elif defined NANOVG_GL3
  1425. NVGcontext* nvgCreateGL3(int flags)
  1426. #elif defined NANOVG_GLES2
  1427. NVGcontext* nvgCreateGLES2(int flags)
  1428. #elif defined NANOVG_GLES3
  1429. NVGcontext* nvgCreateGLES3(int flags)
  1430. #endif
  1431. {
  1432. NVGparams params;
  1433. NVGcontext* ctx = NULL;
  1434. GLNVGcontext* gl = (GLNVGcontext*)malloc(sizeof(GLNVGcontext));
  1435. if (gl == NULL) goto error;
  1436. memset(gl, 0, sizeof(GLNVGcontext));
  1437. memset(&params, 0, sizeof(params));
  1438. params.renderCreate = glnvg__renderCreate;
  1439. params.renderCreateTexture = glnvg__renderCreateTexture;
  1440. params.renderDeleteTexture = glnvg__renderDeleteTexture;
  1441. params.renderUpdateTexture = glnvg__renderUpdateTexture;
  1442. params.renderGetTextureSize = glnvg__renderGetTextureSize;
  1443. params.renderViewport = glnvg__renderViewport;
  1444. params.renderCancel = glnvg__renderCancel;
  1445. params.renderFlush = glnvg__renderFlush;
  1446. params.renderFill = glnvg__renderFill;
  1447. params.renderStroke = glnvg__renderStroke;
  1448. params.renderTriangles = glnvg__renderTriangles;
  1449. params.renderDelete = glnvg__renderDelete;
  1450. params.userPtr = gl;
  1451. params.edgeAntiAlias = flags & NVG_ANTIALIAS ? 1 : 0;
  1452. gl->flags = flags;
  1453. /* printf("xxx nvgCreateInternal 1\n"); */
  1454. ctx = nvgCreateInternal(&params);
  1455. /* printf("xxx nvgCreateInternal 2\n"); */
  1456. if (ctx == NULL) goto error;
  1457. return ctx;
  1458. error:
  1459. // 'gl' is freed by nvgDeleteInternal.
  1460. if (ctx != NULL) nvgDeleteInternal(ctx);
  1461. return NULL;
  1462. }
  1463. #if defined NANOVG_GL2
  1464. void nvgDeleteGL2(NVGcontext* ctx)
  1465. #elif defined NANOVG_GL3
  1466. void nvgDeleteGL3(NVGcontext* ctx)
  1467. #elif defined NANOVG_GLES2
  1468. void nvgDeleteGLES2(NVGcontext* ctx)
  1469. #elif defined NANOVG_GLES3
  1470. void nvgDeleteGLES3(NVGcontext* ctx)
  1471. #endif
  1472. {
  1473. nvgDeleteInternal(ctx);
  1474. }
  1475. #if defined NANOVG_GL2
  1476. int nvglCreateImageFromHandleGL2(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
  1477. #elif defined NANOVG_GL3
  1478. int nvglCreateImageFromHandleGL3(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
  1479. #elif defined NANOVG_GLES2
  1480. int nvglCreateImageFromHandleGLES2(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
  1481. #elif defined NANOVG_GLES3
  1482. int nvglCreateImageFromHandleGLES3(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
  1483. #endif
  1484. {
  1485. GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr;
  1486. GLNVGtexture* tex = glnvg__allocTexture(gl);
  1487. if (tex == NULL) return 0;
  1488. tex->type = NVG_TEXTURE_RGBA;
  1489. tex->tex = textureId;
  1490. tex->flags = imageFlags;
  1491. tex->width = w;
  1492. tex->height = h;
  1493. return tex->id;
  1494. }
  1495. #if defined NANOVG_GL2
  1496. GLuint nvglImageHandleGL2(NVGcontext* ctx, int image)
  1497. #elif defined NANOVG_GL3
  1498. GLuint nvglImageHandleGL3(NVGcontext* ctx, int image)
  1499. #elif defined NANOVG_GLES2
  1500. GLuint nvglImageHandleGLES2(NVGcontext* ctx, int image)
  1501. #elif defined NANOVG_GLES3
  1502. GLuint nvglImageHandleGLES3(NVGcontext* ctx, int image)
  1503. #endif
  1504. {
  1505. GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr;
  1506. GLNVGtexture* tex = glnvg__findTexture(gl, image);
  1507. return tex->tex;
  1508. }
  1509. #endif /* NANOVG_GL_IMPLEMENTATION */