You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1526 lines
41KB

  1. //
  2. // Copyright (c) 2009-2013 Mikko Mononen memon@inside.org
  3. //
  4. // This software is provided 'as-is', without any express or implied
  5. // warranty. In no event will the authors be held liable for any damages
  6. // arising from the use of this software.
  7. // Permission is granted to anyone to use this software for any purpose,
  8. // including commercial applications, and to alter it and redistribute it
  9. // freely, subject to the following restrictions:
  10. // 1. The origin of this software must not be misrepresented; you must not
  11. // claim that you wrote the original software. If you use this software
  12. // in a product, an acknowledgment in the product documentation would be
  13. // appreciated but is not required.
  14. // 2. Altered source versions must be plainly marked as such, and must not be
  15. // misrepresented as being the original software.
  16. // 3. This notice may not be removed or altered from any source distribution.
  17. //
  18. #ifndef NANOVG_GL_H
  19. #define NANOVG_GL_H
  20. #ifdef __cplusplus
  21. extern "C" {
  22. #endif
  23. // Create flags
// Flags passed to nvgCreateGL*() controlling backend behavior.
enum NVGcreateFlags {
	// Flag indicating if geometry based anti-aliasing is used (may not be needed when using MSAA).
	NVG_ANTIALIAS = 1<<0,
	// Flag indicating if strokes should be drawn using stencil buffer. The rendering will be a little
	// slower, but path overlaps (i.e. self-intersecting or sharp turns) will be drawn just once.
	NVG_STENCIL_STROKES = 1<<1,
	// Flag indicating that additional debug checks are done.
	NVG_DEBUG = 1<<2,
};
  33. #if defined NANOVG_GL2_IMPLEMENTATION
  34. # define NANOVG_GL2 1
  35. # define NANOVG_GL_IMPLEMENTATION 1
  36. #elif defined NANOVG_GL3_IMPLEMENTATION
  37. # define NANOVG_GL3 1
  38. # define NANOVG_GL_IMPLEMENTATION 1
  39. # define NANOVG_GL_USE_UNIFORMBUFFER 1
  40. #elif defined NANOVG_GLES2_IMPLEMENTATION
  41. # define NANOVG_GLES2 1
  42. # define NANOVG_GL_IMPLEMENTATION 1
  43. #elif defined NANOVG_GLES3_IMPLEMENTATION
  44. # define NANOVG_GLES3 1
  45. # define NANOVG_GL_IMPLEMENTATION 1
  46. #endif
  47. #define NANOVG_GL_USE_STATE_FILTER (1)
  48. // Creates NanoVG contexts for different OpenGL (ES) versions.
  49. // Flags should be combination of the create flags above.
  50. #if defined NANOVG_GL2
  51. NVGcontext* nvgCreateGL2(int flags);
  52. void nvgDeleteGL2(NVGcontext* ctx);
  53. #endif
  54. #if defined NANOVG_GL3
  55. NVGcontext* nvgCreateGL3(int flags);
  56. void nvgDeleteGL3(NVGcontext* ctx);
  57. #endif
  58. #if defined NANOVG_GLES2
  59. NVGcontext* nvgCreateGLES2(int flags);
  60. void nvgDeleteGLES2(NVGcontext* ctx);
  61. #endif
  62. #if defined NANOVG_GLES3
  63. NVGcontext* nvgCreateGLES3(int flags);
  64. void nvgDeleteGLES3(NVGcontext* ctx);
  65. #endif
  66. // These are additional flags on top of NVGimageFlags.
// GL-backend-specific image flags, ORed on top of NVGimageFlags
// (uses bit 16 to stay clear of the core flag range).
enum NVGimageFlagsGL {
	NVG_IMAGE_NODELETE = 1<<16, // Do not delete GL texture handle.
};
  70. int nvglCreateImageFromHandle(NVGcontext* ctx, GLuint textureId, int w, int h, int flags);
  71. GLuint nvglImageHandle(NVGcontext* ctx, int image);
  72. #ifdef __cplusplus
  73. }
  74. #endif
  75. #endif /* NANOVG_GL_H */
  76. #ifdef NANOVG_GL_IMPLEMENTATION
  77. #include <stdlib.h>
  78. #include <stdio.h>
  79. #include <string.h>
  80. #include <math.h>
  81. #include "nanovg.h"
// Indices into GLNVGshader.loc[] for the fill shader's uniforms.
enum GLNVGuniformLoc {
	GLNVG_LOC_VIEWSIZE, // vec2 viewSize
	GLNVG_LOC_TEX,      // sampler2D tex
	GLNVG_LOC_FRAG,     // "frag" uniform array, or block index when UBOs are used
	GLNVG_MAX_LOCS
};
// Values written to GLNVGfragUniforms.type; the fragment shader branches
// on this to pick the render path (see "type == N" checks in fillFragShader).
enum GLNVGshaderType {
	NSVG_SHADER_FILLGRAD, // 0: gradient fill
	NSVG_SHADER_FILLIMG,  // 1: image fill
	NSVG_SHADER_SIMPLE,   // 2: plain white output (stencil pass)
	NSVG_SHADER_IMG       // 3: textured triangles
};
#if NANOVG_GL_USE_UNIFORMBUFFER
// Uniform-buffer binding points used by the fill shader.
enum GLNVGuniformBindings {
	GLNVG_FRAG_BINDING = 0, // binding index of the "frag" uniform block
};
#endif
// A compiled+linked GL program plus cached uniform locations.
struct GLNVGshader {
	GLuint prog; // linked program object
	GLuint frag; // fragment shader object
	GLuint vert; // vertex shader object
	GLint loc[GLNVG_MAX_LOCS]; // uniform locations, indexed by GLNVGuniformLoc
};
typedef struct GLNVGshader GLNVGshader;
// Bookkeeping for one GL texture managed by this backend.
struct GLNVGtexture {
	int id;       // nanovg-side handle (0 means the slot is free)
	GLuint tex;   // GL texture name
	int width, height;
	int type;     // NVG_TEXTURE_RGBA or alpha-only
	int flags;    // NVGimageFlags | NVGimageFlagsGL
};
typedef struct GLNVGtexture GLNVGtexture;
// Kind of deferred draw call recorded during a frame.
enum GLNVGcallType {
	GLNVG_NONE = 0,
	GLNVG_FILL,       // stencil-then-cover fill
	GLNVG_CONVEXFILL, // direct fill of a convex path
	GLNVG_STROKE,
	GLNVG_TRIANGLES,
};
// One recorded draw call; offsets index into the per-frame
// paths/verts/uniforms arrays in GLNVGcontext.
struct GLNVGcall {
	int type;           // GLNVGcallType
	int image;          // texture id, 0 for none
	int pathOffset;     // first entry in gl->paths
	int pathCount;
	int triangleOffset; // first vertex of the triangle range
	int triangleCount;
	int uniformOffset;  // byte offset into gl->uniforms
};
typedef struct GLNVGcall GLNVGcall;
// Vertex ranges for one sub-path: fan-fill geometry and stroke/fringe strip.
struct GLNVGpath {
	int fillOffset;   // first vertex of the fill fan
	int fillCount;
	int strokeOffset; // first vertex of the stroke/fringe strip
	int strokeCount;
};
typedef struct GLNVGpath GLNVGpath;
// Per-draw fragment shader parameters. The memory layout is a contract with
// the GLSL "frag" uniform block / vec4 array — do not reorder fields.
struct GLNVGfragUniforms {
#if NANOVG_GL_USE_UNIFORMBUFFER
	float scissorMat[12]; // matrices are actually 3 vec4s
	float paintMat[12];
	struct NVGcolor innerCol;
	struct NVGcolor outerCol;
	float scissorExt[2];
	float scissorScale[2];
	float extent[2];
	float radius;
	float feather;
	float strokeMult;
	float strokeThr;
	int texType;
	int type;
#else
	// note: after modifying layout or size of uniform array,
	// don't forget to also update the fragment shader source!
	#define NANOVG_GL_UNIFORMARRAY_SIZE 11
	union {
		struct {
			float scissorMat[12]; // matrices are actually 3 vec4s
			float paintMat[12];
			struct NVGcolor innerCol;
			struct NVGcolor outerCol;
			float scissorExt[2];
			float scissorScale[2];
			float extent[2];
			float radius;
			float feather;
			float strokeMult;
			float strokeThr;
			float texType; // floats here so the union aliases a vec4 array
			float type;
		};
		// Same data viewed as the vec4[11] uploaded via glUniform4fv.
		float uniformArray[NANOVG_GL_UNIFORMARRAY_SIZE][4];
	};
#endif
};
typedef struct GLNVGfragUniforms GLNVGfragUniforms;
// Backend state for one NanoVG GL context.
struct GLNVGcontext {
	GLNVGshader shader;     // the single fill shader program
	GLNVGtexture* textures; // growable texture table
	float view[2];          // viewport size set by glnvg__renderViewport
	int ntextures;          // used entries in textures
	int ctextures;          // capacity of textures
	int textureId;          // last handed-out texture id
	GLuint vertBuf;         // vertex VBO
#if defined NANOVG_GL3
	GLuint vertArr;         // VAO (GL3 only)
#endif
#if NANOVG_GL_USE_UNIFORMBUFFER
	GLuint fragBuf;         // UBO for fragment uniforms
#endif
	int fragSize;           // sizeof(GLNVGfragUniforms) rounded up to UBO alignment
	int flags;              // NVGcreateFlags
	// Per frame buffers
	GLNVGcall* calls;       // recorded draw calls
	int ccalls;
	int ncalls;
	GLNVGpath* paths;       // per-call path ranges
	int cpaths;
	int npaths;
	struct NVGvertex* verts; // vertex data for the frame
	int cverts;
	int nverts;
	unsigned char* uniforms; // packed GLNVGfragUniforms, stride = fragSize
	int cuniforms;
	int nuniforms;
	// cached state (avoids redundant GL calls when the filter is enabled)
#if NANOVG_GL_USE_STATE_FILTER
	GLuint boundTexture;
	GLuint stencilMask;
	GLenum stencilFunc;
	GLint stencilFuncRef;
	GLuint stencilFuncMask;
#endif
};
typedef struct GLNVGcontext GLNVGcontext;
  217. static int glnvg__maxi(int a, int b) { return a > b ? a : b; }
  218. #ifdef NANOVG_GLES2
  219. static unsigned int glnvg__nearestPow2(unsigned int num)
  220. {
  221. unsigned n = num > 0 ? num - 1 : 0;
  222. n |= n >> 1;
  223. n |= n >> 2;
  224. n |= n >> 4;
  225. n |= n >> 8;
  226. n |= n >> 16;
  227. n++;
  228. return n;
  229. }
  230. #endif
  231. static void glnvg__bindTexture(GLNVGcontext* gl, GLuint tex)
  232. {
  233. #if NANOVG_GL_USE_STATE_FILTER
  234. if (gl->boundTexture != tex) {
  235. gl->boundTexture = tex;
  236. glBindTexture(GL_TEXTURE_2D, tex);
  237. }
  238. #else
  239. glBindTexture(GL_TEXTURE_2D, tex);
  240. #endif
  241. }
  242. static void glnvg__stencilMask(GLNVGcontext* gl, GLuint mask)
  243. {
  244. #if NANOVG_GL_USE_STATE_FILTER
  245. if (gl->stencilMask != mask) {
  246. gl->stencilMask = mask;
  247. glStencilMask(mask);
  248. }
  249. #else
  250. glStencilMask(mask);
  251. #endif
  252. }
  253. static void glnvg__stencilFunc(GLNVGcontext* gl, GLenum func, GLint ref, GLuint mask)
  254. {
  255. #if NANOVG_GL_USE_STATE_FILTER
  256. if ((gl->stencilFunc != func) ||
  257. (gl->stencilFuncRef != ref) ||
  258. (gl->stencilFuncMask != mask)) {
  259. gl->stencilFunc = func;
  260. gl->stencilFuncRef = ref;
  261. gl->stencilFuncMask = mask;
  262. glStencilFunc(func, ref, mask);
  263. }
  264. #else
  265. glStencilFunc(func, ref, mask);
  266. #endif
  267. }
  268. static GLNVGtexture* glnvg__allocTexture(GLNVGcontext* gl)
  269. {
  270. GLNVGtexture* tex = NULL;
  271. int i;
  272. for (i = 0; i < gl->ntextures; i++) {
  273. if (gl->textures[i].id == 0) {
  274. tex = &gl->textures[i];
  275. break;
  276. }
  277. }
  278. if (tex == NULL) {
  279. if (gl->ntextures+1 > gl->ctextures) {
  280. GLNVGtexture* textures;
  281. int ctextures = glnvg__maxi(gl->ntextures+1, 4) + gl->ctextures/2; // 1.5x Overallocate
  282. textures = (GLNVGtexture*)realloc(gl->textures, sizeof(GLNVGtexture)*ctextures);
  283. if (textures == NULL) return NULL;
  284. gl->textures = textures;
  285. gl->ctextures = ctextures;
  286. }
  287. tex = &gl->textures[gl->ntextures++];
  288. }
  289. memset(tex, 0, sizeof(*tex));
  290. tex->id = ++gl->textureId;
  291. return tex;
  292. }
  293. static GLNVGtexture* glnvg__findTexture(GLNVGcontext* gl, int id)
  294. {
  295. int i;
  296. for (i = 0; i < gl->ntextures; i++)
  297. if (gl->textures[i].id == id)
  298. return &gl->textures[i];
  299. return NULL;
  300. }
  301. static int glnvg__deleteTexture(GLNVGcontext* gl, int id)
  302. {
  303. int i;
  304. for (i = 0; i < gl->ntextures; i++) {
  305. if (gl->textures[i].id == id) {
  306. if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0)
  307. glDeleteTextures(1, &gl->textures[i].tex);
  308. memset(&gl->textures[i], 0, sizeof(gl->textures[i]));
  309. return 1;
  310. }
  311. }
  312. return 0;
  313. }
  314. static void glnvg__dumpShaderError(GLuint shader, const char* name, const char* type)
  315. {
  316. GLchar str[512+1];
  317. GLsizei len = 0;
  318. glGetShaderInfoLog(shader, 512, &len, str);
  319. if (len > 512) len = 512;
  320. str[len] = '\0';
  321. printf("Shader %s/%s error:\n%s\n", name, type, str);
  322. }
  323. static void glnvg__dumpProgramError(GLuint prog, const char* name)
  324. {
  325. GLchar str[512+1];
  326. GLsizei len = 0;
  327. glGetProgramInfoLog(prog, 512, &len, str);
  328. if (len > 512) len = 512;
  329. str[len] = '\0';
  330. printf("Program %s error:\n%s\n", name, str);
  331. }
  332. static void glnvg__checkError(GLNVGcontext* gl, const char* str)
  333. {
  334. GLenum err;
  335. if ((gl->flags & NVG_DEBUG) == 0) return;
  336. err = glGetError();
  337. if (err != GL_NO_ERROR) {
  338. printf("Error %08x after %s\n", err, str);
  339. return;
  340. }
  341. }
  342. static int glnvg__createShader(GLNVGshader* shader, const char* name, const char* header, const char* opts, const char* vshader, const char* fshader)
  343. {
  344. GLint status;
  345. GLuint prog, vert, frag;
  346. const char* str[3];
  347. str[0] = header;
  348. str[1] = opts != NULL ? opts : "";
  349. memset(shader, 0, sizeof(*shader));
  350. prog = glCreateProgram();
  351. vert = glCreateShader(GL_VERTEX_SHADER);
  352. frag = glCreateShader(GL_FRAGMENT_SHADER);
  353. str[2] = vshader;
  354. glShaderSource(vert, 3, str, 0);
  355. str[2] = fshader;
  356. glShaderSource(frag, 3, str, 0);
  357. glCompileShader(vert);
  358. glGetShaderiv(vert, GL_COMPILE_STATUS, &status);
  359. if (status != GL_TRUE) {
  360. glnvg__dumpShaderError(vert, name, "vert");
  361. return 0;
  362. }
  363. glCompileShader(frag);
  364. glGetShaderiv(frag, GL_COMPILE_STATUS, &status);
  365. if (status != GL_TRUE) {
  366. glnvg__dumpShaderError(frag, name, "frag");
  367. return 0;
  368. }
  369. glAttachShader(prog, vert);
  370. glAttachShader(prog, frag);
  371. glBindAttribLocation(prog, 0, "vertex");
  372. glBindAttribLocation(prog, 1, "tcoord");
  373. glLinkProgram(prog);
  374. glGetProgramiv(prog, GL_LINK_STATUS, &status);
  375. if (status != GL_TRUE) {
  376. glnvg__dumpProgramError(prog, name);
  377. return 0;
  378. }
  379. shader->prog = prog;
  380. shader->vert = vert;
  381. shader->frag = frag;
  382. return 1;
  383. }
  384. static void glnvg__deleteShader(GLNVGshader* shader)
  385. {
  386. if (shader->prog != 0)
  387. glDeleteProgram(shader->prog);
  388. if (shader->vert != 0)
  389. glDeleteShader(shader->vert);
  390. if (shader->frag != 0)
  391. glDeleteShader(shader->frag);
  392. }
// Caches the fill shader's uniform locations into shader->loc[].
static void glnvg__getUniforms(GLNVGshader* shader)
{
	shader->loc[GLNVG_LOC_VIEWSIZE] = glGetUniformLocation(shader->prog, "viewSize");
	shader->loc[GLNVG_LOC_TEX] = glGetUniformLocation(shader->prog, "tex");
#if NANOVG_GL_USE_UNIFORMBUFFER
	// With UBOs, "frag" is a uniform block: store its block index instead.
	shader->loc[GLNVG_LOC_FRAG] = glGetUniformBlockIndex(shader->prog, "frag");
#else
	shader->loc[GLNVG_LOC_FRAG] = glGetUniformLocation(shader->prog, "frag");
#endif
}
// Renderer init callback: builds the fill shader (GLSL source selected by
// backend via preprocessor), creates the vertex buffer/array and, when
// enabled, the fragment-uniform UBO. Returns 1 on success, 0 on failure.
static int glnvg__renderCreate(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int align = 4; // default uniform alignment; overwritten by GL query below when UBOs are used

	// TODO: mediump float may not be enough for GLES2 in iOS.
	// see the following discussion: https://github.com/memononen/nanovg/issues/46
	static const char* shaderHeader =
#if defined NANOVG_GL2
		"#define NANOVG_GL2 1\n"
#elif defined NANOVG_GL3
		"#version 150 core\n"
		"#define NANOVG_GL3 1\n"
#elif defined NANOVG_GLES2
		"#version 100\n"
		"#define NANOVG_GL2 1\n"
#elif defined NANOVG_GLES3
		"#version 300 es\n"
		"#define NANOVG_GL3 1\n"
#endif

#if NANOVG_GL_USE_UNIFORMBUFFER
	"#define USE_UNIFORMBUFFER 1\n"
#else
	"#define UNIFORMARRAY_SIZE 11\n"
#endif
	"\n";

	// Passes position through in pixel space and maps it to clip space.
	static const char* fillVertShader =
		"#ifdef NANOVG_GL3\n"
		"	uniform vec2 viewSize;\n"
		"	in vec2 vertex;\n"
		"	in vec2 tcoord;\n"
		"	out vec2 ftcoord;\n"
		"	out vec2 fpos;\n"
		"#else\n"
		"	uniform vec2 viewSize;\n"
		"	attribute vec2 vertex;\n"
		"	attribute vec2 tcoord;\n"
		"	varying vec2 ftcoord;\n"
		"	varying vec2 fpos;\n"
		"#endif\n"
		"void main(void) {\n"
		"	ftcoord = tcoord;\n"
		"	fpos = vertex;\n"
		"	gl_Position = vec4(2.0*vertex.x/viewSize.x - 1.0, 1.0 - 2.0*vertex.y/viewSize.y, 0, 1);\n"
		"}\n";

	// Evaluates gradient/image/stencil/textured paths based on the "type"
	// uniform; the CPU-side GLNVGfragUniforms layout must match this.
	static const char* fillFragShader =
		"#ifdef GL_ES\n"
		"#if defined(GL_FRAGMENT_PRECISION_HIGH) || defined(NANOVG_GL3)\n"
		" precision highp float;\n"
		"#else\n"
		" precision mediump float;\n"
		"#endif\n"
		"#endif\n"
		"#ifdef NANOVG_GL3\n"
		"#ifdef USE_UNIFORMBUFFER\n"
		"	layout(std140) uniform frag {\n"
		"		mat3 scissorMat;\n"
		"		mat3 paintMat;\n"
		"		vec4 innerCol;\n"
		"		vec4 outerCol;\n"
		"		vec2 scissorExt;\n"
		"		vec2 scissorScale;\n"
		"		vec2 extent;\n"
		"		float radius;\n"
		"		float feather;\n"
		"		float strokeMult;\n"
		"		float strokeThr;\n"
		"		int texType;\n"
		"		int type;\n"
		"	};\n"
		"#else\n" // NANOVG_GL3 && !USE_UNIFORMBUFFER
		"	uniform vec4 frag[UNIFORMARRAY_SIZE];\n"
		"#endif\n"
		"	uniform sampler2D tex;\n"
		"	in vec2 ftcoord;\n"
		"	in vec2 fpos;\n"
		"	out vec4 outColor;\n"
		"#else\n" // !NANOVG_GL3
		"	uniform vec4 frag[UNIFORMARRAY_SIZE];\n"
		"	uniform sampler2D tex;\n"
		"	varying vec2 ftcoord;\n"
		"	varying vec2 fpos;\n"
		"#endif\n"
		"#ifndef USE_UNIFORMBUFFER\n"
		"	#define scissorMat mat3(frag[0].xyz, frag[1].xyz, frag[2].xyz)\n"
		"	#define paintMat mat3(frag[3].xyz, frag[4].xyz, frag[5].xyz)\n"
		"	#define innerCol frag[6]\n"
		"	#define outerCol frag[7]\n"
		"	#define scissorExt frag[8].xy\n"
		"	#define scissorScale frag[8].zw\n"
		"	#define extent frag[9].xy\n"
		"	#define radius frag[9].z\n"
		"	#define feather frag[9].w\n"
		"	#define strokeMult frag[10].x\n"
		"	#define strokeThr frag[10].y\n"
		"	#define texType int(frag[10].z)\n"
		"	#define type int(frag[10].w)\n"
		"#endif\n"
		"\n"
		"float sdroundrect(vec2 pt, vec2 ext, float rad) {\n"
		"	vec2 ext2 = ext - vec2(rad,rad);\n"
		"	vec2 d = abs(pt) - ext2;\n"
		"	return min(max(d.x,d.y),0.0) + length(max(d,0.0)) - rad;\n"
		"}\n"
		"\n"
		"// Scissoring\n"
		"float scissorMask(vec2 p) {\n"
		"	vec2 sc = (abs((scissorMat * vec3(p,1.0)).xy) - scissorExt);\n"
		"	sc = vec2(0.5,0.5) - sc * scissorScale;\n"
		"	return clamp(sc.x,0.0,1.0) * clamp(sc.y,0.0,1.0);\n"
		"}\n"
		"#ifdef EDGE_AA\n"
		"// Stroke - from [0..1] to clipped pyramid, where the slope is 1px.\n"
		"float strokeMask() {\n"
		"	return min(1.0, (1.0-abs(ftcoord.x*2.0-1.0))*strokeMult) * min(1.0, ftcoord.y);\n"
		"}\n"
		"#endif\n"
		"\n"
		"void main(void) {\n"
		"   vec4 result;\n"
		"	float scissor = scissorMask(fpos);\n"
		"#ifdef EDGE_AA\n"
		"	float strokeAlpha = strokeMask();\n"
		"#else\n"
		"	float strokeAlpha = 1.0;\n"
		"#endif\n"
		"	if (type == 0) {			// Gradient\n"
		"		// Calculate gradient color using box gradient\n"
		"		vec2 pt = (paintMat * vec3(fpos,1.0)).xy;\n"
		"		float d = clamp((sdroundrect(pt, extent, radius) + feather*0.5) / feather, 0.0, 1.0);\n"
		"		vec4 color = mix(innerCol,outerCol,d);\n"
		"		// Combine alpha\n"
		"		color *= strokeAlpha * scissor;\n"
		"		result = color;\n"
		"	} else if (type == 1) {		// Image\n"
		"		// Calculate color fron texture\n"
		"		vec2 pt = (paintMat * vec3(fpos,1.0)).xy / extent;\n"
		"#ifdef NANOVG_GL3\n"
		"		vec4 color = texture(tex, pt);\n"
		"#else\n"
		"		vec4 color = texture2D(tex, pt);\n"
		"#endif\n"
		"		if (texType == 1) color = vec4(color.xyz*color.w,color.w);"
		"		if (texType == 2) color = vec4(color.x);"
		"		// Apply color tint and alpha.\n"
		"		color *= innerCol;\n"
		"		// Combine alpha\n"
		"		color *= strokeAlpha * scissor;\n"
		"		result = color;\n"
		"	} else if (type == 2) {		// Stencil fill\n"
		"		result = vec4(1,1,1,1);\n"
		"	} else if (type == 3) {		// Textured tris\n"
		"#ifdef NANOVG_GL3\n"
		"		vec4 color = texture(tex, ftcoord);\n"
		"#else\n"
		"		vec4 color = texture2D(tex, ftcoord);\n"
		"#endif\n"
		"		if (texType == 1) color = vec4(color.xyz*color.w,color.w);"
		"		if (texType == 2) color = vec4(color.x);"
		"		color *= scissor;\n"
		"		result = color * innerCol;\n"
		"	}\n"
		"#ifdef EDGE_AA\n"
		"	if (strokeAlpha < strokeThr) discard;\n"
		"#endif\n"
		"#ifdef NANOVG_GL3\n"
		"	outColor = result;\n"
		"#else\n"
		"	gl_FragColor = result;\n"
		"#endif\n"
		"}\n";

	glnvg__checkError(gl, "init");

	// EDGE_AA enables the fringe/stroke alpha path in the fragment shader.
	if (gl->flags & NVG_ANTIALIAS) {
		if (glnvg__createShader(&gl->shader, "shader", shaderHeader, "#define EDGE_AA 1\n", fillVertShader, fillFragShader) == 0)
			return 0;
	} else {
		if (glnvg__createShader(&gl->shader, "shader", shaderHeader, NULL, fillVertShader, fillFragShader) == 0)
			return 0;
	}

	glnvg__checkError(gl, "uniform locations");
	glnvg__getUniforms(&gl->shader);

	// Create dynamic vertex array
#if defined NANOVG_GL3
	glGenVertexArrays(1, &gl->vertArr);
#endif
	glGenBuffers(1, &gl->vertBuf);

#if NANOVG_GL_USE_UNIFORMBUFFER
	// Create UBOs
	glUniformBlockBinding(gl->shader.prog, gl->shader.loc[GLNVG_LOC_FRAG], GLNVG_FRAG_BINDING);
	glGenBuffers(1, &gl->fragBuf);
	glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align);
#endif
	// Stride between per-call uniform records, padded to the GL alignment.
	gl->fragSize = sizeof(GLNVGfragUniforms) + align - sizeof(GLNVGfragUniforms) % align;

	glnvg__checkError(gl, "create done");

	glFinish();

	return 1;
}
// Creates a GL texture from image data (RGBA or single-channel) and applies
// the requested filtering/wrap/mipmap flags. Returns the nanovg texture id,
// or 0 on failure. data may be NULL to allocate storage only.
static int glnvg__renderCreateTexture(void* uptr, int type, int w, int h, int imageFlags, const unsigned char* data)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGtexture* tex = glnvg__allocTexture(gl);

	if (tex == NULL) return 0;

#ifdef NANOVG_GLES2
	// Check for non-power of 2.
	if (glnvg__nearestPow2(w) != (unsigned int)w || glnvg__nearestPow2(h) != (unsigned int)h) {
		// No repeat
		if ((imageFlags & NVG_IMAGE_REPEATX) != 0 || (imageFlags & NVG_IMAGE_REPEATY) != 0) {
			printf("Repeat X/Y is not supported for non power-of-two textures (%d x %d)\n", w, h);
			imageFlags &= ~(NVG_IMAGE_REPEATX | NVG_IMAGE_REPEATY);
		}
		// No mips.
		if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
			printf("Mip-maps is not support for non power-of-two textures (%d x %d)\n", w, h);
			imageFlags &= ~NVG_IMAGE_GENERATE_MIPMAPS;
		}
	}
#endif

	glGenTextures(1, &tex->tex);
	tex->width = w;
	tex->height = h;
	tex->type = type;
	tex->flags = imageFlags;
	glnvg__bindTexture(gl, tex->tex);

	// Rows in the source data are tightly packed.
	glPixelStorei(GL_UNPACK_ALIGNMENT,1);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

#if defined (NANOVG_GL2)
	// GL 1.4 and later has support for generating mipmaps using a tex parameter.
	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
	}
#endif

	// Single-channel internal format differs per backend:
	// GLES2 has no GL_RED, GLES3 needs sized GL_R8, desktop uses GL_RED.
	if (type == NVG_TEXTURE_RGBA)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
	else
#if defined(NANOVG_GLES2)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, w, h, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
#elif defined(NANOVG_GLES3)
		glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data);
#else
		glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data);
#endif

	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
	} else {
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	}
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

	if (imageFlags & NVG_IMAGE_REPEATX)
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
	else
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);

	if (imageFlags & NVG_IMAGE_REPEATY)
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
	else
		glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

	// Restore default unpack state so later GL users are unaffected.
	glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

	// The new way to build mipmaps on GLES and GL3
#if !defined(NANOVG_GL2)
	if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) {
		glGenerateMipmap(GL_TEXTURE_2D);
	}
#endif

	glnvg__checkError(gl, "create tex");
	glnvg__bindTexture(gl, 0);

	return tex->id;
}
  677. static int glnvg__renderDeleteTexture(void* uptr, int image)
  678. {
  679. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  680. return glnvg__deleteTexture(gl, image);
  681. }
// Renderer callback: uploads a sub-rectangle of new pixel data into an
// existing texture. Returns 1 on success, 0 if the texture id is unknown.
static int glnvg__renderUpdateTexture(void* uptr, int image, int x, int y, int w, int h, const unsigned char* data)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGtexture* tex = glnvg__findTexture(gl, image);

	if (tex == NULL) return 0;
	glnvg__bindTexture(gl, tex->tex);

	glPixelStorei(GL_UNPACK_ALIGNMENT,1);

#ifndef NANOVG_GLES2
	// Desktop/GLES3: use unpack skip/row-length to address the sub-rect
	// directly inside the full-width source buffer.
	glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, x);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, y);
#else
	// No support for all of skip, need to update a whole row at a time.
	if (tex->type == NVG_TEXTURE_RGBA)
		data += y*tex->width*4;
	else
		data += y*tex->width;
	x = 0;
	w = tex->width;
#endif

	if (tex->type == NVG_TEXTURE_RGBA)
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RGBA, GL_UNSIGNED_BYTE, data);
	else
#ifdef NANOVG_GLES2
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_LUMINANCE, GL_UNSIGNED_BYTE, data);
#else
		glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RED, GL_UNSIGNED_BYTE, data);
#endif

	// Restore default unpack state.
	glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
#ifndef NANOVG_GLES2
	glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
	glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
	glPixelStorei(GL_UNPACK_SKIP_ROWS, 0);
#endif

	glnvg__bindTexture(gl, 0);

	return 1;
}
  719. static int glnvg__renderGetTextureSize(void* uptr, int image, int* w, int* h)
  720. {
  721. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  722. GLNVGtexture* tex = glnvg__findTexture(gl, image);
  723. if (tex == NULL) return 0;
  724. *w = tex->width;
  725. *h = tex->height;
  726. return 1;
  727. }
  728. static void glnvg__xformToMat3x4(float* m3, float* t)
  729. {
  730. m3[0] = t[0];
  731. m3[1] = t[1];
  732. m3[2] = 0.0f;
  733. m3[3] = 0.0f;
  734. m3[4] = t[2];
  735. m3[5] = t[3];
  736. m3[6] = 0.0f;
  737. m3[7] = 0.0f;
  738. m3[8] = t[4];
  739. m3[9] = t[5];
  740. m3[10] = 1.0f;
  741. m3[11] = 0.0f;
  742. }
  743. static NVGcolor glnvg__premulColor(NVGcolor c)
  744. {
  745. c.r *= c.a;
  746. c.g *= c.a;
  747. c.b *= c.a;
  748. return c;
  749. }
// Fills a GLNVGfragUniforms record from a paint + scissor. width/fringe
// feed the stroke coverage factor, strokeThr the AA discard threshold.
// Returns 1 on success, 0 if the paint references an unknown image.
static int glnvg__convertPaint(GLNVGcontext* gl, GLNVGfragUniforms* frag, NVGpaint* paint,
							   NVGscissor* scissor, float width, float fringe, float strokeThr)
{
	GLNVGtexture* tex = NULL;
	float invxform[6];

	memset(frag, 0, sizeof(*frag));

	frag->innerCol = glnvg__premulColor(paint->innerColor);
	frag->outerCol = glnvg__premulColor(paint->outerColor);

	if (scissor->extent[0] < -0.5f || scissor->extent[1] < -0.5f) {
		// No scissor set: zero matrix plus unit extent/scale disables masking.
		memset(frag->scissorMat, 0, sizeof(frag->scissorMat));
		frag->scissorExt[0] = 1.0f;
		frag->scissorExt[1] = 1.0f;
		frag->scissorScale[0] = 1.0f;
		frag->scissorScale[1] = 1.0f;
	} else {
		// Shader maps fragment positions into scissor space via the inverse.
		nvgTransformInverse(invxform, scissor->xform);
		glnvg__xformToMat3x4(frag->scissorMat, invxform);
		frag->scissorExt[0] = scissor->extent[0];
		frag->scissorExt[1] = scissor->extent[1];
		frag->scissorScale[0] = sqrtf(scissor->xform[0]*scissor->xform[0] + scissor->xform[2]*scissor->xform[2]) / fringe;
		frag->scissorScale[1] = sqrtf(scissor->xform[1]*scissor->xform[1] + scissor->xform[3]*scissor->xform[3]) / fringe;
	}

	memcpy(frag->extent, paint->extent, sizeof(frag->extent));
	frag->strokeMult = (width*0.5f + fringe*0.5f) / fringe;
	frag->strokeThr = strokeThr;

	if (paint->image != 0) {
		tex = glnvg__findTexture(gl, paint->image);
		if (tex == NULL) return 0;
		if ((tex->flags & NVG_IMAGE_FLIPY) != 0) {
			// Flip the paint transform vertically before inverting.
			float flipped[6];
			nvgTransformScale(flipped, 1.0f, -1.0f);
			nvgTransformMultiply(flipped, paint->xform);
			nvgTransformInverse(invxform, flipped);
		} else {
			nvgTransformInverse(invxform, paint->xform);
		}
		frag->type = NSVG_SHADER_FILLIMG;

		// texType: 0 = premultiplied RGBA, 1 = RGBA needing premultiply
		// in the shader, 2 = single channel.
		if (tex->type == NVG_TEXTURE_RGBA)
			frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0 : 1;
		else
			frag->texType = 2;
//		printf("frag->texType = %d\n", frag->texType);
	} else {
		frag->type = NSVG_SHADER_FILLGRAD;
		frag->radius = paint->radius;
		frag->feather = paint->feather;
		nvgTransformInverse(invxform, paint->xform);
	}

	glnvg__xformToMat3x4(frag->paintMat, invxform);

	return 1;
}
static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i);

// Selects the per-call fragment uniforms (UBO range or glUniform4fv upload)
// and binds the call's texture (or unbinds when image == 0).
static void glnvg__setUniforms(GLNVGcontext* gl, int uniformOffset, int image)
{
#if NANOVG_GL_USE_UNIFORMBUFFER
	glBindBufferRange(GL_UNIFORM_BUFFER, GLNVG_FRAG_BINDING, gl->fragBuf, uniformOffset, sizeof(GLNVGfragUniforms));
#else
	GLNVGfragUniforms* frag = nvg__fragUniformPtr(gl, uniformOffset);
	glUniform4fv(gl->shader.loc[GLNVG_LOC_FRAG], NANOVG_GL_UNIFORMARRAY_SIZE, &(frag->uniformArray[0][0]));
#endif

	if (image != 0) {
		GLNVGtexture* tex = glnvg__findTexture(gl, image);
		// Unknown id falls back to texture 0 rather than failing.
		glnvg__bindTexture(gl, tex != NULL ? tex->tex : 0);
		glnvg__checkError(gl, "tex paint tex");
	} else {
		glnvg__bindTexture(gl, 0);
	}
}
  818. static void glnvg__renderViewport(void* uptr, int width, int height)
  819. {
  820. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  821. gl->view[0] = (float)width;
  822. gl->view[1] = (float)height;
  823. }
// Executes a GLNVG_FILL call: stencil-then-cover. Pass 1 writes winding
// counts into the stencil buffer (color masked off), pass 2 optionally
// draws AA fringes where stencil == 0, pass 3 covers non-zero stencil
// with the paint and clears it.
static void glnvg__fill(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* paths = &gl->paths[call->pathOffset];
	int i, npaths = call->pathCount;

	// Draw shapes
	glEnable(GL_STENCIL_TEST);
	glnvg__stencilMask(gl, 0xff);
	glnvg__stencilFunc(gl, GL_ALWAYS, 0, 0xff);
	glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);

	// set bindpoint for solid loc
	glnvg__setUniforms(gl, call->uniformOffset, 0);
	glnvg__checkError(gl, "fill simple");

	// Front faces increment, back faces decrement: non-zero winding rule.
	glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_INCR_WRAP);
	glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_DECR_WRAP);
	glDisable(GL_CULL_FACE);
	for (i = 0; i < npaths; i++)
		glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount);
	glEnable(GL_CULL_FACE);

	// Draw anti-aliased pixels
	glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);

	glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
	glnvg__checkError(gl, "fill fill");

	if (gl->flags & NVG_ANTIALIAS) {
		glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		// Draw fringes
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
	}

	// Draw fill
	glnvg__stencilFunc(gl, GL_NOTEQUAL, 0x0, 0xff);
	glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO); // also clears stencil for the next fill
	glDrawArrays(GL_TRIANGLES, call->triangleOffset, call->triangleCount);

	glDisable(GL_STENCIL_TEST);
}
// Executes a GLNVG_CONVEXFILL call: convex paths need no stencil pass,
// so each path is drawn directly as a triangle fan (plus AA fringes).
static void glnvg__convexFill(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* paths = &gl->paths[call->pathOffset];
	int i, npaths = call->pathCount;

	glnvg__setUniforms(gl, call->uniformOffset, call->image);
	glnvg__checkError(gl, "convex fill");

	for (i = 0; i < npaths; i++)
		glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount);
	if (gl->flags & NVG_ANTIALIAS) {
		// Draw fringes
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
	}
}
// Executes a GLNVG_STROKE call. With NVG_STENCIL_STROKES the stroke is
// drawn in three stencil passes so overlapping segments are shaded once:
// base fill where stencil==0 (incrementing), AA pass, then stencil clear.
// Without the flag, the strips are drawn directly.
static void glnvg__stroke(GLNVGcontext* gl, GLNVGcall* call)
{
	GLNVGpath* paths = &gl->paths[call->pathOffset];
	int npaths = call->pathCount, i;

	if (gl->flags & NVG_STENCIL_STROKES) {

		glEnable(GL_STENCIL_TEST);
		glnvg__stencilMask(gl, 0xff);

		// Fill the stroke base without overlap
		glnvg__stencilFunc(gl, GL_EQUAL, 0x0, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_INCR);
		glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
		glnvg__checkError(gl, "stroke fill 0");
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);

		// Draw anti-aliased pixels.
		glnvg__setUniforms(gl, call->uniformOffset, call->image);
		glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);

		// Clear stencil buffer.
		glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
		glnvg__stencilFunc(gl, GL_ALWAYS, 0x0, 0xff);
		glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
		glnvg__checkError(gl, "stroke fill 1");
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);

		glDisable(GL_STENCIL_TEST);

//		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);

	} else {
		glnvg__setUniforms(gl, call->uniformOffset, call->image);
		glnvg__checkError(gl, "stroke fill");
		// Draw Strokes
		for (i = 0; i < npaths; i++)
			glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount);
	}
}
// Renders a plain triangle-list call: one uniform setup, one draw call.
static void glnvg__triangles(GLNVGcontext* gl, GLNVGcall* call)
{
	glnvg__setUniforms(gl, call->uniformOffset, call->image);
	glnvg__checkError(gl, "triangles fill");

	glDrawArrays(GL_TRIANGLES, call->triangleOffset, call->triangleCount);
}
  917. static void glnvg__renderCancel(void* uptr) {
  918. GLNVGcontext* gl = (GLNVGcontext*)uptr;
  919. gl->nverts = 0;
  920. gl->npaths = 0;
  921. gl->ncalls = 0;
  922. gl->nuniforms = 0;
  923. }
// Submits all recorded calls for the frame to OpenGL: sets up the shared
// pipeline state once, uploads vertex (and optionally uniform-buffer)
// data, replays the call list, then restores state and resets the
// per-frame queues.
static void glnvg__renderFlush(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int i;

	if (gl->ncalls > 0) {

		// Setup required GL state shared by every call.
		glUseProgram(gl->shader.prog);

		// Premultiplied-alpha blending.
		glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
		glEnable(GL_CULL_FACE);
		glCullFace(GL_BACK);
		glFrontFace(GL_CCW);
		glEnable(GL_BLEND);
		glDisable(GL_DEPTH_TEST);
		glDisable(GL_SCISSOR_TEST);
		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
		glStencilMask(0xffffffff);
		glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP);
		glStencilFunc(GL_ALWAYS, 0, 0xffffffff);
		glActiveTexture(GL_TEXTURE0);
		glBindTexture(GL_TEXTURE_2D, 0);
#if NANOVG_GL_USE_STATE_FILTER
		// Keep the redundant-state-change cache in sync with the state
		// just set above.
		gl->boundTexture = 0;
		gl->stencilMask = 0xffffffff;
		gl->stencilFunc = GL_ALWAYS;
		gl->stencilFuncRef = 0;
		gl->stencilFuncMask = 0xffffffff;
#endif

#if NANOVG_GL_USE_UNIFORMBUFFER
		// Upload ubo for frag shaders
		glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
		glBufferData(GL_UNIFORM_BUFFER, gl->nuniforms * gl->fragSize, gl->uniforms, GL_STREAM_DRAW);
#endif

		// Upload vertex data
#if defined NANOVG_GL3
		glBindVertexArray(gl->vertArr);
#endif
		glBindBuffer(GL_ARRAY_BUFFER, gl->vertBuf);
		glBufferData(GL_ARRAY_BUFFER, gl->nverts * sizeof(NVGvertex), gl->verts, GL_STREAM_DRAW);
		glEnableVertexAttribArray(0);
		glEnableVertexAttribArray(1);
		// Attribute 0: position (x,y); attribute 1: texcoord (u,v),
		// interleaved in NVGvertex.
		glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(size_t)0);
		glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(0 + 2*sizeof(float)));

		// Set view and texture just once per frame.
		glUniform1i(gl->shader.loc[GLNVG_LOC_TEX], 0);
		glUniform2fv(gl->shader.loc[GLNVG_LOC_VIEWSIZE], 1, gl->view);

#if NANOVG_GL_USE_UNIFORMBUFFER
		glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf);
#endif

		// Replay the recorded calls in submission order.
		for (i = 0; i < gl->ncalls; i++) {
			GLNVGcall* call = &gl->calls[i];
			if (call->type == GLNVG_FILL)
				glnvg__fill(gl, call);
			else if (call->type == GLNVG_CONVEXFILL)
				glnvg__convexFill(gl, call);
			else if (call->type == GLNVG_STROKE)
				glnvg__stroke(gl, call);
			else if (call->type == GLNVG_TRIANGLES)
				glnvg__triangles(gl, call);
		}

		// Restore neutral state so the host application's GL usage is not
		// surprised by leftover bindings.
		glDisableVertexAttribArray(0);
		glDisableVertexAttribArray(1);
#if defined NANOVG_GL3
		glBindVertexArray(0);
#endif
		glDisable(GL_CULL_FACE);
		glBindBuffer(GL_ARRAY_BUFFER, 0);
		glUseProgram(0);
		glnvg__bindTexture(gl, 0);
	}

	// Reset calls
	gl->nverts = 0;
	gl->npaths = 0;
	gl->ncalls = 0;
	gl->nuniforms = 0;
}
  999. static int glnvg__maxVertCount(const NVGpath* paths, int npaths)
  1000. {
  1001. int i, count = 0;
  1002. for (i = 0; i < npaths; i++) {
  1003. count += paths[i].nfill;
  1004. count += paths[i].nstroke;
  1005. }
  1006. return count;
  1007. }
  1008. static GLNVGcall* glnvg__allocCall(GLNVGcontext* gl)
  1009. {
  1010. GLNVGcall* ret = NULL;
  1011. if (gl->ncalls+1 > gl->ccalls) {
  1012. GLNVGcall* calls;
  1013. int ccalls = glnvg__maxi(gl->ncalls+1, 128) + gl->ccalls/2; // 1.5x Overallocate
  1014. calls = (GLNVGcall*)realloc(gl->calls, sizeof(GLNVGcall) * ccalls);
  1015. if (calls == NULL) return NULL;
  1016. gl->calls = calls;
  1017. gl->ccalls = ccalls;
  1018. }
  1019. ret = &gl->calls[gl->ncalls++];
  1020. memset(ret, 0, sizeof(GLNVGcall));
  1021. return ret;
  1022. }
  1023. static int glnvg__allocPaths(GLNVGcontext* gl, int n)
  1024. {
  1025. int ret = 0;
  1026. if (gl->npaths+n > gl->cpaths) {
  1027. GLNVGpath* paths;
  1028. int cpaths = glnvg__maxi(gl->npaths + n, 128) + gl->cpaths/2; // 1.5x Overallocate
  1029. paths = (GLNVGpath*)realloc(gl->paths, sizeof(GLNVGpath) * cpaths);
  1030. if (paths == NULL) return -1;
  1031. gl->paths = paths;
  1032. gl->cpaths = cpaths;
  1033. }
  1034. ret = gl->npaths;
  1035. gl->npaths += n;
  1036. return ret;
  1037. }
  1038. static int glnvg__allocVerts(GLNVGcontext* gl, int n)
  1039. {
  1040. int ret = 0;
  1041. if (gl->nverts+n > gl->cverts) {
  1042. NVGvertex* verts;
  1043. int cverts = glnvg__maxi(gl->nverts + n, 4096) + gl->cverts/2; // 1.5x Overallocate
  1044. verts = (NVGvertex*)realloc(gl->verts, sizeof(NVGvertex) * cverts);
  1045. if (verts == NULL) return -1;
  1046. gl->verts = verts;
  1047. gl->cverts = cverts;
  1048. }
  1049. ret = gl->nverts;
  1050. gl->nverts += n;
  1051. return ret;
  1052. }
  1053. static int glnvg__allocFragUniforms(GLNVGcontext* gl, int n)
  1054. {
  1055. int ret = 0, structSize = gl->fragSize;
  1056. if (gl->nuniforms+n > gl->cuniforms) {
  1057. unsigned char* uniforms;
  1058. int cuniforms = glnvg__maxi(gl->nuniforms+n, 128) + gl->cuniforms/2; // 1.5x Overallocate
  1059. uniforms = (unsigned char*)realloc(gl->uniforms, structSize * cuniforms);
  1060. if (uniforms == NULL) return -1;
  1061. gl->uniforms = uniforms;
  1062. gl->cuniforms = cuniforms;
  1063. }
  1064. ret = gl->nuniforms * structSize;
  1065. gl->nuniforms += n;
  1066. return ret;
  1067. }
  1068. static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i)
  1069. {
  1070. return (GLNVGfragUniforms*)&gl->uniforms[i];
  1071. }
  1072. static void glnvg__vset(NVGvertex* vtx, float x, float y, float u, float v)
  1073. {
  1074. vtx->x = x;
  1075. vtx->y = y;
  1076. vtx->u = u;
  1077. vtx->v = v;
  1078. }
// Records a fill call for later rendering in glnvg__renderFlush().
// Copies each path's fill/stroke geometry into the shared vertex buffer,
// appends a bounding quad (used by the stencil cover pass), and sets up
// fragment uniforms. A single convex path takes the cheaper
// GLNVG_CONVEXFILL route, which needs no stencil passes.
static void glnvg__renderFill(void* uptr, NVGpaint* paint, NVGscissor* scissor, float fringe,
							  const float* bounds, const NVGpath* paths, int npaths)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	NVGvertex* quad;
	GLNVGfragUniforms* frag;
	int i, maxverts, offset;

	if (call == NULL) return;

	call->type = GLNVG_FILL;
	call->pathOffset = glnvg__allocPaths(gl, npaths);
	if (call->pathOffset == -1) goto error;
	call->pathCount = npaths;
	call->image = paint->image;

	if (npaths == 1 && paths[0].convex)
		call->type = GLNVG_CONVEXFILL;

	// Allocate vertices for all the paths (+6 for the bounding quad).
	maxverts = glnvg__maxVertCount(paths, npaths) + 6;
	offset = glnvg__allocVerts(gl, maxverts);
	if (offset == -1) goto error;

	// Copy the path vertex data, recording per-path offsets/counts.
	for (i = 0; i < npaths; i++) {
		GLNVGpath* copy = &gl->paths[call->pathOffset + i];
		const NVGpath* path = &paths[i];
		memset(copy, 0, sizeof(GLNVGpath));
		if (path->nfill > 0) {
			copy->fillOffset = offset;
			copy->fillCount = path->nfill;
			memcpy(&gl->verts[offset], path->fill, sizeof(NVGvertex) * path->nfill);
			offset += path->nfill;
		}
		if (path->nstroke > 0) {
			copy->strokeOffset = offset;
			copy->strokeCount = path->nstroke;
			memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke);
			offset += path->nstroke;
		}
	}

	// Quad: two triangles covering the path bounds.
	call->triangleOffset = offset;
	call->triangleCount = 6;
	quad = &gl->verts[call->triangleOffset];
	glnvg__vset(&quad[0], bounds[0], bounds[3], 0.5f, 1.0f);
	glnvg__vset(&quad[1], bounds[2], bounds[3], 0.5f, 1.0f);
	glnvg__vset(&quad[2], bounds[2], bounds[1], 0.5f, 1.0f);

	glnvg__vset(&quad[3], bounds[0], bounds[3], 0.5f, 1.0f);
	glnvg__vset(&quad[4], bounds[2], bounds[1], 0.5f, 1.0f);
	glnvg__vset(&quad[5], bounds[0], bounds[1], 0.5f, 1.0f);

	// Setup uniforms for draw calls
	if (call->type == GLNVG_FILL) {
		// Two uniform blocks: a simple shader for the stencil pass,
		// followed by the actual fill shader for the cover pass.
		call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
		if (call->uniformOffset == -1) goto error;
		// Simple shader for stencil
		frag = nvg__fragUniformPtr(gl, call->uniformOffset);
		memset(frag, 0, sizeof(*frag));
		frag->strokeThr = -1.0f;
		frag->type = NSVG_SHADER_SIMPLE;
		// Fill shader
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, fringe, fringe, -1.0f);
	} else {
		// Convex fill: a single uniform block suffices.
		call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
		if (call->uniformOffset == -1) goto error;
		// Fill shader
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, fringe, fringe, -1.0f);
	}

	return;

error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}
// Records a stroke call for later rendering in glnvg__renderFlush().
// Copies all path stroke strips into the shared vertex buffer. With
// NVG_STENCIL_STROKES two uniform blocks are prepared (regular stroke,
// plus a second built with threshold 1.0f - 0.5f/255.0f for the
// stenciled base pass in glnvg__stroke()); otherwise one.
static void glnvg__renderStroke(void* uptr, NVGpaint* paint, NVGscissor* scissor, float fringe,
								float strokeWidth, const NVGpath* paths, int npaths)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	int i, maxverts, offset;

	if (call == NULL) return;

	call->type = GLNVG_STROKE;
	call->pathOffset = glnvg__allocPaths(gl, npaths);
	if (call->pathOffset == -1) goto error;
	call->pathCount = npaths;
	call->image = paint->image;

	// Allocate vertices for all the paths.
	maxverts = glnvg__maxVertCount(paths, npaths);
	offset = glnvg__allocVerts(gl, maxverts);
	if (offset == -1) goto error;

	// Copy the stroke strips, recording per-path offsets/counts.
	for (i = 0; i < npaths; i++) {
		GLNVGpath* copy = &gl->paths[call->pathOffset + i];
		const NVGpath* path = &paths[i];
		memset(copy, 0, sizeof(GLNVGpath));
		if (path->nstroke) {
			copy->strokeOffset = offset;
			copy->strokeCount = path->nstroke;
			memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke);
			offset += path->nstroke;
		}
	}

	if (gl->flags & NVG_STENCIL_STROKES) {
		// Fill shader
		call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
		if (call->uniformOffset == -1) goto error;

		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f);
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f);

	} else {
		// Fill shader
		call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
		if (call->uniformOffset == -1) goto error;
		glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f);
	}

	return;

error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}
// Records a raw triangle-list call: copies 'nverts' vertices into the
// shared vertex buffer and sets up a single uniform block rendered with
// the image shader (NSVG_SHADER_IMG).
static void glnvg__renderTriangles(void* uptr, NVGpaint* paint, NVGscissor* scissor,
								   const NVGvertex* verts, int nverts)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	GLNVGcall* call = glnvg__allocCall(gl);
	GLNVGfragUniforms* frag;

	if (call == NULL) return;

	call->type = GLNVG_TRIANGLES;
	call->image = paint->image;

	// Allocate and copy the vertices.
	call->triangleOffset = glnvg__allocVerts(gl, nverts);
	if (call->triangleOffset == -1) goto error;
	call->triangleCount = nverts;

	memcpy(&gl->verts[call->triangleOffset], verts, sizeof(NVGvertex) * nverts);

	// Fill shader; the type is overridden to sample the texture directly.
	call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
	if (call->uniformOffset == -1) goto error;
	frag = nvg__fragUniformPtr(gl, call->uniformOffset);
	glnvg__convertPaint(gl, frag, paint, scissor, 1.0f, 1.0f, -1.0f);
	frag->type = NSVG_SHADER_IMG;

	return;

error:
	// We get here if call alloc was ok, but something else is not.
	// Roll back the last call to prevent drawing it.
	if (gl->ncalls > 0) gl->ncalls--;
}
// Destroys the renderer: deletes the shader program, GL buffer objects,
// every texture not flagged NVG_IMAGE_NODELETE, all CPU-side queues,
// and finally the GLNVGcontext itself. Safe to call with NULL.
static void glnvg__renderDelete(void* uptr)
{
	GLNVGcontext* gl = (GLNVGcontext*)uptr;
	int i;
	if (gl == NULL) return;

	glnvg__deleteShader(&gl->shader);

#if NANOVG_GL3
#if NANOVG_GL_USE_UNIFORMBUFFER
	if (gl->fragBuf != 0)
		glDeleteBuffers(1, &gl->fragBuf);
#endif
	if (gl->vertArr != 0)
		glDeleteVertexArrays(1, &gl->vertArr);
#endif
	if (gl->vertBuf != 0)
		glDeleteBuffers(1, &gl->vertBuf);

	// Skip textures wrapped from external handles (NVG_IMAGE_NODELETE):
	// those GL objects are owned by the caller.
	for (i = 0; i < gl->ntextures; i++) {
		if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0)
			glDeleteTextures(1, &gl->textures[i].tex);
	}
	free(gl->textures);

	free(gl->paths);
	free(gl->verts);
	free(gl->uniforms);
	free(gl->calls);

	free(gl);
}
// Creates a NanoVG context backed by this OpenGL renderer. 'flags' is a
// combination of NVGcreateFlags (NVG_ANTIALIAS, NVG_STENCIL_STROKES,
// NVG_DEBUG). Returns NULL on failure. The entry point's name depends
// on the GL variant selected at compile time.
#if defined NANOVG_GL2
NVGcontext* nvgCreateGL2(int flags)
#elif defined NANOVG_GL3
NVGcontext* nvgCreateGL3(int flags)
#elif defined NANOVG_GLES2
NVGcontext* nvgCreateGLES2(int flags)
#elif defined NANOVG_GLES3
NVGcontext* nvgCreateGLES3(int flags)
#endif
{
	NVGparams params;
	NVGcontext* ctx = NULL;
	GLNVGcontext* gl = (GLNVGcontext*)malloc(sizeof(GLNVGcontext));
	if (gl == NULL) goto error;
	memset(gl, 0, sizeof(GLNVGcontext));

	// Wire the renderer callbacks into the core.
	memset(&params, 0, sizeof(params));
	params.renderCreate = glnvg__renderCreate;
	params.renderCreateTexture = glnvg__renderCreateTexture;
	params.renderDeleteTexture = glnvg__renderDeleteTexture;
	params.renderUpdateTexture = glnvg__renderUpdateTexture;
	params.renderGetTextureSize = glnvg__renderGetTextureSize;
	params.renderViewport = glnvg__renderViewport;
	params.renderCancel = glnvg__renderCancel;
	params.renderFlush = glnvg__renderFlush;
	params.renderFill = glnvg__renderFill;
	params.renderStroke = glnvg__renderStroke;
	params.renderTriangles = glnvg__renderTriangles;
	params.renderDelete = glnvg__renderDelete;
	params.userPtr = gl;
	params.edgeAntiAlias = flags & NVG_ANTIALIAS ? 1 : 0;

	gl->flags = flags;

	ctx = nvgCreateInternal(&params);
	if (ctx == NULL) goto error;

	return ctx;

error:
	// 'gl' is freed by nvgDeleteInternal.
	// NOTE(review): when nvgCreateInternal() returns NULL it is assumed
	// to have already released 'gl' via the renderDelete callback —
	// confirm against nvgCreateInternal's error path.
	if (ctx != NULL) nvgDeleteInternal(ctx);
	return NULL;
}
// Destroys a NanoVG context created by the matching nvgCreateGL* call.
// All renderer resources (including the GLNVGcontext) are released via
// the renderDelete callback.
#if defined NANOVG_GL2
void nvgDeleteGL2(NVGcontext* ctx)
#elif defined NANOVG_GL3
void nvgDeleteGL3(NVGcontext* ctx)
#elif defined NANOVG_GLES2
void nvgDeleteGLES2(NVGcontext* ctx)
#elif defined NANOVG_GLES3
void nvgDeleteGLES3(NVGcontext* ctx)
#endif
{
	nvgDeleteInternal(ctx);
}
  1298. int nvglCreateImageFromHandle(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags)
  1299. {
  1300. GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr;
  1301. GLNVGtexture* tex = glnvg__allocTexture(gl);
  1302. if (tex == NULL) return 0;
  1303. tex->type = NVG_TEXTURE_RGBA;
  1304. tex->tex = textureId;
  1305. tex->flags = imageFlags;
  1306. tex->width = w;
  1307. tex->height = h;
  1308. return tex->id;
  1309. }
  1310. GLuint nvglImageHandle(NVGcontext* ctx, int image)
  1311. {
  1312. GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr;
  1313. GLNVGtexture* tex = glnvg__findTexture(gl, image);
  1314. return tex->tex;
  1315. }
  1316. #endif /* NANOVG_GL_IMPLEMENTATION */