From dfa31dfba29dc4b30028046dd0c260e96dae9066 Mon Sep 17 00:00:00 2001
From: Mikko Mononen
Date: Wed, 18 Apr 2018 00:02:16 +0300
Subject: [PATCH] Fix for #471 - disable stroke AA gradient when not using AA

---
 src/nanovg.c | 87 ++++++++++++++++++++++++++++++----------------------
 1 file changed, 51 insertions(+), 36 deletions(-)

diff --git a/src/nanovg.c b/src/nanovg.c
index d018034..5902552 100644
--- a/src/nanovg.c
+++ b/src/nanovg.c
@@ -1441,7 +1441,8 @@ static void nvg__chooseBevel(int bevel, NVGpoint* p0, NVGpoint* p1, float w,
 }
 
 static NVGvertex* nvg__roundJoin(NVGvertex* dst, NVGpoint* p0, NVGpoint* p1,
-								 float lw, float rw, float lu, float ru, int ncap, float fringe)
+								 float lw, float rw, float lu, float ru, int ncap,
+								 float fringe)
 {
 	int i, n;
 	float dlx0 = p0->dy;
@@ -1574,36 +1575,39 @@ static NVGvertex* nvg__bevelJoin(NVGvertex* dst, NVGpoint* p0, NVGpoint* p1,
 }
 
 static NVGvertex* nvg__buttCapStart(NVGvertex* dst, NVGpoint* p,
-									float dx, float dy, float w, float d, float aa)
+									float dx, float dy, float w, float d,
+									float aa, float u0, float u1)
 {
 	float px = p->x - dx*d;
 	float py = p->y - dy*d;
 	float dlx = dy;
 	float dly = -dx;
-	nvg__vset(dst, px + dlx*w - dx*aa, py + dly*w - dy*aa, 0,0); dst++;
-	nvg__vset(dst, px - dlx*w - dx*aa, py - dly*w - dy*aa, 1,0); dst++;
-	nvg__vset(dst, px + dlx*w, py + dly*w, 0,1); dst++;
-	nvg__vset(dst, px - dlx*w, py - dly*w, 1,1); dst++;
+	nvg__vset(dst, px + dlx*w - dx*aa, py + dly*w - dy*aa, u0,0); dst++;
+	nvg__vset(dst, px - dlx*w - dx*aa, py - dly*w - dy*aa, u1,0); dst++;
+	nvg__vset(dst, px + dlx*w, py + dly*w, u0,1); dst++;
+	nvg__vset(dst, px - dlx*w, py - dly*w, u1,1); dst++;
 	return dst;
 }
 
 static NVGvertex* nvg__buttCapEnd(NVGvertex* dst, NVGpoint* p,
-								  float dx, float dy, float w, float d, float aa)
+								  float dx, float dy, float w, float d,
+								  float aa, float u0, float u1)
 {
 	float px = p->x + dx*d;
 	float py = p->y + dy*d;
 	float dlx = dy;
 	float dly = -dx;
-	nvg__vset(dst, px + dlx*w, py + dly*w, 0,1); dst++;
-	nvg__vset(dst, px - dlx*w, py - dly*w, 1,1); dst++;
-	nvg__vset(dst, px + dlx*w + dx*aa, py + dly*w + dy*aa, 0,0); dst++;
-	nvg__vset(dst, px - dlx*w + dx*aa, py - dly*w + dy*aa, 1,0); dst++;
+	nvg__vset(dst, px + dlx*w, py + dly*w, u0,1); dst++;
+	nvg__vset(dst, px - dlx*w, py - dly*w, u1,1); dst++;
+	nvg__vset(dst, px + dlx*w + dx*aa, py + dly*w + dy*aa, u0,0); dst++;
+	nvg__vset(dst, px - dlx*w + dx*aa, py - dly*w + dy*aa, u1,0); dst++;
 	return dst;
 }
 
 static NVGvertex* nvg__roundCapStart(NVGvertex* dst, NVGpoint* p,
-									 float dx, float dy, float w, int ncap, float aa)
+									 float dx, float dy, float w, int ncap,
+									 float aa, float u0, float u1)
 {
 	int i;
 	float px = p->x;
@@ -1614,16 +1618,17 @@ static NVGvertex* nvg__roundCapStart(NVGvertex* dst, NVGpoint* p,
 	for (i = 0; i < ncap; i++) {
 		float a = i/(float)(ncap-1)*NVG_PI;
 		float ax = cosf(a) * w, ay = sinf(a) * w;
-		nvg__vset(dst, px - dlx*ax - dx*ay, py - dly*ax - dy*ay, 0,1); dst++;
+		nvg__vset(dst, px - dlx*ax - dx*ay, py - dly*ax - dy*ay, u0,1); dst++;
 		nvg__vset(dst, px, py, 0.5f,1); dst++;
 	}
-	nvg__vset(dst, px + dlx*w, py + dly*w, 0,1); dst++;
-	nvg__vset(dst, px - dlx*w, py - dly*w, 1,1); dst++;
+	nvg__vset(dst, px + dlx*w, py + dly*w, u0,1); dst++;
+	nvg__vset(dst, px - dlx*w, py - dly*w, u1,1); dst++;
 	return dst;
 }
 
 static NVGvertex* nvg__roundCapEnd(NVGvertex* dst, NVGpoint* p,
-								   float dx, float dy, float w, int ncap, float aa)
+								   float dx, float dy, float w, int ncap,
+								   float aa, float u0, float u1)
 {
 	int i;
 	float px = p->x;
@@ -1631,13 +1636,13 @@ static NVGvertex* nvg__roundCapEnd(NVGvertex* dst, NVGpoint* p,
 	float dlx = dy;
 	float dly = -dx;
 	NVG_NOTUSED(aa);
-	nvg__vset(dst, px + dlx*w, py + dly*w, 0,1); dst++;
-	nvg__vset(dst, px - dlx*w, py - dly*w, 1,1); dst++;
+	nvg__vset(dst, px + dlx*w, py + dly*w, u0,1); dst++;
+	nvg__vset(dst, px - dlx*w, py - dly*w, u1,1); dst++;
 	for (i = 0; i < ncap; i++) {
 		float a = i/(float)(ncap-1)*NVG_PI;
 		float ax = cosf(a) * w, ay = sinf(a) * w;
 		nvg__vset(dst, px, py, 0.5f,1); dst++;
-		nvg__vset(dst, px - dlx*ax + dx*ay, py - dly*ax + dy*ay, 0,1); dst++;
+		nvg__vset(dst, px - dlx*ax + dx*ay, py - dly*ax + dy*ay, u0,1); dst++;
 	}
 	return dst;
 }
@@ -1713,15 +1718,24 @@ static void nvg__calculateJoins(NVGcontext* ctx, float w, int lineJoin, float mi
 }
 
 
-static int nvg__expandStroke(NVGcontext* ctx, float w, int lineCap, int lineJoin, float miterLimit)
+static int nvg__expandStroke(NVGcontext* ctx, float w, float fringe, int lineCap, int lineJoin, float miterLimit)
 {
 	NVGpathCache* cache = ctx->cache;
 	NVGvertex* verts;
 	NVGvertex* dst;
 	int cverts, i, j;
-	float aa = ctx->fringeWidth;
+	float aa = fringe;//ctx->fringeWidth;
+	float u0 = 0.0f, u1 = 1.0f;
 	int ncap = nvg__curveDivs(w, NVG_PI, ctx->tessTol);	// Calculate divisions per half circle.
 
+	w += aa * 0.5f;
+
+	// Disable the gradient used for antialiasing when antialiasing is not used.
+	if (aa == 0.0f) {
+		u0 = 0.5f;
+		u1 = 0.5f;
+	}
+
 	nvg__calculateJoins(ctx, w, lineJoin, miterLimit);
 
 	// Calculate max vertex usage.
@@ -1782,42 +1796,42 @@ static int nvg__expandStroke(NVGcontext* ctx, float w, int lineCap, int lineJoin
 			dy = p1->y - p0->y;
 			nvg__normalize(&dx, &dy);
 			if (lineCap == NVG_BUTT)
-				dst = nvg__buttCapStart(dst, p0, dx, dy, w, -aa*0.5f, aa);
+				dst = nvg__buttCapStart(dst, p0, dx, dy, w, -aa*0.5f, aa, u0, u1);
 			else if (lineCap == NVG_BUTT || lineCap == NVG_SQUARE)
-				dst = nvg__buttCapStart(dst, p0, dx, dy, w, w-aa, aa);
+				dst = nvg__buttCapStart(dst, p0, dx, dy, w, w-aa, aa, u0, u1);
 			else if (lineCap == NVG_ROUND)
-				dst = nvg__roundCapStart(dst, p0, dx, dy, w, ncap, aa);
+				dst = nvg__roundCapStart(dst, p0, dx, dy, w, ncap, aa, u0, u1);
 		}
 
 		for (j = s; j < e; ++j) {
 			if ((p1->flags & (NVG_PT_BEVEL | NVG_PR_INNERBEVEL)) != 0) {
 				if (lineJoin == NVG_ROUND) {
-					dst = nvg__roundJoin(dst, p0, p1, w, w, 0, 1, ncap, aa);
+					dst = nvg__roundJoin(dst, p0, p1, w, w, u0, u1, ncap, aa);
 				} else {
-					dst = nvg__bevelJoin(dst, p0, p1, w, w, 0, 1, aa);
+					dst = nvg__bevelJoin(dst, p0, p1, w, w, u0, u1, aa);
 				}
 			} else {
-				nvg__vset(dst, p1->x + (p1->dmx * w), p1->y + (p1->dmy * w), 0,1); dst++;
-				nvg__vset(dst, p1->x - (p1->dmx * w), p1->y - (p1->dmy * w), 1,1); dst++;
+				nvg__vset(dst, p1->x + (p1->dmx * w), p1->y + (p1->dmy * w), u0,1); dst++;
+				nvg__vset(dst, p1->x - (p1->dmx * w), p1->y - (p1->dmy * w), u1,1); dst++;
 			}
 			p0 = p1++;
 		}
 
 		if (loop) {
 			// Loop it
-			nvg__vset(dst, verts[0].x, verts[0].y, 0,1); dst++;
-			nvg__vset(dst, verts[1].x, verts[1].y, 1,1); dst++;
+			nvg__vset(dst, verts[0].x, verts[0].y, u0,1); dst++;
+			nvg__vset(dst, verts[1].x, verts[1].y, u1,1); dst++;
 		} else {
 			// Add cap
 			dx = p1->x - p0->x;
 			dy = p1->y - p0->y;
 			nvg__normalize(&dx, &dy);
 			if (lineCap == NVG_BUTT)
-				dst = nvg__buttCapEnd(dst, p1, dx, dy, w, -aa*0.5f, aa);
+				dst = nvg__buttCapEnd(dst, p1, dx, dy, w, -aa*0.5f, aa, u0, u1);
 			else if (lineCap == NVG_BUTT || lineCap == NVG_SQUARE)
-				dst = nvg__buttCapEnd(dst, p1, dx, dy, w, w-aa, aa);
+				dst = nvg__buttCapEnd(dst, p1, dx, dy, w, w-aa, aa, u0, u1);
 			else if (lineCap == NVG_ROUND)
-				dst = nvg__roundCapEnd(dst, p1, dx, dy, w, ncap, aa);
+				dst = nvg__roundCapEnd(dst, p1, dx, dy, w, ncap, aa, u0, u1);
 		}
 
 		path->nstroke = (int)(dst - verts);
@@ -2240,7 +2254,8 @@ void nvgStroke(NVGcontext* ctx)
 	NVGpaint strokePaint = state->stroke;
 	const NVGpath* path;
 	int i;
-
+
+
 	if (strokeWidth < ctx->fringeWidth) {
 		// If the stroke width is less than pixel size, use alpha to emulate coverage.
 		// Since coverage is area, scale by alpha*alpha.
@@ -2257,9 +2272,9 @@ void nvgStroke(NVGcontext* ctx)
 	nvg__flattenPaths(ctx);
 
 	if (ctx->params.edgeAntiAlias && state->shapeAntiAlias)
-		nvg__expandStroke(ctx, strokeWidth*0.5f + ctx->fringeWidth*0.5f, state->lineCap, state->lineJoin, state->miterLimit);
+		nvg__expandStroke(ctx, strokeWidth*0.5f, ctx->fringeWidth, state->lineCap, state->lineJoin, state->miterLimit);
 	else
-		nvg__expandStroke(ctx, strokeWidth*0.5f, state->lineCap, state->lineJoin, state->miterLimit);
+		nvg__expandStroke(ctx, strokeWidth*0.5f, 0.0f, state->lineCap, state->lineJoin, state->miterLimit);
 
 	ctx->params.renderStroke(ctx->params.userPtr, &strokePaint, state->compositeOperation, &state->scissor, ctx->fringeWidth,
 							 strokeWidth, ctx->cache->paths, ctx->cache->npaths);
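
Why collapsing u0/u1 to 0.5 switches the stroke gradient off: the u texture coordinate written by nvg__vset runs from one edge of the stroke quad to the other, and the fragment shaders in the bundled GL backends turn it into an edge fade of roughly min(1, (1 - |u*2 - 1|) * strokeMult). With u0 = 0 and u1 = 1 that term ramps down to zero at the stroke edges, which is the antialiasing fringe; with u0 = u1 = 0.5 it saturates at 1 everywhere, so a non-antialiased stroke is drawn fully opaque instead of with half-transparent edges. Below is a minimal stand-alone C sketch of that coverage term, assuming the mask has the shape above; stroke_coverage and the strokeMult value are illustrative only, not code from this patch.

#include <math.h>
#include <stdio.h>

/* Hypothetical model of the per-fragment stroke mask. u is the
   interpolated texture coordinate written by nvg__vset (u0 on one
   edge of the stroke quad, u1 on the other). */
static float stroke_coverage(float u, float strokeMult)
{
	float g = 1.0f - fabsf(u*2.0f - 1.0f);  /* 0 at the edges, 1 at the centerline */
	float m = g * strokeMult;
	return m < 1.0f ? m : 1.0f;             /* clamp to full coverage */
}

int main(void)
{
	/* AA on: u interpolates from u0=0 to u1=1, so fragments near an
	   edge get partial coverage (the antialiasing fringe). */
	printf("near edge, AA on : %.2f\n", stroke_coverage(0.02f, 2.0f)); /* ~0.08 */
	/* AA off (this patch): u0 = u1 = 0.5, so u is 0.5 for every
	   fragment and the gradient term is always saturated. */
	printf("anywhere, AA off : %.2f\n", stroke_coverage(0.5f, 2.0f));  /* 1.00 */
	return 0;
}

Moving the half-fringe widening into nvg__expandStroke (w += aa * 0.5f) lets both call sites in nvgStroke pass the plain stroke half-width while producing the same expanded geometry as before; only the fringe parameter now decides whether the edge gradient is emitted.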