diff --git a/dependencies/stb/stb_dxt.h b/dependencies/stb/stb_dxt.h new file mode 100644 index 00000000..08d814f2 --- /dev/null +++ b/dependencies/stb/stb_dxt.h @@ -0,0 +1,719 @@ +// stb_dxt.h - v1.12 - DXT1/DXT5 compressor - public domain +// original by fabian "ryg" giesen - ported to C by stb +// use '#define STB_DXT_IMPLEMENTATION' before including to create the implementation +// +// USAGE: +// call stb_compress_dxt_block() for every block (you must pad) +// source should be a 4x4 block of RGBA data in row-major order; +// Alpha channel is not stored if you specify alpha=0 (but you +// must supply some constant alpha in the alpha channel). +// You can turn on dithering and "high quality" using mode. +// +// version history: +// v1.12 - (ryg) fix bug in single-color table generator +// v1.11 - (ryg) avoid racy global init, better single-color tables, remove dither +// v1.10 - (i.c) various small quality improvements +// v1.09 - (stb) update documentation re: surprising alpha channel requirement +// v1.08 - (stb) fix bug in dxt-with-alpha block +// v1.07 - (stb) bc4; allow not using libc; add STB_DXT_STATIC +// v1.06 - (stb) fix to known-broken 1.05 +// v1.05 - (stb) support bc5/3dc (Arvids Kokins), use extern "C" in C++ (Pavel Krajcevski) +// v1.04 - (ryg) default to no rounding bias for lerped colors (as per S3TC/DX10 spec); +// single color match fix (allow for inexact color interpolation); +// optimal DXT5 index finder; "high quality" mode that runs multiple refinement steps. +// v1.03 - (stb) endianness support +// v1.02 - (stb) fix alpha encoding bug +// v1.01 - (stb) fix bug converting to RGB that messed up quality, thanks ryg & cbloom +// v1.00 - (stb) first release +// +// contributors: +// Rich Geldreich (more accurate index selection) +// Kevin Schmidt (#defines for "freestanding" compilation) +// github:ppiastucki (BC4 support) +// Ignacio Castano - improve DXT endpoint quantization +// Alan Hickman - static table initialization +// +// LICENSE +// +// See end of file for license information. + +#ifndef STB_INCLUDE_STB_DXT_H +#define STB_INCLUDE_STB_DXT_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef STB_DXT_STATIC +#define STBDDEF static +#else +#define STBDDEF extern +#endif + +// compression mode (bitflags) +#define STB_DXT_NORMAL 0 +#define STB_DXT_DITHER 1 // use dithering. was always dubious, now deprecated. does nothing! +#define STB_DXT_HIGHQUAL 2 // high quality mode, does two refinement steps instead of 1. ~30-40% slower. + +STBDDEF void stb_compress_dxt_block(unsigned char *dest, const unsigned char *src_rgba_four_bytes_per_pixel, int alpha, int mode); +STBDDEF void stb_compress_bc4_block(unsigned char *dest, const unsigned char *src_r_one_byte_per_pixel); +STBDDEF void stb_compress_bc5_block(unsigned char *dest, const unsigned char *src_rg_two_byte_per_pixel); + +#define STB_COMPRESS_DXT_BLOCK + +#ifdef __cplusplus +} +#endif +#endif // STB_INCLUDE_STB_DXT_H + +#ifdef STB_DXT_IMPLEMENTATION + +// configuration options for DXT encoder. set them in the project/makefile or just define +// them at the top. + +// STB_DXT_USE_ROUNDING_BIAS +// use a rounding bias during color interpolation. this is closer to what "ideal" +// interpolation would do but doesn't match the S3TC/DX10 spec. old versions (pre-1.03) +// implicitly had this turned on. +// +// in case you're targeting a specific type of hardware (e.g. console programmers): +// NVidia and Intel GPUs (as of 2010) as well as DX9 ref use DXT decoders that are closer +// to STB_DXT_USE_ROUNDING_BIAS. 
+// AMD/ATI, S3 and DX10 ref are closer to rounding with no bias.
+// you also see "(a*5 + b*3) / 8" on some old GPU designs.
+// #define STB_DXT_USE_ROUNDING_BIAS
+
+#include <stdlib.h>
+
+#if !defined(STBD_FABS)
+#include <math.h>
+#endif
+
+#ifndef STBD_FABS
+#define STBD_FABS(x) fabs(x)
+#endif
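+
+// for every possible 8-bit color value, stb__OMatch5/stb__OMatch6 give the 5-bit
+// (resp. 6-bit) endpoint pair whose 1/3-interpolated color reproduces that value
+// with minimal error; they are emitted by the STB_DXT_GENERATE_TABLES program at
+// the end of this file and are used for single-color blocks below.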
+
+static const unsigned char stb__OMatch5[256][2] = {
+   { 0, 0 }, { 0, 0 }, { 0, 1 }, { 0, 1 }, { 1, 0 }, { 1, 0 }, { 1, 0 }, { 1, 1 },
+   { 1, 1 }, { 1, 1 }, { 1, 2 }, { 0, 4 }, { 2, 1 }, { 2, 1 }, { 2, 1 }, { 2, 2 },
+   { 2, 2 }, { 2, 2 }, { 2, 3 }, { 1, 5 }, { 3, 2 }, { 3, 2 }, { 4, 0 }, { 3, 3 },
+   { 3, 3 }, { 3, 3 }, { 3, 4 }, { 3, 4 }, { 3, 4 }, { 3, 5 }, { 4, 3 }, { 4, 3 },
+   { 5, 2 }, { 4, 4 }, { 4, 4 }, { 4, 5 }, { 4, 5 }, { 5, 4 }, { 5, 4 }, { 5, 4 },
+   { 6, 3 }, { 5, 5 }, { 5, 5 }, { 5, 6 }, { 4, 8 }, { 6, 5 }, { 6, 5 }, { 6, 5 },
+   { 6, 6 }, { 6, 6 }, { 6, 6 }, { 6, 7 }, { 5, 9 }, { 7, 6 }, { 7, 6 }, { 8, 4 },
+   { 7, 7 }, { 7, 7 }, { 7, 7 }, { 7, 8 }, { 7, 8 }, { 7, 8 }, { 7, 9 }, { 8, 7 },
+   { 8, 7 }, { 9, 6 }, { 8, 8 }, { 8, 8 }, { 8, 9 }, { 8, 9 }, { 9, 8 }, { 9, 8 },
+   { 9, 8 }, { 10, 7 }, { 9, 9 }, { 9, 9 }, { 9, 10 }, { 8, 12 }, { 10, 9 }, { 10, 9 },
+   { 10, 9 }, { 10, 10 }, { 10, 10 }, { 10, 10 }, { 10, 11 }, { 9, 13 }, { 11, 10 }, { 11, 10 },
+   { 12, 8 }, { 11, 11 }, { 11, 11 }, { 11, 11 }, { 11, 12 }, { 11, 12 }, { 11, 12 }, { 11, 13 },
+   { 12, 11 }, { 12, 11 }, { 13, 10 }, { 12, 12 }, { 12, 12 }, { 12, 13 }, { 12, 13 }, { 13, 12 },
+   { 13, 12 }, { 13, 12 }, { 14, 11 }, { 13, 13 }, { 13, 13 }, { 13, 14 }, { 12, 16 }, { 14, 13 },
+   { 14, 13 }, { 14, 13 }, { 14, 14 }, { 14, 14 }, { 14, 14 }, { 14, 15 }, { 13, 17 }, { 15, 14 },
+   { 15, 14 }, { 16, 12 }, { 15, 15 }, { 15, 15 }, { 15, 15 }, { 15, 16 }, { 15, 16 }, { 15, 16 },
+   { 15, 17 }, { 16, 15 }, { 16, 15 }, { 17, 14 }, { 16, 16 }, { 16, 16 }, { 16, 17 }, { 16, 17 },
+   { 17, 16 }, { 17, 16 }, { 17, 16 }, { 18, 15 }, { 17, 17 }, { 17, 17 }, { 17, 18 }, { 16, 20 },
+   { 18, 17 }, { 18, 17 }, { 18, 17 }, { 18, 18 }, { 18, 18 }, { 18, 18 }, { 18, 19 }, { 17, 21 },
+   { 19, 18 }, { 19, 18 }, { 20, 16 }, { 19, 19 }, { 19, 19 }, { 19, 19 }, { 19, 20 }, { 19, 20 },
+   { 19, 20 }, { 19, 21 }, { 20, 19 }, { 20, 19 }, { 21, 18 }, { 20, 20 }, { 20, 20 }, { 20, 21 },
+   { 20, 21 }, { 21, 20 }, { 21, 20 }, { 21, 20 }, { 22, 19 }, { 21, 21 }, { 21, 21 }, { 21, 22 },
+   { 20, 24 }, { 22, 21 }, { 22, 21 }, { 22, 21 }, { 22, 22 }, { 22, 22 }, { 22, 22 }, { 22, 23 },
+   { 21, 25 }, { 23, 22 }, { 23, 22 }, { 24, 20 }, { 23, 23 }, { 23, 23 }, { 23, 23 }, { 23, 24 },
+   { 23, 24 }, { 23, 24 }, { 23, 25 }, { 24, 23 }, { 24, 23 }, { 25, 22 }, { 24, 24 }, { 24, 24 },
+   { 24, 25 }, { 24, 25 }, { 25, 24 }, { 25, 24 }, { 25, 24 }, { 26, 23 }, { 25, 25 }, { 25, 25 },
+   { 25, 26 }, { 24, 28 }, { 26, 25 }, { 26, 25 }, { 26, 25 }, { 26, 26 }, { 26, 26 }, { 26, 26 },
+   { 26, 27 }, { 25, 29 }, { 27, 26 }, { 27, 26 }, { 28, 24 }, { 27, 27 }, { 27, 27 }, { 27, 27 },
+   { 27, 28 }, { 27, 28 }, { 27, 28 }, { 27, 29 }, { 28, 27 }, { 28, 27 }, { 29, 26 }, { 28, 28 },
+   { 28, 28 }, { 28, 29 }, { 28, 29 }, { 29, 28 }, { 29, 28 }, { 29, 28 }, { 30, 27 }, { 29, 29 },
+   { 29, 29 }, { 29, 30 }, { 29, 30 }, { 30, 29 }, { 30, 29 }, { 30, 29 }, { 30, 30 }, { 30, 30 },
+   { 30, 30 }, { 30, 31 }, { 30, 31 }, { 31, 30 }, { 31, 30 }, { 31, 30 }, { 31, 31 }, { 31, 31 },
+};
+static const unsigned char stb__OMatch6[256][2] = {
+   { 0, 0 }, { 0, 1 }, { 1, 0 }, { 1, 1 }, { 1, 1 }, { 1, 2 }, { 2, 1 }, { 2, 2 },
+   { 2, 2 }, { 2, 3 }, { 3, 2 }, { 3, 3 }, { 3, 3 }, { 3, 4 }, { 4, 3 }, { 4, 4 },
+   { 4, 4 }, { 4, 5 }, { 5, 4 }, { 5, 5 }, { 5, 5 }, { 5, 6 }, { 6, 5 }, { 6, 6 },
+   { 6, 6 }, { 6, 7 }, { 7, 6 }, { 7, 7 }, { 7, 7 }, { 7, 8 }, { 8, 7 }, { 8, 8 },
+   { 8, 8 }, { 8, 9 }, { 9, 8 }, { 9, 9 }, { 9, 9 }, { 9, 10 }, { 10, 9 }, { 10, 10 },
+   { 10, 10 }, { 10, 11 }, { 11, 10 }, { 8, 16 }, { 11, 11 }, { 11, 12 }, { 12, 11 }, { 9, 17 },
+   { 12, 12 }, { 12, 13 }, { 13, 12 }, { 11, 16 }, { 13, 13 }, { 13, 14 }, { 14, 13 }, { 12, 17 },
+   { 14, 14 }, { 14, 15 }, { 15, 14 }, { 14, 16 }, { 15, 15 }, { 15, 16 }, { 16, 14 }, { 16, 15 },
+   { 17, 14 }, { 16, 16 }, { 16, 17 }, { 17, 16 }, { 18, 15 }, { 17, 17 }, { 17, 18 }, { 18, 17 },
+   { 20, 14 }, { 18, 18 }, { 18, 19 }, { 19, 18 }, { 21, 15 }, { 19, 19 }, { 19, 20 }, { 20, 19 },
+   { 20, 20 }, { 20, 20 }, { 20, 21 }, { 21, 20 }, { 21, 21 }, { 21, 21 }, { 21, 22 }, { 22, 21 },
+   { 22, 22 }, { 22, 22 }, { 22, 23 }, { 23, 22 }, { 23, 23 }, { 23, 23 }, { 23, 24 }, { 24, 23 },
+   { 24, 24 }, { 24, 24 }, { 24, 25 }, { 25, 24 }, { 25, 25 }, { 25, 25 }, { 25, 26 }, { 26, 25 },
+   { 26, 26 }, { 26, 26 }, { 26, 27 }, { 27, 26 }, { 24, 32 }, { 27, 27 }, { 27, 28 }, { 28, 27 },
+   { 25, 33 }, { 28, 28 }, { 28, 29 }, { 29, 28 }, { 27, 32 }, { 29, 29 }, { 29, 30 }, { 30, 29 },
+   { 28, 33 }, { 30, 30 }, { 30, 31 }, { 31, 30 }, { 30, 32 }, { 31, 31 }, { 31, 32 }, { 32, 30 },
+   { 32, 31 }, { 33, 30 }, { 32, 32 }, { 32, 33 }, { 33, 32 }, { 34, 31 }, { 33, 33 }, { 33, 34 },
+   { 34, 33 }, { 36, 30 }, { 34, 34 }, { 34, 35 }, { 35, 34 }, { 37, 31 }, { 35, 35 }, { 35, 36 },
+   { 36, 35 }, { 36, 36 }, { 36, 36 }, { 36, 37 }, { 37, 36 }, { 37, 37 }, { 37, 37 }, { 37, 38 },
+   { 38, 37 }, { 38, 38 }, { 38, 38 }, { 38, 39 }, { 39, 38 }, { 39, 39 }, { 39, 39 }, { 39, 40 },
+   { 40, 39 }, { 40, 40 }, { 40, 40 }, { 40, 41 }, { 41, 40 }, { 41, 41 }, { 41, 41 }, { 41, 42 },
+   { 42, 41 }, { 42, 42 }, { 42, 42 }, { 42, 43 }, { 43, 42 }, { 40, 48 }, { 43, 43 }, { 43, 44 },
+   { 44, 43 }, { 41, 49 }, { 44, 44 }, { 44, 45 }, { 45, 44 }, { 43, 48 }, { 45, 45 }, { 45, 46 },
+   { 46, 45 }, { 44, 49 }, { 46, 46 }, { 46, 47 }, { 47, 46 }, { 46, 48 }, { 47, 47 }, { 47, 48 },
+   { 48, 46 }, { 48, 47 }, { 49, 46 }, { 48, 48 }, { 48, 49 }, { 49, 48 }, { 50, 47 }, { 49, 49 },
+   { 49, 50 }, { 50, 49 }, { 52, 46 }, { 50, 50 }, { 50, 51 }, { 51, 50 }, { 53, 47 }, { 51, 51 },
+   { 51, 52 }, { 52, 51 }, { 52, 52 }, { 52, 52 }, { 52, 53 }, { 53, 52 }, { 53, 53 }, { 53, 53 },
+   { 53, 54 }, { 54, 53 }, { 54, 54 }, { 54, 54 }, { 54, 55 }, { 55, 54 }, { 55, 55 }, { 55, 55 },
+   { 55, 56 }, { 56, 55 }, { 56, 56 }, { 56, 56 }, { 56, 57 }, { 57, 56 }, { 57, 57 }, { 57, 57 },
+   { 57, 58 }, { 58, 57 }, { 58, 58 }, { 58, 58 }, { 58, 59 }, { 59, 58 }, { 59, 59 }, { 59, 59 },
+   { 59, 60 }, { 60, 59 }, { 60, 60 }, { 60, 60 }, { 60, 61 }, { 61, 60 }, { 61, 61 }, { 61, 61 },
+   { 61, 62 }, { 62, 61 }, { 62, 62 }, { 62, 62 }, { 62, 63 }, { 63, 62 }, { 63, 63 }, { 63, 63 },
+};
+
+static int stb__Mul8Bit(int a, int b)
+{
+   int t = a*b + 128;
+   return (t + (t >> 8)) >> 8;
+}
+
+static void stb__From16Bit(unsigned char *out, unsigned short v)
+{
+   unsigned short rv = (v & 0xf800) >> 11;
+   unsigned short gv = (v & 0x07e0) >> 5;
+   unsigned short bv = (v & 0x001f) >> 0;
+
+   // expand to 8 bits via bit replication
+   out[0] = (unsigned char)((rv * 33) >> 2);
+   out[1] = (unsigned char)((gv * 65) >> 4);
+   out[2] = (unsigned char)((bv * 33) >> 2);
+   out[3] = 0;
+}
+
+static unsigned short stb__As16Bit(int r, int g, int b)
+{
+   return (unsigned short)((stb__Mul8Bit(r,31) << 11) + (stb__Mul8Bit(g,63) << 5) + stb__Mul8Bit(b,31));
+}
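+
+// note: for the 5-bit fields above, (rv * 33) >> 2 == (rv << 3) | (rv >> 2),
+// the classic bit-replication expansion (rv*33 = rv*32 + rv, and rv < 32 keeps
+// the two terms from overlapping); (gv * 65) >> 4 is the 6-bit equivalent.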
+
+// linear interpolation at 1/3 point between a and b, using desired rounding type
+static int stb__Lerp13(int a, int b)
+{
+#ifdef STB_DXT_USE_ROUNDING_BIAS
+   // with rounding bias
+   return a + stb__Mul8Bit(b-a, 0x55);
+#else
+   // without rounding bias
+   // replace "/ 3" by "* 0xaaab) >> 17" if your compiler sucks or you really need every ounce of speed.
+   return (2*a + b) / 3;
+#endif
+}
+
+// lerp RGB color
+static void stb__Lerp13RGB(unsigned char *out, unsigned char *p1, unsigned char *p2)
+{
+   out[0] = (unsigned char)stb__Lerp13(p1[0], p2[0]);
+   out[1] = (unsigned char)stb__Lerp13(p1[1], p2[1]);
+   out[2] = (unsigned char)stb__Lerp13(p1[2], p2[2]);
+}
+
+/****************************************************************************/
+
+static void stb__EvalColors(unsigned char *color,unsigned short c0,unsigned short c1)
+{
+   stb__From16Bit(color+ 0, c0);
+   stb__From16Bit(color+ 4, c1);
+   stb__Lerp13RGB(color+ 8, color+0, color+4);
+   stb__Lerp13RGB(color+12, color+4, color+0);
+}
+
+// The color matching function
+static unsigned int stb__MatchColorsBlock(unsigned char *block, unsigned char *color)
+{
+   unsigned int mask = 0;
+   int dirr = color[0*4+0] - color[1*4+0];
+   int dirg = color[0*4+1] - color[1*4+1];
+   int dirb = color[0*4+2] - color[1*4+2];
+   int dots[16];
+   int stops[4];
+   int i;
+   int c0Point, halfPoint, c3Point;
+
+   for(i=0;i<16;i++)
+      dots[i] = block[i*4+0]*dirr + block[i*4+1]*dirg + block[i*4+2]*dirb;
+
+   for(i=0;i<4;i++)
+      stops[i] = color[i*4+0]*dirr + color[i*4+1]*dirg + color[i*4+2]*dirb;
+
+   // think of the colors as arranged on a line; project point onto that line, then choose
+   // next color out of available ones. we compute the crossover points for "best color in top
+   // half"/"best in bottom half" and then the same inside that subinterval.
+   //
+   // relying on this 1d approximation isn't always optimal in terms of euclidean distance,
+   // but it's very close and a lot faster.
+   // http://cbloomrants.blogspot.com/2008/12/12-08-08-dxtc-summary.html
+
+   c0Point   = (stops[1] + stops[3]);
+   halfPoint = (stops[3] + stops[2]);
+   c3Point   = (stops[2] + stops[0]);
+
+   for (i=15;i>=0;i--) {
+      int dot = dots[i]*2;
+      mask <<= 2;
+
+      if(dot < halfPoint)
+         mask |= (dot < c0Point) ? 1 : 3;
+      else
+         mask |= (dot < c3Point) ? 2 : 0;
+   }
+
+   return mask;
+}
+
+// The color optimization function. (Clever code, part 1)
+static void stb__OptimizeColorsBlock(unsigned char *block, unsigned short *pmax16, unsigned short *pmin16)
+{
+   int mind,maxd;
+   unsigned char *minp, *maxp;
+   double magn;
+   int v_r,v_g,v_b;
+   static const int nIterPower = 4;
+   float covf[6],vfr,vfg,vfb;
+
+   // determine color distribution
+   int cov[6];
+   int mu[3],min[3],max[3];
+   int ch,i,iter;
+
+   for(ch=0;ch<3;ch++)
+   {
+      const unsigned char *bp = ((const unsigned char *) block) + ch;
+      int muv,minv,maxv;
+
+      muv = minv = maxv = bp[0];
+      for(i=4;i<64;i+=4)
+      {
+         muv += bp[i];
+         if (bp[i] < minv) minv = bp[i];
+         else if (bp[i] > maxv) maxv = bp[i];
+      }
+
+      mu[ch] = (muv + 8) >> 4;
+      min[ch] = minv;
+      max[ch] = maxv;
+   }
+
+   // determine covariance matrix
+   for (i=0;i<6;i++)
+      cov[i] = 0;
+
+   for (i=0;i<16;i++)
+   {
+      int r = block[i*4+0] - mu[0];
+      int g = block[i*4+1] - mu[1];
+      int b = block[i*4+2] - mu[2];
+
+      cov[0] += r*r;
+      cov[1] += r*g;
+      cov[2] += r*b;
+      cov[3] += g*g;
+      cov[4] += g*b;
+      cov[5] += b*b;
+   }
+
+   // convert covariance matrix to float, find principal axis via power iter
+   for(i=0;i<6;i++)
+      covf[i] = cov[i] / 255.0f;
+
+   vfr = (float) (max[0] - min[0]);
+   vfg = (float) (max[1] - min[1]);
+   vfb = (float) (max[2] - min[2]);
+
+   for(iter=0;iter<nIterPower;iter++)
+   {
+      // covariance matrix * vector
+      float r = vfr*covf[0] + vfg*covf[1] + vfb*covf[2];
+      float g = vfr*covf[1] + vfg*covf[3] + vfb*covf[4];
+      float b = vfr*covf[2] + vfg*covf[4] + vfb*covf[5];
+
+      vfr = r;
+      vfg = g;
+      vfb = b;
+   }
+
+   magn = STBD_FABS(vfr);
+   if (STBD_FABS(vfg) > magn) magn = STBD_FABS(vfg);
+   if (STBD_FABS(vfb) > magn) magn = STBD_FABS(vfb);
+
+   if(magn < 4.0f) { // too small, default to luminance
+      v_r = 299; // JPEG YCbCr luma coefs, scaled by 1000.
+      v_g = 587;
+      v_b = 114;
+   } else {
+      magn = 512.0 / magn;
+      v_r = (int) (vfr * magn);
+      v_g = (int) (vfg * magn);
+      v_b = (int) (vfb * magn);
+   }
+
+   minp = maxp = block;
+   mind = maxd = block[0]*v_r + block[1]*v_g + block[2]*v_b;
+   // Pick colors at extreme points
+   for(i=1;i<16;i++)
+   {
+      int dot = block[i*4+0]*v_r + block[i*4+1]*v_g + block[i*4+2]*v_b;
+
+      if (dot < mind) {
+         mind = dot;
+         minp = block+i*4;
+      }
+
+      if (dot > maxd) {
+         maxd = dot;
+         maxp = block+i*4;
+      }
+   }
+
+   *pmax16 = stb__As16Bit(maxp[0],maxp[1],maxp[2]);
+   *pmin16 = stb__As16Bit(minp[0],minp[1],minp[2]);
+}
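+
+// the axis search above is plain power iteration on the 3x3 color covariance
+// matrix C (its six unique entries live in covf):
+//
+//    v_{k+1} = C v_k,   v_0 = (max - min) per channel
+//
+// v converges toward the dominant eigenvector of C, the principal axis of the
+// block's colors; only the direction matters, so the result is rescaled to ~512
+// (or replaced by luminance weights when the extents are too small).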
+
+static const float stb__midpoints5[32] = {
+   0.015686f, 0.047059f, 0.078431f, 0.111765f, 0.145098f, 0.176471f, 0.207843f, 0.241176f, 0.274510f, 0.305882f, 0.337255f, 0.370588f, 0.403922f, 0.435294f, 0.466667f, 0.5f,
+   0.533333f, 0.564706f, 0.596078f, 0.629412f, 0.662745f, 0.694118f, 0.725490f, 0.758824f, 0.792157f, 0.823529f, 0.854902f, 0.888235f, 0.921569f, 0.952941f, 0.984314f, 1.0f
+};
+
+static const float stb__midpoints6[64] = {
+   0.007843f, 0.023529f, 0.039216f, 0.054902f, 0.070588f, 0.086275f, 0.101961f, 0.117647f, 0.133333f, 0.149020f, 0.164706f, 0.180392f, 0.196078f, 0.211765f, 0.227451f, 0.245098f,
+   0.262745f, 0.278431f, 0.294118f, 0.309804f, 0.325490f, 0.341176f, 0.356863f, 0.372549f, 0.388235f, 0.403922f, 0.419608f, 0.435294f, 0.450980f, 0.466667f, 0.482353f, 0.500000f,
+   0.517647f, 0.533333f, 0.549020f, 0.564706f, 0.580392f, 0.596078f, 0.611765f, 0.627451f, 0.643137f, 0.658824f, 0.674510f, 0.690196f, 0.705882f, 0.721569f, 0.737255f, 0.754902f,
+   0.772549f, 0.788235f, 0.803922f, 0.819608f, 0.835294f, 0.850980f, 0.866667f, 0.882353f, 0.898039f, 0.913725f, 0.929412f, 0.945098f, 0.960784f, 0.976471f, 0.992157f, 1.0f
+};
+
+static unsigned short stb__Quantize5(float x)
+{
+   unsigned short q;
+   x = x < 0 ? 0 : x > 1 ? 1 : x; // saturate
+   q = (unsigned short)(x * 31);
+   q += (x > stb__midpoints5[q]);
+   return q;
+}
+
+static unsigned short stb__Quantize6(float x)
+{
+   unsigned short q;
+   x = x < 0 ? 0 : x > 1 ? 1 : x; // saturate
+   q = (unsigned short)(x * 63);
+   q += (x > stb__midpoints6[q]);
+   return q;
+}
+
+// The refinement function. (Clever code, part 2)
+// Tries to optimize colors to suit block contents better.
+// (By solving a least squares system via normal equations+Cramer's rule)
+static int stb__RefineBlock(unsigned char *block, unsigned short *pmax16, unsigned short *pmin16, unsigned int mask)
+{
+   static const int w1Tab[4] = { 3,0,2,1 };
+   static const int prods[4] = { 0x090000,0x000900,0x040102,0x010402 };
+   // ^some magic to save a lot of multiplies in the accumulating loop...
+   // (precomputed products of weights for least squares system, accumulated inside one 32-bit register)
+
+   float f;
+   unsigned short oldMin, oldMax, min16, max16;
+   int i, akku = 0, xx,xy,yy;
+   int At1_r,At1_g,At1_b;
+   int At2_r,At2_g,At2_b;
+   unsigned int cm = mask;
+
+   oldMin = *pmin16;
+   oldMax = *pmax16;
+
+   if((mask ^ (mask<<2)) < 4) // all pixels have the same index?
+   {
+      // yes, linear system would be singular; solve using optimal
+      // single-color match on average color
+      int r = 8, g = 8, b = 8;
+      for (i=0;i<16;++i) {
+         r += block[i*4+0];
+         g += block[i*4+1];
+         b += block[i*4+2];
+      }
+
+      r >>= 4; g >>= 4; b >>= 4;
+
+      max16 = (stb__OMatch5[r][0]<<11) | (stb__OMatch6[g][0]<<5) | stb__OMatch5[b][0];
+      min16 = (stb__OMatch5[r][1]<<11) | (stb__OMatch6[g][1]<<5) | stb__OMatch5[b][1];
+   } else {
+      At1_r = At1_g = At1_b = 0;
+      At2_r = At2_g = At2_b = 0;
+      for (i=0;i<16;++i,cm>>=2) {
+         int step = cm&3;
+         int w1 = w1Tab[step];
+         int r = block[i*4+0];
+         int g = block[i*4+1];
+         int b = block[i*4+2];
+
+         akku  += prods[step];
+         At1_r += w1*r;
+         At1_g += w1*g;
+         At1_b += w1*b;
+         At2_r += r;
+         At2_g += g;
+         At2_b += b;
+      }
+
+      At2_r = 3*At2_r - At1_r;
+      At2_g = 3*At2_g - At1_g;
+      At2_b = 3*At2_b - At1_b;
+
+      // extract solutions and decide solvability
+      xx = akku >> 16;
+      yy = (akku >> 8) & 0xff;
+      xy = (akku >> 0) & 0xff;
+
+      f = 3.0f / 255.0f / (xx*yy - xy*xy);
+
+      max16  = stb__Quantize5((At1_r*yy - At2_r * xy) * f) << 11;
+      max16 |= stb__Quantize6((At1_g*yy - At2_g * xy) * f) << 5;
+      max16 |= stb__Quantize5((At1_b*yy - At2_b * xy) * f) << 0;
+
+      min16  = stb__Quantize5((At2_r*xx - At1_r * xy) * f) << 11;
+      min16 |= stb__Quantize6((At2_g*xx - At1_g * xy) * f) << 5;
+      min16 |= stb__Quantize5((At2_b*xx - At1_b * xy) * f) << 0;
+   }
+
+   *pmin16 = min16;
+   *pmax16 = max16;
+   return oldMin != min16 || oldMax != max16;
+}
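+
+// the system solved above, per channel: each pixel's 2-bit index selects a weight
+// w in {0,1,2,3} (w1Tab), modeling the pixel as c ~= (w*mx + (3-w)*mn)/3. least
+// squares over the 16 pixels gives the normal equations
+//
+//    xx*mx + xy*mn = 3*At1      xx = sum w^2, xy = sum w*(3-w), yy = sum (3-w)^2
+//    xy*mx + yy*mn = 3*At2      At1 = sum w*c, At2 = sum (3-w)*c
+//
+// solved by Cramer's rule; f = 3/255/(xx*yy - xy*xy) folds the determinant and
+// the 0..255 -> 0..1 rescale into one factor before the midpoint quantizers run.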
+
+// Color block compression
+static void stb__CompressColorBlock(unsigned char *dest, unsigned char *block, int mode)
+{
+   unsigned int mask;
+   int i;
+   int refinecount;
+   unsigned short max16, min16;
+   unsigned char color[4*4];
+
+   refinecount = (mode & STB_DXT_HIGHQUAL) ? 2 : 1;
+
+   // check if block is constant
+   for (i=1;i<16;i++)
+      if (((unsigned int *) block)[i] != ((unsigned int *) block)[0])
+         break;
+
+   if(i == 16) { // constant color
+      int r = block[0], g = block[1], b = block[2];
+      mask = 0xaaaaaaaa;
+      max16 = (stb__OMatch5[r][0]<<11) | (stb__OMatch6[g][0]<<5) | stb__OMatch5[b][0];
+      min16 = (stb__OMatch5[r][1]<<11) | (stb__OMatch6[g][1]<<5) | stb__OMatch5[b][1];
+   } else {
+      // first step: PCA+map along principal axis
+      stb__OptimizeColorsBlock(block,&max16,&min16);
+      if (max16 != min16) {
+         stb__EvalColors(color,max16,min16);
+         mask = stb__MatchColorsBlock(block,color);
+      } else
+         mask = 0;
+
+      // third step: refine (multiple times if requested)
+      for (i=0;i<refinecount;i++) {
+         unsigned short lastmax16 = max16, lastmin16 = min16;
+
+         if (stb__RefineBlock(block,&max16,&min16,mask)) {
+            if (max16 != min16) {
+               stb__EvalColors(color,max16,min16);
+               mask = stb__MatchColorsBlock(block,color);
+            } else {
+               mask = 0;
+               break;
+            }
+         }
+
+         if(max16 == lastmax16 && min16 == lastmin16)
+            break;
+      }
+   }
+
+   // write the color block
+   if(max16 < min16)
+   {
+      unsigned short t = min16;
+      min16 = max16;
+      max16 = t;
+      mask ^= 0x55555555;
+   }
+
+   dest[0] = (unsigned char) (max16);
+   dest[1] = (unsigned char) (max16 >> 8);
+   dest[2] = (unsigned char) (min16);
+   dest[3] = (unsigned char) (min16 >> 8);
+   dest[4] = (unsigned char) (mask);
+   dest[5] = (unsigned char) (mask >> 8);
+   dest[6] = (unsigned char) (mask >> 16);
+   dest[7] = (unsigned char) (mask >> 24);
+}
+
+// Alpha block compression (this is easy for a change)
+static void stb__CompressAlphaBlock(unsigned char *dest,unsigned char *src, int stride)
+{
+   int i,dist,bias,dist4,dist2,bits,mask;
+
+   // find min/max color
+   int mn,mx;
+   mn = mx = src[0];
+
+   for (i=1;i<16;i++)
+   {
+      if (src[i*stride] < mn) mn = src[i*stride];
+      else if (src[i*stride] > mx) mx = src[i*stride];
+   }
+
+   // encode them
+   dest[0] = (unsigned char)mx;
+   dest[1] = (unsigned char)mn;
+   dest += 2;
+
+   // determine bias and emit color indices
+   // given the choice of mx/mn, these indices are optimal:
+   // http://fgiesen.wordpress.com/2009/12/15/dxt5-alpha-block-index-determination/
+   dist = mx-mn;
+   dist4 = dist*4;
+   dist2 = dist*2;
+   bias = (dist < 8) ? (dist - 1) : (dist/2 + 2);
+   bias -= mn * 7;
+   bits = 0,mask=0;
+
+   for (i=0;i<16;i++) {
+      int a = src[i*stride]*7 + bias;
+      int ind,t;
+
+      // select index. this is a "linear scale" lerp factor between 0 (val=min) and 7 (val=max).
+      t = (a >= dist4) ? -1 : 0; ind =  t & 4; a -= dist4 & t;
+      t = (a >= dist2) ? -1 : 0; ind += t & 2; a -= dist2 & t;
+      ind += (a >= dist);
+
+      // turn linear scale into DXT index (0/1 are extremal pts)
+      ind = -ind & 7;
+      ind ^= (2 > ind);
+
+      // write index
+      mask |= ind << bits;
+      if((bits += 3) >= 8) {
+         *dest++ = (unsigned char)mask;
+         mask >>= 8;
+         bits -= 8;
+      }
+   }
+}
+
+void stb_compress_dxt_block(unsigned char *dest, const unsigned char *src, int alpha, int mode)
+{
+   unsigned char data[16][4];
+   if (alpha) {
+      int i;
+      stb__CompressAlphaBlock(dest,(unsigned char*) src+3, 4);
+      dest += 8;
+      // make a new copy of the data in which alpha is opaque,
+      // because code uses a fast test for color constancy
+      memcpy(data, src, 4*16);
+      for (i=0; i < 16; ++i)
+         data[i][3] = 255;
+      src = &data[0][0];
+   }
+
+   stb__CompressColorBlock(dest,(unsigned char*) src,mode);
+}
+
+void stb_compress_bc4_block(unsigned char *dest, const unsigned char *src)
+{
+   stb__CompressAlphaBlock(dest,(unsigned char*) src, 1);
+}
+
+void stb_compress_bc5_block(unsigned char *dest, const unsigned char *src)
+{
+   stb__CompressAlphaBlock(dest,(unsigned char*) src,2);
+   stb__CompressAlphaBlock(dest + 8,(unsigned char*) src+1,2);
+}
+#endif // STB_DXT_IMPLEMENTATION
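+
+// a minimal caller sketch (hypothetical helper, assumes <string.h> for memcpy):
+// compress a tightly packed RGBA image whose dimensions are multiples of 4 into
+// DXT1; for DXT5, pass alpha=1 and advance the output 16 bytes per block.
+//
+//    void compress_dxt1(unsigned char *out, const unsigned char *rgba, int w, int h)
+//    {
+//       unsigned char block[16*4];
+//       int bx, by, row;
+//       for (by = 0; by < h; by += 4)
+//          for (bx = 0; bx < w; bx += 4) {
+//             for (row = 0; row < 4; ++row) // gather one 4x4 block, row-major RGBA
+//                memcpy(block + row*16, rgba + ((by + row)*w + bx)*4, 16);
+//             stb_compress_dxt_block(out, block, 0, STB_DXT_NORMAL);
+//             out += 8; // DXT1: 8 bytes per 4x4 block
+//          }
+//    }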
+
+// Compile with STB_DXT_IMPLEMENTATION and STB_DXT_GENERATE_TABLES
+// defined to generate the tables above.
+#ifdef STB_DXT_GENERATE_TABLES
+#include <stdio.h>
+
+int main()
+{
+   int i, j;
+   const char *omatch_names[] = { "stb__OMatch5", "stb__OMatch6" };
+   int dequant_mults[2] = { 33*4, 65 }; // .4 fixed-point dequant multipliers
+
+   // optimal endpoint tables
+   for (i = 0; i < 2; ++i) {
+      int dequant = dequant_mults[i];
+      int size = i ? 64 : 32;
+      printf("static const unsigned char %s[256][2] = {\n", omatch_names[i]);
+      for (int j = 0; j < 256; ++j) {
+         int mn, mx;
+         int best_mn = 0, best_mx = 0;
+         int best_err = 256 * 100;
+         for (mn=0;mn<size;mn++) {
+            for (mx=0;mx<size;mx++) {
+               int mine = (mn * dequant) >> 4;
+               int maxe = (mx * dequant) >> 4;
+               int err = abs(stb__Lerp13(maxe, mine) - j) * 100;
+
+               // DX10 spec says that interpolation must be within 3% of "correct" result,
+               // add this as error term. Normally we'd expect a random distribution of
+               // +-1.5% error, but nowhere in the spec does it say that the error has to be
+               // unbiased - better safe than sorry.
+               err += abs(maxe - mine) * 3;
+
+               if(err < best_err) {
+                  best_mn = mn;
+                  best_mx = mx;
+                  best_err = err;
+               }
+            }
+         }
+         if ((j % 8) == 0) printf("   "); // 2 spaces, third is done below
+         printf(" { %2d, %2d },", best_mx, best_mn);
+         if ((j % 8) == 7) printf("\n");
+      }
+      printf("};\n");
+   }
+
+   return 0;
+}
+#endif
+
+/*
+------------------------------------------------------------------------------
+This software is available under 2 licenses -- choose whichever you prefer.
+------------------------------------------------------------------------------
+ALTERNATIVE A - MIT License
+Copyright (c) 2017 Sean Barrett
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+------------------------------------------------------------------------------
+ALTERNATIVE B - Public Domain (www.unlicense.org)
+This is free and unencumbered software released into the public domain.
+Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
+software, either in source code form or as a compiled binary, for any purpose,
+commercial or non-commercial, and by any means.
+In jurisdictions that recognize copyright laws, the author or authors of this
+software dedicate any and all copyright interest in the software to the public
+domain. We make this dedication for the benefit of the public at large and to
+the detriment of our heirs and successors. We intend this dedication to be an
+overt act of relinquishment in perpetuity of all present and future rights to
+this software under copyright law.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+------------------------------------------------------------------------------
+*/
\ No newline at end of file
diff --git a/examples/example_4.c b/examples/example_4.c
index dd689275..bb6719a8 100644
--- a/examples/example_4.c
+++ b/examples/example_4.c
@@ -290,8 +290,8 @@ pl_app_load(plApiRegistryI* ptApiRegistry, plAppData* ptAppData)
         {
             .uByteStride = sizeof(float) * 6,
             .atAttributes = {
-                {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32_FLOAT},
-                {.uByteOffset = sizeof(float) * 2, .tFormat = PL_FORMAT_R32G32B32A32_FLOAT},
+                {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
+                {.uByteOffset = sizeof(float) * 2, .tFormat = PL_VERTEX_FORMAT_FLOAT4},
             }
         }
     },
diff --git a/examples/example_5.c b/examples/example_5.c
index cec37b89..e5a166e9 100644
--- a/examples/example_5.c
+++ b/examples/example_5.c
@@ -362,8 +362,8 @@ pl_app_load(plApiRegistryI* ptApiRegistry, plAppData* ptAppData)
         {
             .uByteStride = sizeof(float) * 6,
             .atAttributes = {
-                {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32_FLOAT},
-                {.uByteOffset = sizeof(float) * 2, .tFormat = PL_FORMAT_R32G32B32A32_FLOAT},
+                {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
+                {.uByteOffset = sizeof(float) * 2, .tFormat = PL_VERTEX_FORMAT_FLOAT4},
             }
         }
     },
diff --git a/examples/example_6.c b/examples/example_6.c
index 84a5b2cf..d386c5de 100644
--- a/examples/example_6.c
+++ b/examples/example_6.c
@@ -502,8 +502,8 @@ pl_app_load(plApiRegistryI* ptApiRegistry, plAppData* ptAppData)
         {
             .uByteStride = sizeof(float) * 4,
             .atAttributes = {
-                {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32_FLOAT},
-                {.uByteOffset = sizeof(float) * 2, .tFormat = PL_FORMAT_R32G32_FLOAT},
+                {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
+                {.uByteOffset = sizeof(float) * 2, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
             }
         }
     },
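The same migration applies to any user pipeline: plVertexAttribute now carries a
plVertexFormat describing the attribute's CPU-side component layout, instead of
reusing the texture-oriented plFormat values. As an illustration only (a hypothetical
layout, not part of this change), an interleaved position/uv vertex would be declared:

    const plVertexBufferLayout tLayout = {
        .uByteStride = sizeof(float) * 5,
        .atAttributes = {
            {.uByteOffset = 0,                 .tFormat = PL_VERTEX_FORMAT_FLOAT3}, // position
            {.uByteOffset = sizeof(float) * 3, .tFormat = PL_VERTEX_FORMAT_FLOAT2}, // uv
        }
    };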
diff --git a/extensions/pl_draw_backend_ext.c b/extensions/pl_draw_backend_ext.c
index 1d3e9def..10cc09e3 100644
--- a/extensions/pl_draw_backend_ext.c
+++ b/extensions/pl_draw_backend_ext.c
@@ -397,8 +397,8 @@ pl__get_3d_pipeline(plRenderPassHandle tRenderPass, uint32_t uMSAASampleCount, p
         {
             .uByteStride = sizeof(float) * 4,
             .atAttributes = {
-                {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32B32_FLOAT},
-                {.uByteOffset = sizeof(float) * 3, .tFormat = PL_FORMAT_R32_UINT},
+                {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT3},
+                {.uByteOffset = sizeof(float) * 3, .tFormat = PL_VERTEX_FORMAT_UINT},
             }
         }
     },
@@ -441,10 +441,10 @@
         {
             .uByteStride = sizeof(float) * 10,
             .atAttributes = {
-                {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32B32_FLOAT},
-                {.uByteOffset = sizeof(float) * 3, .tFormat = PL_FORMAT_R32G32B32_FLOAT},
-                {.uByteOffset = sizeof(float) * 6, .tFormat = PL_FORMAT_R32G32B32_FLOAT},
-                {.uByteOffset = sizeof(float) * 9, .tFormat = PL_FORMAT_R32_UINT},
+                {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT3},
+                {.uByteOffset = sizeof(float) * 3, .tFormat = PL_VERTEX_FORMAT_FLOAT3},
+                {.uByteOffset = sizeof(float) * 6, .tFormat = PL_VERTEX_FORMAT_FLOAT3},
+                {.uByteOffset = sizeof(float) * 9, .tFormat = PL_VERTEX_FORMAT_UINT},
             }
         }
     },
@@ -510,9 +510,9 @@ pl__get_2d_pipeline(plRenderPassHandle tRenderPass, uint32_t uMSAASampleCount, u
         {
             .uByteStride = sizeof(float) * 5,
             .atAttributes = {
-                {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32_FLOAT},
-                {.uByteOffset = sizeof(float) * 2, .tFormat = PL_FORMAT_R32G32_FLOAT},
-                {.uByteOffset = sizeof(float) * 4, .tFormat = PL_FORMAT_R32_UINT},
+                {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
+                {.uByteOffset = sizeof(float) * 2, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
+                {.uByteOffset = sizeof(float) * 4, .tFormat = PL_VERTEX_FORMAT_UINT},
             }
         }
     },
@@ -565,9 +565,9 @@ pl__get_2d_pipeline(plRenderPassHandle tRenderPass, uint32_t uMSAASampleCount, u
         {
             .uByteStride = sizeof(float) * 5,
             .atAttributes = {
-                {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32_FLOAT},
-                {.uByteOffset = sizeof(float) * 2, .tFormat = PL_FORMAT_R32G32_FLOAT},
-                {.uByteOffset = sizeof(float) * 4, .tFormat = PL_FORMAT_R32_UINT},
+                {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
+                {.uByteOffset = sizeof(float) * 2, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
+                {.uByteOffset = sizeof(float) * 4, .tFormat = PL_VERTEX_FORMAT_UINT},
             }
         }
     },
diff --git a/extensions/pl_graphics_ext.h b/extensions/pl_graphics_ext.h
index b8f3057d..c183387c 100644
--- a/extensions/pl_graphics_ext.h
+++ b/extensions/pl_graphics_ext.h
@@ -172,6 +172,7 @@ typedef int plTextureType; // -> enum _plTextureType // Enu
 typedef int plTextureUsage;   // -> enum _plTextureUsage   // Flag: texture type (PL_TEXTURE_USAGE_XXXX)
 typedef int plCompareMode;    // -> enum _plCompareMode    // Enum: texture sampling comparison modes (PL_COMPARE_MODE_XXXX)
 typedef int plFormat;         // -> enum _plFormat         // Enum: formats (PL_FORMAT_XXXX)
+typedef int plVertexFormat;   // -> enum _plVertexFormat   // Enum: vertex formats (PL_VERTEX_FORMAT_XXXX)
 typedef int plBufferUsage;    // -> enum _plBufferUsage    // Flag: buffer usage flags (PL_BUFFER_USAGE_XXXX)
 typedef int plStageFlags;     // -> enum _plStageFlags     // Flag: GPU pipeline stage (PL_STAGE_XXXX)
 typedef int plCullMode;       // -> enum _plCullMode       // Flag: face culling mode (PL_CULL_MODE_XXXX)
@@ -644,8 +645,8 @@ typedef struct _plBlendState
 typedef struct _plVertexAttribute
 {
-    uint32_t uByteOffset;
-    plFormat tFormat;
+    uint32_t       uByteOffset;
+    plVertexFormat tFormat;
 } plVertexAttribute;
 
 typedef struct _plVertexBufferLayout
@@ -1102,27 +1103,6 @@ enum _plCullMode
     PL_CULL_MODE_CULL_BACK = 1 << 1,
 };
 
-enum _plFormat
-{
-    PL_FORMAT_UNKNOWN = 0,
-    PL_FORMAT_R32G32B32_FLOAT,
-    PL_FORMAT_R32G32B32A32_FLOAT,
-    PL_FORMAT_R8G8B8A8_UNORM,
-    PL_FORMAT_R32G32_FLOAT,
-    PL_FORMAT_R32_UINT,
-    PL_FORMAT_R8_UNORM,
-    PL_FORMAT_R8G8_UNORM,
-    PL_FORMAT_R8G8B8A8_SRGB,
-    PL_FORMAT_B8G8R8A8_SRGB,
-    PL_FORMAT_B8G8R8A8_UNORM,
-    PL_FORMAT_D32_FLOAT,
-    PL_FORMAT_D32_FLOAT_S8_UINT,
-    PL_FORMAT_D24_UNORM_S8_UINT,
-    PL_FORMAT_D16_UNORM_S8_UINT,
-
-    PL_FORMAT_COUNT
-};
-
 enum _plBufferBindingType
 {
     PL_BUFFER_BINDING_TYPE_UNSPECIFIED,
@@ -1249,6 +1229,190 @@ enum _plDataType
     PL_DATA_TYPE_UNSIGNED_LONG4,
 };
 
+enum _plVertexFormat
+{
+    PL_VERTEX_FORMAT_UNKNOWN = 0,
+
+    PL_VERTEX_FORMAT_HALF,
+    PL_VERTEX_FORMAT_HALF2,
+    PL_VERTEX_FORMAT_HALF3,
+    PL_VERTEX_FORMAT_HALF4,
+
+    PL_VERTEX_FORMAT_FLOAT,
+    PL_VERTEX_FORMAT_FLOAT2,
+    PL_VERTEX_FORMAT_FLOAT3,
+    PL_VERTEX_FORMAT_FLOAT4,
+
+    PL_VERTEX_FORMAT_UCHAR,
+    PL_VERTEX_FORMAT_UCHAR2,
+    PL_VERTEX_FORMAT_UCHAR3,
+    PL_VERTEX_FORMAT_UCHAR4,
+
+    PL_VERTEX_FORMAT_CHAR,
+    PL_VERTEX_FORMAT_CHAR2,
+    PL_VERTEX_FORMAT_CHAR3,
+    PL_VERTEX_FORMAT_CHAR4,
+
+    PL_VERTEX_FORMAT_USHORT,
+    PL_VERTEX_FORMAT_USHORT2,
+    PL_VERTEX_FORMAT_USHORT3,
PL_VERTEX_FORMAT_USHORT4, + + PL_VERTEX_FORMAT_SHORT, + PL_VERTEX_FORMAT_SHORT2, + PL_VERTEX_FORMAT_SHORT3, + PL_VERTEX_FORMAT_SHORT4, + + PL_VERTEX_FORMAT_UINT, + PL_VERTEX_FORMAT_UINT2, + PL_VERTEX_FORMAT_UINT3, + PL_VERTEX_FORMAT_UINT4, + + PL_VERTEX_FORMAT_INT, + PL_VERTEX_FORMAT_INT2, + PL_VERTEX_FORMAT_INT3, + PL_VERTEX_FORMAT_INT4, + + PL_VERTEX_FORMAT_COUNT +}; + +enum _plFormat +{ + PL_FORMAT_UNKNOWN = 0, + + // vertex format only + PL_FORMAT_R32G32B32_FLOAT, + + // 8-bit pixel formats + PL_FORMAT_R8_UNORM, + PL_FORMAT_R8_SNORM, + PL_FORMAT_R8_UINT, + PL_FORMAT_R8_SINT, + PL_FORMAT_R8_SRGB, + + // 16-bit pixel formats + PL_FORMAT_R8G8_UNORM, + PL_FORMAT_R16_UNORM, + PL_FORMAT_R16_SNORM, + PL_FORMAT_R16_UINT, + PL_FORMAT_R16_SINT, + PL_FORMAT_R16_FLOAT, + PL_FORMAT_R8G8_SNORM, + PL_FORMAT_R8G8_UINT, + PL_FORMAT_R8G8_SINT, + PL_FORMAT_R8G8_SRGB, + + // packed 16-bit pixel formats + PL_FORMAT_B5G6R5_UNORM, + PL_FORMAT_A1R5G5B5_UNORM, + PL_FORMAT_B5G5R5A1_UNORM, + + // 32-bit pixel formats + PL_FORMAT_R8G8B8A8_SRGB, + PL_FORMAT_B8G8R8A8_SRGB, + PL_FORMAT_B8G8R8A8_UNORM, + PL_FORMAT_R8G8B8A8_UNORM, + PL_FORMAT_R32_UINT, + PL_FORMAT_R32_SINT, + PL_FORMAT_R32_FLOAT, + PL_FORMAT_R16G16_UNORM, + PL_FORMAT_R16G16_SNORM, + PL_FORMAT_R16G16_UINT, + PL_FORMAT_R16G16_SINT, + PL_FORMAT_R16G16_FLOAT, + PL_FORMAT_R8G8B8A8_SNORM, + PL_FORMAT_R8G8B8A8_UINT, + PL_FORMAT_R8G8B8A8_SINT, + + // packed 32-bit pixel formats + PL_FORMAT_B10G10R10A2_UNORM, + PL_FORMAT_R10G10B10A2_UNORM, + PL_FORMAT_R10G10B10A2_UINT, + PL_FORMAT_R11G11B10_FLOAT, + PL_FORMAT_R9G9B9E5_FLOAT, + + // 64-bit pixel formats + PL_FORMAT_R32G32_FLOAT, + PL_FORMAT_R32G32_UINT, + PL_FORMAT_R32G32_SINT, + PL_FORMAT_R16G16B16A16_UNORM, + PL_FORMAT_R16G16B16A16_SNORM, + PL_FORMAT_R16G16B16A16_UINT, + PL_FORMAT_R16G16B16A16_SINT, + PL_FORMAT_R16G16B16A16_FLOAT, + + // 128-bit pixel formats + PL_FORMAT_R32G32B32A32_FLOAT, + PL_FORMAT_R32G32B32A32_UINT, + PL_FORMAT_R32G32B32A32_SINT, + + // compressed bc pixel formats + PL_FORMAT_BC1_RGBA_UNORM, + PL_FORMAT_BC1_RGBA_SRGB, + PL_FORMAT_BC2_UNORM, + PL_FORMAT_BC2_SRGB, + PL_FORMAT_BC3_UNORM, + PL_FORMAT_BC3_SRGB, + PL_FORMAT_BC4_UNORM, + PL_FORMAT_BC4_SNORM, + PL_FORMAT_BC5_UNORM, + PL_FORMAT_BC5_SNORM, + PL_FORMAT_BC6H_UFLOAT, + PL_FORMAT_BC6H_FLOAT, + PL_FORMAT_BC7_UNORM, + PL_FORMAT_BC7_SRGB, + + // compressed eac/etc pixel formats + PL_FORMAT_ETC2_R8G8B8_UNORM, + PL_FORMAT_ETC2_R8G8B8_SRGB, + PL_FORMAT_ETC2_R8G8B8A1_UNORM, + PL_FORMAT_ETC2_R8G8B8A1_SRGB, + PL_FORMAT_EAC_R11_UNORM, + PL_FORMAT_EAC_R11_SNORM, + PL_FORMAT_EAC_R11G11_UNORM, + PL_FORMAT_EAC_R11G11_SNORM, + + // compressed astc pixel formats + PL_FORMAT_ASTC_4x4_UNORM, + PL_FORMAT_ASTC_4x4_SRGB, + PL_FORMAT_ASTC_5x4_UNORM, + PL_FORMAT_ASTC_5x4_SRGB, + PL_FORMAT_ASTC_5x5_UNORM, + PL_FORMAT_ASTC_5x5_SRGB, + PL_FORMAT_ASTC_6x5_UNORM, + PL_FORMAT_ASTC_6x5_SRGB, + PL_FORMAT_ASTC_6x6_UNORM, + PL_FORMAT_ASTC_6x6_SRGB, + PL_FORMAT_ASTC_8x5_UNORM, + PL_FORMAT_ASTC_8x5_SRGB, + PL_FORMAT_ASTC_8x6_UNORM, + PL_FORMAT_ASTC_8x6_SRGB, + PL_FORMAT_ASTC_8x8_UNORM, + PL_FORMAT_ASTC_8x8_SRGB, + PL_FORMAT_ASTC_10x5_UNORM, + PL_FORMAT_ASTC_10x5_SRGB, + PL_FORMAT_ASTC_10x6_UNORM, + PL_FORMAT_ASTC_10x6_SRGB, + PL_FORMAT_ASTC_10x8_UNORM, + PL_FORMAT_ASTC_10x8_SRGB, + PL_FORMAT_ASTC_10x10_UNORM, + PL_FORMAT_ASTC_10x10_SRGB, + PL_FORMAT_ASTC_12x10_UNORM, + PL_FORMAT_ASTC_12x10_SRGB, + PL_FORMAT_ASTC_12x12_UNORM, + PL_FORMAT_ASTC_12x12_SRGB, + + // depth + PL_FORMAT_D32_FLOAT, + PL_FORMAT_D32_FLOAT_S8_UINT, + PL_FORMAT_D24_UNORM_S8_UINT, + 
    PL_FORMAT_D16_UNORM_S8_UINT,
+    PL_FORMAT_D16_UNORM,
+    PL_FORMAT_S8_UINT,
+
+    PL_FORMAT_COUNT
+};
+
 //-----------------------------------------------------------------------------
 // [SECTION] internal enums (NOT FOR PUBLIC CONSUMPTION)
 //-----------------------------------------------------------------------------
diff --git a/extensions/pl_graphics_metal.m b/extensions/pl_graphics_metal.m
index 59c1116f..bcecd298 100644
--- a/extensions/pl_graphics_metal.m
+++ b/extensions/pl_graphics_metal.m
@@ -277,7 +277,7 @@ static MTLCompareFunction pl__metal_compare(plCompareMode tCompare);
 static MTLStencilOperation pl__metal_stencil_op(plStencilOp tOp);
 static MTLPixelFormat      pl__metal_format(plFormat tFormat);
-static MTLVertexFormat     pl__metal_vertex_format(plFormat tFormat);
+static MTLVertexFormat     pl__metal_vertex_format(plVertexFormat tFormat);
 static MTLCullMode         pl__metal_cull(plCullMode tCullMode);
 static MTLLoadAction       pl__metal_load_op (plLoadOp tOp);
 static MTLStoreAction      pl__metal_store_op (plStoreOp tOp);
@@ -1231,7 +1231,7 @@
     uint32_t uCurrentAttributeCount = 0;
     for(uint32_t i = 0; i < PL_MAX_VERTEX_ATTRIBUTES; i++)
     {
-        if(ptDescription->atVertexBufferLayouts[0].atAttributes[i].tFormat == PL_FORMAT_UNKNOWN)
+        if(ptDescription->atVertexBufferLayouts[0].atAttributes[i].tFormat == PL_VERTEX_FORMAT_UNKNOWN)
             break;
         vertexDescriptor.attributes[i].bufferIndex = 4;
         vertexDescriptor.attributes[i].offset = ptDescription->atVertexBufferLayouts[0].atAttributes[i].uByteOffset;
@@ -2692,6 +2692,98 @@
         case PL_FORMAT_D32_FLOAT:          return MTLPixelFormatDepth32Float;
         case PL_FORMAT_D32_FLOAT_S8_UINT:  return MTLPixelFormatDepth32Float_Stencil8;
         case PL_FORMAT_D24_UNORM_S8_UINT:  return MTLPixelFormatDepth24Unorm_Stencil8;
+        case PL_FORMAT_D16_UNORM_S8_UINT:  return MTLPixelFormatDepth16Unorm;
+        case PL_FORMAT_R8_SNORM:           return MTLPixelFormatR8Snorm;
+        case PL_FORMAT_R8_UINT:            return MTLPixelFormatR8Uint;
+        case PL_FORMAT_R8_SINT:            return MTLPixelFormatR8Sint;
+        case PL_FORMAT_R8_SRGB:            return MTLPixelFormatR8Unorm_sRGB;
+        case PL_FORMAT_R16_UNORM:          return MTLPixelFormatR16Unorm;
+        case PL_FORMAT_R16_SNORM:          return MTLPixelFormatR16Snorm;
+        case PL_FORMAT_R16_UINT:           return MTLPixelFormatR16Uint;
+        case PL_FORMAT_R16_SINT:           return MTLPixelFormatR16Sint;
+        case PL_FORMAT_R16_FLOAT:          return MTLPixelFormatR16Float;
+        case PL_FORMAT_R8G8_SNORM:         return MTLPixelFormatRG8Snorm;
+        case PL_FORMAT_R8G8_UINT:          return MTLPixelFormatRG8Uint;
+        case PL_FORMAT_R8G8_SINT:          return MTLPixelFormatRG8Sint;
+        case PL_FORMAT_R8G8_SRGB:          return MTLPixelFormatRG8Unorm_sRGB;
+        case PL_FORMAT_B5G6R5_UNORM:       return MTLPixelFormatB5G6R5Unorm;
+        case PL_FORMAT_A1R5G5B5_UNORM:     return MTLPixelFormatA1BGR5Unorm;
+        case PL_FORMAT_B5G5R5A1_UNORM:     return MTLPixelFormatBGR5A1Unorm;
+        case PL_FORMAT_R32_SINT:           return MTLPixelFormatR32Sint;
+        case PL_FORMAT_R32_FLOAT:          return MTLPixelFormatR32Float;
+        case PL_FORMAT_R16G16_UNORM:       return MTLPixelFormatRG16Unorm;
+        case PL_FORMAT_R16G16_SNORM:       return MTLPixelFormatRG16Snorm;
+        case PL_FORMAT_R16G16_UINT:        return MTLPixelFormatRG16Uint;
+        case PL_FORMAT_R16G16_SINT:        return MTLPixelFormatRG16Sint;
+        case PL_FORMAT_R16G16_FLOAT:       return MTLPixelFormatRG16Float;
+        case PL_FORMAT_R8G8B8A8_SNORM:     return MTLPixelFormatRGBA8Snorm;
+        case PL_FORMAT_R8G8B8A8_UINT:      return MTLPixelFormatRGBA8Uint;
+        case PL_FORMAT_R8G8B8A8_SINT:      return MTLPixelFormatRGBA8Sint;
+        case PL_FORMAT_B10G10R10A2_UNORM:  return MTLPixelFormatBGR10A2Unorm;
+        case PL_FORMAT_R10G10B10A2_UNORM:  return MTLPixelFormatRGB10A2Unorm;
+        case PL_FORMAT_R10G10B10A2_UINT:   return MTLPixelFormatRGB10A2Uint;
+        case PL_FORMAT_R11G11B10_FLOAT:    return MTLPixelFormatRG11B10Float;
+        case PL_FORMAT_R9G9B9E5_FLOAT:     return MTLPixelFormatRGB9E5Float;
+        case PL_FORMAT_R32G32_UINT:        return MTLPixelFormatRG32Uint;
+        case PL_FORMAT_R32G32_SINT:        return MTLPixelFormatRG32Sint;
+        case PL_FORMAT_R16G16B16A16_UNORM: return MTLPixelFormatRGBA16Unorm;
+        case PL_FORMAT_R16G16B16A16_SNORM: return MTLPixelFormatRGBA16Snorm;
+        case PL_FORMAT_R16G16B16A16_UINT:  return MTLPixelFormatRGBA16Uint;
+        case PL_FORMAT_R16G16B16A16_SINT:  return MTLPixelFormatRGBA16Sint;
+        case PL_FORMAT_R16G16B16A16_FLOAT: return MTLPixelFormatRGBA16Float;
+        case PL_FORMAT_R32G32B32A32_UINT:  return MTLPixelFormatRGBA32Uint;
+        case PL_FORMAT_R32G32B32A32_SINT:  return MTLPixelFormatRGBA32Sint;
+        case PL_FORMAT_BC1_RGBA_UNORM:     return MTLPixelFormatBC1_RGBA;
+        case PL_FORMAT_BC1_RGBA_SRGB:      return MTLPixelFormatBC1_RGBA_sRGB;
+        case PL_FORMAT_BC2_UNORM:          return MTLPixelFormatBC2_RGBA;
+        case PL_FORMAT_BC2_SRGB:           return MTLPixelFormatBC2_RGBA_sRGB;
+        case PL_FORMAT_BC3_UNORM:          return MTLPixelFormatBC3_RGBA;
+        case PL_FORMAT_BC3_SRGB:           return MTLPixelFormatBC3_RGBA_sRGB;
+        case PL_FORMAT_BC4_UNORM:          return MTLPixelFormatBC4_RUnorm;
+        case PL_FORMAT_BC4_SNORM:          return MTLPixelFormatBC4_RSnorm;
+        case PL_FORMAT_BC5_UNORM:          return MTLPixelFormatBC5_RGUnorm;
+        case PL_FORMAT_BC5_SNORM:          return MTLPixelFormatBC5_RGSnorm;
+        case PL_FORMAT_BC6H_UFLOAT:        return MTLPixelFormatBC6H_RGBUfloat;
+        case PL_FORMAT_BC6H_FLOAT:         return MTLPixelFormatBC6H_RGBFloat;
+        case PL_FORMAT_BC7_UNORM:          return MTLPixelFormatBC7_RGBAUnorm;
+        case PL_FORMAT_BC7_SRGB:           return MTLPixelFormatBC7_RGBAUnorm_sRGB;
+        case PL_FORMAT_ETC2_R8G8B8_UNORM:  return MTLPixelFormatETC2_RGB8;
+        case PL_FORMAT_ETC2_R8G8B8_SRGB:   return MTLPixelFormatETC2_RGB8_sRGB;
+        case PL_FORMAT_ETC2_R8G8B8A1_UNORM: return MTLPixelFormatETC2_RGB8A1;
+        case PL_FORMAT_ETC2_R8G8B8A1_SRGB: return MTLPixelFormatETC2_RGB8A1_sRGB;
+        case PL_FORMAT_EAC_R11_UNORM:      return MTLPixelFormatEAC_R11Unorm;
+        case PL_FORMAT_EAC_R11_SNORM:      return MTLPixelFormatEAC_R11Snorm;
+        case PL_FORMAT_EAC_R11G11_UNORM:   return MTLPixelFormatEAC_RG11Unorm;
+        case PL_FORMAT_EAC_R11G11_SNORM:   return MTLPixelFormatEAC_RG11Snorm;
+        case PL_FORMAT_ASTC_4x4_UNORM:     return MTLPixelFormatASTC_4x4_LDR;
+        case PL_FORMAT_ASTC_4x4_SRGB:      return MTLPixelFormatASTC_4x4_sRGB;
+        case PL_FORMAT_ASTC_5x4_UNORM:     return MTLPixelFormatASTC_5x4_LDR;
+        case PL_FORMAT_ASTC_5x4_SRGB:      return MTLPixelFormatASTC_5x4_sRGB;
+        case PL_FORMAT_ASTC_5x5_UNORM:     return MTLPixelFormatASTC_5x5_LDR;
+        case PL_FORMAT_ASTC_5x5_SRGB:      return MTLPixelFormatASTC_5x5_sRGB;
+        case PL_FORMAT_ASTC_6x5_UNORM:     return MTLPixelFormatASTC_6x5_LDR;
+        case PL_FORMAT_ASTC_6x5_SRGB:      return MTLPixelFormatASTC_6x5_sRGB;
+        case PL_FORMAT_ASTC_6x6_UNORM:     return MTLPixelFormatASTC_6x6_LDR;
+        case PL_FORMAT_ASTC_6x6_SRGB:      return MTLPixelFormatASTC_6x6_sRGB;
+        case PL_FORMAT_ASTC_8x5_UNORM:     return MTLPixelFormatASTC_8x5_LDR;
+        case PL_FORMAT_ASTC_8x5_SRGB:      return MTLPixelFormatASTC_8x5_sRGB;
+        case PL_FORMAT_ASTC_8x6_UNORM:     return MTLPixelFormatASTC_8x6_LDR;
+        case PL_FORMAT_ASTC_8x6_SRGB:      return MTLPixelFormatASTC_8x6_sRGB;
+        case PL_FORMAT_ASTC_8x8_UNORM:     return MTLPixelFormatASTC_8x8_LDR;
+        case PL_FORMAT_ASTC_8x8_SRGB:      return MTLPixelFormatASTC_8x8_sRGB;
+        case PL_FORMAT_ASTC_10x5_UNORM:    return MTLPixelFormatASTC_10x5_LDR;
+        case PL_FORMAT_ASTC_10x5_SRGB:     return MTLPixelFormatASTC_10x5_sRGB;
+        case PL_FORMAT_ASTC_10x6_UNORM:    return MTLPixelFormatASTC_10x6_LDR;
+        case PL_FORMAT_ASTC_10x6_SRGB:     return MTLPixelFormatASTC_10x6_sRGB;
+        case PL_FORMAT_ASTC_10x8_UNORM:    return MTLPixelFormatASTC_10x8_LDR;
+        case PL_FORMAT_ASTC_10x8_SRGB:     return MTLPixelFormatASTC_10x8_sRGB;
+        case PL_FORMAT_ASTC_10x10_UNORM:   return MTLPixelFormatASTC_10x10_LDR;
+        case PL_FORMAT_ASTC_10x10_SRGB:    return MTLPixelFormatASTC_10x10_sRGB;
+        case PL_FORMAT_ASTC_12x10_UNORM:   return MTLPixelFormatASTC_12x10_LDR;
+        case PL_FORMAT_ASTC_12x10_SRGB:    return MTLPixelFormatASTC_12x10_sRGB;
+        case PL_FORMAT_ASTC_12x12_UNORM:   return MTLPixelFormatASTC_12x12_LDR;
+        case PL_FORMAT_ASTC_12x12_SRGB:    return MTLPixelFormatASTC_12x12_sRGB;
+        case PL_FORMAT_S8_UINT:            return MTLPixelFormatStencil8;
     }
 
     PL_ASSERT(false && "Unsupported format");
@@ -2699,18 +2791,42 @@
 }
 
 static MTLVertexFormat
-pl__metal_vertex_format(plFormat tFormat)
+pl__metal_vertex_format(plVertexFormat tFormat)
 {
     switch(tFormat)
     {
-        case PL_FORMAT_R32G32B32A32_FLOAT: return MTLVertexFormatFloat4;
-        case PL_FORMAT_R32G32B32_FLOAT:    return MTLVertexFormatFloat3;
-        case PL_FORMAT_R32G32_FLOAT:       return MTLVertexFormatFloat2;
-
-        case PL_FORMAT_B8G8R8A8_UNORM:
-        case PL_FORMAT_R8G8B8A8_UNORM:     return MTLVertexFormatUChar4;
-
-        case PL_FORMAT_R32_UINT:           return MTLVertexFormatUInt;
+        case PL_VERTEX_FORMAT_HALF:    return MTLVertexFormatHalf;
+        case PL_VERTEX_FORMAT_HALF2:   return MTLVertexFormatHalf2;
+        case PL_VERTEX_FORMAT_HALF3:   return MTLVertexFormatHalf3;
+        case PL_VERTEX_FORMAT_HALF4:   return MTLVertexFormatHalf4;
+        case PL_VERTEX_FORMAT_FLOAT:   return MTLVertexFormatFloat;
+        case PL_VERTEX_FORMAT_FLOAT2:  return MTLVertexFormatFloat2;
+        case PL_VERTEX_FORMAT_FLOAT3:  return MTLVertexFormatFloat3;
+        case PL_VERTEX_FORMAT_FLOAT4:  return MTLVertexFormatFloat4;
+        case PL_VERTEX_FORMAT_UCHAR:   return MTLVertexFormatUChar;
+        case PL_VERTEX_FORMAT_UCHAR2:  return MTLVertexFormatUChar2;
+        case PL_VERTEX_FORMAT_UCHAR3:  return MTLVertexFormatUChar3;
+        case PL_VERTEX_FORMAT_UCHAR4:  return MTLVertexFormatUChar4;
+        case PL_VERTEX_FORMAT_CHAR:    return MTLVertexFormatChar;
+        case PL_VERTEX_FORMAT_CHAR2:   return MTLVertexFormatChar2;
+        case PL_VERTEX_FORMAT_CHAR3:   return MTLVertexFormatChar3;
+        case PL_VERTEX_FORMAT_CHAR4:   return MTLVertexFormatChar4;
+        case PL_VERTEX_FORMAT_USHORT:  return MTLVertexFormatUShort;
+        case PL_VERTEX_FORMAT_USHORT2: return MTLVertexFormatUShort2;
+        case PL_VERTEX_FORMAT_USHORT3: return MTLVertexFormatUShort3;
+        case PL_VERTEX_FORMAT_USHORT4: return MTLVertexFormatUShort4;
+        case PL_VERTEX_FORMAT_SHORT:   return MTLVertexFormatShort;
+        case PL_VERTEX_FORMAT_SHORT2:  return MTLVertexFormatShort2;
+        case PL_VERTEX_FORMAT_SHORT3:  return MTLVertexFormatShort3;
+        case PL_VERTEX_FORMAT_SHORT4:  return MTLVertexFormatShort4;
+        case PL_VERTEX_FORMAT_UINT:    return MTLVertexFormatUInt;
+        case PL_VERTEX_FORMAT_UINT2:   return MTLVertexFormatUInt2;
+        case PL_VERTEX_FORMAT_UINT3:   return MTLVertexFormatUInt3;
+        case PL_VERTEX_FORMAT_UINT4:   return MTLVertexFormatUInt4;
+        case PL_VERTEX_FORMAT_INT:     return MTLVertexFormatInt;
+        case PL_VERTEX_FORMAT_INT2:    return MTLVertexFormatInt2;
+        case PL_VERTEX_FORMAT_INT3:    return MTLVertexFormatInt3;
+        case PL_VERTEX_FORMAT_INT4:    return MTLVertexFormatInt4;
     }
 
     PL_ASSERT(false && "Unsupported vertex format");
diff --git a/extensions/pl_graphics_vulkan.c b/extensions/pl_graphics_vulkan.c
index f700716c..95034181 100644
--- a/extensions/pl_graphics_vulkan.c
+++ b/extensions/pl_graphics_vulkan.c
@@ -337,19 +337,20 @@ typedef struct _plSwapchain
//----------------------------------------------------------------------------- // conversion between pilotlight & vulkan types -static VkFilter pl__vulkan_filter (plFilter); -static VkSamplerAddressMode pl__vulkan_wrap (plAddressMode); -static VkCompareOp pl__vulkan_compare (plCompareMode); -static VkFormat pl__vulkan_format (plFormat); -static VkImageLayout pl__vulkan_layout (plTextureUsage); -static VkAttachmentLoadOp pl__vulkan_load_op (plLoadOp); -static VkAttachmentStoreOp pl__vulkan_store_op (plStoreOp); -static VkCullModeFlags pl__vulkan_cull (plCullMode); -static VkShaderStageFlagBits pl__vulkan_stage_flags (plStageFlags); -static plFormat pl__pilotlight_format (VkFormat); -static VkStencilOp pl__vulkan_stencil_op (plStencilOp); -static VkBlendFactor pl__vulkan_blend_factor(plBlendFactor); -static VkBlendOp pl__vulkan_blend_op (plBlendOp); +static VkFilter pl__vulkan_filter (plFilter); +static VkSamplerAddressMode pl__vulkan_wrap (plAddressMode); +static VkCompareOp pl__vulkan_compare (plCompareMode); +static VkFormat pl__vulkan_format (plFormat); +static VkFormat pl__vulkan_vertex_format(plVertexFormat); +static VkImageLayout pl__vulkan_layout (plTextureUsage); +static VkAttachmentLoadOp pl__vulkan_load_op (plLoadOp); +static VkAttachmentStoreOp pl__vulkan_store_op (plStoreOp); +static VkCullModeFlags pl__vulkan_cull (plCullMode); +static VkShaderStageFlagBits pl__vulkan_stage_flags (plStageFlags); +static plFormat pl__pilotlight_format (VkFormat); +static VkStencilOp pl__vulkan_stencil_op (plStencilOp); +static VkBlendFactor pl__vulkan_blend_factor (plBlendFactor); +static VkBlendOp pl__vulkan_blend_op (plBlendOp); // misc static plDeviceMemoryAllocation pl__allocate_staging_dynamic(struct plDeviceMemoryAllocatorO*, uint32_t uTypeFilter, uint64_t ulSize, uint64_t ulAlignment, const char* pcName); @@ -1342,12 +1343,12 @@ pl_create_shader(plDevice* ptDevice, const plShaderDesc* ptDescription) uint32_t uCurrentAttributeCount = 0; for (uint32_t i = 0; i < PL_MAX_VERTEX_ATTRIBUTES; i++) { - if (ptDescription->atVertexBufferLayouts[0].atAttributes[i].tFormat == PL_FORMAT_UNKNOWN) + if (ptDescription->atVertexBufferLayouts[0].atAttributes[i].tFormat == PL_VERTEX_FORMAT_UNKNOWN) break; atAttributeDescription[i].binding = 0; atAttributeDescription[i].location = i; atAttributeDescription[i].offset = ptDescription->atVertexBufferLayouts[0].atAttributes[i].uByteOffset; - atAttributeDescription[i].format = pl__vulkan_format(ptDescription->atVertexBufferLayouts[0].atAttributes[i].tFormat); + atAttributeDescription[i].format = pl__vulkan_vertex_format(ptDescription->atVertexBufferLayouts[0].atAttributes[i].tFormat); uCurrentAttributeCount++; } @@ -4714,39 +4715,160 @@ pl__vulkan_compare(plCompareMode tCompare) return VK_COMPARE_OP_NEVER; } +static VkFormat +pl__vulkan_vertex_format(plVertexFormat tFormat) +{ + switch(tFormat) + { + case PL_VERTEX_FORMAT_HALF: return VK_FORMAT_R16_SFLOAT; + case PL_VERTEX_FORMAT_HALF2: return VK_FORMAT_R16G16_SFLOAT; + case PL_VERTEX_FORMAT_HALF3: return VK_FORMAT_R16G16B16_SFLOAT; + case PL_VERTEX_FORMAT_HALF4: return VK_FORMAT_R16G16B16A16_SFLOAT; + case PL_VERTEX_FORMAT_FLOAT: return VK_FORMAT_R32_SFLOAT; + case PL_VERTEX_FORMAT_FLOAT2: return VK_FORMAT_R32G32_SFLOAT; + case PL_VERTEX_FORMAT_FLOAT3: return VK_FORMAT_R32G32B32_SFLOAT; + case PL_VERTEX_FORMAT_FLOAT4: return VK_FORMAT_R32G32B32A32_SFLOAT; + case PL_VERTEX_FORMAT_UCHAR: return VK_FORMAT_R8_UNORM; + case PL_VERTEX_FORMAT_UCHAR2: return VK_FORMAT_R8G8_UNORM; + case PL_VERTEX_FORMAT_UCHAR3: return 
VK_FORMAT_R8G8B8_UNORM; + case PL_VERTEX_FORMAT_UCHAR4: return VK_FORMAT_R8G8B8A8_UNORM; + case PL_VERTEX_FORMAT_CHAR: return VK_FORMAT_R8_SNORM; + case PL_VERTEX_FORMAT_CHAR2: return VK_FORMAT_R8G8_SNORM; + case PL_VERTEX_FORMAT_CHAR3: return VK_FORMAT_R8G8B8_SNORM; + case PL_VERTEX_FORMAT_CHAR4: return VK_FORMAT_R8G8B8A8_SNORM; + case PL_VERTEX_FORMAT_USHORT: return VK_FORMAT_R16_UINT; + case PL_VERTEX_FORMAT_USHORT2: return VK_FORMAT_R16G16_UINT; + case PL_VERTEX_FORMAT_USHORT3: return VK_FORMAT_R16G16B16_UINT; + case PL_VERTEX_FORMAT_USHORT4: return VK_FORMAT_R16G16B16A16_UINT; + case PL_VERTEX_FORMAT_SHORT: return VK_FORMAT_R16_SINT; + case PL_VERTEX_FORMAT_SHORT2: return VK_FORMAT_R16G16_SINT; + case PL_VERTEX_FORMAT_SHORT3: return VK_FORMAT_R16G16B16_SINT; + case PL_VERTEX_FORMAT_SHORT4: return VK_FORMAT_R16G16B16A16_SINT; + case PL_VERTEX_FORMAT_UINT: return VK_FORMAT_R32_UINT; + case PL_VERTEX_FORMAT_UINT2: return VK_FORMAT_R32G32_UINT; + case PL_VERTEX_FORMAT_UINT3: return VK_FORMAT_R32G32B32_UINT; + case PL_VERTEX_FORMAT_UINT4: return VK_FORMAT_R32G32B32A32_UINT; + case PL_VERTEX_FORMAT_INT: return VK_FORMAT_R32_SINT; + case PL_VERTEX_FORMAT_INT2: return VK_FORMAT_R32G32_SINT; + case PL_VERTEX_FORMAT_INT3: return VK_FORMAT_R32G32B32_SINT; + case PL_VERTEX_FORMAT_INT4: return VK_FORMAT_R32G32B32A32_SINT; + } + + PL_ASSERT(false && "Unsupported vertex format"); + return VK_FORMAT_UNDEFINED; +} + static VkFormat pl__vulkan_format(plFormat tFormat) { switch (tFormat) { - case PL_FORMAT_R32G32B32A32_FLOAT: - return VK_FORMAT_R32G32B32A32_SFLOAT; - case PL_FORMAT_R32G32B32_FLOAT: - return VK_FORMAT_R32G32B32_SFLOAT; - case PL_FORMAT_R8G8B8A8_UNORM: - return VK_FORMAT_R8G8B8A8_UNORM; - case PL_FORMAT_R32G32_FLOAT: - return VK_FORMAT_R32G32_SFLOAT; - case PL_FORMAT_R8G8B8A8_SRGB: - return VK_FORMAT_R8G8B8A8_SRGB; - case PL_FORMAT_B8G8R8A8_SRGB: - return VK_FORMAT_B8G8R8A8_SRGB; - case PL_FORMAT_B8G8R8A8_UNORM: - return VK_FORMAT_B8G8R8A8_UNORM; - case PL_FORMAT_D32_FLOAT: - return VK_FORMAT_D32_SFLOAT; - case PL_FORMAT_R8_UNORM: - return VK_FORMAT_R8_UNORM; - case PL_FORMAT_R32_UINT: - return VK_FORMAT_R32_UINT; - case PL_FORMAT_R8G8_UNORM: - return VK_FORMAT_R8G8_UNORM; - case PL_FORMAT_D32_FLOAT_S8_UINT: - return VK_FORMAT_D32_SFLOAT_S8_UINT; - case PL_FORMAT_D24_UNORM_S8_UINT: - return VK_FORMAT_D24_UNORM_S8_UINT; - case PL_FORMAT_D16_UNORM_S8_UINT: - return VK_FORMAT_D16_UNORM_S8_UINT; + case PL_FORMAT_R32G32B32A32_FLOAT: return VK_FORMAT_R32G32B32A32_SFLOAT; + case PL_FORMAT_R32G32B32_FLOAT: return VK_FORMAT_R32G32B32_SFLOAT; + case PL_FORMAT_R8G8B8A8_UNORM: return VK_FORMAT_R8G8B8A8_UNORM; + case PL_FORMAT_R32G32_FLOAT: return VK_FORMAT_R32G32_SFLOAT; + case PL_FORMAT_R8G8B8A8_SRGB: return VK_FORMAT_R8G8B8A8_SRGB; + case PL_FORMAT_B8G8R8A8_SRGB: return VK_FORMAT_B8G8R8A8_SRGB; + case PL_FORMAT_B8G8R8A8_UNORM: return VK_FORMAT_B8G8R8A8_UNORM; + case PL_FORMAT_D32_FLOAT: return VK_FORMAT_D32_SFLOAT; + case PL_FORMAT_R8_UNORM: return VK_FORMAT_R8_UNORM; + case PL_FORMAT_R32_UINT: return VK_FORMAT_R32_UINT; + case PL_FORMAT_R8G8_UNORM: return VK_FORMAT_R8G8_UNORM; + case PL_FORMAT_D32_FLOAT_S8_UINT: return VK_FORMAT_D32_SFLOAT_S8_UINT; + case PL_FORMAT_D24_UNORM_S8_UINT: return VK_FORMAT_D24_UNORM_S8_UINT; + case PL_FORMAT_D16_UNORM_S8_UINT: return VK_FORMAT_D16_UNORM_S8_UINT; + case PL_FORMAT_R8_SNORM: return VK_FORMAT_R8_SNORM; + case PL_FORMAT_R8_UINT: return VK_FORMAT_R8_UINT; + case PL_FORMAT_R8_SINT: return VK_FORMAT_R8_SINT; + case PL_FORMAT_R8_SRGB: return VK_FORMAT_R8_SRGB; + 
case PL_FORMAT_R16_UNORM: return VK_FORMAT_R16_UNORM; + case PL_FORMAT_R16_SNORM: return VK_FORMAT_R16_SNORM; + case PL_FORMAT_R16_UINT: return VK_FORMAT_R16_UINT; + case PL_FORMAT_R16_SINT: return VK_FORMAT_R16_SINT; + case PL_FORMAT_R16_FLOAT: return VK_FORMAT_R16_SFLOAT; + case PL_FORMAT_R8G8_SNORM: return VK_FORMAT_R8G8_SNORM; + case PL_FORMAT_R8G8_UINT: return VK_FORMAT_R8G8_UINT; + case PL_FORMAT_R8G8_SINT: return VK_FORMAT_R8G8_SINT; + case PL_FORMAT_R8G8_SRGB: return VK_FORMAT_R8G8_SRGB; + case PL_FORMAT_B5G6R5_UNORM: return VK_FORMAT_B5G6R5_UNORM_PACK16; + case PL_FORMAT_A1R5G5B5_UNORM: return VK_FORMAT_A1R5G5B5_UNORM_PACK16; + case PL_FORMAT_B5G5R5A1_UNORM: return VK_FORMAT_B5G5R5A1_UNORM_PACK16; + case PL_FORMAT_R32_SINT: return VK_FORMAT_R32_SINT; + case PL_FORMAT_R32_FLOAT: return VK_FORMAT_R32_SFLOAT; + case PL_FORMAT_R16G16_UNORM: return VK_FORMAT_R16G16_UNORM; + case PL_FORMAT_R16G16_SNORM: return VK_FORMAT_R16G16_SNORM; + case PL_FORMAT_R16G16_UINT: return VK_FORMAT_R16G16_UINT; + case PL_FORMAT_R16G16_SINT: return VK_FORMAT_R16G16_SINT; + case PL_FORMAT_R16G16_FLOAT: return VK_FORMAT_R16G16_SFLOAT; + case PL_FORMAT_R8G8B8A8_SNORM: return VK_FORMAT_R8G8B8A8_SNORM; + case PL_FORMAT_R8G8B8A8_UINT: return VK_FORMAT_R8G8B8A8_UINT; + case PL_FORMAT_R8G8B8A8_SINT: return VK_FORMAT_R8G8B8A8_SINT; + case PL_FORMAT_B10G10R10A2_UNORM: return VK_FORMAT_A2R10G10B10_UNORM_PACK32; + case PL_FORMAT_R10G10B10A2_UNORM: return VK_FORMAT_A2B10G10R10_UNORM_PACK32; + case PL_FORMAT_R10G10B10A2_UINT: return VK_FORMAT_A2B10G10R10_UINT_PACK32; + case PL_FORMAT_R11G11B10_FLOAT: return VK_FORMAT_B10G11R11_UFLOAT_PACK32; + case PL_FORMAT_R9G9B9E5_FLOAT: return VK_FORMAT_E5B9G9R9_UFLOAT_PACK32; + case PL_FORMAT_R32G32_UINT: return VK_FORMAT_R32G32_UINT; + case PL_FORMAT_R32G32_SINT: return VK_FORMAT_R32G32_SINT; + case PL_FORMAT_R16G16B16A16_UNORM: return VK_FORMAT_R16G16B16A16_UNORM; + case PL_FORMAT_R16G16B16A16_SNORM: return VK_FORMAT_R16G16B16A16_SNORM; + case PL_FORMAT_R16G16B16A16_UINT: return VK_FORMAT_R16G16B16A16_UINT; + case PL_FORMAT_R16G16B16A16_SINT: return VK_FORMAT_R16G16B16A16_SINT; + case PL_FORMAT_R16G16B16A16_FLOAT: return VK_FORMAT_R16G16B16A16_SFLOAT; + case PL_FORMAT_R32G32B32A32_UINT: return VK_FORMAT_R32G32B32A32_UINT; + case PL_FORMAT_R32G32B32A32_SINT: return VK_FORMAT_R32G32B32A32_SINT; + case PL_FORMAT_BC1_RGBA_UNORM: return VK_FORMAT_BC1_RGBA_UNORM_BLOCK; + case PL_FORMAT_BC1_RGBA_SRGB: return VK_FORMAT_BC1_RGBA_SRGB_BLOCK; + case PL_FORMAT_BC2_UNORM: return VK_FORMAT_BC2_UNORM_BLOCK; + case PL_FORMAT_BC2_SRGB: return VK_FORMAT_BC2_SRGB_BLOCK; + case PL_FORMAT_BC3_UNORM: return VK_FORMAT_BC3_UNORM_BLOCK; + case PL_FORMAT_BC3_SRGB: return VK_FORMAT_BC3_SRGB_BLOCK; + case PL_FORMAT_BC4_UNORM: return VK_FORMAT_BC4_UNORM_BLOCK; + case PL_FORMAT_BC4_SNORM: return VK_FORMAT_BC4_SNORM_BLOCK; + case PL_FORMAT_BC5_UNORM: return VK_FORMAT_BC5_UNORM_BLOCK; + case PL_FORMAT_BC5_SNORM: return VK_FORMAT_BC5_SNORM_BLOCK; + case PL_FORMAT_BC6H_UFLOAT: return VK_FORMAT_BC6H_UFLOAT_BLOCK; + case PL_FORMAT_BC6H_FLOAT: return VK_FORMAT_BC6H_SFLOAT_BLOCK; + case PL_FORMAT_BC7_UNORM: return VK_FORMAT_BC7_UNORM_BLOCK; + case PL_FORMAT_BC7_SRGB: return VK_FORMAT_BC7_SRGB_BLOCK; + case PL_FORMAT_ETC2_R8G8B8_UNORM: return VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK; + case PL_FORMAT_ETC2_R8G8B8_SRGB: return VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK; + case PL_FORMAT_ETC2_R8G8B8A1_UNORM: return VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK; + case PL_FORMAT_ETC2_R8G8B8A1_SRGB: return VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK; + case 
+        case PL_FORMAT_EAC_R11_UNORM: return VK_FORMAT_EAC_R11_UNORM_BLOCK;
+        case PL_FORMAT_EAC_R11_SNORM: return VK_FORMAT_EAC_R11_SNORM_BLOCK;
+        case PL_FORMAT_EAC_R11G11_UNORM: return VK_FORMAT_EAC_R11G11_UNORM_BLOCK;
+        case PL_FORMAT_EAC_R11G11_SNORM: return VK_FORMAT_EAC_R11G11_SNORM_BLOCK;
+        case PL_FORMAT_ASTC_4x4_UNORM: return VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_4x4_SRGB: return VK_FORMAT_ASTC_4x4_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_5x4_UNORM: return VK_FORMAT_ASTC_5x4_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_5x4_SRGB: return VK_FORMAT_ASTC_5x4_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_5x5_UNORM: return VK_FORMAT_ASTC_5x5_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_5x5_SRGB: return VK_FORMAT_ASTC_5x5_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_6x5_UNORM: return VK_FORMAT_ASTC_6x5_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_6x5_SRGB: return VK_FORMAT_ASTC_6x5_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_6x6_UNORM: return VK_FORMAT_ASTC_6x6_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_6x6_SRGB: return VK_FORMAT_ASTC_6x6_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_8x5_UNORM: return VK_FORMAT_ASTC_8x5_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_8x5_SRGB: return VK_FORMAT_ASTC_8x5_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_8x6_UNORM: return VK_FORMAT_ASTC_8x6_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_8x6_SRGB: return VK_FORMAT_ASTC_8x6_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_8x8_UNORM: return VK_FORMAT_ASTC_8x8_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_8x8_SRGB: return VK_FORMAT_ASTC_8x8_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_10x5_UNORM: return VK_FORMAT_ASTC_10x5_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_10x5_SRGB: return VK_FORMAT_ASTC_10x5_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_10x6_UNORM: return VK_FORMAT_ASTC_10x6_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_10x6_SRGB: return VK_FORMAT_ASTC_10x6_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_10x8_UNORM: return VK_FORMAT_ASTC_10x8_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_10x8_SRGB: return VK_FORMAT_ASTC_10x8_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_10x10_UNORM: return VK_FORMAT_ASTC_10x10_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_10x10_SRGB: return VK_FORMAT_ASTC_10x10_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_12x10_UNORM: return VK_FORMAT_ASTC_12x10_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_12x10_SRGB: return VK_FORMAT_ASTC_12x10_SRGB_BLOCK;
+        case PL_FORMAT_ASTC_12x12_UNORM: return VK_FORMAT_ASTC_12x12_UNORM_BLOCK;
+        case PL_FORMAT_ASTC_12x12_SRGB: return VK_FORMAT_ASTC_12x12_SRGB_BLOCK;
+        case PL_FORMAT_D16_UNORM: return VK_FORMAT_D16_UNORM;
+        case PL_FORMAT_S8_UINT: return VK_FORMAT_S8_UINT;
     }
     PL_ASSERT(false && "Unsupported format");
@@ -4943,32 +5065,112 @@ pl__pilotlight_format(VkFormat tFormat)
 {
     switch (tFormat)
     {
-        case VK_FORMAT_R32G32B32_SFLOAT:
-            return PL_FORMAT_R32G32B32_FLOAT;
-        case VK_FORMAT_R8G8B8A8_UNORM:
-            return PL_FORMAT_R8G8B8A8_UNORM;
-        case VK_FORMAT_R32G32_SFLOAT:
-            return PL_FORMAT_R32G32_FLOAT;
-        case VK_FORMAT_R8G8B8A8_SRGB:
-            return PL_FORMAT_R8G8B8A8_SRGB;
-        case VK_FORMAT_B8G8R8A8_SRGB:
-            return PL_FORMAT_B8G8R8A8_SRGB;
-        case VK_FORMAT_B8G8R8A8_UNORM:
-            return PL_FORMAT_B8G8R8A8_UNORM;
-        case VK_FORMAT_R8_UNORM:
-            return PL_FORMAT_R8_UNORM;
-        case VK_FORMAT_R8G8_UNORM:
-            return PL_FORMAT_R8G8_UNORM;
-        case VK_FORMAT_R32_UINT:
-            return PL_FORMAT_R32_UINT;
-        case VK_FORMAT_D32_SFLOAT:
-            return PL_FORMAT_D32_FLOAT;
-        case VK_FORMAT_D32_SFLOAT_S8_UINT:
-            return PL_FORMAT_D32_FLOAT_S8_UINT;
-        case VK_FORMAT_D24_UNORM_S8_UINT:
-            return PL_FORMAT_D24_UNORM_S8_UINT;
-        case VK_FORMAT_D16_UNORM_S8_UINT:
-            return PL_FORMAT_D16_UNORM_S8_UINT;
+        case VK_FORMAT_R32G32B32A32_SFLOAT: return PL_FORMAT_R32G32B32A32_FLOAT;
+        case VK_FORMAT_R32G32B32_SFLOAT: return PL_FORMAT_R32G32B32_FLOAT;
+        case VK_FORMAT_R8G8B8A8_UNORM: return PL_FORMAT_R8G8B8A8_UNORM;
+        case VK_FORMAT_R32G32_SFLOAT: return PL_FORMAT_R32G32_FLOAT;
+        case VK_FORMAT_R8G8B8A8_SRGB: return PL_FORMAT_R8G8B8A8_SRGB;
+        case VK_FORMAT_B8G8R8A8_SRGB: return PL_FORMAT_B8G8R8A8_SRGB;
+        case VK_FORMAT_B8G8R8A8_UNORM: return PL_FORMAT_B8G8R8A8_UNORM;
+        case VK_FORMAT_D32_SFLOAT: return PL_FORMAT_D32_FLOAT;
+        case VK_FORMAT_R8_UNORM: return PL_FORMAT_R8_UNORM;
+        case VK_FORMAT_R32_UINT: return PL_FORMAT_R32_UINT;
+        case VK_FORMAT_R8G8_UNORM: return PL_FORMAT_R8G8_UNORM;
+        case VK_FORMAT_D32_SFLOAT_S8_UINT: return PL_FORMAT_D32_FLOAT_S8_UINT;
+        case VK_FORMAT_D24_UNORM_S8_UINT: return PL_FORMAT_D24_UNORM_S8_UINT;
+        case VK_FORMAT_D16_UNORM_S8_UINT: return PL_FORMAT_D16_UNORM_S8_UINT;
+        case VK_FORMAT_R8_SNORM: return PL_FORMAT_R8_SNORM;
+        case VK_FORMAT_R8_UINT: return PL_FORMAT_R8_UINT;
+        case VK_FORMAT_R8_SINT: return PL_FORMAT_R8_SINT;
+        case VK_FORMAT_R8_SRGB: return PL_FORMAT_R8_SRGB;
+        case VK_FORMAT_R16_UNORM: return PL_FORMAT_R16_UNORM;
+        case VK_FORMAT_R16_SNORM: return PL_FORMAT_R16_SNORM;
+        case VK_FORMAT_R16_UINT: return PL_FORMAT_R16_UINT;
+        case VK_FORMAT_R16_SINT: return PL_FORMAT_R16_SINT;
+        case VK_FORMAT_R16_SFLOAT: return PL_FORMAT_R16_FLOAT;
+        case VK_FORMAT_R8G8_SNORM: return PL_FORMAT_R8G8_SNORM;
+        case VK_FORMAT_R8G8_UINT: return PL_FORMAT_R8G8_UINT;
+        case VK_FORMAT_R8G8_SINT: return PL_FORMAT_R8G8_SINT;
+        case VK_FORMAT_R8G8_SRGB: return PL_FORMAT_R8G8_SRGB;
+        case VK_FORMAT_B5G6R5_UNORM_PACK16: return PL_FORMAT_B5G6R5_UNORM;
+        case VK_FORMAT_A1R5G5B5_UNORM_PACK16: return PL_FORMAT_A1R5G5B5_UNORM;
+        case VK_FORMAT_B5G5R5A1_UNORM_PACK16: return PL_FORMAT_B5G5R5A1_UNORM;
+        case VK_FORMAT_R32_SINT: return PL_FORMAT_R32_SINT;
+        case VK_FORMAT_R32_SFLOAT: return PL_FORMAT_R32_FLOAT;
+        case VK_FORMAT_R16G16_UNORM: return PL_FORMAT_R16G16_UNORM;
+        case VK_FORMAT_R16G16_SNORM: return PL_FORMAT_R16G16_SNORM;
+        case VK_FORMAT_R16G16_UINT: return PL_FORMAT_R16G16_UINT;
+        case VK_FORMAT_R16G16_SINT: return PL_FORMAT_R16G16_SINT;
+        case VK_FORMAT_R16G16_SFLOAT: return PL_FORMAT_R16G16_FLOAT;
+        case VK_FORMAT_R8G8B8A8_SNORM: return PL_FORMAT_R8G8B8A8_SNORM;
+        case VK_FORMAT_R8G8B8A8_UINT: return PL_FORMAT_R8G8B8A8_UINT;
+        case VK_FORMAT_R8G8B8A8_SINT: return PL_FORMAT_R8G8B8A8_SINT;
+        case VK_FORMAT_A2R10G10B10_UNORM_PACK32: return PL_FORMAT_B10G10R10A2_UNORM;
+        case VK_FORMAT_A2B10G10R10_UNORM_PACK32: return PL_FORMAT_R10G10B10A2_UNORM;
+        case VK_FORMAT_A2B10G10R10_UINT_PACK32: return PL_FORMAT_R10G10B10A2_UINT;
+        case VK_FORMAT_B10G11R11_UFLOAT_PACK32: return PL_FORMAT_R11G11B10_FLOAT;
+        case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: return PL_FORMAT_R9G9B9E5_FLOAT;
+        case VK_FORMAT_R32G32_UINT: return PL_FORMAT_R32G32_UINT;
+        case VK_FORMAT_R32G32_SINT: return PL_FORMAT_R32G32_SINT;
+        case VK_FORMAT_R16G16B16A16_UNORM: return PL_FORMAT_R16G16B16A16_UNORM;
+        case VK_FORMAT_R16G16B16A16_SNORM: return PL_FORMAT_R16G16B16A16_SNORM;
+        case VK_FORMAT_R16G16B16A16_UINT: return PL_FORMAT_R16G16B16A16_UINT;
+        case VK_FORMAT_R16G16B16A16_SINT: return PL_FORMAT_R16G16B16A16_SINT;
+        case VK_FORMAT_R16G16B16A16_SFLOAT: return PL_FORMAT_R16G16B16A16_FLOAT;
+        case VK_FORMAT_R32G32B32A32_UINT: return PL_FORMAT_R32G32B32A32_UINT;
+        case VK_FORMAT_R32G32B32A32_SINT: return PL_FORMAT_R32G32B32A32_SINT;
+        case VK_FORMAT_BC1_RGBA_UNORM_BLOCK: return PL_FORMAT_BC1_RGBA_UNORM;
+        case VK_FORMAT_BC1_RGBA_SRGB_BLOCK: return PL_FORMAT_BC1_RGBA_SRGB;
+        case VK_FORMAT_BC2_UNORM_BLOCK: return PL_FORMAT_BC2_UNORM;
+        case VK_FORMAT_BC2_SRGB_BLOCK: return PL_FORMAT_BC2_SRGB;
+        case VK_FORMAT_BC3_UNORM_BLOCK: return PL_FORMAT_BC3_UNORM;
+        case VK_FORMAT_BC3_SRGB_BLOCK: return PL_FORMAT_BC3_SRGB;
+        case VK_FORMAT_BC4_UNORM_BLOCK: return PL_FORMAT_BC4_UNORM;
+        case VK_FORMAT_BC4_SNORM_BLOCK: return PL_FORMAT_BC4_SNORM;
+        case VK_FORMAT_BC5_UNORM_BLOCK: return PL_FORMAT_BC5_UNORM;
+        case VK_FORMAT_BC5_SNORM_BLOCK: return PL_FORMAT_BC5_SNORM;
+        case VK_FORMAT_BC6H_UFLOAT_BLOCK: return PL_FORMAT_BC6H_UFLOAT;
+        case VK_FORMAT_BC6H_SFLOAT_BLOCK: return PL_FORMAT_BC6H_FLOAT;
+        case VK_FORMAT_BC7_UNORM_BLOCK: return PL_FORMAT_BC7_UNORM;
+        case VK_FORMAT_BC7_SRGB_BLOCK: return PL_FORMAT_BC7_SRGB;
+        case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK: return PL_FORMAT_ETC2_R8G8B8_UNORM;
+        case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK: return PL_FORMAT_ETC2_R8G8B8_SRGB;
+        case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK: return PL_FORMAT_ETC2_R8G8B8A1_UNORM;
+        case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK: return PL_FORMAT_ETC2_R8G8B8A1_SRGB;
+        case VK_FORMAT_EAC_R11_UNORM_BLOCK: return PL_FORMAT_EAC_R11_UNORM;
+        case VK_FORMAT_EAC_R11_SNORM_BLOCK: return PL_FORMAT_EAC_R11_SNORM;
+        case VK_FORMAT_EAC_R11G11_UNORM_BLOCK: return PL_FORMAT_EAC_R11G11_UNORM;
+        case VK_FORMAT_EAC_R11G11_SNORM_BLOCK: return PL_FORMAT_EAC_R11G11_SNORM;
+        case VK_FORMAT_ASTC_4x4_UNORM_BLOCK: return PL_FORMAT_ASTC_4x4_UNORM;
+        case VK_FORMAT_ASTC_4x4_SRGB_BLOCK: return PL_FORMAT_ASTC_4x4_SRGB;
+        case VK_FORMAT_ASTC_5x4_UNORM_BLOCK: return PL_FORMAT_ASTC_5x4_UNORM;
+        case VK_FORMAT_ASTC_5x4_SRGB_BLOCK: return PL_FORMAT_ASTC_5x4_SRGB;
+        case VK_FORMAT_ASTC_5x5_UNORM_BLOCK: return PL_FORMAT_ASTC_5x5_UNORM;
+        case VK_FORMAT_ASTC_5x5_SRGB_BLOCK: return PL_FORMAT_ASTC_5x5_SRGB;
+        case VK_FORMAT_ASTC_6x5_UNORM_BLOCK: return PL_FORMAT_ASTC_6x5_UNORM;
+        case VK_FORMAT_ASTC_6x5_SRGB_BLOCK: return PL_FORMAT_ASTC_6x5_SRGB;
+        case VK_FORMAT_ASTC_6x6_UNORM_BLOCK: return PL_FORMAT_ASTC_6x6_UNORM;
+        case VK_FORMAT_ASTC_6x6_SRGB_BLOCK: return PL_FORMAT_ASTC_6x6_SRGB;
+        case VK_FORMAT_ASTC_8x5_UNORM_BLOCK: return PL_FORMAT_ASTC_8x5_UNORM;
+        case VK_FORMAT_ASTC_8x5_SRGB_BLOCK: return PL_FORMAT_ASTC_8x5_SRGB;
+        case VK_FORMAT_ASTC_8x6_UNORM_BLOCK: return PL_FORMAT_ASTC_8x6_UNORM;
+        case VK_FORMAT_ASTC_8x6_SRGB_BLOCK: return PL_FORMAT_ASTC_8x6_SRGB;
+        case VK_FORMAT_ASTC_8x8_UNORM_BLOCK: return PL_FORMAT_ASTC_8x8_UNORM;
+        case VK_FORMAT_ASTC_8x8_SRGB_BLOCK: return PL_FORMAT_ASTC_8x8_SRGB;
+        case VK_FORMAT_ASTC_10x5_UNORM_BLOCK: return PL_FORMAT_ASTC_10x5_UNORM;
+        case VK_FORMAT_ASTC_10x5_SRGB_BLOCK: return PL_FORMAT_ASTC_10x5_SRGB;
+        case VK_FORMAT_ASTC_10x6_UNORM_BLOCK: return PL_FORMAT_ASTC_10x6_UNORM;
+        case VK_FORMAT_ASTC_10x6_SRGB_BLOCK: return PL_FORMAT_ASTC_10x6_SRGB;
+        case VK_FORMAT_ASTC_10x8_UNORM_BLOCK: return PL_FORMAT_ASTC_10x8_UNORM;
+        case VK_FORMAT_ASTC_10x8_SRGB_BLOCK: return PL_FORMAT_ASTC_10x8_SRGB;
+        case VK_FORMAT_ASTC_10x10_UNORM_BLOCK: return PL_FORMAT_ASTC_10x10_UNORM;
+        case VK_FORMAT_ASTC_10x10_SRGB_BLOCK: return PL_FORMAT_ASTC_10x10_SRGB;
+        case VK_FORMAT_ASTC_12x10_UNORM_BLOCK: return PL_FORMAT_ASTC_12x10_UNORM;
+        case VK_FORMAT_ASTC_12x10_SRGB_BLOCK: return PL_FORMAT_ASTC_12x10_SRGB;
+        case VK_FORMAT_ASTC_12x12_UNORM_BLOCK: return PL_FORMAT_ASTC_12x12_UNORM;
+        case VK_FORMAT_ASTC_12x12_SRGB_BLOCK: return PL_FORMAT_ASTC_12x12_SRGB;
+        case VK_FORMAT_D16_UNORM: return PL_FORMAT_D16_UNORM;
+        case VK_FORMAT_S8_UINT: return PL_FORMAT_S8_UINT;
         default: break;
     }
diff --git a/extensions/pl_renderer_ext.c b/extensions/pl_renderer_ext.c
index 0382a386..7f291230 100644
--- a/extensions/pl_renderer_ext.c
+++ b/extensions/pl_renderer_ext.c
@@ -788,7 +788,7 @@ pl_refr_initialize(plWindow* ptWindow)
         .atVertexBufferLayouts = {
             {
                 .uByteStride = sizeof(float) * 3,
-                .atAttributes = { {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32B32_FLOAT}}
+                .atAttributes = { {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT3}}
             }
         },
         .pTempConstantData = aiConstantData,
@@ -866,7 +866,7 @@ pl_refr_initialize(plWindow* ptWindow)
         .atVertexBufferLayouts = {
             {
                 .uByteStride = sizeof(float) * 3,
-                .atAttributes = { {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32B32_FLOAT}}
+                .atAttributes = { {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT3}}
             }
         },
         .pTempConstantData = aiConstantData,
@@ -953,7 +953,7 @@ pl_refr_initialize(plWindow* ptWindow)
         .atVertexBufferLayouts = {
             {
                 .uByteStride = sizeof(float) * 3,
-                .atAttributes = { {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32B32_FLOAT}}
+                .atAttributes = { {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT3}}
             }
         },
         .pTempConstantData = aiConstantData,
@@ -1010,7 +1010,7 @@ pl_refr_initialize(plWindow* ptWindow)
         .atVertexBufferLayouts = {
             {
                 .uByteStride = sizeof(float) * 3,
-                .atAttributes = { {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32B32_FLOAT}}
+                .atAttributes = { {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT3}}
             }
         },
         .atBlendStates = {
@@ -1048,8 +1048,8 @@ pl_refr_initialize(plWindow* ptWindow)
             {
                 .uByteStride = sizeof(float) * 4,
                 .atAttributes = {
-                    {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32_FLOAT},
-                    {.uByteOffset = sizeof(float) * 2, .tFormat = PL_FORMAT_R32G32_FLOAT},
+                    {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
+                    {.uByteOffset = sizeof(float) * 2, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
                 }
             }
         },
@@ -1913,7 +1913,7 @@ pl_refr_load_skybox_from_panorama(uint32_t uSceneHandle, const char* pcPath, int
         .atVertexBufferLayouts = {
             {
                 .uByteStride = sizeof(float) * 3,
-                .atAttributes = { {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32B32_FLOAT}}
+                .atAttributes = { {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT3}}
             }
         },
         .atBlendStates = {
@@ -3186,8 +3186,8 @@ pl_refr_finalize_scene(uint32_t uSceneHandle)
             {
                 .uByteStride = sizeof(float) * 4,
                 .atAttributes = {
-                    {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32_FLOAT},
-                    {.uByteOffset = sizeof(float) * 2, .tFormat = PL_FORMAT_R32G32_FLOAT}
+                    {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
+                    {.uByteOffset = sizeof(float) * 2, .tFormat = PL_VERTEX_FORMAT_FLOAT2}
                 }
             }
         },
@@ -3276,8 +3276,8 @@ pl_refr_finalize_scene(uint32_t uSceneHandle)
             {
                 .uByteStride = sizeof(float) * 4,
                 .atAttributes = {
-                    {.uByteOffset = 0, .tFormat = PL_FORMAT_R32G32_FLOAT},
-                    {.uByteOffset = sizeof(float) * 2, .tFormat = PL_FORMAT_R32G32_FLOAT}
+                    {.uByteOffset = 0, .tFormat = PL_VERTEX_FORMAT_FLOAT2},
+                    {.uByteOffset = sizeof(float) * 2, .tFormat = PL_VERTEX_FORMAT_FLOAT2}
                }
            }
        },