2 Commits
2.0.4 ... 2.0.2

SHA1        Message             Date
1df4bb6980  Fix changelog.      2008-04-17 09:04:57 +00:00
0294c4ad93  Tag 2.0.2 release.  2008-04-17 08:59:21 +00:00
33 changed files with 1419 additions and 2146 deletions

View File

@ -1,15 +1,3 @@
NVIDIA Texture Tools version 2.0.4
* Fix error in RGB format output; reported by jonsoh. See issue 49.
* Added support for RGB format dithering by jonsoh. Fixes issues 50 and 51.
* Prevent infinite loop in indexMirror when width equals 1. Fixes issue 65.
* Implement general scale filter, including upsampling.
NVIDIA Texture Tools version 2.0.3
* More accurate DXT3 compressor. Fixes issue 38.
* Remove legacy compressors. Fix issue 34.
* Check for single color in all compressors. Fixes issue 43.
* Fix error in fast downsample filter, reported by Noel Llopis.
NVIDIA Texture Tools version 2.0.2 NVIDIA Texture Tools version 2.0.2
* Fix copy ctor error reported by Richard Sim. * Fix copy ctor error reported by Richard Sim.
* Fix indexMirror error reported by Chris Lambert. * Fix indexMirror error reported by Chris Lambert.

View File

@ -1 +1 @@
2.0.4 2.0.2

gnuwin32/bin/libpng12.dll Executable file → Normal file
View File

View File

@ -355,10 +355,6 @@
RelativePath="..\..\..\src\nvimage\nvimage.h" RelativePath="..\..\..\src\nvimage\nvimage.h"
> >
</File> </File>
<File
RelativePath="..\..\..\src\nvimage\PixelFormat.h"
>
</File>
<File <File
RelativePath="..\..\..\src\nvimage\PsdFile.h" RelativePath="..\..\..\src\nvimage\PsdFile.h"
> >

View File

@ -53,8 +53,8 @@ END
// //
VS_VERSION_INFO VERSIONINFO VS_VERSION_INFO VERSIONINFO
FILEVERSION 2,0,4,0 FILEVERSION 2,0,2,0
PRODUCTVERSION 2,0,4,0 PRODUCTVERSION 2,0,2,0
FILEFLAGSMASK 0x17L FILEFLAGSMASK 0x17L
#ifdef _DEBUG #ifdef _DEBUG
FILEFLAGS 0x1L FILEFLAGS 0x1L
@ -71,12 +71,12 @@ BEGIN
BEGIN BEGIN
VALUE "CompanyName", "NVIDIA Corporation" VALUE "CompanyName", "NVIDIA Corporation"
VALUE "FileDescription", "NVIDIA Texture Tools Dynamic Link Library" VALUE "FileDescription", "NVIDIA Texture Tools Dynamic Link Library"
VALUE "FileVersion", "2, 0, 4, 0" VALUE "FileVersion", "2, 0, 2, 0"
VALUE "InternalName", "nvtt" VALUE "InternalName", "nvtt"
VALUE "LegalCopyright", "Copyright (C) 2007" VALUE "LegalCopyright", "Copyright (C) 2007"
VALUE "OriginalFilename", "nvtt.dll" VALUE "OriginalFilename", "nvtt.dll"
VALUE "ProductName", "NVIDIA Texture Tools Dynamic Link Library" VALUE "ProductName", "NVIDIA Texture Tools Dynamic Link Library"
VALUE "ProductVersion", "2, 0, 4, 0" VALUE "ProductVersion", "2, 0, 2, 0"
END END
END END
BLOCK "VarFileInfo" BLOCK "VarFileInfo"

View File

@ -711,7 +711,7 @@
> >
<Tool <Tool
Name="VCCustomBuildTool" Name="VCCustomBuildTool"
CommandLine="&quot;$(CUDA_BIN_PATH)\nvcc.exe&quot; -m32 -ccbin &quot;$(VCInstallDir)bin&quot; -c -DNDEBUG -DWIN32 -D_CONSOLE -D_MBCS -Xcompiler /EHsc,/W3,/nologo,/Wp64,/O2,/Zi,/MD -I&quot;$(CUDA_INC_PATH)&quot; -I./ -o $(IntDir)\$(InputName).obj ..\\..\\..\\src\\nvtt\\cuda\\CompressKernel.cu&#x0D;&#x0A;" CommandLine="&quot;$(CUDA_BIN_PATH)\nvcc.exe&quot; -m32 -ccbin &quot;$(VCInstallDir)bin&quot; -c -DNDEBUG -DWIN32 -D_CONSOLE -D_MBCS -Xcompiler /EHsc,/W3,/nologo,/Wp64,/O2,/Zi,/MD -I&quot;$(CUDA_INC_PATH)&quot; -I./ -o $(IntDir)\$(InputName).obj ..\\..\\..\\src\\nvtt\\cuda\\CompressKernel.cu"
AdditionalDependencies="CudaMath.h" AdditionalDependencies="CudaMath.h"
Outputs="$(IntDir)\$(InputName).obj" Outputs="$(IntDir)\$(InputName).obj"
/> />
@ -849,6 +849,10 @@
RelativePath="..\..\..\src\nvtt\cuda\CudaUtils.cpp" RelativePath="..\..\..\src\nvtt\cuda\CudaUtils.cpp"
> >
</File> </File>
<File
RelativePath="..\..\..\src\nvtt\FastCompressDXT.cpp"
>
</File>
<File <File
RelativePath="..\..\..\src\nvtt\InputOptions.cpp" RelativePath="..\..\..\src\nvtt\InputOptions.cpp"
> >
@ -861,10 +865,6 @@
RelativePath="..\..\..\src\nvtt\nvtt_wrapper.cpp" RelativePath="..\..\..\src\nvtt\nvtt_wrapper.cpp"
> >
</File> </File>
<File
RelativePath="..\..\..\src\nvtt\OptimalCompressDXT.cpp"
>
</File>
<File <File
RelativePath="..\..\..\src\nvtt\OutputOptions.cpp" RelativePath="..\..\..\src\nvtt\OutputOptions.cpp"
> >
@ -911,6 +911,10 @@
RelativePath="..\..\..\src\nvtt\cuda\CudaUtils.h" RelativePath="..\..\..\src\nvtt\cuda\CudaUtils.h"
> >
</File> </File>
<File
RelativePath="..\..\..\src\nvtt\FastCompressDXT.h"
>
</File>
<File <File
RelativePath="..\..\..\src\nvtt\InputOptions.h" RelativePath="..\..\..\src\nvtt\InputOptions.h"
> >
@ -923,10 +927,6 @@
RelativePath="..\..\..\src\nvtt\nvtt_wrapper.h" RelativePath="..\..\..\src\nvtt\nvtt_wrapper.h"
> >
</File> </File>
<File
RelativePath="..\..\..\src\nvtt\OptimalCompressDXT.h"
>
</File>
<File <File
RelativePath="..\..\..\src\nvtt\OutputOptions.h" RelativePath="..\..\..\src\nvtt\OutputOptions.h"
> >

View File

@ -532,7 +532,7 @@ DDSHeader::DDSHeader()
// Store version information on the reserved header attributes. // Store version information on the reserved header attributes.
this->reserved[9] = MAKEFOURCC('N', 'V', 'T', 'T'); this->reserved[9] = MAKEFOURCC('N', 'V', 'T', 'T');
this->reserved[10] = (2 << 16) | (0 << 8) | (4); // major.minor.revision this->reserved[10] = (2 << 16) | (0 << 8) | (2); // major.minor.revision
this->pf.size = 32; this->pf.size = 32;
this->pf.flags = 0; this->pf.flags = 0;
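The hunk above packs the NVTT version into DDSHeader::reserved[10] as major.minor.revision, one byte per component. A minimal standalone sketch of the corresponding decode (not part of this diff), just to make the packing explicit:

#include <cstdio>

int main()
{
    // Same packing as the 2.0.4 side of the hunk: (major << 16) | (minor << 8) | revision.
    const unsigned int packed = (2 << 16) | (0 << 8) | (4);

    const unsigned int major    = (packed >> 16) & 0xFF;
    const unsigned int minor    = (packed >> 8)  & 0xFF;
    const unsigned int revision =  packed        & 0xFF;

    std::printf("NVTT %u.%u.%u\n", major, minor, revision);   // prints "NVTT 2.0.4"
    return 0;
}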

View File

@ -244,7 +244,7 @@ SincFilter::SincFilter(float w) : Filter(w) {}
float SincFilter::evaluate(float x) const float SincFilter::evaluate(float x) const
{ {
return sincf(PI * x); return 0.0f;
} }
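For context, sincf(PI * x) on the 2.0.4 side is the normalized sinc that windowed-sinc resampling filters are built from; the 2.0.2 side stubs it out. A hedged sketch of a plausible sincf with the x -> 0 limit handled (the actual nvmath helper may differ):

#include <cmath>

inline float sincf(const float x)
{
    if (std::fabs(x) < 0.0001f) {
        // Series expansion near zero: sin(x)/x ~= 1 - x^2/6 + x^4/120.
        return 1.0f + x * x * (-1.0f / 6.0f + x * x * (1.0f / 120.0f));
    }
    return std::sin(x) / x;
}

// SincFilter::evaluate(x) is then sincf(PI * x): 1 at x = 0, zero at every other integer x.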
@ -541,17 +541,12 @@ void Kernel2::initBlendedSobel(const Vector4 & scale)
PolyphaseKernel::PolyphaseKernel(const Filter & f, uint srcLength, uint dstLength, int samples/*= 32*/) PolyphaseKernel::PolyphaseKernel(const Filter & f, uint srcLength, uint dstLength, int samples/*= 32*/)
{ {
nvCheck(srcLength >= dstLength); // @@ Upsampling not implemented!
nvDebugCheck(samples > 0); nvDebugCheck(samples > 0);
float scale = float(dstLength) / float(srcLength); const float scale = float(dstLength) / float(srcLength);
const float iscale = 1.0f / scale; const float iscale = 1.0f / scale;
if (scale > 1) {
// Upsampling.
samples = 1;
scale = 1;
}
m_length = dstLength; m_length = dstLength;
m_width = f.width() * iscale; m_width = f.width() * iscale;
m_windowSize = (int)ceilf(m_width * 2) + 1; m_windowSize = (int)ceilf(m_width * 2) + 1;
@ -582,7 +577,6 @@ PolyphaseKernel::PolyphaseKernel(const Filter & f, uint srcLength, uint dstLengt
m_data[i * m_windowSize + j] /= total; m_data[i * m_windowSize + j] /= total;
} }
} }
} }
PolyphaseKernel::~PolyphaseKernel() PolyphaseKernel::~PolyphaseKernel()
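As an annotation on the constructor above: for each destination texel the kernel stores windowSize weights, sampled from the filter around that texel's center in source space and normalized so they sum to 1; the scale clamp that 2.0.4 adds is what lets the same code handle upsampling. A hedged standalone sketch of the weight computation for one destination index, with an illustrative tent filter standing in for the Filter class:

#include <cmath>
#include <vector>

// Illustrative stand-in for Filter::evaluate(): a tent (triangle) filter of width 1.
static float tent(float x) { x = std::fabs(x); return x < 1.0f ? 1.0f - x : 0.0f; }

std::vector<float> polyphaseRow(int i, int srcLength, int dstLength, float filterWidth)
{
    float scale = float(dstLength) / float(srcLength);
    const float iscale = 1.0f / scale;
    if (scale > 1.0f) scale = 1.0f;                            // upsampling clamp, as on the 2.0.4 side

    const float width = filterWidth * iscale;                  // filter support in source space
    const int windowSize = int(std::ceil(width * 2.0f)) + 1;   // same bookkeeping as the hunk above

    const float center = (float(i) + 0.5f) * iscale;           // dst texel center mapped into src
    const int left = int(std::floor(center - width));

    std::vector<float> w(windowSize, 0.0f);
    float total = 0.0f;
    for (int j = 0; j < windowSize; j++) {
        const float sample = float(left + j) + 0.5f;
        w[j] = tent((sample - center) * scale);                // evaluate the filter in dst space
        total += w[j];
    }
    for (int j = 0; j < windowSize; j++) w[j] /= total;        // normalize, as the loop above does
    return w;
}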

View File

@ -376,7 +376,7 @@ FloatImage * FloatImage::fastDownSample() const
{ {
const uint n = w * h; const uint n = w * h;
if ((m_width * m_height) & 1) if (n & 1)
{ {
const float scale = 1.0f / (2 * n + 1); const float scale = 1.0f / (2 * n + 1);
@ -540,18 +540,73 @@ FloatImage * FloatImage::fastDownSample() const
return dst_image.release(); return dst_image.release();
} }
/*
/// Downsample applying a 1D kernel separately in each dimension.
FloatImage * FloatImage::downSample(const Kernel1 & kernel, WrapMode wm) const
{
const uint w = max(1, m_width / 2);
const uint h = max(1, m_height / 2);
return downSample(kernel, w, h, wm);
}
/// Downsample applying a 1D kernel separately in each dimension.
FloatImage * FloatImage::downSample(const Kernel1 & kernel, uint w, uint h, WrapMode wm) const
{
nvCheck(!(kernel.windowSize() & 1)); // Make sure that kernel m_width is even.
AutoPtr<FloatImage> tmp_image( new FloatImage() );
tmp_image->allocate(m_componentNum, w, m_height);
AutoPtr<FloatImage> dst_image( new FloatImage() );
dst_image->allocate(m_componentNum, w, h);
const float xscale = float(m_width) / float(w);
const float yscale = float(m_height) / float(h);
for(uint c = 0; c < m_componentNum; c++) {
float * tmp_channel = tmp_image->channel(c);
for(uint y = 0; y < m_height; y++) {
for(uint x = 0; x < w; x++) {
float sum = this->applyKernelHorizontal(&kernel, uint(x*xscale), y, c, wm);
const uint tmp_index = tmp_image->index(x, y);
tmp_channel[tmp_index] = sum;
}
}
float * dst_channel = dst_image->channel(c);
for(uint y = 0; y < h; y++) {
for(uint x = 0; x < w; x++) {
float sum = tmp_image->applyKernelVertical(&kernel, uint(x*xscale), uint(y*yscale), c, wm);
const uint dst_index = dst_image->index(x, y);
dst_channel[dst_index] = sum;
}
}
}
return dst_image.release();
}
*/
/// Downsample applying a 1D kernel separately in each dimension. /// Downsample applying a 1D kernel separately in each dimension.
FloatImage * FloatImage::downSample(const Filter & filter, WrapMode wm) const FloatImage * FloatImage::downSample(const Filter & filter, WrapMode wm) const
{ {
const uint w = max(1, m_width / 2); const uint w = max(1, m_width / 2);
const uint h = max(1, m_height / 2); const uint h = max(1, m_height / 2);
return resize(filter, w, h, wm); return downSample(filter, w, h, wm);
} }
/// Downsample applying a 1D kernel separately in each dimension. /// Downsample applying a 1D kernel separately in each dimension.
FloatImage * FloatImage::resize(const Filter & filter, uint w, uint h, WrapMode wm) const FloatImage * FloatImage::downSample(const Filter & filter, uint w, uint h, WrapMode wm) const
{ {
// @@ Use monophase filters when frac(m_width / w) == 0 // @@ Use monophase filters when frac(m_width / w) == 0
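A note on structure: the commented-out Kernel1 path kept on the 2.0.2 side and the Filter/PolyphaseKernel path that replaces it are both separable, i.e. every row is filtered into a temporary image and then the columns of that temporary are filtered. A small hedged sketch of the two-pass idea with a plain 2x box filter on one float channel (illustrative only, not the FloatImage code):

#include <vector>

std::vector<float> boxDownSample2x(const std::vector<float> & src, int w, int h)
{
    const int dw = w / 2, dh = h / 2;                 // assumes even w and h for brevity

    // Pass 1: horizontal, average neighbouring column pairs of every source row.
    std::vector<float> tmp(dw * h);
    for (int y = 0; y < h; y++)
        for (int x = 0; x < dw; x++)
            tmp[y * dw + x] = 0.5f * (src[y * w + 2 * x] + src[y * w + 2 * x + 1]);

    // Pass 2: vertical, average neighbouring row pairs of the temporary image.
    std::vector<float> dst(dw * dh);
    for (int y = 0; y < dh; y++)
        for (int x = 0; x < dw; x++)
            dst[y * dw + x] = 0.5f * (tmp[2 * y * dw + x] + tmp[(2 * y + 1) * dw + x]);

    return dst;
}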

View File

@ -63,7 +63,7 @@ public:
NVIMAGE_API FloatImage * fastDownSample() const; NVIMAGE_API FloatImage * fastDownSample() const;
NVIMAGE_API FloatImage * downSample(const Filter & filter, WrapMode wm) const; NVIMAGE_API FloatImage * downSample(const Filter & filter, WrapMode wm) const;
NVIMAGE_API FloatImage * resize(const Filter & filter, uint w, uint h, WrapMode wm) const; NVIMAGE_API FloatImage * downSample(const Filter & filter, uint w, uint h, WrapMode wm) const;
//NVIMAGE_API FloatImage * downSample(const Kernel1 & filter, WrapMode wm) const; //NVIMAGE_API FloatImage * downSample(const Kernel1 & filter, WrapMode wm) const;
//NVIMAGE_API FloatImage * downSample(const Kernel1 & filter, uint w, uint h, WrapMode wm) const; //NVIMAGE_API FloatImage * downSample(const Kernel1 & filter, uint w, uint h, WrapMode wm) const;
@ -226,15 +226,11 @@ inline uint FloatImage::indexRepeat(int x, int y) const
inline uint FloatImage::indexMirror(int x, int y) const inline uint FloatImage::indexMirror(int x, int y) const
{ {
if (m_width == 1) x = 0;
x = abs(x); x = abs(x);
while (x >= m_width) { while (x >= m_width) {
x = abs(m_width + m_width - x - 2); x = abs(m_width + m_width - x - 2);
} }
if (m_height == 1) y = 0;
y = abs(y); y = abs(y);
while (y >= m_height) { while (y >= m_height) {
y = abs(m_height + m_height - y - 2); y = abs(m_height + m_height - y - 2);
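The two guards removed on the right-hand side are the 2.0.4 fix for issue 65: when a dimension is 1, the reflection below never terminates, because abs(1 + 1 - x - 2) == x for every x >= 1. A standalone sketch of the mirror wrap for one axis with the guard kept:

#include <cstdlib>

inline int mirrorWrap(int x, int width)
{
    if (width == 1) return 0;                   // the 2.0.4 guard; without it the loop spins forever
    x = std::abs(x);
    while (x >= width) {
        x = std::abs(width + width - x - 2);    // reflect around the last texel
    }
    return x;
}

// Example: width = 4 maps x = -2,-1,0,1,2,3,4,5,6,7 to 2,1,0,1,2,3,2,1,0,1.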

View File

@ -16,7 +16,6 @@ http://www.efg2.com/Lab/Library/ImageProcessing/DHALF.TXT
#include <nvimage/Image.h> #include <nvimage/Image.h>
#include <nvimage/Quantize.h> #include <nvimage/Quantize.h>
#include <nvimage/PixelFormat.h>
using namespace nv; using namespace nv;
@ -48,20 +47,94 @@ void nv::Quantize::BinaryAlpha( Image * image, int alpha_threshold /*= 127*/ )
// Simple quantization. // Simple quantization.
void nv::Quantize::RGB16( Image * image ) void nv::Quantize::RGB16( Image * image )
{ {
Truncate(image, 5, 6, 5, 8); nvCheck(image != NULL);
const uint w = image->width();
const uint h = image->height();
for(uint y = 0; y < h; y++) {
for(uint x = 0; x < w; x++) {
Color32 pixel32 = image->pixel(x, y);
// Convert to 16 bit and back to 32 using regular bit expansion.
Color32 pixel16 = toColor32( toColor16(pixel32) );
// Store color.
image->pixel(x, y) = pixel16;
}
}
} }
// Alpha quantization. // Alpha quantization.
void nv::Quantize::Alpha4( Image * image ) void nv::Quantize::Alpha4( Image * image )
{ {
Truncate(image, 8, 8, 8, 4); nvCheck(image != NULL);
const uint w = image->width();
const uint h = image->height();
for(uint y = 0; y < h; y++) {
for(uint x = 0; x < w; x++) {
Color32 pixel = image->pixel(x, y);
// Convert to 4 bit using regular bit expansion.
pixel.a = (pixel.a & 0xF0) | ((pixel.a & 0xF0) >> 4);
// Store color.
image->pixel(x, y) = pixel;
}
}
} }
// Error diffusion. Floyd Steinberg. // Error diffusion. Floyd Steinberg.
void nv::Quantize::FloydSteinberg_RGB16( Image * image ) void nv::Quantize::FloydSteinberg_RGB16( Image * image )
{ {
FloydSteinberg(image, 5, 6, 5, 8); nvCheck(image != NULL);
const uint w = image->width();
const uint h = image->height();
// @@ Use fixed point?
Vector3 * row0 = new Vector3[w+2];
Vector3 * row1 = new Vector3[w+2];
memset(row0, 0, sizeof(Vector3)*(w+2));
memset(row1, 0, sizeof(Vector3)*(w+2));
for(uint y = 0; y < h; y++) {
for(uint x = 0; x < w; x++) {
Color32 pixel32 = image->pixel(x, y);
// Add error. // @@ We shouldn't clamp here!
pixel32.r = clamp(int(pixel32.r) + int(row0[1+x].x()), 0, 255);
pixel32.g = clamp(int(pixel32.g) + int(row0[1+x].y()), 0, 255);
pixel32.b = clamp(int(pixel32.b) + int(row0[1+x].z()), 0, 255);
// Convert to 16 bit. @@ Use regular clamp?
Color32 pixel16 = toColor32( toColor16(pixel32) );
// Store color.
image->pixel(x, y) = pixel16;
// Compute new error.
Vector3 diff(float(pixel32.r - pixel16.r), float(pixel32.g - pixel16.g), float(pixel32.b - pixel16.b));
// Propagate new error.
row0[1+x+1] += 7.0f / 16.0f * diff;
row1[1+x-1] += 3.0f / 16.0f * diff;
row1[1+x+0] += 5.0f / 16.0f * diff;
row1[1+x+1] += 1.0f / 16.0f * diff;
}
swap(row0, row1);
memset(row1, 0, sizeof(Vector3)*(w+2));
}
delete [] row0;
delete [] row1;
} }
@ -115,90 +188,34 @@ void nv::Quantize::FloydSteinberg_BinaryAlpha( Image * image, int alpha_threshol
// Error diffusion. Floyd Steinberg. // Error diffusion. Floyd Steinberg.
void nv::Quantize::FloydSteinberg_Alpha4( Image * image ) void nv::Quantize::FloydSteinberg_Alpha4( Image * image )
{
FloydSteinberg(image, 8, 8, 8, 4);
}
void nv::Quantize::Truncate(Image * image, uint rsize, uint gsize, uint bsize, uint asize)
{ {
nvCheck(image != NULL); nvCheck(image != NULL);
const uint w = image->width(); const uint w = image->width();
const uint h = image->height(); const uint h = image->height();
// @@ Use fixed point?
float * row0 = new float[(w+2)];
float * row1 = new float[(w+2)];
memset(row0, 0, sizeof(float)*(w+2));
memset(row1, 0, sizeof(float)*(w+2));
for(uint y = 0; y < h; y++) { for(uint y = 0; y < h; y++) {
for(uint x = 0; x < w; x++) { for(uint x = 0; x < w; x++) {
Color32 pixel = image->pixel(x, y); Color32 pixel = image->pixel(x, y);
// Convert to our desired size, and reconstruct. // Add error.
pixel.r = PixelFormat::convert(pixel.r, 8, rsize); int alpha = int(pixel.a) + int(row0[1+x]);
pixel.r = PixelFormat::convert(pixel.r, rsize, 8);
pixel.g = PixelFormat::convert(pixel.g, 8, gsize); // Convert to 4 bit using regular bit expansion.
pixel.g = PixelFormat::convert(pixel.g, gsize, 8); pixel.a = (pixel.a & 0xF0) | ((pixel.a & 0xF0) >> 4);
pixel.b = PixelFormat::convert(pixel.b, 8, bsize);
pixel.b = PixelFormat::convert(pixel.b, bsize, 8);
pixel.a = PixelFormat::convert(pixel.a, 8, asize);
pixel.a = PixelFormat::convert(pixel.a, asize, 8);
// Store color. // Store color.
image->pixel(x, y) = pixel; image->pixel(x, y) = pixel;
}
}
}
// Error diffusion. Floyd Steinberg.
void nv::Quantize::FloydSteinberg(Image * image, uint rsize, uint gsize, uint bsize, uint asize)
{
nvCheck(image != NULL);
const uint w = image->width();
const uint h = image->height();
Vector4 * row0 = new Vector4[w+2];
Vector4 * row1 = new Vector4[w+2];
memset(row0, 0, sizeof(Vector4)*(w+2));
memset(row1, 0, sizeof(Vector4)*(w+2));
for (uint y = 0; y < h; y++) {
for (uint x = 0; x < w; x++) {
Color32 pixel = image->pixel(x, y);
// Add error.
pixel.r = clamp(int(pixel.r) + int(row0[1+x].x()), 0, 255);
pixel.g = clamp(int(pixel.g) + int(row0[1+x].y()), 0, 255);
pixel.b = clamp(int(pixel.b) + int(row0[1+x].z()), 0, 255);
pixel.a = clamp(int(pixel.a) + int(row0[1+x].w()), 0, 255);
int r = pixel.r;
int g = pixel.g;
int b = pixel.b;
int a = pixel.a;
// Convert to our desired size, and reconstruct.
r = PixelFormat::convert(r, 8, rsize);
r = PixelFormat::convert(r, rsize, 8);
g = PixelFormat::convert(g, 8, gsize);
g = PixelFormat::convert(g, gsize, 8);
b = PixelFormat::convert(b, 8, bsize);
b = PixelFormat::convert(b, bsize, 8);
a = PixelFormat::convert(a, 8, asize);
a = PixelFormat::convert(a, asize, 8);
// Store color.
image->pixel(x, y) = Color32(r, g, b, a);
// Compute new error. // Compute new error.
Vector4 diff(float(int(pixel.r) - r), float(int(pixel.g) - g), float(int(pixel.b) - b), float(int(pixel.a) - a)); float diff = float(alpha - pixel.a);
// Propagate new error. // Propagate new error.
row0[1+x+1] += 7.0f / 16.0f * diff; row0[1+x+1] += 7.0f / 16.0f * diff;
@ -208,9 +225,10 @@ void nv::Quantize::FloydSteinberg(Image * image, uint rsize, uint gsize, uint bs
} }
swap(row0, row1); swap(row0, row1);
memset(row1, 0, sizeof(Vector4)*(w+2)); memset(row1, 0, sizeof(float)*(w+2));
} }
delete [] row0; delete [] row0;
delete [] row1; delete [] row1;
} }
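Both sides of this file quantize a channel the same way: drop the low bits, then reconstruct 8 bits by replicating the kept high bits into the vacated low bits. 2.0.4 routes every channel through PixelFormat::convert with arbitrary bit counts, while 2.0.2 hard-codes the 565 and 4-bit alpha cases. A hedged standalone sketch of that truncate-and-expand step (helper names are illustrative, valid for 4 to 8 target bits):

#include <cstdio>

inline unsigned int truncateTo(unsigned int v8, unsigned int bits)
{
    return v8 >> (8 - bits);                            // keep only the top 'bits' bits
}

inline unsigned int expandFrom(unsigned int v, unsigned int bits)
{
    // Bit replication, e.g. 5-bit abcde -> 8-bit abcdeabc; needs bits >= 4 in this simple form.
    return (v << (8 - bits)) | (v >> (2 * bits - 8));
}

int main()
{
    const unsigned int g = 173;                                        // green of RGB565: 6 bits
    const unsigned int a = 0x37;                                       // DXT3 explicit alpha: 4 bits
    std::printf("g: %u -> %u\n", g, expandFrom(truncateTo(g, 6), 6));  // 173 -> 174
    std::printf("a: 0x%02X -> 0x%02X\n", a, expandFrom(truncateTo(a, 4), 4));  // 0x37 -> 0x33, like (a & 0xF0) | ((a & 0xF0) >> 4)
    return 0;
}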

View File

@ -17,9 +17,6 @@ namespace nv
void FloydSteinberg_BinaryAlpha(Image * img, int alpha_threshold = 127); void FloydSteinberg_BinaryAlpha(Image * img, int alpha_threshold = 127);
void FloydSteinberg_Alpha4(Image * img); void FloydSteinberg_Alpha4(Image * img);
void Truncate(Image * image, uint rsize, uint gsize, uint bsize, uint asize);
void FloydSteinberg(Image * image, uint rsize, uint gsize, uint bsize, uint asize);
// @@ Add palette quantization algorithms! // @@ Add palette quantization algorithms!
} }
} }

View File

@ -13,10 +13,10 @@ SET(NVTT_SRCS
CompressDXT.cpp CompressDXT.cpp
CompressRGB.h CompressRGB.h
CompressRGB.cpp CompressRGB.cpp
FastCompressDXT.h
FastCompressDXT.cpp
QuickCompressDXT.h QuickCompressDXT.h
QuickCompressDXT.cpp QuickCompressDXT.cpp
OptimalCompressDXT.h
OptimalCompressDXT.cpp
SingleColorLookup.h SingleColorLookup.h
CompressionOptions.h CompressionOptions.h
CompressionOptions.cpp CompressionOptions.cpp

View File

@ -29,8 +29,8 @@
#include "nvtt.h" #include "nvtt.h"
#include "CompressDXT.h" #include "CompressDXT.h"
#include "FastCompressDXT.h"
#include "QuickCompressDXT.h" #include "QuickCompressDXT.h"
#include "OptimalCompressDXT.h"
#include "CompressionOptions.h" #include "CompressionOptions.h"
#include "OutputOptions.h" #include "OutputOptions.h"
@ -57,33 +57,26 @@ using namespace nv;
using namespace nvtt; using namespace nvtt;
nv::FastCompressor::FastCompressor() : m_image(NULL), m_alphaMode(AlphaMode_None) void nv::fastCompressDXT1(const Image * image, const OutputOptions::Private & outputOptions)
{ {
} const uint w = image->width();
const uint h = image->height();
nv::FastCompressor::~FastCompressor()
{
}
void nv::FastCompressor::setImage(const Image * image, nvtt::AlphaMode alphaMode)
{
m_image = image;
m_alphaMode = alphaMode;
}
void nv::FastCompressor::compressDXT1(const OutputOptions::Private & outputOptions)
{
const uint w = m_image->width();
const uint h = m_image->height();
ColorBlock rgba; ColorBlock rgba;
BlockDXT1 block; BlockDXT1 block;
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
QuickCompress::compressDXT1(rgba, &block); if (rgba.isSingleColor())
{
QuickCompress::compressDXT1(rgba.color(0), &block);
}
else
{
QuickCompress::compressDXT1(rgba, &block);
}
if (outputOptions.outputHandler != NULL) { if (outputOptions.outputHandler != NULL) {
outputOptions.outputHandler->writeData(&block, sizeof(block)); outputOptions.outputHandler->writeData(&block, sizeof(block));
@ -93,19 +86,27 @@ void nv::FastCompressor::compressDXT1(const OutputOptions::Private & outputOptio
} }
void nv::FastCompressor::compressDXT1a(const OutputOptions::Private & outputOptions) void nv::fastCompressDXT1a(const Image * image, const OutputOptions::Private & outputOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock rgba; ColorBlock rgba;
BlockDXT1 block; BlockDXT1 block;
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
QuickCompress::compressDXT1a(rgba, &block); // @@ We could do better here: check for single RGB, but varying alpha.
if (rgba.isSingleColor())
{
QuickCompress::compressDXT1a(rgba.color(0), &block);
}
else
{
QuickCompress::compressDXT1a(rgba, &block);
}
if (outputOptions.outputHandler != NULL) { if (outputOptions.outputHandler != NULL) {
outputOptions.outputHandler->writeData(&block, sizeof(block)); outputOptions.outputHandler->writeData(&block, sizeof(block));
@ -115,18 +116,17 @@ void nv::FastCompressor::compressDXT1a(const OutputOptions::Private & outputOpti
} }
void nv::FastCompressor::compressDXT3(const nvtt::OutputOptions::Private & outputOptions) void nv::fastCompressDXT3(const Image * image, const nvtt::OutputOptions::Private & outputOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock rgba; ColorBlock rgba;
BlockDXT3 block; BlockDXT3 block;
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
QuickCompress::compressDXT3(rgba, &block); QuickCompress::compressDXT3(rgba, &block);
if (outputOptions.outputHandler != NULL) { if (outputOptions.outputHandler != NULL) {
@ -137,19 +137,19 @@ void nv::FastCompressor::compressDXT3(const nvtt::OutputOptions::Private & outpu
} }
void nv::FastCompressor::compressDXT5(const nvtt::OutputOptions::Private & outputOptions) void nv::fastCompressDXT5(const Image * image, const nvtt::OutputOptions::Private & outputOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock rgba; ColorBlock rgba;
BlockDXT5 block; BlockDXT5 block;
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
//QuickCompress::compressDXT5(rgba, &block); // @@ Use fast version!!
QuickCompress::compressDXT5(rgba, &block, 0); nv::compressBlock_BoundsRange(rgba, &block);
if (outputOptions.outputHandler != NULL) { if (outputOptions.outputHandler != NULL) {
outputOptions.outputHandler->writeData(&block, sizeof(block)); outputOptions.outputHandler->writeData(&block, sizeof(block));
@ -159,21 +159,23 @@ void nv::FastCompressor::compressDXT5(const nvtt::OutputOptions::Private & outpu
} }
void nv::FastCompressor::compressDXT5n(const nvtt::OutputOptions::Private & outputOptions) void nv::fastCompressDXT5n(const Image * image, const nvtt::OutputOptions::Private & outputOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock rgba; ColorBlock rgba;
BlockDXT5 block; BlockDXT5 block;
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
// copy X coordinate to alpha channel and Y coordinate to green channel.
rgba.swizzleDXT5n(); rgba.swizzleDXT5n();
QuickCompress::compressDXT5(rgba, &block, 0); //QuickCompress::compressDXT5(rgba, &block); // @@ Use fast version!!
nv::compressBlock_BoundsRange(rgba, &block);
if (outputOptions.outputHandler != NULL) { if (outputOptions.outputHandler != NULL) {
outputOptions.outputHandler->writeData(&block, sizeof(block)); outputOptions.outputHandler->writeData(&block, sizeof(block));
@ -183,28 +185,42 @@ void nv::FastCompressor::compressDXT5n(const nvtt::OutputOptions::Private & outp
} }
nv::SlowCompressor::SlowCompressor() : m_image(NULL), m_alphaMode(AlphaMode_None) void nv::fastCompressBC4(const Image * image, const nvtt::OutputOptions::Private & outputOptions)
{ {
// @@ TODO
// compress red channel (X)
} }
nv::SlowCompressor::~SlowCompressor()
void nv::fastCompressBC5(const Image * image, const nvtt::OutputOptions::Private & outputOptions)
{ {
// @@ TODO
// compress red, green channels (X,Y)
} }
void nv::SlowCompressor::setImage(const Image * image, nvtt::AlphaMode alphaMode)
void nv::doPrecomputation()
{ {
m_image = image; static bool done = false; // @@ Stop using statics for reentrancy. Although the worst that could happen is that this stuff is precomputed multiple times.
m_alphaMode = alphaMode;
if (!done)
{
done = true;
squish::FastClusterFit::DoPrecomputation();
}
} }
void nv::SlowCompressor::compressDXT1(const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions)
void nv::compressDXT1(const Image * image, const OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock rgba; ColorBlock rgba;
BlockDXT1 block; BlockDXT1 block;
doPrecomputation();
//squish::WeightedClusterFit fit; //squish::WeightedClusterFit fit;
//squish::ClusterFit fit; //squish::ClusterFit fit;
squish::FastClusterFit fit; squish::FastClusterFit fit;
@ -213,11 +229,11 @@ void nv::SlowCompressor::compressDXT1(const CompressionOptions::Private & compre
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
if (rgba.isSingleColor()) if (rgba.isSingleColor())
{ {
OptimalCompress::compressDXT1(rgba.color(0), &block); QuickCompress::compressDXT1(rgba.color(0), &block);
} }
else else
{ {
@ -234,10 +250,10 @@ void nv::SlowCompressor::compressDXT1(const CompressionOptions::Private & compre
} }
void nv::SlowCompressor::compressDXT1a(const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) void nv::compressDXT1a(const Image * image, const OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock rgba; ColorBlock rgba;
BlockDXT1 block; BlockDXT1 block;
@ -248,20 +264,11 @@ void nv::SlowCompressor::compressDXT1a(const CompressionOptions::Private & compr
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
bool anyAlpha = false; if (rgba.isSingleColor())
bool allAlpha = true;
for (uint i = 0; i < 16; i++)
{ {
if (rgba.color(i).a < 128) anyAlpha = true; QuickCompress::compressDXT1a(rgba.color(0), &block);
else allAlpha = false;
}
if ((!anyAlpha && rgba.isSingleColor() || allAlpha))
{
OptimalCompress::compressDXT1a(rgba.color(0), &block);
} }
else else
{ {
@ -278,37 +285,29 @@ void nv::SlowCompressor::compressDXT1a(const CompressionOptions::Private & compr
} }
void nv::SlowCompressor::compressDXT3(const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) void nv::compressDXT3(const Image * image, const OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock rgba; ColorBlock rgba;
BlockDXT3 block; BlockDXT3 block;
squish::WeightedClusterFit fit; squish::WeightedClusterFit fit;
//squish::FastClusterFit fit;
fit.SetMetric(compressionOptions.colorWeight.x(), compressionOptions.colorWeight.y(), compressionOptions.colorWeight.z()); fit.SetMetric(compressionOptions.colorWeight.x(), compressionOptions.colorWeight.y(), compressionOptions.colorWeight.z());
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
// Compress explicit alpha. // Compress explicit alpha.
OptimalCompress::compressDXT3A(rgba, &block.alpha); QuickCompress::compressDXT3A(rgba, &block.alpha);
// Compress color. // Compress color.
if (rgba.isSingleColor()) squish::ColourSet colours((uint8 *)rgba.colors(), squish::kWeightColourByAlpha);
{ fit.SetColourSet(&colours, 0);
OptimalCompress::compressDXT1(rgba.color(0), &block.color); fit.Compress(&block.color);
}
else
{
squish::ColourSet colours((uint8 *)rgba.colors(), squish::kWeightColourByAlpha);
fit.SetColourSet(&colours, 0);
fit.Compress(&block.color);
}
if (outputOptions.outputHandler != NULL) { if (outputOptions.outputHandler != NULL) {
outputOptions.outputHandler->writeData(&block, sizeof(block)); outputOptions.outputHandler->writeData(&block, sizeof(block));
@ -317,10 +316,10 @@ void nv::SlowCompressor::compressDXT3(const CompressionOptions::Private & compre
} }
} }
void nv::SlowCompressor::compressDXT5(const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) void nv::compressDXT5(const Image * image, const OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock rgba; ColorBlock rgba;
BlockDXT5 block; BlockDXT5 block;
@ -331,12 +330,12 @@ void nv::SlowCompressor::compressDXT5(const CompressionOptions::Private & compre
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
// Compress alpha. // Compress alpha.
if (compressionOptions.quality == Quality_Highest) if (compressionOptions.quality == Quality_Highest)
{ {
OptimalCompress::compressDXT5A(rgba, &block.alpha); compressBlock_BruteForce(rgba, &block.alpha);
} }
else else
{ {
@ -344,16 +343,9 @@ void nv::SlowCompressor::compressDXT5(const CompressionOptions::Private & compre
} }
// Compress color. // Compress color.
if (rgba.isSingleColor()) squish::ColourSet colours((uint8 *)rgba.colors(), squish::kWeightColourByAlpha);
{ fit.SetColourSet(&colours, 0);
OptimalCompress::compressDXT1(rgba.color(0), &block.color); fit.Compress(&block.color);
}
else
{
squish::ColourSet colours((uint8 *)rgba.colors(), squish::kWeightColourByAlpha);
fit.SetColourSet(&colours, 0);
fit.Compress(&block.color);
}
if (outputOptions.outputHandler != NULL) { if (outputOptions.outputHandler != NULL) {
outputOptions.outputHandler->writeData(&block, sizeof(block)); outputOptions.outputHandler->writeData(&block, sizeof(block));
@ -363,25 +355,28 @@ void nv::SlowCompressor::compressDXT5(const CompressionOptions::Private & compre
} }
void nv::SlowCompressor::compressDXT5n(const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) void nv::compressDXT5n(const Image * image, const OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock rgba; ColorBlock rgba;
BlockDXT5 block; BlockDXT5 block;
doPrecomputation();
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
// copy X coordinate to green channel and Y coordinate to alpha channel.
rgba.swizzleDXT5n(); rgba.swizzleDXT5n();
// Compress X. // Compress X.
if (compressionOptions.quality == Quality_Highest) if (compressionOptions.quality == Quality_Highest)
{ {
OptimalCompress::compressDXT5A(rgba, &block.alpha); compressBlock_BruteForce(rgba, &block.alpha);
} }
else else
{ {
@ -389,7 +384,7 @@ void nv::SlowCompressor::compressDXT5n(const CompressionOptions::Private & compr
} }
// Compress Y. // Compress Y.
OptimalCompress::compressDXT1G(rgba, &block.color); QuickCompress::compressDXT1G(rgba, &block.color);
if (outputOptions.outputHandler != NULL) { if (outputOptions.outputHandler != NULL) {
outputOptions.outputHandler->writeData(&block, sizeof(block)); outputOptions.outputHandler->writeData(&block, sizeof(block));
@ -399,10 +394,10 @@ void nv::SlowCompressor::compressDXT5n(const CompressionOptions::Private & compr
} }
void nv::SlowCompressor::compressBC4(const CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions) void nv::compressBC4(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock rgba; ColorBlock rgba;
AlphaBlockDXT5 block; AlphaBlockDXT5 block;
@ -410,11 +405,11 @@ void nv::SlowCompressor::compressBC4(const CompressionOptions::Private & compres
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
rgba.init(m_image, x, y); rgba.init(image, x, y);
if (compressionOptions.quality == Quality_Highest) if (compressionOptions.quality == Quality_Highest)
{ {
OptimalCompress::compressDXT5A(rgba, &block); compressBlock_BruteForce(rgba, &block);
} }
else else
{ {
@ -429,10 +424,10 @@ void nv::SlowCompressor::compressBC4(const CompressionOptions::Private & compres
} }
void nv::SlowCompressor::compressBC5(const CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions) void nv::compressBC5(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{ {
const uint w = m_image->width(); const uint w = image->width();
const uint h = m_image->height(); const uint h = image->height();
ColorBlock xcolor; ColorBlock xcolor;
ColorBlock ycolor; ColorBlock ycolor;
@ -442,16 +437,16 @@ void nv::SlowCompressor::compressBC5(const CompressionOptions::Private & compres
for (uint y = 0; y < h; y += 4) { for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) { for (uint x = 0; x < w; x += 4) {
xcolor.init(m_image, x, y); xcolor.init(image, x, y);
xcolor.splatX(); xcolor.splatX();
ycolor.init(m_image, x, y); ycolor.init(image, x, y);
ycolor.splatY(); ycolor.splatY();
if (compressionOptions.quality == Quality_Highest) if (compressionOptions.quality == Quality_Highest)
{ {
OptimalCompress::compressDXT5A(xcolor, &block.x); compressBlock_BruteForce(xcolor, &block.x);
OptimalCompress::compressDXT5A(ycolor, &block.y); compressBlock_BruteForce(ycolor, &block.y);
} }
else else
{ {
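Every compressor in this file, fast or slow, shares the same driver shape: walk the image in 4x4 tiles, load each tile into a ColorBlock, optionally take the single-color shortcut, and hand the packed block to the output handler. A hedged sketch of that shared shape as a generic helper; it illustrates the pattern seen in the hunks above and is not code from the diff:

#include <nvimage/Image.h>
#include <nvimage/ColorBlock.h>
#include "nvtt.h"

template <typename Block, typename EncodeSingle, typename EncodeFull>
static void forEachBlock(const nv::Image * image, nvtt::OutputHandler * handler,
                         EncodeSingle encodeSingle, EncodeFull encodeFull)
{
    const unsigned int w = image->width();
    const unsigned int h = image->height();

    nv::ColorBlock rgba;
    Block block;

    for (unsigned int y = 0; y < h; y += 4) {
        for (unsigned int x = 0; x < w; x += 4) {
            rgba.init(image, x, y);                       // extract the 4x4 tile at (x, y)

            if (rgba.isSingleColor())
                encodeSingle(rgba.color(0), &block);      // cheap single-color shortcut
            else
                encodeFull(rgba, &block);                 // quick or cluster-fit compression

            if (handler != NULL)
                handler->writeData(&block, sizeof(block));
        }
    }
}

For instance, the fastCompressDXT1 body above is this loop with the two QuickCompress::compressDXT1 overloads plugged into the two slots.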

View File

@ -32,45 +32,25 @@ namespace nv
class Image; class Image;
class FloatImage; class FloatImage;
class FastCompressor void doPrecomputation();
{
public:
FastCompressor();
~FastCompressor();
void setImage(const Image * image, nvtt::AlphaMode alphaMode); // Fast compressors.
void fastCompressDXT1(const Image * image, const nvtt::OutputOptions::Private & outputOptions);
void fastCompressDXT1a(const Image * image, const nvtt::OutputOptions::Private & outputOptions);
void fastCompressDXT3(const Image * image, const nvtt::OutputOptions::Private & outputOptions);
void fastCompressDXT5(const Image * image, const nvtt::OutputOptions::Private & outputOptions);
void fastCompressDXT5n(const Image * image, const nvtt::OutputOptions::Private & outputOptions);
void fastCompressBC4(const Image * image, const nvtt::OutputOptions::Private & outputOptions);
void fastCompressBC5(const Image * image, const nvtt::OutputOptions::Private & outputOptions);
void compressDXT1(const nvtt::OutputOptions::Private & outputOptions); // Normal compressors.
void compressDXT1a(const nvtt::OutputOptions::Private & outputOptions); void compressDXT1(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const nvtt::CompressionOptions::Private & compressionOptions);
void compressDXT3(const nvtt::OutputOptions::Private & outputOptions); void compressDXT1a(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const nvtt::CompressionOptions::Private & compressionOptions);
void compressDXT5(const nvtt::OutputOptions::Private & outputOptions); void compressDXT3(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const nvtt::CompressionOptions::Private & compressionOptions);
void compressDXT5n(const nvtt::OutputOptions::Private & outputOptions); void compressDXT5(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const nvtt::CompressionOptions::Private & compressionOptions);
void compressDXT5n(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const nvtt::CompressionOptions::Private & compressionOptions);
private: void compressBC4(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const nvtt::CompressionOptions::Private & compressionOptions);
const Image * m_image; void compressBC5(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const nvtt::CompressionOptions::Private & compressionOptions);
nvtt::AlphaMode m_alphaMode;
};
class SlowCompressor
{
public:
SlowCompressor();
~SlowCompressor();
void setImage(const Image * image, nvtt::AlphaMode alphaMode);
void compressDXT1(const nvtt::CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions);
void compressDXT1a(const nvtt::CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions);
void compressDXT3(const nvtt::CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions);
void compressDXT5(const nvtt::CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions);
void compressDXT5n(const nvtt::CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions);
void compressBC4(const nvtt::CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions);
void compressBC5(const nvtt::CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions);
private:
const Image * m_image;
nvtt::AlphaMode m_alphaMode;
};
// External compressors. // External compressors.
#if defined(HAVE_S3QUANT) #if defined(HAVE_S3QUANT)

View File

@ -123,7 +123,7 @@ void nv::compressRGB(const Image * image, const OutputOptions::Private & outputO
} }
// Zero padding. // Zero padding.
for (uint x = w * byteCount; x < pitch; x++) for (uint x = w; x < pitch; x++)
{ {
*(dst + x) = 0; *(dst + x) = 0;
} }
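This is the RGB output fix listed in the 2.0.4 changelog (issue 49): row padding has to start after w * byteCount bytes of pixel data, not after w bytes. A small worked example with illustrative numbers; the real pitch and alignment come from the compression options:

#include <cstdio>

int main()
{
    const unsigned int w = 5, byteCount = 3;            // 5 RGB pixels, 3 bytes per pixel
    const unsigned int rowBytes = w * byteCount;         // 15 bytes of real pixel data
    const unsigned int pitch = (rowBytes + 3) & ~3u;     // 16, assuming 4-byte row alignment

    // 2.0.4 zeroes [rowBytes, pitch): one padding byte.
    // 2.0.2 zeroes [w, pitch): bytes 5..15, overwriting ten bytes of pixel data.
    std::printf("pad %u byte(s), not %u\n", pitch - rowBytes, pitch - w);
    return 0;
}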

View File

@ -34,7 +34,6 @@
#include <nvimage/Filter.h> #include <nvimage/Filter.h>
#include <nvimage/Quantize.h> #include <nvimage/Quantize.h>
#include <nvimage/NormalMap.h> #include <nvimage/NormalMap.h>
#include <nvimage/PixelFormat.h>
#include "Compressor.h" #include "Compressor.h"
#include "InputOptions.h" #include "InputOptions.h"
@ -42,6 +41,7 @@
#include "OutputOptions.h" #include "OutputOptions.h"
#include "CompressDXT.h" #include "CompressDXT.h"
#include "FastCompressDXT.h"
#include "CompressRGB.h" #include "CompressRGB.h"
#include "cuda/CudaUtils.h" #include "cuda/CudaUtils.h"
#include "cuda/CudaCompressDXT.h" #include "cuda/CudaCompressDXT.h"
@ -200,7 +200,7 @@ namespace nvtt
AutoPtr<FloatImage> m_floatImage; AutoPtr<FloatImage> m_floatImage;
}; };
} // nvtt namespace }
Compressor::Compressor() : m(*new Compressor::Private()) Compressor::Compressor() : m(*new Compressor::Private())
@ -422,7 +422,7 @@ bool Compressor::Private::compressMipmaps(uint f, const InputOptions::Private &
quantizeMipmap(mipmap, compressionOptions); quantizeMipmap(mipmap, compressionOptions);
compressMipmap(mipmap, inputOptions, compressionOptions, outputOptions); compressMipmap(mipmap, compressionOptions, outputOptions);
// Compute extents of next mipmap: // Compute extents of next mipmap:
w = max(1U, w / 2); w = max(1U, w / 2);
@ -571,7 +571,7 @@ void Compressor::Private::scaleMipmap(Mipmap & mipmap, const InputOptions::Priva
// Resize image. // Resize image.
BoxFilter boxFilter; BoxFilter boxFilter;
mipmap.setImage(mipmap.asFloatImage()->resize(boxFilter, w, h, (FloatImage::WrapMode)inputOptions.wrapMode)); mipmap.setImage(mipmap.asFloatImage()->downSample(boxFilter, w, h, (FloatImage::WrapMode)inputOptions.wrapMode));
} }
@ -618,6 +618,13 @@ void Compressor::Private::quantizeMipmap(Mipmap & mipmap, const CompressionOptio
{ {
nvDebugCheck(mipmap.asFixedImage() != NULL); nvDebugCheck(mipmap.asFixedImage() != NULL);
if (compressionOptions.enableColorDithering)
{
if (compressionOptions.format >= Format_DXT1 && compressionOptions.format <= Format_DXT5)
{
Quantize::FloydSteinberg_RGB16(mipmap.asMutableFixedImage());
}
}
if (compressionOptions.binaryAlpha) if (compressionOptions.binaryAlpha)
{ {
if (compressionOptions.enableAlphaDithering) if (compressionOptions.enableAlphaDithering)
@ -629,67 +636,30 @@ void Compressor::Private::quantizeMipmap(Mipmap & mipmap, const CompressionOptio
Quantize::BinaryAlpha(mipmap.asMutableFixedImage(), compressionOptions.alphaThreshold); Quantize::BinaryAlpha(mipmap.asMutableFixedImage(), compressionOptions.alphaThreshold);
} }
} }
else
if (compressionOptions.enableColorDithering || compressionOptions.enableAlphaDithering)
{ {
uint rsize = 8;
uint gsize = 8;
uint bsize = 8;
uint asize = 8;
if (compressionOptions.enableColorDithering)
{
if (compressionOptions.format >= Format_DXT1 && compressionOptions.format <= Format_DXT5)
{
rsize = 5;
gsize = 6;
bsize = 5;
}
else if (compressionOptions.format == Format_RGB)
{
uint rshift, gshift, bshift;
PixelFormat::maskShiftAndSize(compressionOptions.rmask, &rshift, &rsize);
PixelFormat::maskShiftAndSize(compressionOptions.gmask, &gshift, &gsize);
PixelFormat::maskShiftAndSize(compressionOptions.bmask, &bshift, &bsize);
}
}
if (compressionOptions.enableAlphaDithering) if (compressionOptions.enableAlphaDithering)
{ {
if (compressionOptions.format == Format_DXT3) if (compressionOptions.format == Format_DXT3)
{ {
asize = 4; Quantize::Alpha4(mipmap.asMutableFixedImage());
} }
else if (compressionOptions.format == Format_RGB) else if (compressionOptions.format == Format_DXT1a)
{ {
uint ashift; Quantize::BinaryAlpha(mipmap.asMutableFixedImage(), compressionOptions.alphaThreshold);
PixelFormat::maskShiftAndSize(compressionOptions.amask, &ashift, &asize);
} }
} }
if (compressionOptions.binaryAlpha)
{
asize = 8; // Already quantized.
}
Quantize::FloydSteinberg(mipmap.asMutableFixedImage(), rsize, gsize, bsize, asize);
} }
} }
// Compress the given mipmap. // Compress the given mipmap.
bool Compressor::Private::compressMipmap(const Mipmap & mipmap, const InputOptions::Private & inputOptions, const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) const bool Compressor::Private::compressMipmap(const Mipmap & mipmap, const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) const
{ {
const Image * image = mipmap.asFixedImage(); const Image * image = mipmap.asFixedImage();
nvDebugCheck(image != NULL); nvDebugCheck(image != NULL);
FastCompressor fast;
fast.setImage(image, inputOptions.alphaMode);
SlowCompressor slow;
slow.setImage(image, inputOptions.alphaMode);
if (compressionOptions.format == Format_RGBA || compressionOptions.format == Format_RGB) if (compressionOptions.format == Format_RGBA || compressionOptions.format == Format_RGB)
{ {
compressRGB(image, outputOptions, compressionOptions); compressRGB(image, outputOptions, compressionOptions);
@ -713,19 +683,18 @@ bool Compressor::Private::compressMipmap(const Mipmap & mipmap, const InputOptio
#endif #endif
if (compressionOptions.quality == Quality_Fastest) if (compressionOptions.quality == Quality_Fastest)
{ {
fast.compressDXT1(outputOptions); fastCompressDXT1(image, outputOptions);
} }
else else
{ {
if (cudaEnabled) if (cudaEnabled)
{ {
nvDebugCheck(cudaSupported); nvDebugCheck(cudaSupported);
cuda->setImage(image, inputOptions.alphaMode); cuda->compressDXT1(image, outputOptions, compressionOptions);
cuda->compressDXT1(compressionOptions, outputOptions);
} }
else else
{ {
slow.compressDXT1(compressionOptions, outputOptions); compressDXT1(image, outputOptions, compressionOptions);
} }
} }
} }
@ -733,18 +702,18 @@ bool Compressor::Private::compressMipmap(const Mipmap & mipmap, const InputOptio
{ {
if (compressionOptions.quality == Quality_Fastest) if (compressionOptions.quality == Quality_Fastest)
{ {
fast.compressDXT1a(outputOptions); fastCompressDXT1a(image, outputOptions);
} }
else else
{ {
if (cudaEnabled) if (cudaEnabled)
{ {
nvDebugCheck(cudaSupported); nvDebugCheck(cudaSupported);
/*cuda*/slow.compressDXT1a(compressionOptions, outputOptions); /*cuda*/compressDXT1a(image, outputOptions, compressionOptions);
} }
else else
{ {
slow.compressDXT1a(compressionOptions, outputOptions); compressDXT1a(image, outputOptions, compressionOptions);
} }
} }
} }
@ -752,19 +721,18 @@ bool Compressor::Private::compressMipmap(const Mipmap & mipmap, const InputOptio
{ {
if (compressionOptions.quality == Quality_Fastest) if (compressionOptions.quality == Quality_Fastest)
{ {
fast.compressDXT3(outputOptions); fastCompressDXT3(image, outputOptions);
} }
else else
{ {
if (cudaEnabled) if (cudaEnabled)
{ {
nvDebugCheck(cudaSupported); nvDebugCheck(cudaSupported);
cuda->setImage(image, inputOptions.alphaMode); cuda->compressDXT3(image, outputOptions, compressionOptions);
cuda->compressDXT3(compressionOptions, outputOptions);
} }
else else
{ {
slow.compressDXT3(compressionOptions, outputOptions); compressDXT3(image, outputOptions, compressionOptions);
} }
} }
} }
@ -772,19 +740,18 @@ bool Compressor::Private::compressMipmap(const Mipmap & mipmap, const InputOptio
{ {
if (compressionOptions.quality == Quality_Fastest) if (compressionOptions.quality == Quality_Fastest)
{ {
fast.compressDXT5(outputOptions); fastCompressDXT5(image, outputOptions);
} }
else else
{ {
if (cudaEnabled) if (cudaEnabled)
{ {
nvDebugCheck(cudaSupported); nvDebugCheck(cudaSupported);
cuda->setImage(image, inputOptions.alphaMode); cuda->compressDXT5(image, outputOptions, compressionOptions);
cuda->compressDXT5(compressionOptions, outputOptions);
} }
else else
{ {
slow.compressDXT5(compressionOptions, outputOptions); compressDXT5(image, outputOptions, compressionOptions);
} }
} }
} }
@ -792,20 +759,20 @@ bool Compressor::Private::compressMipmap(const Mipmap & mipmap, const InputOptio
{ {
if (compressionOptions.quality == Quality_Fastest) if (compressionOptions.quality == Quality_Fastest)
{ {
fast.compressDXT5n(outputOptions); fastCompressDXT5n(image, outputOptions);
} }
else else
{ {
slow.compressDXT5n(compressionOptions, outputOptions); compressDXT5n(image, outputOptions, compressionOptions);
} }
} }
else if (compressionOptions.format == Format_BC4) else if (compressionOptions.format == Format_BC4)
{ {
slow.compressBC4(compressionOptions, outputOptions); compressBC4(image, outputOptions, compressionOptions);
} }
else if (compressionOptions.format == Format_BC5) else if (compressionOptions.format == Format_BC5)
{ {
slow.compressBC5(compressionOptions, outputOptions); compressBC5(image, outputOptions, compressionOptions);
} }
return true; return true;
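On the quantizeMipmap hunk above: the 2.0.4 side derives per-channel target bit depths from the format and then runs one generic Quantize::FloydSteinberg pass, whereas 2.0.2 calls the fixed FloydSteinberg_RGB16/Alpha4 routines. A hedged standalone sketch of that selection step; the Format enum and helper name are placeholders, and the real code additionally derives the RGB sizes from the pixel masks via PixelFormat::maskShiftAndSize when the format is Format_RGB:

#include <cstdio>

enum Format { Format_RGB, Format_DXT1, Format_DXT1a, Format_DXT3, Format_DXT5 };  // placeholder ordering

struct BitSizes { unsigned int r, g, b, a; };

BitSizes pickDitherSizes(Format format, bool colorDithering, bool alphaDithering, bool binaryAlpha)
{
    BitSizes s = { 8, 8, 8, 8 };                         // 8 bits means "leave the channel alone"

    if (colorDithering && format >= Format_DXT1 && format <= Format_DXT5) {
        s.r = 5; s.g = 6; s.b = 5;                       // DXT color endpoints are RGB565
    }
    if (alphaDithering && format == Format_DXT3) {
        s.a = 4;                                         // DXT3 stores explicit 4-bit alpha
    }
    if (binaryAlpha) {
        s.a = 8;                                         // binary alpha was already quantized earlier
    }
    return s;
}

int main()
{
    const BitSizes s = pickDitherSizes(Format_DXT3, true, true, false);
    std::printf("dither to r%u g%u b%u a%u\n", s.r, s.g, s.b, s.a);   // r5 g6 b5 a4
    return 0;
}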

View File

@ -60,7 +60,7 @@ namespace nvtt
void scaleMipmap(Mipmap & mipmap, const InputOptions::Private & inputOptions, uint w, uint h, uint d) const; void scaleMipmap(Mipmap & mipmap, const InputOptions::Private & inputOptions, uint w, uint h, uint d) const;
void processInputImage(Mipmap & mipmap, const InputOptions::Private & inputOptions) const; void processInputImage(Mipmap & mipmap, const InputOptions::Private & inputOptions) const;
void quantizeMipmap(Mipmap & mipmap, const CompressionOptions::Private & compressionOptions) const; void quantizeMipmap(Mipmap & mipmap, const CompressionOptions::Private & compressionOptions) const;
bool compressMipmap(const Mipmap & mipmap, const InputOptions::Private & inputOptions, const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) const; bool compressMipmap(const Mipmap & mipmap, const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) const;
public: public:

View File

@ -0,0 +1,456 @@
// Copyright NVIDIA Corporation 2007 -- Ignacio Castano <icastano@nvidia.com>
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
#include <nvmath/Color.h>
#include <nvimage/ColorBlock.h>
#include <nvimage/BlockDXT.h>
#include "FastCompressDXT.h"
#if defined(__SSE2__)
#include <emmintrin.h>
#endif
#if defined(__SSE__)
#include <xmmintrin.h>
#endif
#if defined(__MMX__)
#include <mmintrin.h>
#endif
#undef __VEC__
#if defined(__VEC__)
#include <altivec.h>
#undef bool
#endif
// Online Resources:
// - http://www.jasondorie.com/ImageLib.zip
// - http://homepage.hispeed.ch/rscheidegger/dri_experimental/s3tc_index.html
// - http://www.sjbrown.co.uk/?article=dxt
using namespace nv;
#if defined(__SSE2__) && 0
// @@ TODO
typedef __m128i VectorColor;
inline static __m128i loadColor(Color32 c)
{
return ...;
}
inline static __m128i absoluteDifference(__m128i a, __m128i b)
{
return ...;
}
inline uint colorDistance(__m128i a, __m128i b)
{
return 0;
}
#elif defined(__MMX__) && 0
typedef __m64 VectorColor;
inline static __m64 loadColor(Color32 c)
{
return _mm_unpacklo_pi8(_mm_cvtsi32_si64(c), _mm_setzero_si64());
}
inline static __m64 absoluteDifference(__m64 a, __m64 b)
{
// = |a-b| or |b-a|
return _mm_or_si64(_mm_subs_pu16(a, b), _mm_subs_pu16(b, a));
}
inline uint colorDistance(__m64 a, __m64 b)
{
union {
__m64 v;
uint16 part[4];
} s;
s.v = absoluteDifference(a, b);
// @@ This is very slow!
return s.part[0] + s.part[1] + s.part[2] + s.part[3];
}
#define vectorEnd _mm_empty
#elif defined(__VEC__)
typedef vector signed int VectorColor;
inline static vector signed int loadColor(Color32 c)
{
return (vector signed int) (c.r, c.g, c.b, c.a);
}
// Get the absolute distance between the given colors.
inline static uint colorDistance(vector signed int c0, vector signed int c1)
{
int result;
vector signed int v = vec_sums(vec_abs(vec_sub(c0, c1)), (vector signed int)0);
vec_ste(vec_splat(v, 3), 0, &result);
return result;
}
inline void vectorEnd()
{
}
#else
typedef Color32 VectorColor;
inline static Color32 loadColor(Color32 c)
{
return c;
}
inline static Color32 premultiplyAlpha(Color32 c)
{
Color32 pm;
pm.r = (c.r * c.a) >> 8;
pm.g = (c.g * c.a) >> 8;
pm.b = (c.b * c.a) >> 8;
pm.a = c.a;
return pm;
}
inline static uint sqr(uint s)
{
return s*s;
}
// Get the absolute distance between the given colors.
inline static uint colorDistance(Color32 c0, Color32 c1)
{
return sqr(c0.r - c1.r) + sqr(c0.g - c1.g) + sqr(c0.b - c1.b);
//return abs(c0.r - c1.r) + abs(c0.g - c1.g) + abs(c0.b - c1.b);
}
inline void vectorEnd()
{
}
#endif
inline static uint computeIndices(const ColorBlock & rgba, const Color32 palette[4])
{
const VectorColor vcolor0 = loadColor(palette[0]);
const VectorColor vcolor1 = loadColor(palette[1]);
const VectorColor vcolor2 = loadColor(palette[2]);
const VectorColor vcolor3 = loadColor(palette[3]);
uint indices = 0;
for(int i = 0; i < 16; i++) {
const VectorColor vcolor = loadColor(rgba.color(i));
uint d0 = colorDistance(vcolor0, vcolor);
uint d1 = colorDistance(vcolor1, vcolor);
uint d2 = colorDistance(vcolor2, vcolor);
uint d3 = colorDistance(vcolor3, vcolor);
uint b0 = d0 > d3;
uint b1 = d1 > d2;
uint b2 = d0 > d2;
uint b3 = d1 > d3;
uint b4 = d2 > d3;
uint x0 = b1 & b2;
uint x1 = b0 & b3;
uint x2 = b0 & b4;
indices |= (x2 | ((x0 | x1) << 1)) << (2 * i);
}
vectorEnd();
return indices;
}
// Compressor that uses bounding box.
void nv::compressBlock_BoundsRange(const ColorBlock & rgba, BlockDXT1 * block)
{
Color32 c0, c1;
rgba.boundsRange(&c1, &c0);
block->col0 = toColor16(c0);
block->col1 = toColor16(c1);
nvDebugCheck(block->col0.u > block->col1.u);
// Use 4 color mode only.
//if (block->col0.u < block->col1.u) {
// swap(block->col0.u, block->col1.u);
//}
Color32 palette[4];
block->evaluatePalette4(palette);
block->indices = computeIndices(rgba, palette);
}
// Encode DXT3 block.
void nv::compressBlock_BoundsRange(const ColorBlock & rgba, BlockDXT3 * block)
{
compressBlock_BoundsRange(rgba, &block->color);
compressBlock(rgba, &block->alpha);
}
// Encode DXT3 alpha block.
void nv::compressBlock(const ColorBlock & rgba, AlphaBlockDXT3 * block)
{
block->alpha0 = rgba.color(0).a >> 4;
block->alpha1 = rgba.color(1).a >> 4;
block->alpha2 = rgba.color(2).a >> 4;
block->alpha3 = rgba.color(3).a >> 4;
block->alpha4 = rgba.color(4).a >> 4;
block->alpha5 = rgba.color(5).a >> 4;
block->alpha6 = rgba.color(6).a >> 4;
block->alpha7 = rgba.color(7).a >> 4;
block->alpha8 = rgba.color(8).a >> 4;
block->alpha9 = rgba.color(9).a >> 4;
block->alphaA = rgba.color(10).a >> 4;
block->alphaB = rgba.color(11).a >> 4;
block->alphaC = rgba.color(12).a >> 4;
block->alphaD = rgba.color(13).a >> 4;
block->alphaE = rgba.color(14).a >> 4;
block->alphaF = rgba.color(15).a >> 4;
}
static uint computeAlphaIndices(const ColorBlock & rgba, AlphaBlockDXT5 * block)
{
uint8 alphas[8];
block->evaluatePalette(alphas);
uint totalError = 0;
for (uint i = 0; i < 16; i++)
{
uint8 alpha = rgba.color(i).a;
uint besterror = 256*256;
uint best = 8;
for(uint p = 0; p < 8; p++)
{
int d = alphas[p] - alpha;
uint error = d * d;
if (error < besterror)
{
besterror = error;
best = p;
}
}
nvDebugCheck(best < 8);
totalError += besterror;
block->setIndex(i, best);
}
return totalError;
}
static uint computeAlphaError(const ColorBlock & rgba, const AlphaBlockDXT5 * block)
{
uint8 alphas[8];
block->evaluatePalette(alphas);
uint totalError = 0;
for (uint i = 0; i < 16; i++)
{
uint8 alpha = rgba.color(i).a;
uint besterror = 256*256;
uint best;
for(uint p = 0; p < 8; p++)
{
int d = alphas[p] - alpha;
uint error = d * d;
if (error < besterror)
{
besterror = error;
best = p;
}
}
totalError += besterror;
}
return totalError;
}
void nv::compressBlock_BoundsRange(const ColorBlock & rgba, BlockDXT5 * block)
{
Color32 c0, c1;
rgba.boundsRangeAlpha(&c1, &c0);
block->color.col0 = toColor16(c0);
block->color.col1 = toColor16(c1);
nvDebugCheck(block->color.col0.u > block->color.col1.u);
Color32 palette[4];
block->color.evaluatePalette4(palette);
block->color.indices = computeIndices(rgba, palette);
nvDebugCheck(c0.a <= c1.a);
block->alpha.alpha0 = c0.a;
block->alpha.alpha1 = c1.a;
computeAlphaIndices(rgba, &block->alpha);
}
uint nv::compressBlock_BoundsRange(const ColorBlock & rgba, AlphaBlockDXT5 * block)
{
uint8 alpha0 = 0;
uint8 alpha1 = 255;
// Get min/max alpha.
for (uint i = 0; i < 16; i++)
{
uint8 alpha = rgba.color(i).a;
alpha0 = max(alpha0, alpha);
alpha1 = min(alpha1, alpha);
}
alpha0 = alpha0 - (alpha0 - alpha1) / 32;
alpha1 = alpha1 + (alpha0 - alpha1) / 32;
AlphaBlockDXT5 block0;
block0.alpha0 = alpha0;
block0.alpha1 = alpha1;
uint error0 = computeAlphaIndices(rgba, &block0);
AlphaBlockDXT5 block1;
block1.alpha0 = alpha1;
block1.alpha1 = alpha0;
uint error1 = computeAlphaIndices(rgba, &block1);
if (error0 < error1)
{
*block = block0;
return error0;
}
else
{
*block = block1;
return error1;
}
}
uint nv::compressBlock_BruteForce(const ColorBlock & rgba, AlphaBlockDXT5 * block)
{
uint8 mina = 255;
uint8 maxa = 0;
// Get min/max alpha.
for (uint i = 0; i < 16; i++)
{
uint8 alpha = rgba.color(i).a;
mina = min(mina, alpha);
maxa = max(maxa, alpha);
}
block->alpha0 = maxa;
block->alpha1 = mina;
/*int centroidDist = 256;
int centroid;
// Get the closest to the centroid.
for (uint i = 0; i < 16; i++)
{
uint8 alpha = rgba.color(i).a;
int dist = abs(alpha - (maxa + mina) / 2);
if (dist < centroidDist)
{
centroidDist = dist;
centroid = alpha;
}
}*/
if (maxa - mina > 8)
{
int besterror = computeAlphaError(rgba, block);
int besta0 = maxa;
int besta1 = mina;
for (int a0 = mina+9; a0 < maxa; a0++)
{
for (int a1 = mina; a1 < a0-8; a1++)
//for (int a1 = mina; a1 < maxa; a1++)
{
//nvCheck(abs(a1-a0) > 8);
//if (abs(a0 - a1) < 8) continue;
//if ((maxa-a0) + (a1-mina) + min(abs(centroid-a0), abs(centroid-a1)) > besterror)
if ((maxa-a0) + (a1-mina) > besterror)
continue;
block->alpha0 = a0;
block->alpha1 = a1;
int error = computeAlphaError(rgba, block);
if (error < besterror)
{
besterror = error;
besta0 = a0;
besta1 = a1;
}
}
}
block->alpha0 = besta0;
block->alpha1 = besta1;
}
return computeAlphaIndices(rgba, block);
}

View File

@ -0,0 +1,84 @@
// Copyright NVIDIA Corporation 2007 -- Ignacio Castano <icastano@nvidia.com>
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
#ifndef NV_TT_FASTCOMPRESSDXT_H
#define NV_TT_FASTCOMPRESSDXT_H
#include <nvimage/nvimage.h>
namespace nv
{
struct ColorBlock;
struct BlockDXT1;
struct BlockDXT3;
struct BlockDXT5;
struct AlphaBlockDXT3;
struct AlphaBlockDXT5;
// Color compression:
// Compressor that uses the extremes of the diameter axis.
// void compressBlock_DiameterAxis(const ColorBlock & rgba, BlockDXT1 * block);
// Compressor that uses the extremes of the luminance axis.
// void compressBlock_LuminanceAxis(const ColorBlock & rgba, BlockDXT1 * block);
// Compressor that uses bounding box.
void compressBlock_BoundsRange(const ColorBlock & rgba, BlockDXT1 * block);
// Compressor that uses bounding box and takes alpha into account.
// void compressBlock_BoundsRangeAlpha(const ColorBlock & rgba, BlockDXT1 * block);
// Simple, but slow compressor that tests all color pairs.
// void compressBlock_TestAllPairs(const ColorBlock & rgba, BlockDXT1 * block);
// Brute force 6d search along the best fit axis.
// void compressBlock_AnalyzeBestFitAxis(const ColorBlock & rgba, BlockDXT1 * block);
// Spatial greedy search.
// void refineSolution_1dSearch(const ColorBlock & rgba, BlockDXT1 * block);
// void refineSolution_3dSearch(const ColorBlock & rgba, BlockDXT1 * block);
// void refineSolution_6dSearch(const ColorBlock & rgba, BlockDXT1 * block);
// Brute force compressor for DXT5n
// void compressGreenBlock_BruteForce(const ColorBlock & rgba, BlockDXT1 * block);
// Minimize error of the endpoints.
// void optimizeEndPoints(const ColorBlock & rgba, BlockDXT1 * block);
// uint blockError(const ColorBlock & rgba, const BlockDXT1 & block);
// uint blockError(const ColorBlock & rgba, const AlphaBlockDXT5 & block);
// Alpha compression:
void compressBlock(const ColorBlock & rgba, AlphaBlockDXT3 * block);
void compressBlock_BoundsRange(const ColorBlock & rgba, BlockDXT3 * block);
void compressBlock_BoundsRange(const ColorBlock & rgba, BlockDXT5 * block);
uint compressBlock_BoundsRange(const ColorBlock & rgba, AlphaBlockDXT5 * block);
uint compressBlock_BruteForce(const ColorBlock & rgba, AlphaBlockDXT5 * block);
// uint compressBlock_Iterative(const ColorBlock & rgba, AlphaBlockDXT5 * block);
} // nv namespace
#endif // NV_TT_FASTCOMPRESSDXT_H

View File

@ -1,368 +0,0 @@
// Copyright NVIDIA Corporation 2007 -- Ignacio Castano <icastano@nvidia.com>
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
#include <nvcore/Containers.h> // swap
#include <nvmath/Color.h>
#include <nvimage/ColorBlock.h>
#include <nvimage/BlockDXT.h>
#include "OptimalCompressDXT.h"
#include "SingleColorLookup.h"
using namespace nv;
using namespace OptimalCompress;
namespace
{
static int computeGreenError(const ColorBlock & rgba, const BlockDXT1 * block)
{
nvDebugCheck(block != NULL);
int palette[4];
palette[0] = (block->col0.g << 2) | (block->col0.g >> 4);
palette[1] = (block->col1.g << 2) | (block->col1.g >> 4);
palette[2] = (2 * palette[0] + palette[1]) / 3;
palette[3] = (2 * palette[1] + palette[0]) / 3;
int totalError = 0;
for (int i = 0; i < 16; i++)
{
const int green = rgba.color(i).g;
int error = abs(green - palette[0]);
error = min(error, abs(green - palette[1]));
error = min(error, abs(green - palette[2]));
error = min(error, abs(green - palette[3]));
totalError += error;
}
return totalError;
}
static uint computeGreenIndices(const ColorBlock & rgba, const Color32 palette[4])
{
const int color0 = palette[0].g;
const int color1 = palette[1].g;
const int color2 = palette[2].g;
const int color3 = palette[3].g;
uint indices = 0;
for (int i = 0; i < 16; i++)
{
const int color = rgba.color(i).g;
uint d0 = abs(color0 - color);
uint d1 = abs(color1 - color);
uint d2 = abs(color2 - color);
uint d3 = abs(color3 - color);
uint b0 = d0 > d3;
uint b1 = d1 > d2;
uint b2 = d0 > d2;
uint b3 = d1 > d3;
uint b4 = d2 > d3;
uint x0 = b1 & b2;
uint x1 = b0 & b3;
uint x2 = b0 & b4;
indices |= (x2 | ((x0 | x1) << 1)) << (2 * i);
}
return indices;
}
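// Note: the comparison chain above is, in effect, a branchless selection of
// the nearest of the four green palette entries; the b*/x* bits combine into
// the 2-bit DXT1 index for each texel without any conditional jumps.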
// Choose the quantized value that produces the least error. Used by the DXT3 compressor.
inline static uint quantize4(uint8 a)
{
int q0 = (a >> 4) - 1;
int q1 = (a >> 4);
int q2 = (a >> 4) + 1;
q0 = (q0 << 4) | q0;
q1 = (q1 << 4) | q1;
q2 = (q2 << 4) | q2;
int d0 = abs(q0 - a);
int d1 = abs(q1 - a);
int d2 = abs(q2 - a);
if (d0 < d1 && d0 < d2) return q0 >> 4;
if (d1 < d2) return q1 >> 4;
return q2 >> 4;
}
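// Worked example: for a = 200 the candidates are 11 -> 187 (error 13),
// 12 -> 204 (error 4) and 13 -> 221 (error 21), so quantize4 returns 12.
// Each candidate is compared after expansion back to 8 bits via (q << 4) | q.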
static uint computeAlphaError(const ColorBlock & rgba, const AlphaBlockDXT5 * block)
{
uint8 alphas[8];
block->evaluatePalette(alphas);
uint totalError = 0;
for (uint i = 0; i < 16; i++)
{
uint8 alpha = rgba.color(i).a;
uint besterror = 256*256;
uint best;
for (uint p = 0; p < 8; p++)
{
int d = alphas[p] - alpha;
uint error = d * d;
if (error < besterror)
{
besterror = error;
best = p;
}
}
totalError += besterror;
}
return totalError;
}
static void computeAlphaIndices(const ColorBlock & rgba, AlphaBlockDXT5 * block)
{
uint8 alphas[8];
block->evaluatePalette(alphas);
for (uint i = 0; i < 16; i++)
{
uint8 alpha = rgba.color(i).a;
uint besterror = 256*256;
uint best = 8;
for(uint p = 0; p < 8; p++)
{
int d = alphas[p] - alpha;
uint error = d * d;
if (error < besterror)
{
besterror = error;
best = p;
}
}
nvDebugCheck(best < 8);
block->setIndex(i, best);
}
}
} // namespace
// Single color compressor, based on:
// https://mollyrocket.com/forums/viewtopic.php?t=392
void OptimalCompress::compressDXT1(Color32 c, BlockDXT1 * dxtBlock)
{
dxtBlock->col0.r = OMatch5[c.r][0];
dxtBlock->col0.g = OMatch6[c.g][0];
dxtBlock->col0.b = OMatch5[c.b][0];
dxtBlock->col1.r = OMatch5[c.r][1];
dxtBlock->col1.g = OMatch6[c.g][1];
dxtBlock->col1.b = OMatch5[c.b][1];
dxtBlock->indices = 0xaaaaaaaa;
if (dxtBlock->col0.u < dxtBlock->col1.u)
{
swap(dxtBlock->col0.u, dxtBlock->col1.u);
dxtBlock->indices ^= 0x55555555;
}
}
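// Note: 0xaaaaaaaa sets every 2-bit index to 2, so each texel decodes to the
// palette entry (2 * col0 + col1) / 3; the OMatch5/OMatch6 tables (from
// SingleColorLookup.h) hold, for every 8-bit component value, the 5/6-bit
// endpoint pair whose 2:1 blend reproduces it most closely.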
void OptimalCompress::compressDXT1a(Color32 rgba, BlockDXT1 * dxtBlock)
{
if (rgba.a < 128)
{
dxtBlock->col0.u = 0;
dxtBlock->col1.u = 0;
dxtBlock->indices = 0xFFFFFFFF;
}
else
{
compressDXT1(rgba, dxtBlock);
}
}
// Brute force green channel compressor
void OptimalCompress::compressDXT1G(const ColorBlock & rgba, BlockDXT1 * block)
{
nvDebugCheck(block != NULL);
uint8 ming = 63;
uint8 maxg = 0;
// Get min/max green.
for (uint i = 0; i < 16; i++)
{
uint8 green = rgba.color(i).g >> 2;
ming = min(ming, green);
maxg = max(maxg, green);
}
block->col0.r = 31;
block->col1.r = 31;
block->col0.g = maxg;
block->col1.g = ming;
block->col0.b = 0;
block->col1.b = 0;
if (maxg - ming > 4)
{
int besterror = computeGreenError(rgba, block);
int bestg0 = maxg;
int bestg1 = ming;
for (int g0 = ming+5; g0 < maxg; g0++)
{
for (int g1 = ming; g1 < g0-4; g1++)
{
if ((maxg-g0) + (g1-ming) > besterror)
continue;
block->col0.g = g0;
block->col1.g = g1;
int error = computeGreenError(rgba, block);
if (error < besterror)
{
besterror = error;
bestg0 = g0;
bestg1 = g1;
}
}
}
block->col0.g = bestg0;
block->col1.g = bestg1;
}
Color32 palette[4];
block->evaluatePalette(palette);
block->indices = computeGreenIndices(rgba, palette);
}
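// Note on the endpoint search above: every palette entry lies between g1 and
// g0, so the texels holding the extreme greens alone contribute at least
// (maxg - g0) + (g1 - ming) error. Candidate pairs whose lower bound already
// exceeds the best error found so far can therefore be skipped safely.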
void OptimalCompress::compressDXT3A(const ColorBlock & rgba, AlphaBlockDXT3 * dxtBlock)
{
dxtBlock->alpha0 = quantize4(rgba.color(0).a);
dxtBlock->alpha1 = quantize4(rgba.color(1).a);
dxtBlock->alpha2 = quantize4(rgba.color(2).a);
dxtBlock->alpha3 = quantize4(rgba.color(3).a);
dxtBlock->alpha4 = quantize4(rgba.color(4).a);
dxtBlock->alpha5 = quantize4(rgba.color(5).a);
dxtBlock->alpha6 = quantize4(rgba.color(6).a);
dxtBlock->alpha7 = quantize4(rgba.color(7).a);
dxtBlock->alpha8 = quantize4(rgba.color(8).a);
dxtBlock->alpha9 = quantize4(rgba.color(9).a);
dxtBlock->alphaA = quantize4(rgba.color(10).a);
dxtBlock->alphaB = quantize4(rgba.color(11).a);
dxtBlock->alphaC = quantize4(rgba.color(12).a);
dxtBlock->alphaD = quantize4(rgba.color(13).a);
dxtBlock->alphaE = quantize4(rgba.color(14).a);
dxtBlock->alphaF = quantize4(rgba.color(15).a);
}
void OptimalCompress::compressDXT5A(const ColorBlock & rgba, AlphaBlockDXT5 * dxtBlock)
{
uint8 mina = 255;
uint8 maxa = 0;
// Get min/max alpha.
for (uint i = 0; i < 16; i++)
{
uint8 alpha = rgba.color(i).a;
mina = min(mina, alpha);
maxa = max(maxa, alpha);
}
dxtBlock->alpha0 = maxa;
dxtBlock->alpha1 = mina;
/*int centroidDist = 256;
int centroid;
// Get the closest to the centroid.
for (uint i = 0; i < 16; i++)
{
uint8 alpha = rgba.color(i).a;
int dist = abs(alpha - (maxa + mina) / 2);
if (dist < centroidDist)
{
centroidDist = dist;
centroid = alpha;
}
}*/
if (maxa - mina > 8)
{
int besterror = computeAlphaError(rgba, dxtBlock);
int besta0 = maxa;
int besta1 = mina;
for (int a0 = mina+9; a0 < maxa; a0++)
{
for (int a1 = mina; a1 < a0-8; a1++)
//for (int a1 = mina; a1 < maxa; a1++)
{
//nvCheck(abs(a1-a0) > 8);
//if (abs(a0 - a1) < 8) continue;
//if ((maxa-a0) + (a1-mina) + min(abs(centroid-a0), abs(centroid-a1)) > besterror)
if ((maxa-a0) + (a1-mina) > besterror)
continue;
dxtBlock->alpha0 = a0;
dxtBlock->alpha1 = a1;
int error = computeAlphaError(rgba, dxtBlock);
if (error < besterror)
{
besterror = error;
besta0 = a0;
besta1 = a1;
}
}
}
dxtBlock->alpha0 = besta0;
dxtBlock->alpha1 = besta1;
}
computeAlphaIndices(rgba, dxtBlock);
}

View File

@ -1,49 +0,0 @@
// Copyright NVIDIA Corporation 2008 -- Ignacio Castano <icastano@nvidia.com>
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
#ifndef NV_TT_OPTIMALCOMPRESSDXT_H
#define NV_TT_OPTIMALCOMPRESSDXT_H
#include <nvimage/nvimage.h>
namespace nv
{
struct ColorBlock;
struct BlockDXT1;
struct BlockDXT3;
struct BlockDXT5;
struct AlphaBlockDXT3;
struct AlphaBlockDXT5;
namespace OptimalCompress
{
void compressDXT1(Color32 rgba, BlockDXT1 * dxtBlock);
void compressDXT1a(Color32 rgba, BlockDXT1 * dxtBlock);
void compressDXT1G(const ColorBlock & rgba, BlockDXT1 * block);
void compressDXT3A(const ColorBlock & rgba, AlphaBlockDXT3 * dxtBlock);
void compressDXT5A(const ColorBlock & rgba, AlphaBlockDXT5 * dxtBlock);
}
} // nv namespace
#endif // NV_TT_OPTIMALCOMPRESSDXT_H

View File

@ -27,7 +27,7 @@
#include <nvimage/BlockDXT.h> #include <nvimage/BlockDXT.h>
#include "QuickCompressDXT.h" #include "QuickCompressDXT.h"
#include "OptimalCompressDXT.h" #include "SingleColorLookup.h"
using namespace nv; using namespace nv;
@ -288,6 +288,70 @@ static void optimizeEndPoints4(Vector3 block[16], BlockDXT1 * dxtBlock)
dxtBlock->indices = computeIndices3(block, a, b); dxtBlock->indices = computeIndices3(block, a, b);
}*/ }*/
namespace
{
static int computeGreenError(const ColorBlock & rgba, const BlockDXT1 * block)
{
nvDebugCheck(block != NULL);
int palette[4];
palette[0] = (block->col0.g << 2) | (block->col0.g >> 4);
palette[1] = (block->col1.g << 2) | (block->col1.g >> 4);
palette[2] = (2 * palette[0] + palette[1]) / 3;
palette[3] = (2 * palette[1] + palette[0]) / 3;
int totalError = 0;
for (int i = 0; i < 16; i++)
{
const int green = rgba.color(i).g;
int error = abs(green - palette[0]);
error = min(error, abs(green - palette[1]));
error = min(error, abs(green - palette[2]));
error = min(error, abs(green - palette[3]));
totalError += error;
}
return totalError;
}
static uint computeGreenIndices(const ColorBlock & rgba, const Color32 palette[4])
{
const int color0 = palette[0].g;
const int color1 = palette[1].g;
const int color2 = palette[2].g;
const int color3 = palette[3].g;
uint indices = 0;
for (int i = 0; i < 16; i++)
{
const int color = rgba.color(i).g;
uint d0 = abs(color0 - color);
uint d1 = abs(color1 - color);
uint d2 = abs(color2 - color);
uint d3 = abs(color3 - color);
uint b0 = d0 > d3;
uint b1 = d1 > d2;
uint b2 = d0 > d2;
uint b3 = d1 > d3;
uint b4 = d2 > d3;
uint x0 = b1 & b2;
uint x1 = b0 & b3;
uint x2 = b0 & b4;
indices |= (x2 | ((x0 | x1) << 1)) << (2 * i);
}
return indices;
}
} // namespace
namespace namespace
{ {
@ -439,62 +503,78 @@ namespace
void QuickCompress::compressDXT1(const ColorBlock & rgba, BlockDXT1 * dxtBlock)
// Single color compressor, based on:
// https://mollyrocket.com/forums/viewtopic.php?t=392
void QuickCompress::compressDXT1(Color32 c, BlockDXT1 * dxtBlock)
{ {
if (rgba.isSingleColor()) dxtBlock->col0.r = OMatch5[c.r][0];
dxtBlock->col0.g = OMatch6[c.g][0];
dxtBlock->col0.b = OMatch5[c.b][0];
dxtBlock->col1.r = OMatch5[c.r][1];
dxtBlock->col1.g = OMatch6[c.g][1];
dxtBlock->col1.b = OMatch5[c.b][1];
dxtBlock->indices = 0xaaaaaaaa;
if (dxtBlock->col0.u < dxtBlock->col1.u)
{ {
OptimalCompress::compressDXT1(rgba.color(0), dxtBlock); swap(dxtBlock->col0.u, dxtBlock->col1.u);
} dxtBlock->indices ^= 0x55555555;
else
{
// read block
Vector3 block[16];
extractColorBlockRGB(rgba, block);
// find min and max colors
Vector3 maxColor, minColor;
findMinMaxColorsBox(block, 16, &maxColor, &minColor);
selectDiagonal(block, 16, &maxColor, &minColor);
insetBBox(&maxColor, &minColor);
uint16 color0 = roundAndExpand(&maxColor);
uint16 color1 = roundAndExpand(&minColor);
if (color0 < color1)
{
swap(maxColor, minColor);
swap(color0, color1);
}
dxtBlock->col0 = Color16(color0);
dxtBlock->col1 = Color16(color1);
dxtBlock->indices = computeIndices4(block, maxColor, minColor);
optimizeEndPoints4(block, dxtBlock);
} }
} }
void QuickCompress::compressDXT1(const ColorBlock & rgba, BlockDXT1 * dxtBlock)
void QuickCompress::compressDXT1a(const ColorBlock & rgba, BlockDXT1 * dxtBlock)
{ {
bool hasAlpha = false; // read block
Vector3 block[16];
extractColorBlockRGB(rgba, block);
for (uint i = 0; i < 16; i++) // find min and max colors
Vector3 maxColor, minColor;
findMinMaxColorsBox(block, 16, &maxColor, &minColor);
selectDiagonal(block, 16, &maxColor, &minColor);
insetBBox(&maxColor, &minColor);
uint16 color0 = roundAndExpand(&maxColor);
uint16 color1 = roundAndExpand(&minColor);
if (color0 < color1)
{ {
if (rgba.color(i).a < 128) { swap(maxColor, minColor);
hasAlpha = true; swap(color0, color1);
break;
}
} }
if (!hasAlpha) dxtBlock->col0 = Color16(color0);
dxtBlock->col1 = Color16(color1);
dxtBlock->indices = computeIndices4(block, maxColor, minColor);
optimizeEndPoints4(block, dxtBlock);
}
void QuickCompress::compressDXT1a(Color32 rgba, BlockDXT1 * dxtBlock)
{
if (rgba.a == 0)
{
dxtBlock->col0.u = 0;
dxtBlock->col1.u = 0;
dxtBlock->indices = 0xFFFFFFFF;
}
else
{
compressDXT1(rgba, dxtBlock);
}
}
void QuickCompress::compressDXT1a(const ColorBlock & rgba, BlockDXT1 * dxtBlock)
{
if (!rgba.hasAlpha())
{ {
compressDXT1(rgba, dxtBlock); compressDXT1(rgba, dxtBlock);
} }
// @@ Handle single RGB, with varying alpha? We need tables for single color compressor in 3 color mode.
//else if (rgba.isSingleColorNoAlpha()) { ... }
else else
{ {
// read block // read block
@ -527,14 +607,95 @@ void QuickCompress::compressDXT1a(const ColorBlock & rgba, BlockDXT1 * dxtBlock)
} }
void QuickCompress::compressDXT3(const ColorBlock & rgba, BlockDXT3 * dxtBlock) // Brute force green channel compressor
void QuickCompress::compressDXT1G(const ColorBlock & rgba, BlockDXT1 * block)
{ {
compressDXT1(rgba, &dxtBlock->color); nvDebugCheck(block != NULL);
OptimalCompress::compressDXT3A(rgba, &dxtBlock->alpha);
uint8 ming = 63;
uint8 maxg = 0;
// Get min/max green.
for (uint i = 0; i < 16; i++)
{
uint8 green = rgba.color(i).g >> 2;
ming = min(ming, green);
maxg = max(maxg, green);
}
block->col0.r = 31;
block->col1.r = 31;
block->col0.g = maxg;
block->col1.g = ming;
block->col0.b = 0;
block->col1.b = 0;
if (maxg - ming > 4)
{
int besterror = computeGreenError(rgba, block);
int bestg0 = maxg;
int bestg1 = ming;
for (int g0 = ming+5; g0 < maxg; g0++)
{
for (int g1 = ming; g1 < g0-4; g1++)
{
if ((maxg-g0) + (g1-ming) > besterror)
continue;
block->col0.g = g0;
block->col1.g = g1;
int error = computeGreenError(rgba, block);
if (error < besterror)
{
besterror = error;
bestg0 = g0;
bestg1 = g1;
}
}
}
block->col0.g = bestg0;
block->col1.g = bestg1;
}
Color32 palette[4];
block->evaluatePalette(palette);
block->indices = computeGreenIndices(rgba, palette);
} }
void QuickCompress::compressDXT5A(const ColorBlock & rgba, AlphaBlockDXT5 * dxtBlock, int iterationCount/*=8*/)
void QuickCompress::compressDXT3A(const ColorBlock & rgba, AlphaBlockDXT3 * dxtBlock)
{
// @@ Round instead of truncate. When rounding, take the bit expansion into account.
dxtBlock->alpha0 = rgba.color(0).a >> 4;
dxtBlock->alpha1 = rgba.color(1).a >> 4;
dxtBlock->alpha2 = rgba.color(2).a >> 4;
dxtBlock->alpha3 = rgba.color(3).a >> 4;
dxtBlock->alpha4 = rgba.color(4).a >> 4;
dxtBlock->alpha5 = rgba.color(5).a >> 4;
dxtBlock->alpha6 = rgba.color(6).a >> 4;
dxtBlock->alpha7 = rgba.color(7).a >> 4;
dxtBlock->alpha8 = rgba.color(8).a >> 4;
dxtBlock->alpha9 = rgba.color(9).a >> 4;
dxtBlock->alphaA = rgba.color(10).a >> 4;
dxtBlock->alphaB = rgba.color(11).a >> 4;
dxtBlock->alphaC = rgba.color(12).a >> 4;
dxtBlock->alphaD = rgba.color(13).a >> 4;
dxtBlock->alphaE = rgba.color(14).a >> 4;
dxtBlock->alphaF = rgba.color(15).a >> 4;
}
void QuickCompress::compressDXT3(const ColorBlock & rgba, BlockDXT3 * dxtBlock)
{
compressDXT1(rgba, &dxtBlock->color);
compressDXT3A(rgba, &dxtBlock->alpha);
}
void QuickCompress::compressDXT5A(const ColorBlock & rgba, AlphaBlockDXT5 * dxtBlock)
{ {
uint8 alpha0 = 0; uint8 alpha0 = 0;
uint8 alpha1 = 255; uint8 alpha1 = 255;
@ -554,7 +715,7 @@ void QuickCompress::compressDXT5A(const ColorBlock & rgba, AlphaBlockDXT5 * dxtB
AlphaBlockDXT5 bestblock = block; AlphaBlockDXT5 bestblock = block;
for (int i = 0; i < iterationCount; i++) while(true)
{ {
optimizeAlpha8(rgba, &block); optimizeAlpha8(rgba, &block);
uint error = computeAlphaIndices(rgba, &block); uint error = computeAlphaIndices(rgba, &block);
@ -578,8 +739,9 @@ void QuickCompress::compressDXT5A(const ColorBlock & rgba, AlphaBlockDXT5 * dxtB
*dxtBlock = bestblock; *dxtBlock = bestblock;
} }
void QuickCompress::compressDXT5(const ColorBlock & rgba, BlockDXT5 * dxtBlock, int iterationCount/*=8*/) void QuickCompress::compressDXT5(const ColorBlock & rgba, BlockDXT5 * dxtBlock)
{ {
compressDXT1(rgba, &dxtBlock->color); compressDXT1(rgba, &dxtBlock->color);
compressDXT5A(rgba, &dxtBlock->alpha, iterationCount); compressDXT5A(rgba, &dxtBlock->alpha);
} }

View File

@ -37,13 +37,17 @@ namespace nv
namespace QuickCompress namespace QuickCompress
{ {
void compressDXT1(Color32 rgba, BlockDXT1 * dxtBlock);
void compressDXT1(const ColorBlock & rgba, BlockDXT1 * dxtBlock); void compressDXT1(const ColorBlock & rgba, BlockDXT1 * dxtBlock);
void compressDXT1a(Color32 rgba, BlockDXT1 * dxtBlock);
void compressDXT1a(const ColorBlock & rgba, BlockDXT1 * dxtBlock); void compressDXT1a(const ColorBlock & rgba, BlockDXT1 * dxtBlock);
void compressDXT1G(const ColorBlock & rgba, BlockDXT1 * block);
void compressDXT3A(const ColorBlock & rgba, AlphaBlockDXT3 * dxtBlock);
void compressDXT3(const ColorBlock & rgba, BlockDXT3 * dxtBlock); void compressDXT3(const ColorBlock & rgba, BlockDXT3 * dxtBlock);
void compressDXT5A(const ColorBlock & rgba, AlphaBlockDXT5 * dxtBlock, int iterationCount=8); void compressDXT5A(const ColorBlock & rgba, AlphaBlockDXT5 * dxtBlock);
void compressDXT5(const ColorBlock & rgba, BlockDXT5 * dxtBlock, int iterationCount=8); void compressDXT5(const ColorBlock & rgba, BlockDXT5 * dxtBlock);
} }
} // nv namespace } // nv namespace

View File

@ -159,7 +159,7 @@ __device__ void loadColorBlock(const uint * image, float3 colors[16], float3 sum
} }
} }
__device__ void loadColorBlock(const uint * image, float3 colors[16], float3 sums[16], float weights[16], int xrefs[16], int * sameColor) __device__ void loadColorBlock(const uint * image, float3 colors[16], float3 sums[16], float weights[16], int xrefs[16])
{ {
const int bid = blockIdx.x; const int bid = blockIdx.x;
const int idx = threadIdx.x; const int idx = threadIdx.x;
@ -189,8 +189,6 @@ __device__ void loadColorBlock(const uint * image, float3 colors[16], float3 sum
colorSums(colors, sums); colorSums(colors, sums);
float3 axis = bestFitLine(colors, sums[0], kColorMetric); float3 axis = bestFitLine(colors, sums[0], kColorMetric);
*sameColor = (axis == make_float3(0, 0, 0));
dps[idx] = dot(rawColors[idx], axis); dps[idx] = dot(rawColors[idx], axis);
#if __DEVICE_EMULATION__ #if __DEVICE_EMULATION__
@ -594,40 +592,6 @@ __device__ void evalAllPermutations(const float3 * colors, const float * weights
} }
*/ */
__device__ void evalLevel4Permutations(const float3 * colors, float3 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors)
{
const int idx = threadIdx.x;
float bestError = FLT_MAX;
for(int i = 0; i < 16; i++)
{
int pidx = idx + NUM_THREADS * i;
if (pidx >= 992) break;
ushort start, end;
uint permutation = permutations[pidx];
float error = evalPermutation4(colors, colorSum, permutation, &start, &end);
if (error < bestError)
{
bestError = error;
bestPermutation = permutation;
bestStart = start;
bestEnd = end;
}
}
if (bestStart < bestEnd)
{
swap(bestEnd, bestStart);
bestPermutation ^= 0x55555555; // Flip indices.
}
errors[idx] = bestError;
}
__device__ void evalLevel4Permutations(const float3 * colors, const float * weights, float3 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors) __device__ void evalLevel4Permutations(const float3 * colors, const float * weights, float3 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors)
{ {
const int idx = threadIdx.x; const int idx = threadIdx.x;
@ -663,6 +627,7 @@ __device__ void evalLevel4Permutations(const float3 * colors, const float * weig
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// Find index with minimum error // Find index with minimum error
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -833,39 +798,6 @@ __global__ void compressDXT1(const uint * permutations, const uint * image, uint
} }
} }
__global__ void compressLevel4DXT1(const uint * permutations, const uint * image, uint2 * result)
{
__shared__ float3 colors[16];
__shared__ float3 sums[16];
__shared__ int xrefs[16];
__shared__ int sameColor;
loadColorBlock(image, colors, sums, xrefs, &sameColor);
__syncthreads();
if (sameColor)
{
if (threadIdx.x == 0) saveSingleColorBlockDXT1(colors[0], result);
return;
}
ushort bestStart, bestEnd;
uint bestPermutation;
__shared__ float errors[NUM_THREADS];
evalLevel4Permutations(colors, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors);
// Use a parallel reduction to find minimum error.
const int minIdx = findMinError(errors);
// Only write the result of the winner thread.
if (threadIdx.x == minIdx)
{
saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result);
}
}
__global__ void compressWeightedDXT1(const uint * permutations, const uint * image, uint2 * result) __global__ void compressWeightedDXT1(const uint * permutations, const uint * image, uint2 * result)
{ {
@ -873,18 +805,11 @@ __global__ void compressWeightedDXT1(const uint * permutations, const uint * ima
__shared__ float3 sums[16]; __shared__ float3 sums[16];
__shared__ float weights[16]; __shared__ float weights[16];
__shared__ int xrefs[16]; __shared__ int xrefs[16];
__shared__ int sameColor;
loadColorBlock(image, colors, sums, weights, xrefs, &sameColor); loadColorBlock(image, colors, sums, weights, xrefs);
__syncthreads(); __syncthreads();
if (sameColor)
{
if (threadIdx.x == 0) saveSingleColorBlockDXT1(colors[0], result);
return;
}
ushort bestStart, bestEnd; ushort bestStart, bestEnd;
uint bestPermutation; uint bestPermutation;
@ -1108,11 +1033,6 @@ extern "C" void compressKernelDXT1(uint blockNum, uint * d_data, uint * d_result
compressDXT1<<<blockNum, NUM_THREADS>>>(d_bitmaps, d_data, (uint2 *)d_result); compressDXT1<<<blockNum, NUM_THREADS>>>(d_bitmaps, d_data, (uint2 *)d_result);
} }
extern "C" void compressKernelDXT1_Level4(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps)
{
compressLevel4DXT1<<<blockNum, NUM_THREADS>>>(d_bitmaps, d_data, (uint2 *)d_result);
}
extern "C" void compressWeightedKernelDXT1(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps) extern "C" void compressWeightedKernelDXT1(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps)
{ {
compressWeightedDXT1<<<blockNum, NUM_THREADS>>>(d_bitmaps, d_data, (uint2 *)d_result); compressWeightedDXT1<<<blockNum, NUM_THREADS>>>(d_bitmaps, d_data, (uint2 *)d_result);

View File

@ -30,7 +30,6 @@
#include <nvtt/CompressionOptions.h> #include <nvtt/CompressionOptions.h>
#include <nvtt/OutputOptions.h> #include <nvtt/OutputOptions.h>
#include <nvtt/QuickCompressDXT.h> #include <nvtt/QuickCompressDXT.h>
#include <nvtt/OptimalCompressDXT.h>
#include "CudaCompressDXT.h" #include "CudaCompressDXT.h"
#include "CudaUtils.h" #include "CudaUtils.h"
@ -53,7 +52,6 @@ using namespace nvtt;
extern "C" void setupCompressKernel(const float weights[3]); extern "C" void setupCompressKernel(const float weights[3]);
extern "C" void compressKernelDXT1(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps); extern "C" void compressKernelDXT1(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps);
extern "C" void compressKernelDXT1_Level4(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps);
extern "C" void compressWeightedKernelDXT1(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps); extern "C" void compressWeightedKernelDXT1(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps);
#include "Bitmaps.h" // @@ Rename to BitmapTable.h #include "Bitmaps.h" // @@ Rename to BitmapTable.h
@ -120,25 +118,20 @@ bool CudaCompressor::isValid() const
// @@ This code is very repetitive and needs to be cleaned up. // @@ This code is very repetitive and needs to be cleaned up.
void CudaCompressor::setImage(const Image * image, nvtt::AlphaMode alphaMode)
{
m_image = image;
m_alphaMode = alphaMode;
}
/// Compress image using CUDA. /// Compress image using CUDA.
void CudaCompressor::compressDXT1(const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) void CudaCompressor::compressDXT1(const Image * image, const OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{ {
nvDebugCheck(cuda::isHardwarePresent()); nvDebugCheck(cuda::isHardwarePresent());
#if defined HAVE_CUDA #if defined HAVE_CUDA
// Image size in blocks. // Image size in blocks.
const uint w = (m_image->width() + 3) / 4; const uint w = (image->width() + 3) / 4;
const uint h = (m_image->height() + 3) / 4; const uint h = (image->height() + 3) / 4;
uint imageSize = w * h * 16 * sizeof(Color32); uint imageSize = w * h * 16 * sizeof(Color32);
uint * blockLinearImage = (uint *) malloc(imageSize); uint * blockLinearImage = (uint *) malloc(imageSize);
convertToBlockLinear(m_image, blockLinearImage); // @@ Do this in parallel with the GPU, or in the GPU! convertToBlockLinear(image, blockLinearImage); // @@ Do this in parallel with the GPU, or in the GPU!
const uint blockNum = w * h; const uint blockNum = w * h;
const uint compressedSize = blockNum * 8; const uint compressedSize = blockNum * 8;
@ -183,7 +176,7 @@ void CudaCompressor::compressDXT1(const CompressionOptions::Private & compressio
} }
clock_t end = clock(); clock_t end = clock();
//printf("\rCUDA time taken: %.3f seconds\n", float(end-start) / CLOCKS_PER_SEC); printf("\rCUDA time taken: %.3f seconds\n", float(end-start) / CLOCKS_PER_SEC);
free(blockLinearImage); free(blockLinearImage);
@ -197,18 +190,18 @@ void CudaCompressor::compressDXT1(const CompressionOptions::Private & compressio
/// Compress image using CUDA. /// Compress image using CUDA.
void CudaCompressor::compressDXT3(const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) void CudaCompressor::compressDXT3(const Image * image, const OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{ {
nvDebugCheck(cuda::isHardwarePresent()); nvDebugCheck(cuda::isHardwarePresent());
#if defined HAVE_CUDA #if defined HAVE_CUDA
// Image size in blocks. // Image size in blocks.
const uint w = (m_image->width() + 3) / 4; const uint w = (image->width() + 3) / 4;
const uint h = (m_image->height() + 3) / 4; const uint h = (image->height() + 3) / 4;
uint imageSize = w * h * 16 * sizeof(Color32); uint imageSize = w * h * 16 * sizeof(Color32);
uint * blockLinearImage = (uint *) malloc(imageSize); uint * blockLinearImage = (uint *) malloc(imageSize);
convertToBlockLinear(m_image, blockLinearImage); convertToBlockLinear(image, blockLinearImage);
const uint blockNum = w * h; const uint blockNum = w * h;
const uint compressedSize = blockNum * 8; const uint compressedSize = blockNum * 8;
@ -228,20 +221,13 @@ void CudaCompressor::compressDXT3(const CompressionOptions::Private & compressio
cudaMemcpy(m_data, blockLinearImage + bn * 16, count * 64, cudaMemcpyHostToDevice); cudaMemcpy(m_data, blockLinearImage + bn * 16, count * 64, cudaMemcpyHostToDevice);
// Launch kernel. // Launch kernel.
if (m_alphaMode == AlphaMode_Transparency) compressWeightedKernelDXT1(count, m_data, m_result, m_bitmapTable);
{
compressWeightedKernelDXT1(count, m_data, m_result, m_bitmapTable);
}
else
{
compressKernelDXT1_Level4(count, m_data, m_result, m_bitmapTable);
}
// Compress alpha in parallel with the GPU. // Compress alpha in parallel with the GPU.
for (uint i = 0; i < count; i++) for (uint i = 0; i < count; i++)
{ {
ColorBlock rgba(blockLinearImage + (bn + i) * 16); ColorBlock rgba(blockLinearImage + (bn + i) * 16);
OptimalCompress::compressDXT3A(rgba, alphaBlocks + i); QuickCompress::compressDXT3A(rgba, alphaBlocks + i);
} }
// Check for errors. // Check for errors.
@ -273,7 +259,7 @@ void CudaCompressor::compressDXT3(const CompressionOptions::Private & compressio
} }
clock_t end = clock(); clock_t end = clock();
//printf("\rCUDA time taken: %.3f seconds\n", float(end-start) / CLOCKS_PER_SEC); printf("\rCUDA time taken: %.3f seconds\n", float(end-start) / CLOCKS_PER_SEC);
free(alphaBlocks); free(alphaBlocks);
free(blockLinearImage); free(blockLinearImage);
@ -288,18 +274,18 @@ void CudaCompressor::compressDXT3(const CompressionOptions::Private & compressio
/// Compress image using CUDA. /// Compress image using CUDA.
void CudaCompressor::compressDXT5(const CompressionOptions::Private & compressionOptions, const OutputOptions::Private & outputOptions) void CudaCompressor::compressDXT5(const Image * image, const OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{ {
nvDebugCheck(cuda::isHardwarePresent()); nvDebugCheck(cuda::isHardwarePresent());
#if defined HAVE_CUDA #if defined HAVE_CUDA
// Image size in blocks. // Image size in blocks.
const uint w = (m_image->width() + 3) / 4; const uint w = (image->width() + 3) / 4;
const uint h = (m_image->height() + 3) / 4; const uint h = (image->height() + 3) / 4;
uint imageSize = w * h * 16 * sizeof(Color32); uint imageSize = w * h * 16 * sizeof(Color32);
uint * blockLinearImage = (uint *) malloc(imageSize); uint * blockLinearImage = (uint *) malloc(imageSize);
convertToBlockLinear(m_image, blockLinearImage); convertToBlockLinear(image, blockLinearImage);
const uint blockNum = w * h; const uint blockNum = w * h;
const uint compressedSize = blockNum * 8; const uint compressedSize = blockNum * 8;
@ -319,14 +305,7 @@ void CudaCompressor::compressDXT5(const CompressionOptions::Private & compressio
cudaMemcpy(m_data, blockLinearImage + bn * 16, count * 64, cudaMemcpyHostToDevice); cudaMemcpy(m_data, blockLinearImage + bn * 16, count * 64, cudaMemcpyHostToDevice);
// Launch kernel. // Launch kernel.
if (m_alphaMode == AlphaMode_Transparency) compressWeightedKernelDXT1(count, m_data, m_result, m_bitmapTable);
{
compressWeightedKernelDXT1(count, m_data, m_result, m_bitmapTable);
}
else
{
compressKernelDXT1_Level4(count, m_data, m_result, m_bitmapTable);
}
// Compress alpha in parallel with the GPU. // Compress alpha in parallel with the GPU.
for (uint i = 0; i < count; i++) for (uint i = 0; i < count; i++)
@ -364,7 +343,7 @@ void CudaCompressor::compressDXT5(const CompressionOptions::Private & compressio
} }
clock_t end = clock(); clock_t end = clock();
//printf("\rCUDA time taken: %.3f seconds\n", float(end-start) / CLOCKS_PER_SEC); printf("\rCUDA time taken: %.3f seconds\n", float(end-start) / CLOCKS_PER_SEC);
free(alphaBlocks); free(alphaBlocks);
free(blockLinearImage); free(blockLinearImage);
@ -378,3 +357,185 @@ void CudaCompressor::compressDXT5(const CompressionOptions::Private & compressio
} }
#if 0
class Task
{
public:
explicit Task(uint numBlocks) : blockMaxCount(numBlocks), blockCount(0)
{
// System memory allocations.
blockLinearImage = new uint[blockMaxCount * 16];
xrefs = new uint[blockMaxCount * 16];
// Device memory allocations.
cudaMalloc((void**) &d_blockLinearImage, blockMaxCount * 16 * sizeof(uint));
cudaMalloc((void**) &d_compressedImage, blockMaxCount * 8U);
// @@ Check for allocation errors.
}
~Task()
{
delete [] blockLinearImage;
delete [] xrefs;
cudaFree(d_blockLinearImage);
cudaFree(d_compressedImage);
}
void addColorBlock(const ColorBlock & rgba)
{
nvDebugCheck(!isFull());
// @@ Count unique colors?
/*
// Convert colors to vectors.
Array<Vector3> pointArray(16);
for(int i = 0; i < 16; i++) {
const Color32 color = rgba.color(i);
pointArray.append(Vector3(color.r, color.g, color.b));
}
// Find best fit line.
const Vector3 axis = Fit::bestLine(pointArray).direction();
// Project points to axis.
float dps[16];
uint * order = &xrefs[blockCount * 16];
for (uint i = 0; i < 16; ++i)
{
dps[i] = dot(pointArray[i], axis);
order[i] = i;
}
// Sort them.
for (uint i = 0; i < 16; ++i)
{
for (uint j = i; j > 0 && dps[j] < dps[j - 1]; --j)
{
swap(dps[j], dps[j - 1]);
swap(order[j], order[j - 1]);
}
}
*/
// Write sorted colors to blockLinearImage.
for(uint i = 0; i < 16; ++i)
{
// blockLinearImage[blockCount * 16 + i] = rgba.color(order[i]);
blockLinearImage[blockCount * 16 + i] = rgba.color(i);
}
++blockCount;
}
bool isFull()
{
nvDebugCheck(blockCount <= blockMaxCount);
return blockCount == blockMaxCount;
}
void flush(const OutputOptions::Private & outputOptions)
{
if (blockCount == 0)
{
// Nothing to do.
return;
}
// Copy input color blocks.
cudaMemcpy(d_blockLinearImage, blockLinearImage, blockCount * 64, cudaMemcpyHostToDevice);
// Launch kernel.
compressKernelDXT1(blockCount, d_blockLinearImage, d_compressedImage, d_bitmaps);
// Check for errors.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
nvDebug("CUDA Error: %s\n", cudaGetErrorString(err));
if (outputOptions.errorHandler != NULL)
{
outputOptions.errorHandler->error(Error_CudaError);
}
}
// Copy result to host, overwrite swizzled image.
uint * compressedImage = blockLinearImage;
cudaMemcpy(compressedImage, d_compressedImage, blockCount * 8, cudaMemcpyDeviceToHost);
// @@ Sort block indices.
// Output result.
if (outputOptions.outputHandler != NULL)
{
// outputOptions.outputHandler->writeData(compressedImage, blockCount * 8);
}
blockCount = 0;
}
private:
const uint blockMaxCount;
uint blockCount;
uint * blockLinearImage;
uint * xrefs;
uint * d_blockLinearImage;
uint * d_compressedImage;
};
void nv::cudaCompressDXT1_2(const Image * image, const OutputOptions::Private & outputOptions, const CompressionOptions::Private & compressionOptions)
{
#if defined HAVE_CUDA
const uint w = image->width();
const uint h = image->height();
const uint blockNum = ((w + 3) / 4) * ((h + 3) / 4);
const uint blockMax = 32768; // 49152, 65535
setupCompressKernelDXT1(compressionOptions.colorWeight.ptr());
ColorBlock rgba;
Task task(min(blockNum, blockMax));
clock_t start = clock();
for (uint y = 0; y < h; y += 4) {
for (uint x = 0; x < w; x += 4) {
rgba.init(image, x, y);
task.addColorBlock(rgba);
if (task.isFull())
{
task.flush(outputOptions);
}
}
}
task.flush(outputOptions);
clock_t end = clock();
printf("\rCUDA time taken: %.3f seconds\n", float(end-start) / CLOCKS_PER_SEC);
#else
if (outputOptions.errorHandler != NULL)
{
outputOptions.errorHandler->error(Error_CudaError);
}
#endif
}
#endif // 0

View File

@ -39,20 +39,15 @@ namespace nv
bool isValid() const; bool isValid() const;
void setImage(const Image * image, nvtt::AlphaMode alphaMode); void compressDXT1(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const nvtt::CompressionOptions::Private & compressionOptions);
void compressDXT3(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const nvtt::CompressionOptions::Private & compressionOptions);
void compressDXT1(const nvtt::CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions); void compressDXT5(const Image * image, const nvtt::OutputOptions::Private & outputOptions, const nvtt::CompressionOptions::Private & compressionOptions);
void compressDXT3(const nvtt::CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions);
void compressDXT5(const nvtt::CompressionOptions::Private & compressionOptions, const nvtt::OutputOptions::Private & outputOptions);
private: private:
uint * m_bitmapTable; uint * m_bitmapTable;
uint * m_data; uint * m_data;
uint * m_result; uint * m_result;
const Image * m_image;
nvtt::AlphaMode m_alphaMode;
}; };
} // nv namespace } // nv namespace

View File

@ -74,7 +74,7 @@ bool nv::cuda::isHardwarePresent()
{ {
#if defined HAVE_CUDA #if defined HAVE_CUDA
#if NV_OS_WIN32 #if NV_OS_WIN32
//if (isWindowsVista()) return false; if (isWindowsVista()) return false;
//if (isWindowsVista() || !isWow32()) return false; //if (isWindowsVista() || !isWow32()) return false;
#endif #endif
int count = deviceCount(); int count = deviceCount();

View File

@ -29,8 +29,6 @@
#include "colourblock.h" #include "colourblock.h"
#include <cfloat> #include <cfloat>
#include "fastclusterlookup.inl"
namespace squish { namespace squish {
FastClusterFit::FastClusterFit() FastClusterFit::FastClusterFit()
@ -99,6 +97,91 @@ void FastClusterFit::SetColourSet( ColourSet const* colours, int flags )
} }
struct Precomp {
float alpha2_sum;
float beta2_sum;
float alphabeta_sum;
float factor;
};
static SQUISH_ALIGN_16 Precomp s_threeElement[153];
static SQUISH_ALIGN_16 Precomp s_fourElement[969];
void FastClusterFit::DoPrecomputation()
{
int i = 0;
// Three element clusters:
for( int c0 = 0; c0 <= 16; c0++) // At least two clusters.
{
for( int c1 = 0; c1 <= 16-c0; c1++)
{
int c2 = 16 - c0 - c1;
/*if (c2 == 16) {
// a = b = x2 / 16
s_threeElement[i].alpha2_sum = 0;
s_threeElement[i].beta2_sum = 16;
s_threeElement[i].alphabeta_sum = -16;
s_threeElement[i].factor = 1.0f / 256.0f;
}
else if (c0 == 16) {
// a = b = x0 / 16
s_threeElement[i].alpha2_sum = 16;
s_threeElement[i].beta2_sum = 0;
s_threeElement[i].alphabeta_sum = -16;
s_threeElement[i].factor = 1.0f / 256.0f;
}
else*/ {
s_threeElement[i].alpha2_sum = c0 + c1 * 0.25f;
s_threeElement[i].beta2_sum = c2 + c1 * 0.25f;
s_threeElement[i].alphabeta_sum = c1 * 0.25f;
s_threeElement[i].factor = 1.0f / (s_threeElement[i].alpha2_sum * s_threeElement[i].beta2_sum - s_threeElement[i].alphabeta_sum * s_threeElement[i].alphabeta_sum);
}
i++;
}
}
//printf("%d three cluster elements\n", i);
// Four element clusters:
i = 0;
for( int c0 = 0; c0 <= 16; c0++)
{
for( int c1 = 0; c1 <= 16-c0; c1++)
{
for( int c2 = 0; c2 <= 16-c0-c1; c2++)
{
int c3 = 16 - c0 - c1 - c2;
/*if (c3 == 16) {
// a = b = x3 / 16
s_fourElement[i].alpha2_sum = 16.0f;
s_fourElement[i].beta2_sum = 0.0f;
s_fourElement[i].alphabeta_sum = -16.0f;
s_fourElement[i].factor = 1.0f / 256.0f;
}
else if (c0 == 16) {
// a = b = x0 / 16
s_fourElement[i].alpha2_sum = 0.0f;
s_fourElement[i].beta2_sum = 16.0f;
s_fourElement[i].alphabeta_sum = -16.0f;
s_fourElement[i].factor = 1.0f / 256.0f;
}
else*/ {
s_fourElement[i].alpha2_sum = c0 + c1 * (4.0f/9.0f) + c2 * (1.0f/9.0f);
s_fourElement[i].beta2_sum = c3 + c2 * (4.0f/9.0f) + c1 * (1.0f/9.0f);
s_fourElement[i].alphabeta_sum = (c1 + c2) * (2.0f/9.0f);
s_fourElement[i].factor = 1.0f / (s_fourElement[i].alpha2_sum * s_fourElement[i].beta2_sum - s_fourElement[i].alphabeta_sum * s_fourElement[i].alphabeta_sum);
}
i++;
}
}
}
//printf("%d four cluster elements\n", i);
}
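// Note: the table sizes follow from counting the ways 16 texels can be split
// into clusters: c0 + c1 + c2 = 16 has C(18,2) = 153 non-negative solutions,
// and c0 + c1 + c2 + c3 = 16 has C(19,3) = 969, matching s_threeElement[153]
// and s_fourElement[969].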
void FastClusterFit::SetMetric(float r, float g, float b) void FastClusterFit::SetMetric(float r, float g, float b)
{ {
#if SQUISH_USE_SIMD #if SQUISH_USE_SIMD

View File

@ -44,6 +44,8 @@ public:
void SetMetric(float r, float g, float b); void SetMetric(float r, float g, float b);
float GetBestError() const; float GetBestError() const;
static void DoPrecomputation();
// Make them public // Make them public
virtual void Compress3( void* block ); virtual void Compress3( void* block );
virtual void Compress4( void* block ); virtual void Compress4( void* block );

File diff suppressed because it is too large.

View File

@ -42,11 +42,11 @@ struct MyOutputHandler : public nvtt::OutputHandler
MyOutputHandler(const char * name) : total(0), progress(0), percentage(0), stream(new nv::StdOutputStream(name)) {} MyOutputHandler(const char * name) : total(0), progress(0), percentage(0), stream(new nv::StdOutputStream(name)) {}
virtual ~MyOutputHandler() { delete stream; } virtual ~MyOutputHandler() { delete stream; }
void setTotal(int64 t) virtual void setTotal(int64 t)
{ {
total = t + 128; total = t + 128;
} }
void setDisplayProgress(bool b) virtual void setDisplayProgress(bool b)
{ {
verbose = b; verbose = b;
} }
@ -373,6 +373,7 @@ int main(int argc, char *argv[])
inputOptions.setMipmapGeneration(false); inputOptions.setMipmapGeneration(false);
} }
nvtt::CompressionOptions compressionOptions; nvtt::CompressionOptions compressionOptions;
compressionOptions.setFormat(format); compressionOptions.setFormat(format);
if (fast) if (fast)
@ -396,21 +397,6 @@ int main(int argc, char *argv[])
compressionOptions.setExternalCompressor(externalCompressor); compressionOptions.setExternalCompressor(externalCompressor);
} }
if (format == nvtt::Format_RGB)
{
compressionOptions.setQuantization(true, false, false);
//compressionOptions.setPixelFormat(16, 0xF000, 0x0F00, 0x00F0, 0x000F);
compressionOptions.setPixelFormat(16,
0x0F00,
0x00F0,
0x000F,
0xF000);
// 0x003F0000,
// 0x00003F00,
// 0x0000003F,
// 0x3F000000);
}
MyErrorHandler errorHandler; MyErrorHandler errorHandler;
MyOutputHandler outputHandler(output); MyOutputHandler outputHandler(output);

View File

@ -73,12 +73,10 @@ int main(int argc, char *argv[])
float scale = 0.5f; float scale = 0.5f;
float gamma = 2.2f; float gamma = 2.2f;
nv::AutoPtr<nv::Filter> filter; nv::Filter * filter = NULL;
nv::Path input; nv::Path input;
nv::Path output; nv::Path output;
nv::FloatImage::WrapMode wrapMode = nv::FloatImage::WrapMode_Mirror;
// Parse arguments. // Parse arguments.
for (int i = 1; i < argc; i++) for (int i = 1; i < argc; i++)
{ {
@ -110,18 +108,9 @@ int main(int argc, char *argv[])
else if (strcmp("lanczos", argv[i]) == 0) filter = new nv::LanczosFilter(); else if (strcmp("lanczos", argv[i]) == 0) filter = new nv::LanczosFilter();
else if (strcmp("kaiser", argv[i]) == 0) { else if (strcmp("kaiser", argv[i]) == 0) {
filter = new nv::KaiserFilter(3); filter = new nv::KaiserFilter(3);
((nv::KaiserFilter *)filter.ptr())->setParameters(4.0f, 1.0f); ((nv::KaiserFilter *)filter)->setParameters(4.0f, 1.0f);
} }
} }
else if (strcmp("-f", argv[i]) == 0)
{
if (i+1 == argc) break;
i++;
if (strcmp("mirror", argv[i]) == 0) wrapMode = nv::FloatImage::WrapMode_Mirror;
else if (strcmp("repeat", argv[i]) == 0) wrapMode = nv::FloatImage::WrapMode_Repeat;
else if (strcmp("clamp", argv[i]) == 0) wrapMode = nv::FloatImage::WrapMode_Clamp;
}
else if (argv[i][0] != '-') else if (argv[i][0] != '-')
{ {
input = argv[i]; input = argv[i];
@ -151,10 +140,6 @@ int main(int argc, char *argv[])
printf(" * mitchell\n"); printf(" * mitchell\n");
printf(" * lanczos\n"); printf(" * lanczos\n");
printf(" * kaiser\n"); printf(" * kaiser\n");
printf(" -w mode One of the following: (default = 'mirror')\n");
printf(" * mirror\n");
printf(" * repeat\n");
printf(" * clamp\n");
return 1; return 1;
} }
@ -170,14 +155,15 @@ int main(int argc, char *argv[])
nv::FloatImage fimage(&image); nv::FloatImage fimage(&image);
fimage.toLinear(0, 3, gamma); fimage.toLinear(0, 3, gamma);
nv::AutoPtr<nv::FloatImage> fresult(fimage.resize(*filter, uint(image.width() * scale), uint(image.height() * scale), wrapMode)); nv::AutoPtr<nv::FloatImage> fresult(fimage.downSample(*filter, uint(image.width() * scale), uint(image.height() * scale), nv::FloatImage::WrapMode_Mirror));
nv::AutoPtr<nv::Image> result(fresult->createImageGammaCorrect(gamma)); nv::AutoPtr<nv::Image> result(fresult->createImageGammaCorrect(gamma));
result->setFormat(nv::Image::Format_ARGB);
nv::StdOutputStream stream(output); nv::StdOutputStream stream(output);
nv::ImageIO::saveTGA(stream, result.ptr()); // @@ Add generic save function. Add support for png too. nv::ImageIO::saveTGA(stream, result.ptr()); // @@ Add generic save function. Add support for png too.
delete filter;
return 0; return 0;
} }