diff --git a/CMakeLists.txt b/CMakeLists.txt index e3d3438..a44d56a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -CMAKE_MINIMUM_REQUIRED(VERSION 2.6.0) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.0) PROJECT(NV) ENABLE_TESTING() diff --git a/cmake/FindCUDA.cmake b/cmake/FindCUDA.cmake index 6539057..4d32400 100644 --- a/cmake/FindCUDA.cmake +++ b/cmake/FindCUDA.cmake @@ -1,1263 +1,1123 @@ -# - Tools for building CUDA C files: libraries and build dependencies. -# This script locates the NVIDIA CUDA C tools. It should work on linux, windows, -# and mac and should be reasonably up to date with CUDA C releases. -# -# This script makes use of the standard find_package arguments of , -# REQUIRED and QUIET. CUDA_FOUND will report if an acceptable version of CUDA -# was found. -# -# The script will prompt the user to specify CUDA_TOOLKIT_ROOT_DIR if the prefix -# cannot be determined by the location of nvcc in the system path and REQUIRED -# is specified to find_package(). To use a different installed version of the -# toolkit set the environment variable CUDA_BIN_PATH before running cmake -# (e.g. CUDA_BIN_PATH=/usr/local/cuda1.0 instead of the default /usr/local/cuda) -# or set CUDA_TOOLKIT_ROOT_DIR after configuring. If you change the value of -# CUDA_TOOLKIT_ROOT_DIR, various components that depend on the path will be -# relocated. -# -# It might be necessary to set CUDA_TOOLKIT_ROOT_DIR manually on certain -# platforms, or to use a cuda runtime not installed in the default location. In -# newer versions of the toolkit the cuda library is included with the graphics -# driver- be sure that the driver version matches what is needed by the cuda -# runtime version. -# -# The following variables affect the behavior of the macros in the script (in -# alphebetical order). Note that any of these flags can be changed multiple -# times in the same directory before calling CUDA_ADD_EXECUTABLE, -# CUDA_ADD_LIBRARY, CUDA_COMPILE, CUDA_COMPILE_PTX or CUDA_WRAP_SRCS. -# -# CUDA_64_BIT_DEVICE_CODE (Default matches host bit size) -# -- Set to ON to compile for 64 bit device code, OFF for 32 bit device code. -# Note that making this different from the host code when generating object -# or C files from CUDA code just won't work, because size_t gets defined by -# nvcc in the generated source. If you compile to PTX and then load the -# file yourself, you can mix bit sizes between device and host. -# -# CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE (Default ON) -# -- Set to ON if you want the custom build rule to be attached to the source -# file in Visual Studio. Turn OFF if you add the same cuda file to multiple -# targets. -# -# This allows the user to build the target from the CUDA file; however, bad -# things can happen if the CUDA source file is added to multiple targets. -# When performing parallel builds it is possible for the custom build -# command to be run more than once and in parallel causing cryptic build -# errors. VS runs the rules for every source file in the target, and a -# source can have only one rule no matter how many projects it is added to. -# When the rule is run from multiple targets race conditions can occur on -# the generated file. Eventually everything will get built, but if the user -# is unaware of this behavior, there may be confusion. It would be nice if -# this script could detect the reuse of source files across multiple targets -# and turn the option off for the user, but no good solution could be found. 
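Illustration, not part of the patch: a minimal sketch of how a consuming project might point the script at a specific toolkit and preset the options described above before the find runs. The path, version number, and option values below are placeholders.

    # Illustrative values only -- adjust to the toolkit actually installed.
    # (Alternatively, use the environment-variable override noted above, e.g.
    #  CUDA_BIN_PATH=/usr/local/cuda2.3 cmake .. )
    set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda" CACHE PATH "CUDA toolkit location")
    set(CUDA_64_BIT_DEVICE_CODE OFF CACHE BOOL "Compile device code in 32 bit mode")
    set(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE OFF CACHE BOOL
        "Turned off because the same .cu file is added to more than one target")
    find_package(CUDA 2.3 REQUIRED)

Because these cache entries are set before find_package(CUDA), the option() calls inside the module keep the values chosen here.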
-# -# CUDA_BUILD_CUBIN (Default OFF) -# -- Set to ON to enable and extra compilation pass with the -cubin option in -# Device mode. The output is parsed and register, shared memory usage is -# printed during build. -# -# CUDA_BUILD_EMULATION (Default OFF for device mode) -# -- Set to ON for Emulation mode. -D_DEVICEEMU is defined for CUDA C files -# when CUDA_BUILD_EMULATION is TRUE. -# -# CUDA_GENERATED_OUTPUT_DIR (Default CMAKE_CURRENT_BINARY_DIR) -# -- Set to the path you wish to have the generated files placed. If it is -# blank output files will be placed in CMAKE_CURRENT_BINARY_DIR. -# Intermediate files will always be placed in -# CMAKE_CURRENT_BINARY_DIR/CMakeFiles. -# -# CUDA_HOST_COMPILATION_CPP (Default ON) -# -- Set to OFF for C compilation of host code. -# -# CUDA_NVCC_FLAGS -# CUDA_NVCC_FLAGS_ -# -- Additional NVCC command line arguments. NOTE: multiple arguments must be -# semi-colon delimited (e.g. --compiler-options;-Wall) -# -# CUDA_PROPAGATE_HOST_FLAGS (Default ON) -# -- Set to ON to propagate CMAKE_{C,CXX}_FLAGS and their configuration -# dependent counterparts (e.g. CMAKE_C_FLAGS_DEBUG) automatically to the -# host compiler through nvcc's -Xcompiler flag. This helps make the -# generated host code match the rest of the system better. Sometimes -# certain flags give nvcc problems, and this will help you turn the flag -# propagation off. This does not affect the flags supplied directly to nvcc -# via CUDA_NVCC_FLAGS or through the OPTION flags specified through -# CUDA_ADD_LIBRARY, CUDA_ADD_EXECUTABLE, or CUDA_WRAP_SRCS. Flags used for -# shared library compilation are not affected by this flag. -# -# CUDA_VERBOSE_BUILD (Default OFF) -# -- Set to ON to see all the commands used when building the CUDA file. When -# using a Makefile generator the value defaults to VERBOSE (run make -# VERBOSE=1 to see output), although setting CUDA_VERBOSE_BUILD to ON will -# always print the output. -# -# The script creates the following macros (in alphebetical order): -# -# CUDA_ADD_CUFFT_TO_TARGET( cuda_target ) -# -- Adds the cufft library to the target (can be any target). Handles whether -# you are in emulation mode or not. -# -# CUDA_ADD_CUBLAS_TO_TARGET( cuda_target ) -# -- Adds the cublas library to the target (can be any target). Handles -# whether you are in emulation mode or not. -# -# CUDA_ADD_EXECUTABLE( cuda_target file0 file1 ... -# [WIN32] [MACOSX_BUNDLE] [EXCLUDE_FROM_ALL] [OPTIONS ...] ) -# -- Creates an executable "cuda_target" which is made up of the files -# specified. All of the non CUDA C files are compiled using the standard -# build rules specified by CMAKE and the cuda files are compiled to object -# files using nvcc and the host compiler. In addition CUDA_INCLUDE_DIRS is -# added automatically to include_directories(). Some standard CMake target -# calls can be used on the target after calling this macro -# (e.g. set_target_properties and target_link_libraries), but setting -# properties that adjust compilation flags will not affect code compiled by -# nvcc. Such flags should be modified before calling CUDA_ADD_EXECUTABLE, -# CUDA_ADD_LIBRARY or CUDA_WRAP_SRCS. -# -# CUDA_ADD_LIBRARY( cuda_target file0 file1 ... -# [STATIC | SHARED | MODULE] [EXCLUDE_FROM_ALL] [OPTIONS ...] ) -# -- Same as CUDA_ADD_EXECUTABLE except that a library is created. -# -# CUDA_BUILD_CLEAN_TARGET() -# -- Creates a convience target that deletes all the dependency files -# generated. You should make clean after running this target to ensure the -# dependency files get regenerated. 
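A short usage sketch of the macros documented above; the target and file names are invented for illustration and do not correspond to anything in this repository.

    # Hypothetical consumer CMakeLists.txt fragment.
    find_package(CUDA REQUIRED)
    cuda_include_directories(${CMAKE_CURRENT_SOURCE_DIR}/kernels)
    cuda_add_library(demo_kernels STATIC kernels/blur.cu)
    cuda_add_executable(demo_app main.cpp driver.cu OPTIONS -DUSE_DEMO_KERNELS)
    target_link_libraries(demo_app demo_kernels)
    cuda_add_cufft_to_target(demo_app)   # links cufft or cufftemu per CUDA_BUILD_EMULATION
    cuda_build_clean_target()            # convenience target that deletes generated .depend files

The OPTIONS arguments are handed straight to nvcc, while ordinary CMake calls such as target_link_libraries continue to work on the resulting targets, as described in the header above.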
-# -# CUDA_COMPILE( generated_files file0 file1 ... [STATIC | SHARED | MODULE] -# [OPTIONS ...] ) -# -- Returns a list of generated files from the input source files to be used -# with ADD_LIBRARY or ADD_EXECUTABLE. -# -# CUDA_COMPILE_PTX( generated_files file0 file1 ... [OPTIONS ...] ) -# -- Returns a list of PTX files generated from the input source files. -# -# CUDA_INCLUDE_DIRECTORIES( path0 path1 ... ) -# -- Sets the directories that should be passed to nvcc -# (e.g. nvcc -Ipath0 -Ipath1 ... ). These paths usually contain other .cu -# files. -# -# CUDA_WRAP_SRCS ( cuda_target format generated_files file0 file1 ... -# [STATIC | SHARED | MODULE] [OPTIONS ...] ) -# -- This is where all the magic happens. CUDA_ADD_EXECUTABLE, -# CUDA_ADD_LIBRARY, CUDA_COMPILE, and CUDA_COMPILE_PTX all call this -# function under the hood. -# -# Given the list of files (file0 file1 ... fileN) this macro generates -# custom commands that generate either PTX or linkable objects (use "PTX" or -# "OBJ" for the format argument to switch). Files that don't end with .cu -# or have the HEADER_FILE_ONLY property are ignored. -# -# The arguments passed in after OPTIONS are extra command line options to -# give to nvcc. You can also specify per configuration options by -# specifying the name of the configuration followed by the options. General -# options must preceed configuration specific options. Not all -# configurations need to be specified, only the ones provided will be used. -# -# OPTIONS -DFLAG=2 "-DFLAG_OTHER=space in flag" -# DEBUG -g -# RELEASE --use_fast_math -# RELWITHDEBINFO --use_fast_math;-g -# MINSIZEREL --use_fast_math -# -# For certain configurations (namely VS generating object files with -# CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE set to ON), no generated file will -# be produced for the given cuda file. This is because when you add the -# cuda file to Visual Studio it knows that this file produces an object file -# and will link in the resulting object file automatically. -# -# This script will also generate a separate cmake script that is used at -# build time to invoke nvcc. This is for serveral reasons. -# -# 1. nvcc can return negative numbers as return values which confuses -# Visual Studio into thinking that the command succeeded. The script now -# checks the error codes and produces errors when there was a problem. -# -# 2. nvcc has been known to not delete incomplete results when it -# encounters problems. This confuses build systems into thinking the -# target was generated when in fact an unusable file exists. The script -# now deletes the output files if there was an error. -# -# 3. By putting all the options that affect the build into a file and then -# make the build rule dependent on the file, the output files will be -# regenerated when the options change. -# -# This script also looks at optional arguments STATIC, SHARED, or MODULE to -# determine when to target the object compilation for a shared library. -# BUILD_SHARED_LIBS is ignored in CUDA_WRAP_SRCS, but it is respected in -# CUDA_ADD_LIBRARY. On some systems special flags are added for building -# objects intended for shared libraries. A preprocessor macro, -# _EXPORTS is defined when a shared library compilation is -# detected. -# -# Flags passed into add_definitions with -D or /D are passed along to nvcc. -# -# The script defines the following variables: -# -# CUDA_VERSION_MAJOR -- The major version of cuda as reported by nvcc. -# CUDA_VERSION_MINOR -- The minor version. 
-# CUDA_VERSION -# CUDA_VERSION_STRING -- CUDA_VERSION_MAJOR.CUDA_VERSION_MINOR -# -# CUDA_TOOLKIT_ROOT_DIR -- Path to the CUDA Toolkit (defined if not set). -# CUDA_SDK_ROOT_DIR -- Path to the CUDA SDK. Use this to find files in the -# SDK. This script will not directly support finding -# specific libraries or headers, as that isn't -# supported by NVIDIA. If you want to change -# libraries when the path changes see the -# FindCUDA.cmake script for an example of how to clear -# these variables. There are also examples of how to -# use the CUDA_SDK_ROOT_DIR to locate headers or -# libraries, if you so choose (at your own risk). -# CUDA_INCLUDE_DIRS -- Include directory for cuda headers. Added automatically -# for CUDA_ADD_EXECUTABLE and CUDA_ADD_LIBRARY. -# CUDA_LIBRARIES -- Cuda RT library. -# CUDA_CUFFT_LIBRARIES -- Device or emulation library for the Cuda FFT -# implementation (alternative to: -# CUDA_ADD_CUFFT_TO_TARGET macro) -# CUDA_CUBLAS_LIBRARIES -- Device or emulation library for the Cuda BLAS -# implementation (alterative to: -# CUDA_ADD_CUBLAS_TO_TARGET macro). -# -# -# James Bigler, NVIDIA Corp (nvidia.com - jbigler) -# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html -# -# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. -# -# Copyright (c) 2007-2009 -# Scientific Computing and Imaging Institute, University of Utah -# -# This code is licensed under the MIT License. See the FindCUDA.cmake script -# for the text of the license. - -# The MIT License -# -# License for the specific language governing rights and limitations under -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. -# -############################################################################### - -# FindCUDA.cmake - -# We need to have at least this version to support the VERSION_LESS argument to 'if' (2.6.2) and unset (2.6.3) -cmake_policy(PUSH) -cmake_minimum_required(VERSION 2.6.3) -cmake_policy(POP) - -# This macro helps us find the location of helper files we will need the full path to -macro(CUDA_FIND_HELPER_FILE _name _extension) - set(_full_name "${_name}.${_extension}") - # CMAKE_CURRENT_LIST_FILE contains the full path to the file currently being - # processed. Using this variable, we can pull out the current path, and - # provide a way to get access to the other files we need local to here. 
- get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) - find_file(CUDA_${_name} ${_full_name} PATHS ${CMAKE_CURRENT_LIST_DIR}/FindCUDA NO_DEFAULT_PATH) - if(NOT CUDA_${_name}) - set(error_message "${_full_name} not found in CMAKE_MODULE_PATH") - if(CUDA_FIND_REQUIRED) - message(FATAL_ERROR "${error_message}") - else(CUDA_FIND_REQUIRED) - if(NOT CUDA_FIND_QUIETLY) - message(STATUS "${error_message}") - endif(NOT CUDA_FIND_QUIETLY) - endif(CUDA_FIND_REQUIRED) - endif(NOT CUDA_${_name}) - # Set this variable as internal, so the user isn't bugged with it. - set(CUDA_${_name} ${CUDA_${_name}} CACHE INTERNAL "Location of ${_full_name}" FORCE) -endmacro(CUDA_FIND_HELPER_FILE) - -##################################################################### -## CUDA_INCLUDE_NVCC_DEPENDENCIES -## - -# So we want to try and include the dependency file if it exists. If -# it doesn't exist then we need to create an empty one, so we can -# include it. - -# If it does exist, then we need to check to see if all the files it -# depends on exist. If they don't then we should clear the dependency -# file and regenerate it later. This covers the case where a header -# file has disappeared or moved. - -macro(CUDA_INCLUDE_NVCC_DEPENDENCIES dependency_file) - set(CUDA_NVCC_DEPEND) - set(CUDA_NVCC_DEPEND_REGENERATE FALSE) - - - # Include the dependency file. Create it first if it doesn't exist . The - # INCLUDE puts a dependency that will force CMake to rerun and bring in the - # new info when it changes. DO NOT REMOVE THIS (as I did and spent a few - # hours figuring out why it didn't work. - if(NOT EXISTS ${dependency_file}) - file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n") - endif() - # Always include this file to force CMake to run again next - # invocation and rebuild the dependencies. - #message("including dependency_file = ${dependency_file}") - include(${dependency_file}) - - # Now we need to verify the existence of all the included files - # here. If they aren't there we need to just blank this variable and - # make the file regenerate again. -# if(DEFINED CUDA_NVCC_DEPEND) -# message("CUDA_NVCC_DEPEND set") -# else() -# message("CUDA_NVCC_DEPEND NOT set") -# endif() - if(CUDA_NVCC_DEPEND) - #message("CUDA_NVCC_DEPEND true") - foreach(f ${CUDA_NVCC_DEPEND}) - #message("searching for ${f}") - if(NOT EXISTS ${f}) - #message("file ${f} not found") - set(CUDA_NVCC_DEPEND_REGENERATE TRUE) - endif() - endforeach(f) - else(CUDA_NVCC_DEPEND) - #message("CUDA_NVCC_DEPEND false") - # No dependencies, so regenerate the file. - set(CUDA_NVCC_DEPEND_REGENERATE TRUE) - endif(CUDA_NVCC_DEPEND) - - #message("CUDA_NVCC_DEPEND_REGENERATE = ${CUDA_NVCC_DEPEND_REGENERATE}") - # No incoming dependencies, so we need to generate them. Make the - # output depend on the dependency file itself, which should cause the - # rule to re-run. - if(CUDA_NVCC_DEPEND_REGENERATE) - file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n") - endif(CUDA_NVCC_DEPEND_REGENERATE) - -endmacro(CUDA_INCLUDE_NVCC_DEPENDENCIES) - -############################################################################### -############################################################################### -# Setup variables' defaults -############################################################################### -############################################################################### - -# Allow the user to specify if the device code is supposed to be 32 or 64 bit. 
-if(CMAKE_SIZEOF_VOID_P EQUAL 8) - set(CUDA_64_BIT_DEVICE_CODE_DEFAULT ON) -else() - set(CUDA_64_BIT_DEVICE_CODE_DEFAULT OFF) -endif() -option(CUDA_64_BIT_DEVICE_CODE "Compile device code in 64 bit mode" ${CUDA_64_BIT_DEVICE_CODE_DEFAULT}) - -# Attach the build rule to the source file in VS. This option -option(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE "Attach the build rule to the CUDA source file. Enable only when the CUDA source file is added to at most one target." ON) - -# Prints out extra information about the cuda file during compilation -option(CUDA_BUILD_CUBIN "Generate and parse .cubin files in Device mode." OFF) - -# Set whether we are using emulation or device mode. -option(CUDA_BUILD_EMULATION "Build in Emulation mode" OFF) - -# Where to put the generated output. -set(CUDA_GENERATED_OUTPUT_DIR "" CACHE PATH "Directory to put all the output files. If blank it will default to the CMAKE_CURRENT_BINARY_DIR") - -# Parse HOST_COMPILATION mode. -option(CUDA_HOST_COMPILATION_CPP "Generated file extension" ON) - -# Extra user settable flags -set(CUDA_NVCC_FLAGS "" CACHE STRING "Semi-colon delimit multiple arguments.") - -# Propagate the host flags to the host compiler via -Xcompiler -option(CUDA_PROPAGATE_HOST_FLAGS "Propage C/CXX_FLAGS and friends to the host compiler via -Xcompile" ON) - -# Specifies whether the commands used when compiling the .cu file will be printed out. -option(CUDA_VERBOSE_BUILD "Print out the commands run while compiling the CUDA source file. With the Makefile generator this defaults to VERBOSE variable specified on the command line, but can be forced on with this option." OFF) - -mark_as_advanced( - CUDA_64_BIT_DEVICE_CODE - CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE - CUDA_GENERATED_OUTPUT_DIR - CUDA_HOST_COMPILATION_CPP - CUDA_NVCC_FLAGS - CUDA_PROPAGATE_HOST_FLAGS - ) - -# Makefile and similar generators don't define CMAKE_CONFIGURATION_TYPES, so we -# need to add another entry for the CMAKE_BUILD_TYPE. We also need to add the -# standerd set of 4 build types (Debug, MinSizeRel, Release, and RelWithDebInfo) -# for completeness. We need run this loop in order to accomodate the addition -# of extra configuration types. Duplicate entries will be removed by -# REMOVE_DUPLICATES. -set(CUDA_configuration_types ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE} Debug MinSizeRel Release RelWithDebInfo) -list(REMOVE_DUPLICATES CUDA_configuration_types) -foreach(config ${CUDA_configuration_types}) - string(TOUPPER ${config} config_upper) - set(CUDA_NVCC_FLAGS_${config_upper} "" CACHE STRING "Semi-colon delimit multiple arguments.") - mark_as_advanced(CUDA_NVCC_FLAGS_${config_upper}) -endforeach() - -############################################################################### -############################################################################### -# Locate CUDA, Set Build Type, etc. -############################################################################### -############################################################################### - -# Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed, -# if they have then clear the cache variables, so that will be detected again. 
-if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}") - unset(CUDA_NVCC_EXECUTABLE CACHE) - unset(CUDA_VERSION CACHE) - unset(CUDA_TOOLKIT_INCLUDE CACHE) - unset(CUDA_CUDART_LIBRARY CACHE) - unset(CUDA_CUDA_LIBRARY CACHE) - unset(CUDA_cublas_LIBRARY CACHE) - unset(CUDA_cublasemu_LIBRARY CACHE) - unset(CUDA_cufft_LIBRARY CACHE) - unset(CUDA_cufftemu_LIBRARY CACHE) -endif() - -if(NOT "${CUDA_SDK_ROOT_DIR}" STREQUAL "${CUDA_SDK_ROOT_DIR_INTERNAL}") - # No specific variables to catch. Use this kind of code before calling - # find_package(CUDA) to clean up any variables that may depend on this path. - - # unset(MY_SPECIAL_CUDA_SDK_INCLUDE_DIR CACHE) - # unset(MY_SPECIAL_CUDA_SDK_LIBRARY CACHE) -endif() - -# Search for the cuda distribution. -if(NOT CUDA_TOOLKIT_ROOT_DIR) - - # Search in the CUDA_BIN_PATH first. - find_path(CUDA_TOOLKIT_ROOT_DIR - NAMES nvcc nvcc.exe - PATHS ENV CUDA_BIN_PATH - DOC "Toolkit location." - NO_DEFAULT_PATH - ) - # Now search default paths - find_path(CUDA_TOOLKIT_ROOT_DIR - NAMES nvcc nvcc.exe - PATHS /usr/local/bin - /usr/local/cuda/bin - DOC "Toolkit location." - ) - - if (CUDA_TOOLKIT_ROOT_DIR) - string(REGEX REPLACE "[/\\\\]?bin[64]*[/\\\\]?$" "" CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR}) - # We need to force this back into the cache. - set(CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR} CACHE PATH "Toolkit location." FORCE) - endif(CUDA_TOOLKIT_ROOT_DIR) - if (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR}) - if(CUDA_FIND_REQUIRED) - message(FATAL_ERROR "Specify CUDA_TOOLKIT_ROOT_DIR") - elseif(NOT CUDA_FIND_QUIETLY) - message("CUDA_TOOLKIT_ROOT_DIR not found or specified") - endif() - endif (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR}) -endif (NOT CUDA_TOOLKIT_ROOT_DIR) - -# CUDA_NVCC_EXECUTABLE -find_program(CUDA_NVCC_EXECUTABLE - NAMES nvcc - PATHS "${CUDA_TOOLKIT_ROOT_DIR}/bin" - "${CUDA_TOOLKIT_ROOT_DIR}/bin64" - ENV CUDA_BIN_PATH - NO_DEFAULT_PATH - ) -# Search default search paths, after we search our own set of paths. -find_program(CUDA_NVCC_EXECUTABLE nvcc) -mark_as_advanced(CUDA_NVCC_EXECUTABLE) - -if(CUDA_NVCC_EXECUTABLE AND NOT CUDA_VERSION) - # Compute the version. - execute_process (COMMAND ${CUDA_NVCC_EXECUTABLE} "--version" OUTPUT_VARIABLE NVCC_OUT) - string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR ${NVCC_OUT}) - string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR ${NVCC_OUT}) - set(CUDA_VERSION "${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR}" CACHE STRING "Version of CUDA as computed from nvcc.") - mark_as_advanced(CUDA_VERSION) -endif() - -# Always set this convenience variable -set(CUDA_VERSION_STRING "${CUDA_VERSION}") - -# Here we need to determine if the version we found is acceptable. We will -# assume that is unless CUDA_FIND_VERSION_EXACT or CUDA_FIND_VERSION is -# specified. The presence of either of these options checks the version -# string and signals if the version is acceptable or not. 
-set(_cuda_version_acceptable TRUE) -# -if(CUDA_FIND_VERSION_EXACT AND NOT CUDA_VERSION VERSION_EQUAL CUDA_FIND_VERSION) - set(_cuda_version_acceptable FALSE) -endif() -# -if(CUDA_FIND_VERSION AND CUDA_VERSION VERSION_LESS CUDA_FIND_VERSION) - set(_cuda_version_acceptable FALSE) -endif() -# -if(NOT _cuda_version_acceptable) - set(_cuda_error_message "Requested CUDA version ${CUDA_FIND_VERSION}, but found unacceptable version ${CUDA_VERSION}") - if(CUDA_FIND_REQUIRED) - message("${_cuda_error_message}") - elseif(NOT CUDA_FIND_QUIETLY) - message("${_cuda_error_message}") - endif() -endif() - -# CUDA_TOOLKIT_INCLUDE -find_path(CUDA_TOOLKIT_INCLUDE - device_functions.h # Header included in toolkit - PATHS "${CUDA_TOOLKIT_ROOT_DIR}/include" - ENV CUDA_INC_PATH - NO_DEFAULT_PATH - ) -# Search default search paths, after we search our own set of paths. -find_path(CUDA_TOOLKIT_INCLUDE device_functions.h) -mark_as_advanced(CUDA_TOOLKIT_INCLUDE) - -# Set the user list of include dir to nothing to initialize it. -set (CUDA_NVCC_INCLUDE_ARGS_USER "") -set (CUDA_INCLUDE_DIRS ${CUDA_TOOLKIT_INCLUDE}) - -macro(FIND_LIBRARY_LOCAL_FIRST _var _names _doc) - if(CMAKE_SIZEOF_VOID_P EQUAL 8) - set(_cuda_64bit_lib_dir "${CUDA_TOOLKIT_ROOT_DIR}/lib64") - endif() - find_library(${_var} - NAMES ${_names} - PATHS ${_cuda_64bit_lib_dir} - "${CUDA_TOOLKIT_ROOT_DIR}/lib" - ENV CUDA_LIB_PATH - DOC ${_doc} - NO_DEFAULT_PATH - ) - # Search default search paths, after we search our own set of paths. - find_library(${_var} NAMES ${_names} DOC ${_doc}) -endmacro() - -# CUDA_LIBRARIES -find_library_local_first(CUDA_CUDART_LIBRARY cudart "\"cudart\" library") -set(CUDA_LIBRARIES ${CUDA_CUDART_LIBRARY}) -if(APPLE) - # We need to add the path to cudart to the linker using rpath, since the - # library name for the cuda libraries is prepended with @rpath. - get_filename_component(_cuda_path_to_cudart "${CUDA_CUDART_LIBRARY}" PATH) - if(_cuda_path_to_cudart) - list(APPEND CUDA_LIBRARIES -Wl,-rpath "-Wl,${_cuda_path_to_cudart}") - endif() -endif() - -# 1.1 toolkit on linux doesn't appear to have a separate library on -# some platforms. -find_library_local_first(CUDA_CUDA_LIBRARY cuda "\"cuda\" library (older versions only).") - -# Add cuda library to the link line only if it is found. -if (CUDA_CUDA_LIBRARY) - set(CUDA_LIBRARIES ${CUDA_LIBRARIES} ${CUDA_CUDA_LIBRARY}) -endif(CUDA_CUDA_LIBRARY) - -mark_as_advanced( - CUDA_CUDA_LIBRARY - CUDA_CUDART_LIBRARY - ) - -####################### -# Look for some of the toolkit helper libraries -macro(FIND_CUDA_HELPER_LIBS _name) - find_library_local_first(CUDA_${_name}_LIBRARY ${_name} "\"${_name}\" library") - mark_as_advanced(CUDA_${_name}_LIBRARY) -endmacro(FIND_CUDA_HELPER_LIBS) - -# Search for cufft and cublas libraries. -find_cuda_helper_libs(cufftemu) -find_cuda_helper_libs(cublasemu) -find_cuda_helper_libs(cufft) -find_cuda_helper_libs(cublas) - -if (CUDA_BUILD_EMULATION) - set(CUDA_CUFFT_LIBRARIES ${CUDA_cufftemu_LIBRARY}) - set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublasemu_LIBRARY}) -else() - set(CUDA_CUFFT_LIBRARIES ${CUDA_cufft_LIBRARY}) - set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublas_LIBRARY}) -endif() - -######################## -# Look for the SDK stuff -find_path(CUDA_SDK_ROOT_DIR common/inc/cutil.h - "$ENV{NVSDKCUDA_ROOT}" - "[HKEY_LOCAL_MACHINE\\SOFTWARE\\NVIDIA Corporation\\Installed Products\\NVIDIA SDK 10\\Compute;InstallDir]" - "/Developer/GPU\ Computing/C" - ) - -# Keep the CUDA_SDK_ROOT_DIR first in order to be able to override the -# environment variables. 
-set(CUDA_SDK_SEARCH_PATH - "${CUDA_SDK_ROOT_DIR}" - "${CUDA_TOOLKIT_ROOT_DIR}/local/NVSDK0.2" - "${CUDA_TOOLKIT_ROOT_DIR}/NVSDK0.2" - "${CUDA_TOOLKIT_ROOT_DIR}/NV_CUDA_SDK" - "$ENV{HOME}/NVIDIA_CUDA_SDK" - "$ENV{HOME}/NVIDIA_CUDA_SDK_MACOSX" - "/Developer/CUDA" - ) - -# Example of how to find an include file from the CUDA_SDK_ROOT_DIR - -# find_path(CUDA_CUT_INCLUDE_DIR -# cutil.h -# PATHS ${CUDA_SDK_SEARCH_PATH} -# PATH_SUFFIXES "common/inc" -# DOC "Location of cutil.h" -# NO_DEFAULT_PATH -# ) -# # Now search system paths -# find_path(CUDA_CUT_INCLUDE_DIR cutil.h DOC "Location of cutil.h") - -# mark_as_advanced(CUDA_CUT_INCLUDE_DIR) - - -# Example of how to find a library in the CUDA_SDK_ROOT_DIR - -# # cutil library is called cutil64 for 64 bit builds on windows. We don't want -# # to get these confused, so we are setting the name based on the word size of -# # the build. - -# if(CMAKE_SIZEOF_VOID_P EQUAL 8) -# set(cuda_cutil_name cutil64) -# else(CMAKE_SIZEOF_VOID_P EQUAL 8) -# set(cuda_cutil_name cutil32) -# endif(CMAKE_SIZEOF_VOID_P EQUAL 8) - -# find_library(CUDA_CUT_LIBRARY -# NAMES cutil ${cuda_cutil_name} -# PATHS ${CUDA_SDK_SEARCH_PATH} -# # The new version of the sdk shows up in common/lib, but the old one is in lib -# PATH_SUFFIXES "common/lib" "lib" -# DOC "Location of cutil library" -# NO_DEFAULT_PATH -# ) -# # Now search system paths -# find_library(CUDA_CUT_LIBRARY NAMES cutil ${cuda_cutil_name} DOC "Location of cutil library") -# mark_as_advanced(CUDA_CUT_LIBRARY) -# set(CUDA_CUT_LIBRARIES ${CUDA_CUT_LIBRARY}) - - - -############################# -# Check for required components -set(CUDA_FOUND TRUE) - -set(CUDA_TOOLKIT_ROOT_DIR_INTERNAL "${CUDA_TOOLKIT_ROOT_DIR}" CACHE INTERNAL - "This is the value of the last time CUDA_TOOLKIT_ROOT_DIR was set successfully." FORCE) -set(CUDA_SDK_ROOT_DIR_INTERNAL "${CUDA_SDK_ROOT_DIR}" CACHE INTERNAL - "This is the value of the last time CUDA_SDK_ROOT_DIR was set successfully." FORCE) - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(CUDA DEFAULT_MSG - CUDA_TOOLKIT_ROOT_DIR - CUDA_NVCC_EXECUTABLE - CUDA_INCLUDE_DIRS - CUDA_CUDART_LIBRARY - _cuda_version_acceptable - ) - - - -############################################################################### -############################################################################### -# Macros -############################################################################### -############################################################################### - -############################################################################### -# Add include directories to pass to the nvcc command. 
-macro(CUDA_INCLUDE_DIRECTORIES) - foreach(dir ${ARGN}) - list(APPEND CUDA_NVCC_INCLUDE_ARGS_USER "-I${dir}") - endforeach(dir ${ARGN}) -endmacro(CUDA_INCLUDE_DIRECTORIES) - - -############################################################################## -cuda_find_helper_file(parse_cubin cmake) -cuda_find_helper_file(make2cmake cmake) -cuda_find_helper_file(run_nvcc cmake) - -############################################################################## -# Separate the OPTIONS out from the sources -# -macro(CUDA_GET_SOURCES_AND_OPTIONS _sources _cmake_options _options) - set( ${_sources} ) - set( ${_cmake_options} ) - set( ${_options} ) - set( _found_options FALSE ) - foreach(arg ${ARGN}) - if(arg STREQUAL "OPTIONS") - set( _found_options TRUE ) - elseif( - arg STREQUAL "WIN32" OR - arg STREQUAL "MACOSX_BUNDLE" OR - arg STREQUAL "EXCLUDE_FROM_ALL" OR - arg STREQUAL "STATIC" OR - arg STREQUAL "SHARED" OR - arg STREQUAL "MODULE" - ) - list(APPEND ${_cmake_options} "${arg}") - else() - if ( _found_options ) - list(APPEND ${_options} "${arg}") - else() - # Assume this is a file - list(APPEND ${_sources} "${arg}") - endif() - endif() - endforeach() -endmacro() - -############################################################################## -# Parse the OPTIONS from ARGN and set the variables prefixed by _option_prefix -# -macro(CUDA_PARSE_NVCC_OPTIONS _option_prefix) - set( _found_config ) - foreach(arg ${ARGN}) - # Determine if we are dealing with a perconfiguration flag - foreach(config ${CUDA_configuration_types}) - string(TOUPPER ${config} config_upper) - if (arg STREQUAL "${config_upper}") - set( _found_config _${arg}) - # Set arg to nothing to keep it from being processed further - set( arg ) - endif() - endforeach() - - if ( arg ) - list(APPEND ${_option_prefix}${_found_config} "${arg}") - endif() - endforeach() -endmacro() - -############################################################################## -# Helper to add the include directory for CUDA only once -function(CUDA_ADD_CUDA_INCLUDE_ONCE) - get_directory_property(_include_directories INCLUDE_DIRECTORIES) - set(_add TRUE) - if(_include_directories) - foreach(dir ${_include_directories}) - if("${dir}" STREQUAL "${CUDA_INCLUDE_DIRS}") - set(_add FALSE) - endif() - endforeach() - endif() - if(_add) - include_directories(${CUDA_INCLUDE_DIRS}) - endif() -endfunction() - -function(CUDA_BUILD_SHARED_LIBRARY shared_flag) - set(cmake_args ${ARGN}) - # If SHARED, MODULE, or STATIC aren't already in the list of arguments, then - # add SHARED or STATIC based on the value of BUILD_SHARED_LIBS. - list(FIND cmake_args SHARED _cuda_found_SHARED) - list(FIND cmake_args MODULE _cuda_found_MODULE) - list(FIND cmake_args STATIC _cuda_found_STATIC) - if( _cuda_found_SHARED GREATER -1 OR - _cuda_found_MODULE GREATER -1 OR - _cuda_found_STATIC GREATER -1) - set(_cuda_build_shared_libs) - else() - if (BUILD_SHARED_LIBS) - set(_cuda_build_shared_libs SHARED) - else() - set(_cuda_build_shared_libs STATIC) - endif() - endif() - set(${shared_flag} ${_cuda_build_shared_libs} PARENT_SCOPE) -endfunction() - -############################################################################## -# This helper macro populates the following variables and setups up custom -# commands and targets to invoke the nvcc compiler to generate C or PTX source -# dependant upon the format parameter. The compiler is invoked once with -M -# to generate a dependency file and a second time with -cuda or -ptx to generate -# a .cpp or .ptx file. 
-# INPUT: -# cuda_target - Target name -# format - PTX or OBJ -# FILE1 .. FILEN - The remaining arguments are the sources to be wrapped. -# OPTIONS - Extra options to NVCC -# OUTPUT: -# generated_files - List of generated files -############################################################################## -############################################################################## - -macro(CUDA_WRAP_SRCS cuda_target format generated_files) - - if( ${format} MATCHES "PTX" ) - set( compile_to_ptx ON ) - elseif( ${format} MATCHES "OBJ") - set( compile_to_ptx OFF ) - else() - message( FATAL_ERROR "Invalid format flag passed to CUDA_WRAP_SRCS: '${format}'. Use OBJ or PTX.") - endif() - - # Set up all the command line flags here, so that they can be overriden on a per target basis. - - set(nvcc_flags "") - - # Emulation if the card isn't present. - if (CUDA_BUILD_EMULATION) - # Emulation. - set(nvcc_flags ${nvcc_flags} --device-emulation -D_DEVICEEMU -g) - else(CUDA_BUILD_EMULATION) - # Device mode. No flags necessary. - endif(CUDA_BUILD_EMULATION) - - if(CUDA_HOST_COMPILATION_CPP) - set(CUDA_C_OR_CXX CXX) - else(CUDA_HOST_COMPILATION_CPP) - if(CUDA_VERSION VERSION_LESS "3.0") - set(nvcc_flags ${nvcc_flags} --host-compilation C) - else() - message(WARNING "--host-compilation flag is deprecated in CUDA version >= 3.0. Removing --host-compilation C flag" ) - endif() - set(CUDA_C_OR_CXX C) - endif(CUDA_HOST_COMPILATION_CPP) - - set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION}) - - if(CUDA_64_BIT_DEVICE_CODE) - set(nvcc_flags ${nvcc_flags} -m64) - else() - set(nvcc_flags ${nvcc_flags} -m32) - endif() - - # This needs to be passed in at this stage, because VS needs to fill out the - # value of VCInstallDir from within VS. - if(CMAKE_GENERATOR MATCHES "Visual Studio") - if( CMAKE_SIZEOF_VOID_P EQUAL 8 ) - # Add nvcc flag for 64b Windows - set(ccbin_flags -D "\"CCBIN:PATH=$(VCInstallDir)bin\"" ) - endif() - endif() - - # Figure out which configure we will use and pass that in as an argument to - # the script. We need to defer the decision until compilation time, because - # for VS projects we won't know if we are making a debug or release build - # until build time. - if(CMAKE_GENERATOR MATCHES "Visual Studio") - set( CUDA_build_configuration "$(ConfigurationName)" ) - else() - set( CUDA_build_configuration "${CMAKE_BUILD_TYPE}") - endif() - - # Initialize our list of includes with the user ones followed by the CUDA system ones. - set(CUDA_NVCC_INCLUDE_ARGS ${CUDA_NVCC_INCLUDE_ARGS_USER} "-I${CUDA_INCLUDE_DIRS}") - # Get the include directories for this directory and use them for our nvcc command. - get_directory_property(CUDA_NVCC_INCLUDE_DIRECTORIES INCLUDE_DIRECTORIES) - if(CUDA_NVCC_INCLUDE_DIRECTORIES) - foreach(dir ${CUDA_NVCC_INCLUDE_DIRECTORIES}) - list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}") - endforeach() - endif() - - # Reset these variables - set(CUDA_WRAP_OPTION_NVCC_FLAGS) - foreach(config ${CUDA_configuration_types}) - string(TOUPPER ${config} config_upper) - set(CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}) - endforeach() - - CUDA_GET_SOURCES_AND_OPTIONS(_cuda_wrap_sources _cuda_wrap_cmake_options _cuda_wrap_options ${ARGN}) - CUDA_PARSE_NVCC_OPTIONS(CUDA_WRAP_OPTION_NVCC_FLAGS ${_cuda_wrap_options}) - - # Figure out if we are building a shared library. BUILD_SHARED_LIBS is - # respected in CUDA_ADD_LIBRARY. 
- set(_cuda_build_shared_libs FALSE) - # SHARED, MODULE - list(FIND _cuda_wrap_cmake_options SHARED _cuda_found_SHARED) - list(FIND _cuda_wrap_cmake_options MODULE _cuda_found_MODULE) - if(_cuda_found_SHARED GREATER -1 OR _cuda_found_MODULE GREATER -1) - set(_cuda_build_shared_libs TRUE) - endif() - # STATIC - list(FIND _cuda_wrap_cmake_options STATIC _cuda_found_STATIC) - if(_cuda_found_STATIC GREATER -1) - set(_cuda_build_shared_libs FALSE) - endif() - - # CUDA_HOST_FLAGS - if(_cuda_build_shared_libs) - # If we are setting up code for a shared library, then we need to add extra flags for - # compiling objects for shared libraries. - set(CUDA_HOST_SHARED_FLAGS ${CMAKE_SHARED_LIBRARY_${CUDA_C_OR_CXX}_FLAGS}) - else() - set(CUDA_HOST_SHARED_FLAGS) - endif() - # Only add the CMAKE_{C,CXX}_FLAGS if we are propagating host flags. We - # always need to set the SHARED_FLAGS, though. - if(CUDA_PROPAGATE_HOST_FLAGS) - set(CUDA_HOST_FLAGS "set(CMAKE_HOST_FLAGS ${CMAKE_${CUDA_C_OR_CXX}_FLAGS} ${CUDA_HOST_SHARED_FLAGS})") - else() - set(CUDA_HOST_FLAGS "set(CMAKE_HOST_FLAGS ${CUDA_HOST_SHARED_FLAGS})") - endif() - - set(CUDA_NVCC_FLAGS_CONFIG "# Build specific configuration flags") - # Loop over all the configuration types to generate appropriate flags for run_nvcc.cmake - foreach(config ${CUDA_configuration_types}) - string(TOUPPER ${config} config_upper) - # CMAKE_FLAGS are strings and not lists. By not putting quotes around CMAKE_FLAGS - # we convert the strings to lists (like we want). - - if(CUDA_PROPAGATE_HOST_FLAGS) - # nvcc chokes on -g3, so replace it with -g - if(CMAKE_COMPILER_IS_GNUCC) - string(REPLACE "-g3" "-g" _cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}") - else() - set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}") - endif() - - set(CUDA_HOST_FLAGS "${CUDA_HOST_FLAGS}\nset(CMAKE_HOST_FLAGS_${config_upper} ${_cuda_C_FLAGS})") - endif() - - # Note that if we ever want CUDA_NVCC_FLAGS_ to be string (instead of a list - # like it is currently), we can remove the quotes around the - # ${CUDA_NVCC_FLAGS_${config_upper}} variable like the CMAKE_HOST_FLAGS_ variable. - set(CUDA_NVCC_FLAGS_CONFIG "${CUDA_NVCC_FLAGS_CONFIG}\nset(CUDA_NVCC_FLAGS_${config_upper} \"${CUDA_NVCC_FLAGS_${config_upper}};;${CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}}\")") - endforeach() - - if(compile_to_ptx) - # Don't use any of the host compilation flags for PTX targets. - set(CUDA_HOST_FLAGS) - set(CUDA_NVCC_FLAGS_CONFIG) - endif() - - # Get the list of definitions from the directory property - get_directory_property(CUDA_NVCC_DEFINITIONS COMPILE_DEFINITIONS) - if(CUDA_NVCC_DEFINITIONS) - foreach(_definition ${CUDA_NVCC_DEFINITIONS}) - list(APPEND nvcc_flags "-D${_definition}") - endforeach() - endif() - - if(_cuda_build_shared_libs) - list(APPEND nvcc_flags "-D${cuda_target}_EXPORTS") - endif() - - # Determine output directory - if(CUDA_GENERATED_OUTPUT_DIR) - set(cuda_compile_output_dir "${CUDA_GENERATED_OUTPUT_DIR}") - else() - set(cuda_compile_output_dir "${CMAKE_CURRENT_BINARY_DIR}") - endif() - - # Reset the output variable - set(_cuda_wrap_generated_files "") - - # Iterate over the macro arguments and create custom - # commands for all the .cu files. - foreach(file ${ARGN}) - # Ignore any file marked as a HEADER_FILE_ONLY - get_source_file_property(_is_header ${file} HEADER_FILE_ONLY) - if(${file} MATCHES ".*\\.cu$" AND NOT _is_header) - - # Add a custom target to generate a c or ptx file. 
###################### - - get_filename_component( basename ${file} NAME ) - if( compile_to_ptx ) - set(generated_file_path "${cuda_compile_output_dir}") - set(generated_file_basename "${cuda_target}_generated_${basename}.ptx") - set(format_flag "-ptx") - file(MAKE_DIRECTORY "${cuda_compile_output_dir}") - else( compile_to_ptx ) - set(generated_file_path "${cuda_compile_output_dir}/${CMAKE_CFG_INTDIR}") - set(generated_file_basename "${cuda_target}_generated_${basename}${generated_extension}") - set(format_flag "-c") - endif( compile_to_ptx ) - - # Set all of our file names. Make sure that whatever filenames that have - # generated_file_path in them get passed in through as a command line - # argument, so that the ${CMAKE_CFG_INTDIR} gets expanded at run time - # instead of configure time. - set(generated_file "${generated_file_path}/${generated_file_basename}") - set(cmake_dependency_file "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${generated_file_basename}.depend") - set(NVCC_generated_dependency_file "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${generated_file_basename}.NVCC-depend") - set(generated_cubin_file "${generated_file_path}/${generated_file_basename}.cubin.txt") - set(custom_target_script "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${generated_file_basename}.cmake") - - # Setup properties for obj files: - if( NOT compile_to_ptx ) - set_source_files_properties("${generated_file}" - PROPERTIES - EXTERNAL_OBJECT true # This is an object file not to be compiled, but only be linked. - ) - endif() - - # Don't add CMAKE_CURRENT_SOURCE_DIR if the path is already an absolute path. - get_filename_component(file_path "${file}" PATH) - if(IS_ABSOLUTE "${file_path}") - set(source_file "${file}") - else() - set(source_file "${CMAKE_CURRENT_SOURCE_DIR}/${file}") - endif() - - # Bring in the dependencies. Creates a variable CUDA_NVCC_DEPEND ####### - cuda_include_nvcc_dependencies(${cmake_dependency_file}) - - # Convience string for output ########################################### - if(CUDA_BUILD_EMULATION) - set(cuda_build_type "Emulation") - else(CUDA_BUILD_EMULATION) - set(cuda_build_type "Device") - endif(CUDA_BUILD_EMULATION) - - # Build the NVCC made dependency file ################################### - set(build_cubin OFF) - if ( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN ) - if ( NOT compile_to_ptx ) - set ( build_cubin ON ) - endif( NOT compile_to_ptx ) - endif( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN ) - - # Configure the build script - configure_file("${CUDA_run_nvcc}" "${custom_target_script}" @ONLY) - - # So if a user specifies the same cuda file as input more than once, you - # can have bad things happen with dependencies. Here we check an option - # to see if this is the behavior they want. 
- if(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE) - set(main_dep MAIN_DEPENDENCY ${source_file}) - else() - set(main_dep DEPENDS ${source_file}) - endif() - - if(CUDA_VERBOSE_BUILD) - set(verbose_output ON) - elseif(CMAKE_GENERATOR MATCHES "Makefiles") - set(verbose_output "$(VERBOSE)") - else() - set(verbose_output OFF) - endif() - - # Create up the comment string - file(RELATIVE_PATH generated_file_relative_path "${CMAKE_BINARY_DIR}" "${generated_file}") - if(compile_to_ptx) - set(cuda_build_comment_string "Building NVCC ptx file ${generated_file_relative_path}") - else() - set(cuda_build_comment_string "Building NVCC (${cuda_build_type}) object ${generated_file_relative_path}") - endif() - - # Build the generated file and dependency file ########################## - add_custom_command( - OUTPUT ${generated_file} - # These output files depend on the source_file and the contents of cmake_dependency_file - ${main_dep} - DEPENDS ${CUDA_NVCC_DEPEND} - DEPENDS ${custom_target_script} - # Make sure the output directory exists before trying to write to it. - COMMAND ${CMAKE_COMMAND} -E make_directory "${generated_file_path}" - COMMAND ${CMAKE_COMMAND} ARGS - -D verbose:BOOL=${verbose_output} - ${ccbin_flags} - -D build_configuration:STRING=${CUDA_build_configuration} - -D "generated_file:STRING=${generated_file}" - -D "generated_cubin_file:STRING=${generated_cubin_file}" - -P "${custom_target_script}" - COMMENT "${cuda_build_comment_string}" - ) - - # Make sure the build system knows the file is generated. - set_source_files_properties(${generated_file} PROPERTIES GENERATED TRUE) - - # Don't add the object file to the list of generated files if we are using - # visual studio and we are attaching the build rule to the cuda file. VS - # will add our object file to the linker automatically for us. - set(cuda_add_generated_file TRUE) - - if(NOT compile_to_ptx AND CMAKE_GENERATOR MATCHES "Visual Studio" AND CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE) - # Visual Studio 8 crashes when you close the solution when you don't add the object file. - if(NOT CMAKE_GENERATOR MATCHES "Visual Studio 8") - #message("Not adding ${generated_file}") - set(cuda_add_generated_file FALSE) - endif() - endif() - - if(cuda_add_generated_file) - list(APPEND _cuda_wrap_generated_files ${generated_file}) - endif() - - # Add the other files that we want cmake to clean on a cleanup ########## - list(APPEND CUDA_ADDITIONAL_CLEAN_FILES "${cmake_dependency_file}") - list(REMOVE_DUPLICATES CUDA_ADDITIONAL_CLEAN_FILES) - set(CUDA_ADDITIONAL_CLEAN_FILES ${CUDA_ADDITIONAL_CLEAN_FILES} CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.") - - endif(${file} MATCHES ".*\\.cu$" AND NOT _is_header) - endforeach(file) - - # Set the return parameter - set(${generated_files} ${_cuda_wrap_generated_files}) -endmacro(CUDA_WRAP_SRCS) - - -############################################################################### -############################################################################### -# ADD LIBRARY -############################################################################### -############################################################################### -macro(CUDA_ADD_LIBRARY cuda_target) - - CUDA_ADD_CUDA_INCLUDE_ONCE() - - # Separate the sources from the options - CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN}) - CUDA_BUILD_SHARED_LIBRARY(_cuda_shared_flag ${ARGN}) - # Create custom commands and targets for each file. 
- CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} - ${_cmake_options} ${_cuda_shared_flag} - OPTIONS ${_options} ) - - # Add the library. - add_library(${cuda_target} ${_cmake_options} - ${_generated_files} - ${_sources} - ) - - target_link_libraries(${cuda_target} - ${CUDA_LIBRARIES} - ) - - # We need to set the linker language based on what the expected generated file - # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP. - set_target_properties(${cuda_target} - PROPERTIES - LINKER_LANGUAGE ${CUDA_C_OR_CXX} - ) - -endmacro(CUDA_ADD_LIBRARY cuda_target) - - -############################################################################### -############################################################################### -# ADD EXECUTABLE -############################################################################### -############################################################################### -macro(CUDA_ADD_EXECUTABLE cuda_target) - - CUDA_ADD_CUDA_INCLUDE_ONCE() - - # Separate the sources from the options - CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN}) - # Create custom commands and targets for each file. - CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} OPTIONS ${_options} ) - - # Add the library. - add_executable(${cuda_target} ${_cmake_options} - ${_generated_files} - ${_sources} - ) - - target_link_libraries(${cuda_target} - ${CUDA_LIBRARIES} - ) - - # We need to set the linker language based on what the expected generated file - # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP. - set_target_properties(${cuda_target} - PROPERTIES - LINKER_LANGUAGE ${CUDA_C_OR_CXX} - ) - -endmacro(CUDA_ADD_EXECUTABLE cuda_target) - - -############################################################################### -############################################################################### -# CUDA COMPILE -############################################################################### -############################################################################### -macro(CUDA_COMPILE generated_files) - - # Separate the sources from the options - CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN}) - # Create custom commands and targets for each file. - CUDA_WRAP_SRCS( cuda_compile OBJ _generated_files ${_sources} ${_cmake_options} - OPTIONS ${_options} ) - - set( ${generated_files} ${_generated_files}) - -endmacro(CUDA_COMPILE) - - -############################################################################### -############################################################################### -# CUDA COMPILE PTX -############################################################################### -############################################################################### -macro(CUDA_COMPILE_PTX generated_files) - - # Separate the sources from the options - CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN}) - # Create custom commands and targets for each file. 
- CUDA_WRAP_SRCS( cuda_compile_ptx PTX _generated_files ${_sources} ${_cmake_options} - OPTIONS ${_options} ) - - set( ${generated_files} ${_generated_files}) - -endmacro(CUDA_COMPILE_PTX) - -############################################################################### -############################################################################### -# CUDA ADD CUFFT TO TARGET -############################################################################### -############################################################################### -macro(CUDA_ADD_CUFFT_TO_TARGET target) - if (CUDA_BUILD_EMULATION) - target_link_libraries(${target} ${CUDA_cufftemu_LIBRARY}) - else() - target_link_libraries(${target} ${CUDA_cufft_LIBRARY}) - endif() -endmacro() - -############################################################################### -############################################################################### -# CUDA ADD CUBLAS TO TARGET -############################################################################### -############################################################################### -macro(CUDA_ADD_CUBLAS_TO_TARGET target) - if (CUDA_BUILD_EMULATION) - target_link_libraries(${target} ${CUDA_cublasemu_LIBRARY}) - else() - target_link_libraries(${target} ${CUDA_cublas_LIBRARY}) - endif() -endmacro() - -############################################################################### -############################################################################### -# CUDA BUILD CLEAN TARGET -############################################################################### -############################################################################### -macro(CUDA_BUILD_CLEAN_TARGET) - # Call this after you add all your CUDA targets, and you will get a convience - # target. You should also make clean after running this target to get the - # build system to generate all the code again. - - set(cuda_clean_target_name clean_cuda_depends) - if (CMAKE_GENERATOR MATCHES "Visual Studio") - string(TOUPPER ${cuda_clean_target_name} cuda_clean_target_name) - endif() - add_custom_target(${cuda_clean_target_name} - COMMAND ${CMAKE_COMMAND} -E remove ${CUDA_ADDITIONAL_CLEAN_FILES}) - - # Clear out the variable, so the next time we configure it will be empty. - # This is useful so that the files won't persist in the list after targets - # have been removed. - set(CUDA_ADDITIONAL_CLEAN_FILES "" CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.") -endmacro(CUDA_BUILD_CLEAN_TARGET) +############################################################################### +# For more information, please see: http://software.sci.utah.edu +# +# The MIT License +# +# Copyright (c) 2008-2009 +# NVIDIA Corp. +# +# Copyright (c) 2007-2009 +# Scientific Computing and Imaging Institute, University of Utah +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# +# This script locates the Nvidia Compute Unified Driver Architecture (CUDA) +# tools. It should work on linux, windows, and mac and should be reasonably +# up to date with cuda releases. +# +# This script makes use of the standard find_package arguments of , +# REQUIRED and QUIET. CUDA_FOUND will report if an acceptable version of CUDA +# was found. +# +# The script will prompt the user to specify CUDA_TOOLKIT_ROOT_DIR if the +# prefix cannot be determined by the location of nvcc in the system path. To +# use a different installed version of the toolkit set the environment variable +# CUDA_BIN_PATH before running cmake (e.g. CUDA_BIN_PATH=/usr/local/cuda1.0 +# instead of the default /usr/local/cuda). +# +# Set CUDA_BUILD_EMULATION to ON for Emulation mode. Defaults to OFF (device +# mode). +# _DEVICEEMU is defined when CUDA_BUILD_EMULATION is TRUE. +# +# Set CUDA_HOST_COMPILATION_CPP to OFF for C compilation of host code. +# Default TRUE. +# +# Set CUDA_BUILD_CUBIN to "ON" or "OFF" to enable and extra compilation pass +# with the -cubin option in Device mode. The output is parsed and register, +# shared memory usage is printed during build. Default ON. +# +# Set CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE to ON if you want the custom build +# rule to be attached to the source file in Visual Studio. Defaults to ON. +# Turn OFF if you add the same cuda file to multiple targets. +# +# This allows the user to build the target from the CUDA file, however bad +# things can happen if the CUDA source file is added to multiple targets. When +# performing parallel builds it is possible for the custom build command to be +# run more than once and in parallel causing cryptic build errors. This is +# because VS runs the rules for every source file in the target, and a source +# can have only one rule no matter how many projects it is added to. Therefore, +# the rule assigned to the source file really only applies to one target you get +# clashes when it is run from multiple targets. Eventually everything will get +# built, but if the user is unaware of this behavior, there may be confusion. +# It would be nice if we could detect the reuse of source files across multiple +# targets and turn the option off for the user, but no good solution could be +# found. +# +# Set CUDA_64_BIT_DEVICE_CODE to ON to compile for 64 bit devices. Defaults to +# match host bit size. Note that making this different than the host code when +# generating C files from CUDA code just won't work, because size_t gets defined +# by nvcc in the generated source. If you compile to PTX and then load the file +# yourself, you can mix bit sizes between device and host. +# +# Set CUDA_VERBOSE_BUILD to ON to see all the commands used when building the +# CUDA file. When using a Makefile generator the value defaults to VERBOSE (run +# make VERBOSE=1 to see output). You can override this by setting +# CUDA_VERBOSE_BUILD to ON. +# +# Set CUDA_GENERATED_OUTPUT_DIR to the path you wish to have the generated files +# placed. 
If it is blank output files will be placed in +# CMAKE_CURRENT_BINARY_DIR. Intermediate files will always be placed in +# CMAKE_CURRENT_BINARY_DIR. +# +# The script creates the following macros: +# CUDA_INCLUDE_DIRECTORIES( path0 path1 ... ) +# -- Sets the directories that should be passed to nvcc +# (e.g. nvcc -Ipath0 -Ipath1 ... ). These paths usually contain other .cu +# files. +# +# CUDA_ADD_LIBRARY( cuda_target file0 file1 ... [OPTIONS ...] ) +# -- Creates a shared library "cuda_target" which contains all of the source +# (*.c, *.cc, etc.) specified and all of the nvcc'ed .cu files specified. +# All of the specified source files and generated .cpp files are compiled +# using the standard CMake compiler, so the normal INCLUDE_DIRECTORIES, +# LINK_DIRECTORIES, and TARGET_LINK_LIBRARIES can be used to affect their +# build and link. In addition CUDA_INCLUDE_DIRS is added automatically added +# to include_directories(). +# +# CUDA_ADD_EXECUTABLE( cuda_target file0 file1 ... [OPTIONS ...] ) +# -- Same as CUDA_ADD_LIBRARY except that an exectuable is created. +# +# CUDA_COMPILE( generated_files file0 file1 ... [OPTIONS ...] ) +# -- Returns a list of generated files from the input source files to be used +# with ADD_LIBRARY or ADD_EXECUTABLE. +# +# CUDA_COMPILE_PTX( generated_files file0 file1 ... [OPTIONS ...] ) +# -- Returns a list of PTX files generated from the input source files. +# +# CUDA_WRAP_SRCS ( cuda_target format generated_files file0 file1 ... +# [OPTIONS ...] ) +# -- This is where all the magic happens. CUDA_ADD_EXECUTABLE, +# CUDA_ADD_LIBRARY, CUDA_COMPILE, and CUDA_COMPILE_PTX all call this function +# under the hood. +# +# Given the list of files (file0 file1 ... fileN) this macro generates custom +# commands that generate either PTX or linkable objects (use "PTX" or "OBJ" +# for the format argument to switch. Files that don't end with .cu or have +# the HEADER_FILE_ONLY property are ignored. +# +# The arguments passed in after OPTIONS are extra command line options to +# give to NVCC. You can also specify per configuration options by specifying +# the name of the configuration followed by the options. General options +# must preceed configuration specific options. Not all configurations need +# to be specified, only the ones provided will be used. +# +# OPTIONS -DFLAG=2 "-DFLAG_OTHER=space in flag" +# DEBUG -g +# RELEASE --use_fast_math +# RELWITHDEBINFO --use_fast_math;-g +# MINSIZEREL --use_fast_math +# +# For certain configurations (namely VS generating object files with +# CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE set to ON), no generated file will +# be produced for the given cuda file. This is because when you add the cuda +# file to Visual Studio it knows that this file produces and will link in the +# resulting object file automatically. +# +# This script will also generate a separate cmake script that is used at +# build time to invoke nvcc. This is for serveral reasons. +# +# 1. nvcc can return negative numbers as return values which confuses +# Visual Studio into thinking that the command succeeded. The script now +# checks the error codes and produces errors when there was a problem +# +# 2. nvcc has been known to not delete intermediate results when it +# encounters problems. The build rules then don't complete, because there +# exists a partially written output file. The script now deletes the +# output files if there was an error. +# +# 3. 
+#
+# CUDA_ADD_CUFFT_TO_TARGET( cuda_target )
+# -- Adds the cufft library to the target. Handles whether you are in emulation
+#    mode or not.
+#
+# CUDA_ADD_CUBLAS_TO_TARGET( cuda_target )
+# -- Adds the cublas library to the target. Handles whether you are in
+#    emulation mode or not.
+#
+# CUDA_BUILD_CLEAN_TARGET()
+# -- Creates a convenience target that deletes all the dependency files
+#    generated. You should make clean after running this target to ensure the
+#    dependency files get regenerated.
+#
+# The script defines the following variables:
+#
+# ( Note: the CUDA_ADD_* macros set up cuda/cut library dependencies
+#   automatically. These variables are only needed if a cuda API call must be
+#   made from code in an outside library or executable. )
+#
+# CUDA_VERSION_MAJOR    -- The major version of cuda as reported by nvcc.
+# CUDA_VERSION_MINOR    -- The minor version.
+# CUDA_VERSION
+# CUDA_VERSION_STRING   -- CUDA_VERSION_MAJOR.CUDA_VERSION_MINOR
+#
+# CUDA_INCLUDE_DIRS     -- Include directory for cuda headers. Added
+#                          automatically for CUDA_ADD_EXECUTABLE and
+#                          CUDA_ADD_LIBRARY.
+# CUDA_LIBRARIES        -- Cuda RT library.
+# CUDA_CUT_INCLUDE_DIR  -- Include directory for cuda SDK headers (cutil.h).
+# CUDA_CUT_LIBRARIES    -- SDK libraries.
+# CUDA_NVCC_FLAGS       -- Additional NVCC command line arguments. NOTE:
+#                          multiple arguments must be semi-colon delimited,
+#                          e.g. --compiler-options;-Wall
+# CUDA_NVCC_FLAGS_<CONFIG>
+#                       -- Configuration-specific flags for NVCC.
+# CUDA_CUFFT_LIBRARIES  -- Device or emulation library for the Cuda FFT
+#                          implementation (alternative to the
+#                          CUDA_ADD_CUFFT_TO_TARGET macro).
+# CUDA_CUBLAS_LIBRARIES -- Device or emulation library for the Cuda BLAS
+#                          implementation (alternative to the
+#                          CUDA_ADD_CUBLAS_TO_TARGET macro).
+#
+#
+# The script now builds object files instead of generating C files. In order to
+# facilitate this, the script now makes use of the CMAKE_{C,CXX}_FLAGS along
+# with their configuration-dependent counterparts (e.g. CMAKE_C_FLAGS_DEBUG).
+# These flags are passed through nvcc to the native compiler. In addition, on
+# some systems special flags are added for building objects intended for shared
+# libraries. FindCUDA makes use of the CMake variable BUILD_SHARED_LIBS to
+# determine if these flags should be used. Please set this variable according
+# to how the objects are to be used before calling CUDA_ADD_LIBRARY. A
+# preprocessor macro, <cuda_target>_EXPORTS, is defined when BUILD_SHARED_LIBS
+# is defined. In addition, flags passed into add_definitions with -D or /D are
+# passed along to nvcc.
+#
+# Files with the HEADER_FILE_ONLY property set will not be compiled.
+#
+# It might be necessary to set CUDA_TOOLKIT_ROOT_DIR manually on certain
+# platforms, or to use a cuda runtime not installed in the default location. In
+# newer versions of the toolkit the cuda library is included with the graphics
+# driver; be sure that the driver version matches what is needed by the cuda
+# runtime version.
+#
+# -- Abe Stephens SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
+# -- James Bigler NVIDIA Corp
+###############################################################################
+
+# FindCUDA.cmake
+
+# We need to have at least this version to support the VERSION_LESS argument to 'if'.
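+# The cmake_policy(PUSH)/cmake_policy(POP) pair below keeps the policy changes
+# made by cmake_minimum_required() from leaking into the project that includes
+# this module.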
+cmake_policy(PUSH) +cmake_minimum_required(VERSION 2.6.2) +cmake_policy(POP) + +# This macro helps us find the location of helper files we will need the full path to +macro(CUDA_FIND_HELPER_FILE _name _extension) + set(_full_name "${_name}.${_extension}") + # CMAKE_CURRENT_LIST_FILE contains the full path to the file currently being + # processed. Using this variable, we can pull out the current path, and + # provide a way to get access to the other files we need local to here. + get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) + find_file(CUDA_${_name} ${_full_name} PATHS ${CMAKE_CURRENT_LIST_DIR} NO_DEFAULT_PATH) + if(NOT CUDA_${_name}) + set(error_message "${_full_name} not found in CMAKE_MODULE_PATH") + if(CUDA_FIND_REQUIRED) + message(FATAL_ERROR "${error_message}") + else(CUDA_FIND_REQUIRED) + if(NOT CUDA_FIND_QUIETLY) + message(STATUS "${error_message}") + endif(NOT CUDA_FIND_QUIETLY) + endif(CUDA_FIND_REQUIRED) + endif(NOT CUDA_${_name}) + # Set this variable as internal, so the user isn't bugged with it. + set(CUDA_${_name} ${CUDA_${_name}} CACHE INTERNAL "Location of ${_full_name}" FORCE) +endmacro(CUDA_FIND_HELPER_FILE) + +##################################################################### +## CUDA_INCLUDE_NVCC_DEPENDENCIES +## + +# So we want to try and include the dependency file if it exists. If +# it doesn't exist then we need to create an empty one, so we can +# include it. + +# If it does exist, then we need to check to see if all the files it +# depends on exist. If they don't then we should clear the dependency +# file and regenerate it later. This covers the case where a header +# file has disappeared or moved. + +macro(CUDA_INCLUDE_NVCC_DEPENDENCIES dependency_file) + set(CUDA_NVCC_DEPEND) + set(CUDA_NVCC_DEPEND_REGENERATE FALSE) + + + # Include the dependency file. Create it first if it doesn't exist . The + # INCLUDE puts a dependency that will force CMake to rerun and bring in the + # new info when it changes. DO NOT REMOVE THIS (as I did and spent a few + # hours figuring out why it didn't work. + if(NOT EXISTS ${dependency_file}) + file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n") + endif() + # Always include this file to force CMake to run again next + # invocation and rebuild the dependencies. + #message("including dependency_file = ${dependency_file}") + include(${dependency_file}) + + # Now we need to verify the existence of all the included files + # here. If they aren't there we need to just blank this variable and + # make the file regenerate again. +# if(DEFINED CUDA_NVCC_DEPEND) +# message("CUDA_NVCC_DEPEND set") +# else() +# message("CUDA_NVCC_DEPEND NOT set") +# endif() + if(CUDA_NVCC_DEPEND) + #message("CUDA_NVCC_DEPEND true") + foreach(f ${CUDA_NVCC_DEPEND}) + #message("searching for ${f}") + if(NOT EXISTS ${f}) + #message("file ${f} not found") + set(CUDA_NVCC_DEPEND_REGENERATE TRUE) + endif() + endforeach(f) + else(CUDA_NVCC_DEPEND) + #message("CUDA_NVCC_DEPEND false") + # No dependencies, so regenerate the file. + set(CUDA_NVCC_DEPEND_REGENERATE TRUE) + endif(CUDA_NVCC_DEPEND) + + #message("CUDA_NVCC_DEPEND_REGENERATE = ${CUDA_NVCC_DEPEND_REGENERATE}") + # No incoming dependencies, so we need to generate them. Make the + # output depend on the dependency file itself, which should cause the + # rule to re-run. + if(CUDA_NVCC_DEPEND_REGENERATE) + file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. 
Do not edit.\n")
+  endif(CUDA_NVCC_DEPEND_REGENERATE)
+
+endmacro(CUDA_INCLUDE_NVCC_DEPENDENCIES)
+
+###############################################################################
+###############################################################################
+# Setup default variables
+###############################################################################
+###############################################################################
+
+# Set whether we are using emulation or device mode.
+option(CUDA_BUILD_EMULATION "Build in Emulation mode" OFF)
+# Parse HOST_COMPILATION mode.
+option(CUDA_HOST_COMPILATION_CPP "Generated file extension" ON)
+# Allow the user to specify if the device code is supposed to be 32 or 64 bit.
+if(CMAKE_SIZEOF_VOID_P EQUAL 8)
+  set(CUDA_64_BIT_DEVICE_CODE_DEFAULT ON)
+else()
+  set(CUDA_64_BIT_DEVICE_CODE_DEFAULT OFF)
+endif()
+option(CUDA_64_BIT_DEVICE_CODE "Compile device code in 64 bit mode" ${CUDA_64_BIT_DEVICE_CODE_DEFAULT})
+# Prints out extra information about the cuda file during compilation
+option(CUDA_BUILD_CUBIN "Generate and parse .cubin files in Device mode." ON)
+# Extra user settable flags
+set(CUDA_NVCC_FLAGS "" CACHE STRING "Semi-colon delimit multiple arguments.")
+# Attach the build rule to the source file in VS. Turn this option OFF when the
+# same CUDA source file is added to more than one target.
+option(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE "Attach the build rule to the CUDA source file. Enable only when the CUDA source file is added to at most one target." ON)
+# Specifies whether the commands used when compiling the .cu file will be printed out.
+option(CUDA_VERBOSE_BUILD "Print out the commands run while compiling the CUDA source file. With the Makefile generator this defaults to VERBOSE variable specified on the command line, but can be forced on with this option." OFF)
+# Where to put the generated output.
+set(CUDA_GENERATED_OUTPUT_DIR "" CACHE PATH "Directory to put all the output files. If blank it will default to the CMAKE_CURRENT_BINARY_DIR")
+mark_as_advanced(
+  CUDA_HOST_COMPILATION_CPP
+  CUDA_64_BIT_DEVICE_CODE
+  CUDA_NVCC_FLAGS
+  CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE
+  CUDA_GENERATED_OUTPUT_DIR
+  )
+
+# Makefile and similar generators don't define CMAKE_CONFIGURATION_TYPES, so we
+# need to add another entry for the CMAKE_BUILD_TYPE. We also need to add the
+# standard set of 4 build types (Debug, MinSizeRel, Release, and RelWithDebInfo)
+# for completeness. We need to run this loop in order to accommodate the
+# addition of extra configuration types. Duplicate entries will be removed by
+# REMOVE_DUPLICATES.
+set(CUDA_configuration_types ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE} Debug MinSizeRel Release RelWithDebInfo)
+list(REMOVE_DUPLICATES CUDA_configuration_types)
+foreach(config ${CUDA_configuration_types})
+  string(TOUPPER ${config} config_upper)
+  set(CUDA_NVCC_FLAGS_${config_upper} "" CACHE STRING "Semi-colon delimit multiple arguments.")
+  mark_as_advanced(CUDA_NVCC_FLAGS_${config_upper})
+endforeach()
+
+###############################################################################
+###############################################################################
+# Locate CUDA, Set Build Type, etc.
+###############################################################################
+###############################################################################
+
+# Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed;
+# if they have, then clear the cache variables so that they will be detected
+# again.
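+# (For example, re-running cmake with -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-2.3
+# discards the previously cached nvcc, include, and library locations so that
+# they are searched for again under the new prefix. The path here is
+# illustrative only.)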
+if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}") + unset(CUDA_NVCC_EXECUTABLE CACHE) + unset(CUDA_VERSION CACHE) + unset(CUDA_TOOLKIT_INCLUDE CACHE) + unset(CUDA_CUDART_LIBRARY CACHE) + unset(CUDA_CUDA_LIBRARY CACHE) + unset(CUDA_cublas_LIBRARY CACHE) + unset(CUDA_cublasemu_LIBRARY CACHE) + unset(CUDA_cufft_LIBRARY CACHE) + unset(CUDA_cufftemu_LIBRARY CACHE) +endif() + +if(NOT "${CUDA_SDK_ROOT_DIR}" STREQUAL "${CUDA_SDK_ROOT_DIR_INTERNAL}") + unset(CUDA_CUT_INCLUDE_DIR CACHE) + unset(CUDA_CUT_LIBRARY CACHE) +endif() + +# Search for the cuda distribution. +if(NOT CUDA_TOOLKIT_ROOT_DIR) + + # Search in the CUDA_BIN_PATH first. + find_path(CUDA_TOOLKIT_ROOT_DIR + NAMES nvcc nvcc.exe + PATHS ENV CUDA_BIN_PATH + DOC "Toolkit location." + NO_DEFAULT_PATH + ) + # Now search default paths + find_path(CUDA_TOOLKIT_ROOT_DIR + NAMES nvcc nvcc.exe + PATHS /usr/local/bin + /usr/local/cuda/bin + DOC "Toolkit location." + ) + + if (CUDA_TOOLKIT_ROOT_DIR) + string(REGEX REPLACE "[/\\\\]?bin[/\\\\]?$" "" CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR}) + # We need to force this back into the cache. + set(CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR} CACHE PATH "Toolkit location." FORCE) + endif(CUDA_TOOLKIT_ROOT_DIR) + if (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR}) + if(CUDA_FIND_REQUIRED) + message(FATAL_ERROR "Specify CUDA_TOOLKIT_ROOT_DIR") + elseif(NOT CUDA_FIND_QUIETLY) + message("CUDA_TOOLKIT_ROOT_DIR not found or specified") + endif() + endif (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR}) +endif (NOT CUDA_TOOLKIT_ROOT_DIR) + +# CUDA_NVCC_EXECUTABLE +find_program(CUDA_NVCC_EXECUTABLE + NAMES nvcc + PATHS "${CUDA_TOOLKIT_ROOT_DIR}/bin" + ENV CUDA_BIN_PATH + NO_DEFAULT_PATH + ) +# Search default search paths, after we search our own set of paths. +find_program(CUDA_NVCC_EXECUTABLE nvcc) +mark_as_advanced(CUDA_NVCC_EXECUTABLE) + +if(CUDA_NVCC_EXECUTABLE AND NOT CUDA_VERSION) + # Compute the version. + exec_program(${CUDA_NVCC_EXECUTABLE} ARGS "--version" OUTPUT_VARIABLE NVCC_OUT) + string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR ${NVCC_OUT}) + string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR ${NVCC_OUT}) + set(CUDA_VERSION "${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR}" CACHE STRING "Version of CUDA as computed from nvcc.") + mark_as_advanced(CUDA_VERSION) +endif() + +# Always set this convenience variable +set(CUDA_VERSION_STRING "${CUDA_VERSION}") + +# Here we need to determine if the version we found is acceptable. We will +# assume that is unless CUDA_FIND_VERSION_EXACT or CUDA_FIND_VERSION is +# specified. The presence of either of these options checks the version +# string and signals if the version is acceptable or not. 
+set(_cuda_version_acceptable TRUE) +# +if(CUDA_FIND_VERSION_EXACT AND NOT CUDA_VERSION VERSION_EQUAL CUDA_FIND_VERSION) + set(_cuda_version_acceptable FALSE) +endif() +# +if(CUDA_FIND_VERSION AND CUDA_VERSION VERSION_LESS CUDA_FIND_VERSION) + set(_cuda_version_acceptable FALSE) +endif() +# +if(NOT _cuda_version_acceptable) + set(_cuda_error_message "Requested CUDA version ${CUDA_FIND_VERSION}, but found unacceptable version ${CUDA_VERSION}") + if(CUDA_FIND_REQUIRED) + message("${_cuda_error_message}") + elseif(NOT CUDA_FIND_QUIETLY) + message("${_cuda_error_message}") + endif() +endif() + +# CUDA_TOOLKIT_INCLUDE +find_path(CUDA_TOOLKIT_INCLUDE + device_functions.h # Header included in toolkit + PATHS "${CUDA_TOOLKIT_ROOT_DIR}/include" + ENV CUDA_INC_PATH + NO_DEFAULT_PATH + ) +# Search default search paths, after we search our own set of paths. +find_path(CUDA_TOOLKIT_INCLUDE device_functions.h) +mark_as_advanced(CUDA_TOOLKIT_INCLUDE) + +# Set the user list of include dir to nothing to initialize it. +set (CUDA_NVCC_INCLUDE_ARGS_USER "") +set (CUDA_INCLUDE_DIRS ${CUDA_TOOLKIT_INCLUDE}) + +macro(FIND_LIBRARY_LOCAL_FIRST _var _names _doc) + find_library(${_var} + NAMES ${_names} + PATHS "${CUDA_TOOLKIT_ROOT_DIR}/lib" + ENV CUDA_LIB_PATH + DOC ${_doc} + NO_DEFAULT_PATH + ) + # Search default search paths, after we search our own set of paths. + find_library(${_var} NAMES ${_names} DOC ${_doc}) +endmacro() + +# CUDA_LIBRARIES +find_library_local_first(CUDA_CUDART_LIBRARY cudart "\"cudart\" library") +set(CUDA_LIBRARIES ${CUDA_CUDART_LIBRARY}) + +# 1.1 toolkit on linux doesn't appear to have a separate library on +# some platforms. +find_library_local_first(CUDA_CUDA_LIBRARY cuda "\"cuda\" library (older versions only).") + +# Add cuda library to the link line only if it is found. +if (CUDA_CUDA_LIBRARY) + set(CUDA_LIBRARIES ${CUDA_LIBRARIES} ${CUDA_CUDA_LIBRARY}) +endif(CUDA_CUDA_LIBRARY) + +mark_as_advanced( + CUDA_CUDA_LIBRARY + CUDA_CUDART_LIBRARY + ) + +####################### +# Look for some of the toolkit helper libraries +macro(FIND_CUDA_HELPER_LIBS _name) + find_library_local_first(CUDA_${_name}_LIBRARY ${_name} "\"${_name}\" library") + mark_as_advanced(CUDA_${_name}_LIBRARY) +endmacro(FIND_CUDA_HELPER_LIBS) + +# Search for cufft and cublas libraries. +find_cuda_helper_libs(cufftemu) +find_cuda_helper_libs(cublasemu) +find_cuda_helper_libs(cufft) +find_cuda_helper_libs(cublas) + +if (CUDA_BUILD_EMULATION) + set(CUDA_CUFFT_LIBRARIES ${CUDA_cufftemu_LIBRARY}) + set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublasemu_LIBRARY}) +else() + set(CUDA_CUFFT_LIBRARIES ${CUDA_cufft_LIBRARY}) + set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublas_LIBRARY}) +endif() + +######################## +# Look for the SDK stuff +find_path(CUDA_SDK_ROOT_DIR common/inc/cutil.h + "$ENV{NVSDKCUDA_ROOT}" + "[HKEY_LOCAL_MACHINE\\SOFTWARE\\NVIDIA Corporation\\Installed Products\\NVIDIA SDK 10\\Compute;InstallDir]" + ) + +# Keep the CUDA_SDK_ROOT_DIR first in order to be able to override the +# environment variables. 
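+# (For example, configuring with -DCUDA_SDK_ROOT_DIR=/opt/NVIDIA_CUDA_SDK makes
+# that location take precedence over the other search locations listed below;
+# the path is illustrative.)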
+set(CUDA_SDK_SEARCH_PATH + "${CUDA_SDK_ROOT_DIR}" + "${CUDA_TOOLKIT_ROOT_DIR}/local/NVSDK0.2" + "${CUDA_TOOLKIT_ROOT_DIR}/NVSDK0.2" + "${CUDA_TOOLKIT_ROOT_DIR}/NV_CUDA_SDK" + "$ENV{HOME}/NVIDIA_CUDA_SDK" + "$ENV{HOME}/NVIDIA_CUDA_SDK_MACOSX" + "/Developer/CUDA" + ) +# CUDA_CUT_INCLUDE_DIR +find_path(CUDA_CUT_INCLUDE_DIR + cutil.h + PATHS ${CUDA_SDK_SEARCH_PATH} + PATH_SUFFIXES "common/inc" + DOC "Location of cutil.h" + NO_DEFAULT_PATH + ) +# Now search system paths +find_path(CUDA_CUT_INCLUDE_DIR cutil.h DOC "Location of cutil.h") + +mark_as_advanced(CUDA_CUT_INCLUDE_DIR) + + +# CUDA_CUT_LIBRARIES + +# cutil library is called cutil64 for 64 bit builds on windows. We don't want +# to get these confused, so we are setting the name based on the word size of +# the build. +if(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(cuda_cutil_name cutil64) +else(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(cuda_cutil_name cutil32) +endif(CMAKE_SIZEOF_VOID_P EQUAL 8) + +find_library(CUDA_CUT_LIBRARY + NAMES cutil ${cuda_cutil_name} + PATHS ${CUDA_SDK_SEARCH_PATH} + # The new version of the sdk shows up in common/lib, but the old one is in lib + PATH_SUFFIXES "common/lib" "lib" + DOC "Location of cutil library" + NO_DEFAULT_PATH + ) +# Now search system paths +find_library(CUDA_CUT_LIBRARY NAMES cutil ${cuda_cutil_name} DOC "Location of cutil library") +mark_as_advanced(CUDA_CUT_LIBRARY) +set(CUDA_CUT_LIBRARIES ${CUDA_CUT_LIBRARY}) + + + +############################# +# Check for required components +set(CUDA_FOUND TRUE) + +set(CUDA_TOOLKIT_ROOT_DIR_INTERNAL "${CUDA_TOOLKIT_ROOT_DIR}" CACHE INTERNAL + "This is the value of the last time CUDA_TOOLKIT_ROOT_DIR was set successfully." FORCE) +set(CUDA_SDK_ROOT_DIR_INTERNAL "${CUDA_SDK_ROOT_DIR}" CACHE INTERNAL + "This is the value of the last time CUDA_SDK_ROOT_DIR was set successfully." FORCE) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(CUDA DEFAULT_MSG + CUDA_TOOLKIT_ROOT_DIR + CUDA_NVCC_EXECUTABLE + CUDA_INCLUDE_DIRS + CUDA_CUDART_LIBRARY + _cuda_version_acceptable + ) + + + +############################################################################### +############################################################################### +# Macros +############################################################################### +############################################################################### + +############################################################################### +# Add include directories to pass to the nvcc command. 
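+# For example, cuda_include_directories(${CMAKE_CURRENT_SOURCE_DIR}/kernels)
+# adds "-I<that directory>" to the nvcc commands generated by the CUDA_* macros
+# called afterwards in this directory (the directory name is illustrative).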
+macro(CUDA_INCLUDE_DIRECTORIES) + foreach(dir ${ARGN}) + list(APPEND CUDA_NVCC_INCLUDE_ARGS_USER "-I${dir}") + endforeach(dir ${ARGN}) +endmacro(CUDA_INCLUDE_DIRECTORIES) + + +############################################################################## +cuda_find_helper_file(parse_cubin cmake) +cuda_find_helper_file(make2cmake cmake) +cuda_find_helper_file(run_nvcc cmake) + +############################################################################## +# Separate the OPTIONS out from the sources +# +macro(CUDA_GET_SOURCES_AND_OPTIONS _sources _options) + set( ${_sources} ) + set( ${_options} ) + set( _found_options FALSE ) + foreach(arg ${ARGN}) + if(arg STREQUAL "OPTIONS") + set( _found_options TRUE ) + else() + if ( _found_options ) + list(APPEND ${_options} "${arg}") + else() + # Assume this is a file + list(APPEND ${_sources} "${arg}") + endif() + endif() + endforeach() +endmacro() + +############################################################################## +# Parse the OPTIONS from ARGN and set the variables prefixed by _option_prefix +# +macro(CUDA_PARSE_NVCC_OPTIONS _option_prefix) + set( _found_config ) + foreach(arg ${ARGN}) + # Determine if we are dealing with a perconfiguration flag + foreach(config ${CUDA_configuration_types}) + string(TOUPPER ${config} config_upper) + if (arg STREQUAL "${config_upper}") + set( _found_config _${arg}) + # Set arg to nothing to keep it from being processed further + set( arg ) + endif() + endforeach() + + if ( arg ) + list(APPEND ${_option_prefix}${_found_config} "${arg}") + endif() + endforeach() +endmacro() + +############################################################################## +# Helper to add the include directory for CUDA only once +function(CUDA_ADD_CUDA_INCLUDE_ONCE) + get_directory_property(_include_directories INCLUDE_DIRECTORIES) + set(_add TRUE) + if(_include_directories) + foreach(dir ${_include_directories}) + if("${dir}" STREQUAL "${CUDA_INCLUDE_DIRS}") + set(_add FALSE) + endif() + endforeach() + endif() + if(_add) + include_directories(${CUDA_INCLUDE_DIRS}) + endif() +endfunction() + +############################################################################## +# This helper macro populates the following variables and setups up custom +# commands and targets to invoke the nvcc compiler to generate C or PTX source +# dependant upon the format parameter. The compiler is invoked once with -M +# to generate a dependency file and a second time with -cuda or -ptx to generate +# a .cpp or .ptx file. +# INPUT: +# cuda_target - Target name +# format - PTX or OBJ +# FILE1 .. FILEN - The remaining arguments are the sources to be wrapped. +# OPTIONS - Extra options to NVCC +# OUTPUT: +# generated_files - List of generated files +############################################################################## +############################################################################## + +macro(CUDA_WRAP_SRCS cuda_target format generated_files) + + if( ${format} MATCHES "PTX" ) + set( compile_to_ptx ON ) + elseif( ${format} MATCHES "OBJ") + set( compile_to_ptx OFF ) + else() + message( FATAL_ERROR "Invalid format flag passed to CUDA_WRAP_SRCS: '${format}'. Use OBJ or PTX.") + endif() + + # Set up all the command line flags here, so that they can be overriden on a per target basis. + + set(nvcc_flags "") + + # Emulation if the card isn't present. + if (CUDA_BUILD_EMULATION) + # Emulation. + set(nvcc_flags ${nvcc_flags} --device-emulation -D_DEVICEEMU -g) + else(CUDA_BUILD_EMULATION) + # Device mode. No flags necessary. 
+ endif(CUDA_BUILD_EMULATION) + + if(CUDA_HOST_COMPILATION_CPP) + set(CUDA_C_OR_CXX CXX) + else(CUDA_HOST_COMPILATION_CPP) + set(nvcc_flags ${nvcc_flags} --host-compilation C) + set(CUDA_C_OR_CXX C) + endif(CUDA_HOST_COMPILATION_CPP) + + set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION}) + + if(CUDA_64_BIT_DEVICE_CODE) + set(nvcc_flags ${nvcc_flags} -m64) + else() + set(nvcc_flags ${nvcc_flags} -m32) + endif() + + # This needs to be passed in at this stage, because VS needs to fill out the + # value of VCInstallDir from within VS. + if(CMAKE_GENERATOR MATCHES "Visual Studio") + if( CMAKE_SIZEOF_VOID_P EQUAL 8 ) + # Add nvcc flag for 64b Windows + set(ccbin_flags -D "\"CCBIN:PATH=$(VCInstallDir)bin\"" ) + endif() + endif() + + # Figure out which configure we will use and pass that in as an argument to + # the script. We need to defer the decision until compilation time, because + # for VS projects we won't know if we are making a debug or release build + # until build time. + if(CMAKE_GENERATOR MATCHES "Visual Studio") + set( CUDA_build_configuration "$(ConfigurationName)" ) + else() + set( CUDA_build_configuration "${CMAKE_BUILD_TYPE}") + endif() + + # Initialize our list of includes with the user ones followed by the CUDA system ones. + set(CUDA_NVCC_INCLUDE_ARGS ${CUDA_NVCC_INCLUDE_ARGS_USER} "-I${CUDA_INCLUDE_DIRS}") + # Get the include directories for this directory and use them for our nvcc command. + get_directory_property(CUDA_NVCC_INCLUDE_DIRECTORIES INCLUDE_DIRECTORIES) + if(CUDA_NVCC_INCLUDE_DIRECTORIES) + foreach(dir ${CUDA_NVCC_INCLUDE_DIRECTORIES}) + list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}") + endforeach() + endif() + + # Reset these variables + set(CUDA_WRAP_OPTION_NVCC_FLAGS) + foreach(config ${CUDA_configuration_types}) + string(TOUPPER ${config} config_upper) + set(CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}) + endforeach() + + CUDA_GET_SOURCES_AND_OPTIONS(_cuda_wrap_sources _cuda_wrap_options ${ARGN}) + CUDA_PARSE_NVCC_OPTIONS(CUDA_WRAP_OPTION_NVCC_FLAGS ${_cuda_wrap_options}) + + # CUDA_HOST_FLAGS + if(BUILD_SHARED_LIBS) + # If BUILD_SHARED_LIBS is true, then we need to add extra flags for + # compiling objects for shared libraries. + set(CUDA_HOST_SHARED_FLAGS ${CMAKE_SHARED_LIBRARY_${CUDA_C_OR_CXX}_FLAGS}) + endif() + set(CUDA_HOST_FLAGS "set(CMAKE_HOST_FLAGS ${CMAKE_${CUDA_C_OR_CXX}_FLAGS} ${CUDA_HOST_SHARED_FLAGS})") + set(CUDA_NVCC_FLAGS_CONFIG "# Build specific configuration flags") + # Loop over all the configuration types to generate appropriate flags for run_nvcc.cmake + foreach(config ${CUDA_configuration_types}) + string(TOUPPER ${config} config_upper) + # CMAKE_FLAGS are strings and not lists. By not putting quotes around CMAKE_FLAGS + # we convert the strings to lists (like we want). + + # nvcc chokes on -g3, so replace it with -g + if(CMAKE_COMPILER_IS_GNUCC) + string(REPLACE "-g3" "-g" _cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}") + else() + set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}") + endif() + set(CUDA_HOST_FLAGS "${CUDA_HOST_FLAGS}\nset(CMAKE_HOST_FLAGS_${config_upper} ${_cuda_C_FLAGS})") + # Note that if we ever want CUDA_NVCC_FLAGS_ to be string (instead of a list + # like it is currently), we can remove the quotes around the + # ${CUDA_NVCC_FLAGS_${config_upper}} variable like the CMAKE_HOST_FLAGS_ variable. 
+ set(CUDA_NVCC_FLAGS_CONFIG "${CUDA_NVCC_FLAGS_CONFIG}\nset(CUDA_NVCC_FLAGS_${config_upper} \"${CUDA_NVCC_FLAGS_${config_upper}};${CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}}\")") + endforeach() + + # Get the list of definitions from the directory property + get_directory_property(CUDA_NVCC_DEFINITIONS COMPILE_DEFINITIONS) + if(CUDA_NVCC_DEFINITIONS) + foreach(_definition ${CUDA_NVCC_DEFINITIONS}) + list(APPEND nvcc_flags "-D${_definition}") + endforeach() + endif() + + if(BUILD_SHARED_LIBS) + list(APPEND nvcc_flags "-D${cuda_target}_EXPORTS") + endif() + + # Determine output directory + if(CUDA_GENERATED_OUTPUT_DIR) + set(cuda_compile_output_dir "${CUDA_GENERATED_OUTPUT_DIR}") + else() + set(cuda_compile_output_dir "${CMAKE_CURRENT_BINARY_DIR}") + endif() + + # Reset the output variable + set(_cuda_wrap_generated_files "") + + # Iterate over the macro arguments and create custom + # commands for all the .cu files. + foreach(file ${ARGN}) + # Ignore any file marked as a HEADER_FILE_ONLY + get_source_file_property(_is_header ${file} HEADER_FILE_ONLY) + if(${file} MATCHES ".*\\.cu$" AND NOT _is_header) + + # Add a custom target to generate a c or ptx file. ###################### + + get_filename_component( basename ${file} NAME ) + if( compile_to_ptx ) + set(generated_file_path "${cuda_compile_output_dir}") + set(generated_file_basename "${cuda_target}_generated_${basename}.ptx") + set(format_flag "-ptx") + file(MAKE_DIRECTORY "${cuda_compile_output_dir}") + else( compile_to_ptx ) + set(generated_file_path "${cuda_compile_output_dir}/${CMAKE_CFG_INTDIR}") + set(generated_file_basename "${cuda_target}_generated_${basename}${generated_extension}") + set(format_flag "-c") + endif( compile_to_ptx ) + + # Set all of our file names. Make sure that whatever filenames that have + # generated_file_path in them get passed in through as a command line + # argument, so that the ${CMAKE_CFG_INTDIR} gets expanded at run time + # instead of configure time. + set(generated_file "${generated_file_path}/${generated_file_basename}") + set(cmake_dependency_file "${CMAKE_CURRENT_BINARY_DIR}/${generated_file_basename}.depend") + set(NVCC_generated_dependency_file "${CMAKE_CURRENT_BINARY_DIR}/${generated_file_basename}.NVCC-depend") + set(generated_cubin_file "${generated_file_path}/${generated_file_basename}.cubin.txt") + set(custom_target_script "${CMAKE_CURRENT_BINARY_DIR}/${generated_file_basename}.cmake") + + # Setup properties for obj files: + if( NOT compile_to_ptx ) + set_source_files_properties("${generated_file}" + PROPERTIES + EXTERNAL_OBJECT true # This is an object file not to be compiled, but only be linked. + ) + endif() + + # Don't add CMAKE_CURRENT_SOURCE_DIR if the path is already an absolute path. + get_filename_component(file_path "${file}" PATH) + if(IS_ABSOLUTE "${file_path}") + set(source_file "${file}") + else() + set(source_file "${CMAKE_CURRENT_SOURCE_DIR}/${file}") + endif() + + # Bring in the dependencies. 
Creates a variable CUDA_NVCC_DEPEND ####### + cuda_include_nvcc_dependencies(${cmake_dependency_file}) + + # Convience string for output ########################################### + if(CUDA_BUILD_EMULATION) + set(cuda_build_type "Emulation") + else(CUDA_BUILD_EMULATION) + set(cuda_build_type "Device") + endif(CUDA_BUILD_EMULATION) + + # Build the NVCC made dependency file ################################### + set(build_cubin OFF) + if ( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN ) + if ( NOT compile_to_ptx ) + set ( build_cubin ON ) + endif( NOT compile_to_ptx ) + endif( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN ) + + # Configure the build script + configure_file("${CUDA_run_nvcc}" "${custom_target_script}" @ONLY) + + # So if a user specifies the same cuda file as input more than once, you + # can have bad things happen with dependencies. Here we check an option + # to see if this is the behavior they want. + if(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE) + set(main_dep MAIN_DEPENDENCY ${source_file}) + else() + set(main_dep DEPENDS ${source_file}) + endif() + + if(CUDA_VERBOSE_BUILD) + set(verbose_output ON) + elseif(CMAKE_GENERATOR MATCHES "Makefiles") + set(verbose_output "$(VERBOSE)") + else() + set(verbose_output OFF) + endif() + + # Create up the comment string + file(RELATIVE_PATH generated_file_relative_path "${CMAKE_BINARY_DIR}" "${generated_file}") + if(compile_to_ptx) + set(cuda_build_comment_string "Building NVCC ptx file ${generated_file_relative_path}") + else() + set(cuda_build_comment_string "Building NVCC (${cuda_build_type}) object ${generated_file_relative_path}") + endif() + + # Build the generated file and dependency file ########################## + add_custom_command( + OUTPUT ${generated_file} + # These output files depend on the source_file and the contents of cmake_dependency_file + ${main_dep} + DEPENDS ${CUDA_NVCC_DEPEND} + DEPENDS ${custom_target_script} + COMMAND ${CMAKE_COMMAND} ARGS + -D verbose:BOOL=${verbose_output} + ${ccbin_flags} + -D build_configuration:STRING=${CUDA_build_configuration} + -D "generated_file:STRING=${generated_file}" + -D "generated_cubin_file:STRING=${generated_cubin_file}" + -P "${custom_target_script}" + COMMENT "${cuda_build_comment_string}" + ) + + # Make sure the build system knows the file is generated. + set_source_files_properties(${generated_file} PROPERTIES GENERATED TRUE) + + # Don't add the object file to the list of generated files if we are using + # visual studio and we are attaching the build rule to the cuda file. VS + # will add our object file to the linker automatically for us. + set(cuda_add_generated_file TRUE) + + if(NOT compile_to_ptx AND CMAKE_GENERATOR MATCHES "Visual Studio" AND CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE) + # Visual Studio 8 crashes when you close the solution when you don't add the object file. 
+ if(NOT CMAKE_GENERATOR MATCHES "Visual Studio 8") + set(cuda_add_generated_file FALSE) + endif() + endif() + + if(cuda_add_generated_file) + list(APPEND _cuda_wrap_generated_files ${generated_file}) + endif() + + # Add the other files that we want cmake to clean on a cleanup ########## + list(APPEND CUDA_ADDITIONAL_CLEAN_FILES "${cmake_dependency_file}") + list(REMOVE_DUPLICATES CUDA_ADDITIONAL_CLEAN_FILES) + set(CUDA_ADDITIONAL_CLEAN_FILES ${CUDA_ADDITIONAL_CLEAN_FILES} CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.") + + endif(${file} MATCHES ".*\\.cu$" AND NOT _is_header) + endforeach(file) + + # Set the return parameter + set(${generated_files} ${_cuda_wrap_generated_files}) +endmacro(CUDA_WRAP_SRCS) + + +############################################################################### +############################################################################### +# ADD LIBRARY +############################################################################### +############################################################################### +macro(CUDA_ADD_LIBRARY cuda_target) + + CUDA_ADD_CUDA_INCLUDE_ONCE() + + # Separate the sources from the options + CUDA_GET_SOURCES_AND_OPTIONS(_sources _options ${ARGN}) + # Create custom commands and targets for each file. + CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} OPTIONS ${_options} ) + + # Add the library. + add_library(${cuda_target} + ${_generated_files} + ${_sources} + ) + + target_link_libraries(${cuda_target} + ${CUDA_LIBRARIES} + ) + + # We need to set the linker language based on what the expected generated file + # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP. + set_target_properties(${cuda_target} + PROPERTIES + LINKER_LANGUAGE ${CUDA_C_OR_CXX} + ) + +endmacro(CUDA_ADD_LIBRARY cuda_target) + + +############################################################################### +############################################################################### +# ADD EXECUTABLE +############################################################################### +############################################################################### +macro(CUDA_ADD_EXECUTABLE cuda_target) + + CUDA_ADD_CUDA_INCLUDE_ONCE() + + # Separate the sources from the options + CUDA_GET_SOURCES_AND_OPTIONS(_sources _options ${ARGN}) + # Create custom commands and targets for each file. + CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} OPTIONS ${_options} ) + + # Add the library. + add_executable(${cuda_target} + ${_generated_files} + ${_sources} + ) + + target_link_libraries(${cuda_target} + ${CUDA_LIBRARIES} + ) + + # We need to set the linker language based on what the expected generated file + # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP. + set_target_properties(${cuda_target} + PROPERTIES + LINKER_LANGUAGE ${CUDA_C_OR_CXX} + ) + +endmacro(CUDA_ADD_EXECUTABLE cuda_target) + + +############################################################################### +############################################################################### +# CUDA COMPILE +############################################################################### +############################################################################### +macro(CUDA_COMPILE generated_files) + + # Separate the sources from the options + CUDA_GET_SOURCES_AND_OPTIONS(_sources _options ${ARGN}) + # Create custom commands and targets for each file. 
+ CUDA_WRAP_SRCS( cuda_compile OBJ _generated_files ${_sources} OPTIONS ${_options} ) + + set( ${generated_files} ${_generated_files}) + +endmacro(CUDA_COMPILE) + + +############################################################################### +############################################################################### +# CUDA COMPILE PTX +############################################################################### +############################################################################### +macro(CUDA_COMPILE_PTX generated_files) + + # Separate the sources from the options + CUDA_GET_SOURCES_AND_OPTIONS(_sources _options ${ARGN}) + # Create custom commands and targets for each file. + CUDA_WRAP_SRCS( cuda_compile_ptx PTX _generated_files ${_sources} OPTIONS ${_options} ) + + set( ${generated_files} ${_generated_files}) + +endmacro(CUDA_COMPILE_PTX) + +############################################################################### +############################################################################### +# CUDA ADD CUFFT TO TARGET +############################################################################### +############################################################################### +macro(CUDA_ADD_CUFFT_TO_TARGET target) + if (CUDA_BUILD_EMULATION) + target_link_libraries(${target} ${CUDA_cufftemu_LIBRARY}) + else() + target_link_libraries(${target} ${CUDA_cufft_LIBRARY}) + endif() +endmacro() + +############################################################################### +############################################################################### +# CUDA ADD CUBLAS TO TARGET +############################################################################### +############################################################################### +macro(CUDA_ADD_CUBLAS_TO_TARGET target) + if (CUDA_BUILD_EMULATION) + target_link_libraries(${target} ${CUDA_cublasemu_LIBRARY}) + else() + target_link_libraries(${target} ${CUDA_cublas_LIBRARY}) + endif() +endmacro() + +############################################################################### +############################################################################### +# CUDA BUILD CLEAN TARGET +############################################################################### +############################################################################### +macro(CUDA_BUILD_CLEAN_TARGET) + # Call this after you add all your CUDA targets, and you will get a convience + # target. You should also make clean after running this target to get the + # build system to generate all the code again. + + set(cuda_clean_target_name clean_cuda_depends) + if (CMAKE_GENERATOR MATCHES "Visual Studio") + string(TOUPPER ${cuda_clean_target_name} cuda_clean_target_name) + endif() + add_custom_target(${cuda_clean_target_name} + COMMAND ${CMAKE_COMMAND} -E remove ${CUDA_ADDITIONAL_CLEAN_FILES}) + + # Clear out the variable, so the next time we configure it will be empty. + # This is useful so that the files won't persist in the list after targets + # have been removed. 
+ set(CUDA_ADDITIONAL_CLEAN_FILES "" CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.") +endmacro(CUDA_BUILD_CLEAN_TARGET) diff --git a/cmake/FindGLEW.cmake b/cmake/FindGLEW.cmake index de35f6d..a5ac202 100644 --- a/cmake/FindGLEW.cmake +++ b/cmake/FindGLEW.cmake @@ -13,25 +13,13 @@ IF (WIN32) ${GLEW_ROOT_DIR}/include DOC "The directory where GL/glew.h resides") - IF (NV_SYSTEM_PROCESSOR STREQUAL "AMD64") - FIND_LIBRARY( GLEW_LIBRARY - NAMES glew64 glew64s - PATHS - $ENV{PROGRAMFILES}/GLEW/lib - ${PROJECT_SOURCE_DIR}/src/nvgl/glew/bin - ${PROJECT_SOURCE_DIR}/src/nvgl/glew/lib - DOC "The GLEW library (64-bit)" - ) - ELSE(NV_SYSTEM_PROCESSOR STREQUAL "AMD64") - FIND_LIBRARY( GLEW_LIBRARY - NAMES glew GLEW glew32 glew32s - PATHS - $ENV{PROGRAMFILES}/GLEW/lib - ${PROJECT_SOURCE_DIR}/src/nvgl/glew/bin - ${PROJECT_SOURCE_DIR}/src/nvgl/glew/lib - DOC "The GLEW library" - ) - ENDIF(NV_SYSTEM_PROCESSOR STREQUAL "AMD64") + FIND_LIBRARY( GLEW_LIBRARY + NAMES glew GLEW glew32 glew32s + PATHS + $ENV{PROGRAMFILES}/GLEW/lib + ${PROJECT_SOURCE_DIR}/src/nvgl/glew/bin + ${PROJECT_SOURCE_DIR}/src/nvgl/glew/lib + DOC "The GLEW library") ELSE (WIN32) FIND_PATH( GLEW_INCLUDE_PATH GL/glew.h /usr/include @@ -59,4 +47,3 @@ IF (GLEW_INCLUDE_PATH AND GLEW_LIBRARY) SET(GLEW_LIBRARIES ${GLEW_LIBRARY}) SET(GLEW_FOUND "YES") ENDIF (GLEW_INCLUDE_PATH AND GLEW_LIBRARY) - diff --git a/cmake/make2cmake.cmake b/cmake/make2cmake.cmake index 7fce167..6ae2642 100755 --- a/cmake/make2cmake.cmake +++ b/cmake/make2cmake.cmake @@ -1,79 +1,61 @@ -# James Bigler, NVIDIA Corp (nvidia.com - jbigler) -# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html -# -# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. -# -# Copyright (c) 2007-2009 -# Scientific Computing and Imaging Institute, University of Utah -# -# This code is licensed under the MIT License. See the FindCUDA.cmake script -# for the text of the license. - -# The MIT License -# -# License for the specific language governing rights and limitations under -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. -# - -####################################################################### -# This converts a file written in makefile syntax into one that can be included -# by CMake. - -file(READ ${input_file} depend_text) - -if (${depend_text} MATCHES ".+") - - # message("FOUND DEPENDS") - - # Remember, four backslashes is escaped to one backslash in the string. 
- string(REGEX REPLACE "\\\\ " " " depend_text ${depend_text}) - - # This works for the nvcc -M generated dependency files. - string(REGEX REPLACE "^.* : " "" depend_text ${depend_text}) - string(REGEX REPLACE "[ \\\\]*\n" ";" depend_text ${depend_text}) - - set(dependency_list "") - - foreach(file ${depend_text}) - - string(REGEX REPLACE "^ +" "" file ${file}) - - if(NOT IS_DIRECTORY ${file}) - # If softlinks start to matter, we should change this to REALPATH. For now we need - # to flatten paths, because nvcc can generate stuff like /bin/../include instead of - # just /include. - get_filename_component(file_absolute "${file}" ABSOLUTE) - list(APPEND dependency_list "${file_absolute}") - endif(NOT IS_DIRECTORY ${file}) - - endforeach(file) - -else() - # message("FOUND NO DEPENDS") -endif() - -# Remove the duplicate entries and sort them. -list(REMOVE_DUPLICATES dependency_list) -list(SORT dependency_list) - -foreach(file ${dependency_list}) - set(cuda_nvcc_depend "${cuda_nvcc_depend} \"${file}\"\n") -endforeach() - -file(WRITE ${output_file} "# Generated by: make2cmake.cmake\nSET(CUDA_NVCC_DEPEND\n ${cuda_nvcc_depend})\n\n") + +# For more information, please see: http://software.sci.utah.edu +# +# The MIT License +# +# Copyright (c) 2007 +# Scientific Computing and Imaging Institute, University of Utah +# +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +# Make2cmake CMake Script +# Abe Stephens and James Bigler +# (c) 2007 Scientific Computing and Imaging Institute, University of Utah +# Note that the REGEX expressions may need to be tweaked for different dependency generators. + +file(READ ${input_file} depend_text) + +if (${depend_text} MATCHES ".+") + + # message("FOUND DEPENDS") + + # Remember, four backslashes is escaped to one backslash in the string. + string(REGEX REPLACE "\\\\ " " " depend_text ${depend_text}) + + # This works for the nvcc -M generated dependency files. 
+ string(REGEX REPLACE "^.* : " "" depend_text ${depend_text}) + string(REGEX REPLACE "[ \\\\]*\n" ";" depend_text ${depend_text}) + + foreach(file ${depend_text}) + + string(REGEX REPLACE "^ +" "" file ${file}) + + if(NOT IS_DIRECTORY ${file}) + set(cuda_nvcc_depend "${cuda_nvcc_depend} \"${file}\"\n") + endif(NOT IS_DIRECTORY ${file}) + + endforeach(file) + +else() + # message("FOUND NO DEPENDS") +endif() + + +file(WRITE ${output_file} "# Generated by: make2cmake.cmake\nSET(CUDA_NVCC_DEPEND\n ${cuda_nvcc_depend})\n\n") diff --git a/cmake/parse_cubin.cmake b/cmake/parse_cubin.cmake index 2518c68..6b3d82a 100644 --- a/cmake/parse_cubin.cmake +++ b/cmake/parse_cubin.cmake @@ -1,39 +1,32 @@ -# James Bigler, NVIDIA Corp (nvidia.com - jbigler) -# Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html +# For more information, please see: http://software.sci.utah.edu # -# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. +# The MIT License # -# Copyright (c) 2007-2009 +# Copyright (c) 2007 # Scientific Computing and Imaging Institute, University of Utah # -# This code is licensed under the MIT License. See the FindCUDA.cmake script -# for the text of the license. - -# The MIT License -# -# License for the specific language governing rights and limitations under -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. +# License for the specific language governing rights and limitations under +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: # -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. # - -####################################################################### -# Parses a .cubin file produced by nvcc and reports statistics about the file. - +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + +# .cubin Parsing CMake Script +# Abe Stephens +# (c) 2007 Scientific Computing and Imaging Institute, University of Utah file(READ ${input_file} file_text) @@ -75,26 +68,26 @@ if (${file_text} MATCHES ".+") if(NOT skip) # Registers - if (${entry} MATCHES "reg([ ]+)=([ ]+)([^ ]+)") - string(REGEX REPLACE ".*([ ]+)=([ ]+)([^ ]+)" "\\3" entry ${entry}) + if (${entry} MATCHES "reg = ([^ ]+)") + string(REGEX REPLACE ".* = ([^ ]+)" "\\1" entry ${entry}) message("Registers: ${entry}") - endif() + endif(${entry} MATCHES "reg = ([^ ]+)") # Local memory - if (${entry} MATCHES "lmem([ ]+)=([ ]+)([^ ]+)") - string(REGEX REPLACE ".*([ ]+)=([ ]+)([^ ]+)" "\\3" entry ${entry}) + if (${entry} MATCHES "lmem = ([^ ]+)") + string(REGEX REPLACE ".* = ([^ ]+)" "\\1" entry ${entry}) message("Local: ${entry}") - endif() + endif(${entry} MATCHES "lmem = ([^ ]+)") # Shared memory - if (${entry} MATCHES "smem([ ]+)=([ ]+)([^ ]+)") - string(REGEX REPLACE ".*([ ]+)=([ ]+)([^ ]+)" "\\3" entry ${entry}) + if (${entry} MATCHES "smem = ([^ ]+)") + string(REGEX REPLACE ".* = ([^ ]+)" "\\1" entry ${entry}) message("Shared: ${entry}") - endif() + endif(${entry} MATCHES "smem = ([^ ]+)") if (${entry} MATCHES "^}") message("") - endif() + endif(${entry} MATCHES "^}") endif(NOT skip) diff --git a/cmake/run_nvcc.cmake b/cmake/run_nvcc.cmake index 7349da3..09d1624 100755 --- a/cmake/run_nvcc.cmake +++ b/cmake/run_nvcc.cmake @@ -1,280 +1,227 @@ -# James Bigler, NVIDIA Corp (nvidia.com - jbigler) -# -# Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. -# -# This code is licensed under the MIT License. See the FindCUDA.cmake script -# for the text of the license. - -# The MIT License -# -# License for the specific language governing rights and limitations under -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. - - -########################################################################## -# This file runs the nvcc commands to produce the desired output file along with -# the dependency file needed by CMake to compute dependencies. In addition the -# file checks the output of each command and if the command fails it deletes the -# output files. 
- -# Input variables -# -# verbose:BOOL=<> OFF: Be as quiet as possible (default) -# ON : Describe each step -# -# build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or -# RelWithDebInfo, but it should match one of the -# entries in CUDA_HOST_FLAGS. This is the build -# configuration used when compiling the code. If -# blank or unspecified Debug is assumed as this is -# what CMake does. -# -# generated_file:STRING=<> File to generate. This argument must be passed in. -# -# generated_cubin_file:STRING=<> File to generate. This argument must be passed -# in if build_cubin is true. - -if(NOT generated_file) - message(FATAL_ERROR "You must specify generated_file on the command line") -endif() - -# Set these up as variables to make reading the generated file easier -set(CMAKE_COMMAND "@CMAKE_COMMAND@") -set(source_file "@source_file@") -set(NVCC_generated_dependency_file "@NVCC_generated_dependency_file@") -set(cmake_dependency_file "@cmake_dependency_file@") -set(CUDA_make2cmake "@CUDA_make2cmake@") -set(CUDA_parse_cubin "@CUDA_parse_cubin@") -set(build_cubin @build_cubin@) -# We won't actually use these variables for now, but we need to set this, in -# order to force this file to be run again if it changes. -set(generated_file_path "@generated_file_path@") -set(generated_file_internal "@generated_file@") -set(generated_cubin_file_internal "@generated_cubin_file@") - -set(CUDA_NVCC_EXECUTABLE "@CUDA_NVCC_EXECUTABLE@") -set(CUDA_NVCC_FLAGS "@CUDA_NVCC_FLAGS@;;@CUDA_WRAP_OPTION_NVCC_FLAGS@") -@CUDA_NVCC_FLAGS_CONFIG@ -set(nvcc_flags "@nvcc_flags@") -set(CUDA_NVCC_INCLUDE_ARGS "@CUDA_NVCC_INCLUDE_ARGS@") -set(format_flag "@format_flag@") - -if(build_cubin AND NOT generated_cubin_file) - message(FATAL_ERROR "You must specify generated_cubin_file on the command line") -endif() - -# This is the list of host compilation flags. It C or CXX should already have -# been chosen by FindCUDA.cmake. -@CUDA_HOST_FLAGS@ - -# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler -set(nvcc_host_compiler_flags "") -# If we weren't given a build_configuration, use Debug. -if(NOT build_configuration) - set(build_configuration Debug) -endif() -string(TOUPPER "${build_configuration}" build_configuration) -#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}") -foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}}) - # Extra quotes are added around each flag to help nvcc parse out flags with spaces. - set(nvcc_host_compiler_flags "${nvcc_host_compiler_flags},\"${flag}\"") -endforeach() -if (nvcc_host_compiler_flags) - set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags}) -endif() -#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"") -# Add the build specific configuration flags -list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}}) - -if(DEFINED CCBIN) - set(CCBIN -ccbin "${CCBIN}") -endif() - -# cuda_execute_process - Executes a command with optional command echo and status message. -# -# status - Status message to print if verbose is true -# command - COMMAND argument from the usual execute_process argument structure -# ARGN - Remaining arguments are the command with arguments -# -# CUDA_result - return value from running the command -# -# Make this a macro instead of a function, so that things like RESULT_VARIABLE -# and other return variables are present after executing the process. 
-macro(cuda_execute_process status command) - set(_command ${command}) - if(NOT _command STREQUAL "COMMAND") - message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})") - endif() - if(verbose) - execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status}) - # Now we need to build up our command string. We are accounting for quotes - # and spaces, anything else is left up to the user to fix if they want to - # copy and paste a runnable command line. - set(cuda_execute_process_string) - foreach(arg ${ARGN}) - # If there are quotes, excape them, so they come through. - string(REPLACE "\"" "\\\"" arg ${arg}) - # Args with spaces need quotes around them to get them to be parsed as a single argument. - if(arg MATCHES " ") - list(APPEND cuda_execute_process_string "\"${arg}\"") - else() - list(APPEND cuda_execute_process_string ${arg}) - endif() - endforeach() - # Echo the command - execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string}) - endif(verbose) - # Run the command - execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result ) -endmacro() - -# Delete the target file -cuda_execute_process( - "Removing ${generated_file}" - COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}" - ) - -# For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag -# for dependency generation and hope for the best. -set(depends_CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}") -set(CUDA_VERSION @CUDA_VERSION@) -if(CUDA_VERSION VERSION_LESS "3.0") - cmake_policy(PUSH) - # CMake policy 0007 NEW states that empty list elements are not - # ignored. I'm just setting it to avoid the warning that's printed. - cmake_policy(SET CMP0007 NEW) - # Note that this will remove all occurances of -G. - list(REMOVE_ITEM depends_CUDA_NVCC_FLAGS "-G") - cmake_policy(POP) -endif() - -# nvcc doesn't define __CUDACC__ for some reason when generating dependency files. This -# can cause incorrect dependencies when #including files based on this macro which is -# defined in the generating passes of nvcc invokation. We will go ahead and manually -# define this for now until a future version fixes this bug. -set(CUDACC_DEFINE -D__CUDACC__) - -# Generate the dependency file -cuda_execute_process( - "Generating dependency file: ${NVCC_generated_dependency_file}" - COMMAND "${CUDA_NVCC_EXECUTABLE}" - -M - ${CUDACC_DEFINE} - "${source_file}" - -o "${NVCC_generated_dependency_file}" - ${CCBIN} - ${nvcc_flags} - ${nvcc_host_compiler_flags} - ${depends_CUDA_NVCC_FLAGS} - -DNVCC - ${CUDA_NVCC_INCLUDE_ARGS} - ) - -if(CUDA_result) - message(FATAL_ERROR "Error generating ${generated_file}") -endif() - -# Generate the cmake readable dependency file to a temp file. Don't put the -# quotes just around the filenames for the input_file and output_file variables. -# CMake will pass the quotes through and not be able to find the file. 
-cuda_execute_process( - "Generating temporary cmake readable file: ${cmake_dependency_file}.tmp" - COMMAND "${CMAKE_COMMAND}" - -D "input_file:FILEPATH=${NVCC_generated_dependency_file}" - -D "output_file:FILEPATH=${cmake_dependency_file}.tmp" - -P "${CUDA_make2cmake}" - ) - -if(CUDA_result) - message(FATAL_ERROR "Error generating ${generated_file}") -endif() - -# Copy the file if it is different -cuda_execute_process( - "Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}" - COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}" - ) - -if(CUDA_result) - message(FATAL_ERROR "Error generating ${generated_file}") -endif() - -# Delete the temporary file -cuda_execute_process( - "Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}" - COMMAND "${CMAKE_COMMAND}" -E remove "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}" - ) - -if(CUDA_result) - message(FATAL_ERROR "Error generating ${generated_file}") -endif() - -# Generate the code -cuda_execute_process( - "Generating ${generated_file}" - COMMAND "${CUDA_NVCC_EXECUTABLE}" - "${source_file}" - ${format_flag} -o "${generated_file}" - ${CCBIN} - ${nvcc_flags} - ${nvcc_host_compiler_flags} - ${CUDA_NVCC_FLAGS} - -DNVCC - ${CUDA_NVCC_INCLUDE_ARGS} - ) - -if(CUDA_result) - # Since nvcc can sometimes leave half done files make sure that we delete the output file. - cuda_execute_process( - "Removing ${generated_file}" - COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}" - ) - message(FATAL_ERROR "Error generating file ${generated_file}") -else() - if(verbose) - message("Generated ${generated_file} successfully.") - endif() -endif() - -# Cubin resource report commands. -if( build_cubin ) - # Run with -cubin to produce resource usage report. - cuda_execute_process( - "Generating ${generated_cubin_file}" - COMMAND "${CUDA_NVCC_EXECUTABLE}" - "${source_file}" - ${CUDA_NVCC_FLAGS} - ${nvcc_flags} - ${CCBIN} - ${nvcc_host_compiler_flags} - -DNVCC - -cubin - -o "${generated_cubin_file}" - ${CUDA_NVCC_INCLUDE_ARGS} - ) - - # Execute the parser script. - cuda_execute_process( - "Executing the parser script" - COMMAND "${CMAKE_COMMAND}" - -D "input_file:STRING=${generated_cubin_file}" - -P "${CUDA_parse_cubin}" - ) - -endif( build_cubin ) +# This file runs the nvcc commands to produce the desired output file along with +# the dependency file needed by CMake to compute dependencies. In addition the +# file checks the output of each command and if the command fails it deletes the +# output files. + +# Input variables +# +# verbose:BOOL=<> OFF: Be as quiet as possible (default) +# ON : Describe each step +# +# build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or +# RelWithDebInfo, but it should match one of the +# entries in CUDA_HOST_FLAGS. This is the build +# configuration used when compiling the code. If +# blank or unspecified Debug is assumed as this is +# what CMake does. +# +# generated_file:STRING=<> File to generate. This argument must be passed in. +# +# generated_cubin_file:STRING=<> File to generate. This argument must be passed +# in if build_cubin is true. 
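+# For illustration only: FindCUDA configures this template into a per-source
+# script and drives it entirely from the cmake command line, roughly like
+#
+#   cmake -D verbose:BOOL=ON
+#         -D build_configuration:STRING=Release
+#         -D "generated_file:STRING=<output>.cu.obj"
+#         -D "generated_cubin_file:STRING=<output>.cu.cubin"
+#         -P <per-source run_nvcc script>
+#
+# The script name and the paths above are placeholders, not literal values.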
+ +if(NOT generated_file) + message(FATAL_ERROR "You must specify generated_file on the command line") +endif() + +# Set these up as variables to make reading the generated file easier +set(CMAKE_COMMAND "@CMAKE_COMMAND@") +set(source_file "@source_file@") +set(NVCC_generated_dependency_file "@NVCC_generated_dependency_file@") +set(cmake_dependency_file "@cmake_dependency_file@") +set(CUDA_make2cmake "@CUDA_make2cmake@") +set(CUDA_parse_cubin "@CUDA_parse_cubin@") +set(build_cubin @build_cubin@) +# We won't actually use these variables for now, but we need to set this, in +# order to force this file to be run again if it changes. +set(generated_file_path "@generated_file_path@") +set(generated_file_internal "@generated_file@") +set(generated_cubin_file_internal "@generated_cubin_file@") + +set(CUDA_NVCC_EXECUTABLE "@CUDA_NVCC_EXECUTABLE@") +set(CUDA_NVCC_FLAGS "@CUDA_NVCC_FLAGS@;@CUDA_WRAP_OPTION_NVCC_FLAGS@") +@CUDA_NVCC_FLAGS_CONFIG@ +set(nvcc_flags "@nvcc_flags@") +set(CUDA_NVCC_INCLUDE_ARGS "@CUDA_NVCC_INCLUDE_ARGS@") +set(format_flag "@format_flag@") + +if(build_cubin AND NOT generated_cubin_file) + message(FATAL_ERROR "You must specify generated_cubin_file on the command line") +endif() + +# This is the list of host compilation flags. It C or CXX should already have +# been chosen by FindCUDA.cmake. +@CUDA_HOST_FLAGS@ + +# Take the compiler flags and package them up to be sent to the compiler via -Xcompiler +set(nvcc_host_compiler_flags "") +# If we weren't given a build_configuration, use Debug. +if(NOT build_configuration) + set(build_configuration Debug) +endif() +string(TOUPPER "${build_configuration}" build_configuration) +#message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}") +foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}}) + # Extra quotes are added around each flag to help nvcc parse out flags with spaces. + set(nvcc_host_compiler_flags "${nvcc_host_compiler_flags},\"${flag}\"") +endforeach() +if (nvcc_host_compiler_flags) + set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags}) +endif() +#message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"") +# Add the build specific configuration flags +list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}}) + +if(DEFINED CCBIN) + set(CCBIN -ccbin "${CCBIN}") +endif() + +# cuda_execute_process - Executes a command with optional command echo and status message. +# +# status - Status message to print if verbose is true +# command - COMMAND argument from the usual execute_process argument structure +# ARGN - Remaining arguments are the command with arguments +# +# CUDA_result - return value from running the command +# +# Make this a macro instead of a function, so that things like RESULT_VARIABLE +# and other return variables are present after executing the process. +macro(cuda_execute_process status command) + set(_command ${command}) + if(NOT _command STREQUAL "COMMAND") + message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})") + endif() + if(verbose) + execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status}) + # Now we need to build up our command string. We are accounting for quotes + # and spaces, anything else is left up to the user to fix if they want to + # copy and paste a runnable command line. + set(cuda_execute_process_string) + foreach(arg ${ARGN}) + # If there are quotes, excape them, so they come through. 
+ string(REPLACE "\"" "\\\"" arg ${arg}) + # Args with spaces need quotes around them to get them to be parsed as a single argument. + if(arg MATCHES " ") + list(APPEND cuda_execute_process_string "\"${arg}\"") + else() + list(APPEND cuda_execute_process_string ${arg}) + endif() + endforeach() + # Echo the command + execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string}) + endif(verbose) + # Run the command + execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result ) +endmacro() + +# Delete the target file +cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}" + ) + +# Generate the dependency file +cuda_execute_process( + "Generating dependency file: ${NVCC_generated_dependency_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${CUDA_NVCC_FLAGS} + ${nvcc_flags} + ${CCBIN} + ${nvcc_host_compiler_flags} + -DNVCC + -M + -o "${NVCC_generated_dependency_file}" + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the cmake readable dependency file to a temp file. Don't put the +# quotes just around the filenames for the input_file and output_file variables. +# CMake will pass the quotes through and not be able to find the file. +cuda_execute_process( + "Generating temporary cmake readable file: ${cmake_dependency_file}.tmp" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:FILEPATH=${NVCC_generated_dependency_file}" + -D "output_file:FILEPATH=${cmake_dependency_file}.tmp" + -P "${CUDA_make2cmake}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Copy the file if it is different +cuda_execute_process( + "Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Delete the temporary file +cuda_execute_process( + "Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}" + COMMAND "${CMAKE_COMMAND}" -E remove "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}" + ) + +if(CUDA_result) + message(FATAL_ERROR "Error generating ${generated_file}") +endif() + +# Generate the code +cuda_execute_process( + "Generating ${generated_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${CUDA_NVCC_FLAGS} + ${nvcc_flags} + ${CCBIN} + ${nvcc_host_compiler_flags} + -DNVCC + ${format_flag} -o "${generated_file}" + ${CUDA_NVCC_INCLUDE_ARGS} + ) + +if(CUDA_result) + # Since nvcc can sometimes leave half done files make sure that we delete the output file. + cuda_execute_process( + "Removing ${generated_file}" + COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}" + ) + message(FATAL_ERROR "Error generating file ${generated_file}") +else() + message("Generated ${generated_file} successfully.") +endif() + +# Cubin resource report commands. +if( build_cubin ) + # Run with -cubin to produce resource usage report. + cuda_execute_process( + "Generating ${generated_cubin_file}" + COMMAND "${CUDA_NVCC_EXECUTABLE}" + "${source_file}" + ${CUDA_NVCC_FLAGS} + ${nvcc_flags} + ${CCBIN} + ${nvcc_host_compiler_flags} + -DNVCC + -cubin + -o "${generated_cubin_file}" + ${CUDA_NVCC_INCLUDE_ARGS} + ) + + # Execute the parser script. 
+ cuda_execute_process( + "Executing the parser script" + COMMAND "${CMAKE_COMMAND}" + -D "input_file:STRING=${generated_cubin_file}" + -P "${CUDA_parse_cubin}" + ) + +endif( build_cubin ) diff --git a/project/vc9/imperativeapi/imperativeapi.vcproj b/project/vc9/imperativeapi/imperativeapi.vcproj index 2d48f89..20aa9ec 100755 --- a/project/vc9/imperativeapi/imperativeapi.vcproj +++ b/project/vc9/imperativeapi/imperativeapi.vcproj @@ -90,13 +90,12 @@ /> @@ -133,13 +133,10 @@ /> @@ -207,10 +209,13 @@ /> diff --git a/project/vc9/nvcore/nvcore.vcproj b/project/vc9/nvcore/nvcore.vcproj index dff9fde..ca5ae81 100644 --- a/project/vc9/nvcore/nvcore.vcproj +++ b/project/vc9/nvcore/nvcore.vcproj @@ -173,16 +173,17 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - - - - - - - - - - - - - - - - + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + diff --git a/project/vc9/nvddsinfo/nvddsinfo.vcproj b/project/vc9/nvddsinfo/nvddsinfo.vcproj index b2d1dd5..7cf51f3 100644 --- a/project/vc9/nvddsinfo/nvddsinfo.vcproj +++ b/project/vc9/nvddsinfo/nvddsinfo.vcproj @@ -192,11 +192,16 @@ diff --git a/project/vc9/nvimage/nvimage.vcproj b/project/vc9/nvimage/nvimage.vcproj index e203c91..11d6be8 100644 --- a/project/vc9/nvimage/nvimage.vcproj +++ b/project/vc9/nvimage/nvimage.vcproj @@ -173,14 +173,16 @@ diff --git a/project/vc9/nvmath/nvmath.vcproj b/project/vc9/nvmath/nvmath.vcproj index dadf125..2c95ba6 100644 --- a/project/vc9/nvmath/nvmath.vcproj +++ b/project/vc9/nvmath/nvmath.vcproj @@ -173,16 +173,17 @@ + + diff --git a/project/vc9/nvtt/nvtt.vcproj b/project/vc9/nvtt/nvtt.vcproj index 3ba94ef..802d4ab 100644 --- a/project/vc9/nvtt/nvtt.vcproj +++ b/project/vc9/nvtt/nvtt.vcproj @@ -207,16 +207,17 @@ - - diff --git a/project/vc9/squish/squish.vcproj b/project/vc9/squish/squish.vcproj index d45986d..83cde7d 100644 --- a/project/vc9/squish/squish.vcproj +++ b/project/vc9/squish/squish.vcproj @@ -173,16 +173,17 @@ # endif # endif +# pragma comment(lib,"dbghelp.lib") #endif #if !NV_OS_WIN32 && defined(HAVE_SIGNAL_H) @@ -76,55 +77,191 @@ namespace #if NV_OS_WIN32 && NV_CC_MSVC - // TODO write minidump - - static LONG WINAPI nvTopLevelFilter( struct _EXCEPTION_POINTERS * pExceptionInfo) + static bool writeMiniDump(EXCEPTION_POINTERS * pExceptionInfo) { - NV_UNUSED(pExceptionInfo); - /* BOOL (WINAPI * Dump) (HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION ); + // create the file + HANDLE hFile = CreateFile("crash.dmp", GENERIC_WRITE, FILE_SHARE_WRITE, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); + if (hFile == INVALID_HANDLE_VALUE) { + nvDebug("*** Failed to create dump file.\n"); + return false; + } - AutoString dbghelp_path(512); - getcwd(dbghelp_path, 512); - dbghelp_path.Append("\\DbgHelp.dll"); - nvTranslatePath(dbghelp_path); + MINIDUMP_EXCEPTION_INFORMATION ExInfo; + ExInfo.ThreadId = ::GetCurrentThreadId(); + ExInfo.ExceptionPointers = pExceptionInfo; + ExInfo.ClientPointers = NULL; - PiLibrary DbgHelp_lib(dbghelp_path, true); + // write the dump + BOOL ok = MiniDumpWriteDump(GetCurrentProcess(), GetCurrentProcessId(), hFile, MiniDumpNormal, &ExInfo, NULL, NULL) != 0; + CloseHandle(hFile); - if( !DbgHelp_lib.IsValid() ) { - nvDebug("*** 'DbgHelp.dll' not found.\n"); - return EXCEPTION_CONTINUE_SEARCH; + if (ok == FALSE) { + nvDebug("*** Failed to save dump file.\n"); + return false; } - if( !DbgHelp_lib.BindSymbol( (void **)&Dump, "MiniDumpWriteDump" ) ) { - 
nvDebug("*** 'DbgHelp.dll' too old.\n"); - return EXCEPTION_CONTINUE_SEARCH; + nvDebug("\nDump file saved.\n"); + + return true; + } + + static bool hasStackTrace() { + return true; + } + + /*static NV_NOINLINE int backtrace(void * trace[], int maxcount) { + + // In Windows XP and Windows Server 2003, the sum of the FramesToSkip and FramesToCapture parameters must be less than 63. + int xp_maxcount = min(63-1, maxcount); + + int count = RtlCaptureStackBackTrace(1, xp_maxcount, trace, NULL); + nvDebugCheck(count <= maxcount); + + return count; + }*/ + + static NV_NOINLINE int backtraceWithSymbols(CONTEXT * ctx, void * trace[], int maxcount, int skip = 0) { + + // Init the stack frame for this function + STACKFRAME64 stackFrame = { 0 }; + + #if NV_CPU_X86_64 + DWORD dwMachineType = IMAGE_FILE_MACHINE_AMD64; + stackFrame.AddrPC.Offset = ctx->Rip; + stackFrame.AddrFrame.Offset = ctx->Rbp; + stackFrame.AddrStack.Offset = ctx->Rsp; + #elif NV_CPU_X86 + DWORD dwMachineType = IMAGE_FILE_MACHINE_I386; + stackFrame.AddrPC.Offset = ctx->Eip; + stackFrame.AddrFrame.Offset = ctx->Ebp; + stackFrame.AddrStack.Offset = ctx->Esp; + #else + #error "Platform not supported!" + #endif + stackFrame.AddrPC.Mode = AddrModeFlat; + stackFrame.AddrFrame.Mode = AddrModeFlat; + stackFrame.AddrStack.Mode = AddrModeFlat; + + // Walk up the stack + const HANDLE hThread = GetCurrentThread(); + const HANDLE hProcess = GetCurrentProcess(); + int i; + for (i = 0; i < maxcount; i++) + { + // walking once first makes us skip self + if (!StackWalk64(dwMachineType, hProcess, hThread, &stackFrame, ctx, NULL, &SymFunctionTableAccess64, &SymGetModuleBase64, NULL)) { + break; + } + + /*if (stackFrame.AddrPC.Offset == stackFrame.AddrReturn.Offset || stackFrame.AddrPC.Offset == 0) { + break; + }*/ + + if (i >= skip) { + trace[i - skip] = (PVOID)stackFrame.AddrPC.Offset; + } } - // create the file - HANDLE hFile = ::CreateFile( "nv.dmp", GENERIC_WRITE, FILE_SHARE_WRITE, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL ); - if( hFile == INVALID_HANDLE_VALUE ) { - nvDebug("*** Failed to create dump file.\n"); - return EXCEPTION_CONTINUE_SEARCH; + return i - skip; + } + + static NV_NOINLINE int backtrace(void * trace[], int maxcount) { + CONTEXT ctx = { 0 }; +#if NV_CPU_X86 && !NV_CPU_X86_64 + ctx.ContextFlags = CONTEXT_CONTROL; + _asm { + call x + x: pop eax + mov ctx.Eip, eax + mov ctx.Ebp, ebp + mov ctx.Esp, esp } +#else + RtlCaptureContext(&ctx); +#endif + return backtraceWithSymbols(&ctx, trace, maxcount, 1); + } - _MINIDUMP_EXCEPTION_INFORMATION ExInfo; - ExInfo.ThreadId = ::GetCurrentThreadId(); - ExInfo.ExceptionPointers = pExceptionInfo; - ExInfo.ClientPointers = NULL; + static NV_NOINLINE void printStackTrace(void * trace[], int size, int start=0) + { + HANDLE hProcess = GetCurrentProcess(); + + nvDebug( "\nDumping stacktrace:\n" ); - // write the dump - bool ok = Dump( GetCurrentProcess(), GetCurrentProcessId(), hFile, MiniDumpNormal, &ExInfo, NULL, NULL )!=0; - ::CloseHandle(hFile); + // Resolve PC to function names + for (int i = start; i < size; i++) + { + // Check for end of stack walk + DWORD64 ip = (DWORD64)trace[i]; + if (ip == NULL) + break; + + // Get function name + #define MAX_STRING_LEN (512) + unsigned char byBuffer[sizeof(IMAGEHLP_SYMBOL64) + MAX_STRING_LEN] = { 0 }; + IMAGEHLP_SYMBOL64 * pSymbol = (IMAGEHLP_SYMBOL64*)byBuffer; + pSymbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64); + pSymbol->MaxNameLength = MAX_STRING_LEN; + + DWORD64 dwDisplacement; + + if (SymGetSymFromAddr64(hProcess, ip, &dwDisplacement, 
pSymbol)) + { + pSymbol->Name[MAX_STRING_LEN-1] = 0; + + /* + // Make the symbol readable for humans + UnDecorateSymbolName( pSym->Name, lpszNonUnicodeUnDSymbol, BUFFERSIZE, + UNDNAME_COMPLETE | + UNDNAME_NO_THISTYPE | + UNDNAME_NO_SPECIAL_SYMS | + UNDNAME_NO_MEMBER_TYPE | + UNDNAME_NO_MS_KEYWORDS | + UNDNAME_NO_ACCESS_SPECIFIERS ); + */ + + // pSymbol->Name + const char * pFunc = pSymbol->Name; + + // Get file/line number + IMAGEHLP_LINE64 theLine = { 0 }; + theLine.SizeOfStruct = sizeof(theLine); + + DWORD dwDisplacement; + if (!SymGetLineFromAddr64(hProcess, ip, &dwDisplacement, &theLine)) + { + nvDebug("unknown(%08X) : %s\n", (uint32)ip, pFunc); + } + else + { + /* + const char* pFile = strrchr(theLine.FileName, '\\'); + if ( pFile == NULL ) pFile = theLine.FileName; + else pFile++; + */ + const char * pFile = theLine.FileName; + + int line = theLine.LineNumber; + + nvDebug("%s(%d) : %s\n", pFile, line, pFunc); + } + } + } + } - if( !ok ) { - nvDebug("*** Failed to save dump file.\n"); - return EXCEPTION_CONTINUE_SEARCH; - } - nvDebug("--- Dump file saved.\n"); - */ + // Write mini dump and print stack trace. + static LONG WINAPI topLevelFilter(EXCEPTION_POINTERS * pExceptionInfo) + { + void * trace[64]; + + int size = backtraceWithSymbols(pExceptionInfo->ContextRecord, trace, 64); + printStackTrace(trace, size, 0); + + writeMiniDump(pExceptionInfo); + return EXCEPTION_CONTINUE_SEARCH; } @@ -132,7 +269,7 @@ namespace #if defined(HAVE_EXECINFO_H) // NV_OS_LINUX - static bool nvHasStackTrace() { + static bool hasStackTrace() { #if NV_OS_DARWIN return backtrace != NULL; #else @@ -140,7 +277,7 @@ namespace #endif } - static void nvPrintStackTrace(void * trace[], int size, int start=0) { + static void printStackTrace(void * trace[], int size, int start=0) { char ** string_array = backtrace_symbols(trace, size); nvDebug( "\nDumping stacktrace:\n" ); @@ -260,7 +397,7 @@ namespace } #if defined(HAVE_EXECINFO_H) - if (nvHasStackTrace()) // in case of weak linking + if (hasStackTrace()) // in case of weak linking { void * trace[64]; int size = backtrace(trace, 64); @@ -270,7 +407,7 @@ namespace trace[1] = pnt; } - nvPrintStackTrace(trace, size, 1); + printStackTrace(trace, size, 1); } #endif // defined(HAVE_EXECINFO_H) @@ -289,18 +426,14 @@ namespace // Code from Daniel Vogel. static bool isDebuggerPresent() { - bool result = false; - - HINSTANCE kern_lib = LoadLibraryExA( "kernel32.dll", NULL, 0 ); - if( kern_lib ) { - FARPROC lIsDebuggerPresent = GetProcAddress( kern_lib, "IsDebuggerPresent" ); - if( lIsDebuggerPresent && lIsDebuggerPresent() ) { - result = true; + HINSTANCE kernel32 = GetModuleHandle("kernel32.dll"); + if (kernel32) { + FARPROC IsDebuggerPresent = GetProcAddress(kernel32, "IsDebuggerPresent"); + if (IsDebuggerPresent != NULL && IsDebuggerPresent()) { + return true; } - - FreeLibrary( kern_lib ); } - return result; + return false; } // Flush the message queue. This is necessary for the message box to show up. 
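The moving parts introduced above (dbghelp symbol initialization, the unhandled-exception filter, and the minidump writer) follow the standard Win32 crash-handling pattern. A minimal standalone sketch of that pattern is shown below for reference; it mirrors the calls used by this patch, but the function names and the main() wiring are illustrative and not part of nvcore.

    #include <windows.h>
    #include <dbghelp.h>
    #pragma comment(lib, "dbghelp.lib")

    // Top-level filter: write a minidump, then let the OS/debugger continue the search.
    static LONG WINAPI crashFilter(EXCEPTION_POINTERS * info)
    {
        HANDLE file = CreateFileA("crash.dmp", GENERIC_WRITE, FILE_SHARE_WRITE,
                                  NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
        if (file != INVALID_HANDLE_VALUE) {
            MINIDUMP_EXCEPTION_INFORMATION exInfo;
            exInfo.ThreadId = GetCurrentThreadId();
            exInfo.ExceptionPointers = info;
            exInfo.ClientPointers = FALSE;
            MiniDumpWriteDump(GetCurrentProcess(), GetCurrentProcessId(), file,
                              MiniDumpNormal, &exInfo, NULL, NULL);
            CloseHandle(file);
        }
        return EXCEPTION_CONTINUE_SEARCH;
    }

    int main()
    {
        // Deferred loads keep SymInitialize cheap; line info is needed for file/line output.
        SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_FAIL_CRITICAL_ERRORS |
                      SYMOPT_LOAD_LINES | SYMOPT_UNDNAME);
        SymInitialize(GetCurrentProcess(), NULL, TRUE);
        SetUnhandledExceptionFilter(crashFilter);

        // ... application code ...

        SymCleanup(GetCurrentProcess());
        return 0;
    }
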
@@ -322,11 +455,11 @@ namespace StringBuilder error_string; if( func != NULL ) { error_string.format( "*** Assertion failed: %s\n On file: %s\n On function: %s\n On line: %d\n ", exp, file, func, line ); - nvDebug( error_string ); + nvDebug( error_string.str() ); } else { error_string.format( "*** Assertion failed: %s\n On file: %s\n On line: %d\n ", exp, file, line ); - nvDebug( error_string ); + nvDebug( error_string.str() ); } if (isDebuggerPresent()) { @@ -334,7 +467,7 @@ namespace } flushMessageQueue(); - int action = MessageBoxA(NULL, error_string, "Assertion failed", MB_ABORTRETRYIGNORE|MB_ICONERROR); + int action = MessageBoxA(NULL, error_string.str(), "Assertion failed", MB_ABORTRETRYIGNORE|MB_ICONERROR); switch( action ) { case IDRETRY: ret = NV_ABORT_DEBUG; @@ -352,7 +485,7 @@ namespace }*/ if( ret == NV_ABORT_EXIT ) { - // Exit cleanly. + // Exit cleanly. throw "Assertion failed"; } @@ -402,16 +535,16 @@ namespace #endif #if defined(HAVE_EXECINFO_H) - if (nvHasStackTrace()) + if (hasStackTrace()) { void * trace[64]; int size = backtrace(trace, 64); - nvPrintStackTrace(trace, size, 2); + printStackTrace(trace, size, 2); } #endif // Exit cleanly. - throw std::runtime_error("Assertion failed"); + throw "Assertion failed"; } }; @@ -453,14 +586,12 @@ void NV_CDECL nvDebugPrint(const char *msg, ...) /// Dump debug info. void debug::dumpInfo() { -#if !NV_OS_WIN32 && defined(HAVE_SIGNAL_H) && defined(HAVE_EXECINFO_H) - if (nvHasStackTrace()) + if (hasStackTrace()) { void * trace[64]; int size = backtrace(trace, 64); - nvPrintStackTrace(trace, size, 1); + printStackTrace(trace, size, 1); } -#endif } @@ -497,7 +628,12 @@ void debug::enableSigHandler() #if NV_OS_WIN32 && NV_CC_MSVC - s_old_exception_filter = ::SetUnhandledExceptionFilter( nvTopLevelFilter ); + s_old_exception_filter = ::SetUnhandledExceptionFilter( topLevelFilter ); + + // SYMOPT_DEFERRED_LOADS make us not take a ton of time unless we actual log traces + SymSetOptions(SYMOPT_DEFERRED_LOADS|SYMOPT_FAIL_CRITICAL_ERRORS|SYMOPT_LOAD_LINES|SYMOPT_UNDNAME); + + SymInitialize(GetCurrentProcess(), NULL, TRUE); #elif !NV_OS_WIN32 && defined(HAVE_SIGNAL_H) @@ -526,6 +662,8 @@ void debug::disableSigHandler() ::SetUnhandledExceptionFilter( s_old_exception_filter ); s_old_exception_filter = NULL; + SymCleanup(GetCurrentProcess()); + #elif !NV_OS_WIN32 && defined(HAVE_SIGNAL_H) sigaction(SIGSEGV, &s_old_sigsegv, NULL); diff --git a/src/nvcore/Memory.cpp b/src/nvcore/Memory.cpp index b44a975..4aa0877 100644 --- a/src/nvcore/Memory.cpp +++ b/src/nvcore/Memory.cpp @@ -3,35 +3,54 @@ #include "Memory.h" #include "Debug.h" -//#if HAVE_MALLOC_H -//#include -//#endif - #include +#define USE_EFENCE 0 + +#if USE_EFENCE +extern "C" void *EF_malloc(size_t size); +extern "C" void *EF_realloc(void * oldBuffer, size_t newSize); +extern "C" void EF_free(void * address); +#endif using namespace nv; void * nv::mem::malloc(size_t size) { +#if USE_EFENCE + return EF_malloc(size); +#else return ::malloc(size); +#endif } void * nv::mem::malloc(size_t size, const char * file, int line) { NV_UNUSED(file); NV_UNUSED(line); +#if USE_EFENCE + return EF_malloc(size); +#else return ::malloc(size); +#endif } void nv::mem::free(const void * ptr) { +#if USE_EFENCE + return EF_free(const_cast(ptr)); +#else ::free(const_cast(ptr)); +#endif } void * nv::mem::realloc(void * ptr, size_t size) { nvDebugCheck(ptr != NULL || size != 0); // undefined realloc behavior. 
+#if USE_EFENCE + return EF_realloc(ptr, size); +#else return ::realloc(ptr, size); +#endif } diff --git a/src/nvcore/StrLib.cpp b/src/nvcore/StrLib.cpp index 957e4cd..d65d112 100644 --- a/src/nvcore/StrLib.cpp +++ b/src/nvcore/StrLib.cpp @@ -211,9 +211,7 @@ StringBuilder::StringBuilder( const char * s, int extra_size_hint/*=0*/ ) : m_si /** Delete the string. */ StringBuilder::~StringBuilder() { - m_size = 0; strFree(m_str); - m_str = NULL; } @@ -237,7 +235,7 @@ StringBuilder & StringBuilder::formatList( const char * fmt, va_list arg ) { nvDebugCheck(fmt != NULL); - if( m_size == 0 ) { + if (m_size == 0) { m_size = 64; m_str = strAlloc( m_size ); } @@ -287,21 +285,19 @@ StringBuilder & StringBuilder::append( const char * s ) const uint slen = uint(strlen( s )); - if( m_str == NULL ) { + if (m_str == NULL) { m_size = slen + 1; m_str = strAlloc(m_size); - strCpy( m_str, m_size, s ); + memcpy(m_str, s, m_size + 1); } else { - const uint len = uint(strlen( m_str )); - - if( m_size < len + slen + 1 ) { + if (m_size < len + slen + 1) { m_size = len + slen + 1; m_str = strReAlloc(m_str, m_size); } - strCat( m_str, m_size, s ); + memcpy(m_str + len, s, slen + 1); } return *this; @@ -338,7 +334,7 @@ StringBuilder & StringBuilder::appendFormatList( const char * fmt, va_list arg ) else { StringBuilder tmp_str; tmp_str.formatList( fmt, tmp ); - append( tmp_str ); + append( tmp_str.str() ); } va_end(tmp); @@ -391,7 +387,7 @@ StringBuilder & StringBuilder::number( uint i, int base ) StringBuilder & StringBuilder::reserve( uint size_hint ) { nvCheck(size_hint != 0); - if( size_hint > m_size ) { + if (size_hint > m_size) { m_str = strReAlloc(m_str, size_hint); m_size = size_hint; } @@ -403,9 +399,9 @@ StringBuilder & StringBuilder::reserve( uint size_hint ) StringBuilder & StringBuilder::copy( const char * s, int extra_size/*=0*/ ) { nvCheck( s != NULL ); - uint str_size = uint(strlen( s )) + 1; + const uint str_size = uint(strlen( s )) + 1; reserve(str_size + extra_size); - strCpy( m_str, str_size, s ); + memcpy(m_str, s, str_size); return *this; } @@ -413,11 +409,9 @@ StringBuilder & StringBuilder::copy( const char * s, int extra_size/*=0*/ ) /** Copy an StringBuilder. 
*/ StringBuilder & StringBuilder::copy( const StringBuilder & s ) { - if( s.m_str == NULL ) { + if (s.m_str == NULL) { nvCheck( s.m_size == 0 ); - m_size = 0; - strFree( m_str ); - m_str = NULL; + reset(); } else { reserve( s.m_size ); @@ -428,8 +422,8 @@ StringBuilder & StringBuilder::copy( const StringBuilder & s ) bool StringBuilder::endsWith(const char * str) const { - size_t l = strlen(str); - size_t ml = strlen(m_str); + uint l = uint(strlen(str)); + uint ml = uint(strlen(m_str)); if (ml < l) return false; return strncmp(m_str + ml - l, str, l) == 0; } @@ -596,7 +590,7 @@ void String::setString(const StringBuilder & str) data = NULL; } else { - allocString(str); + allocString(str.str()); addRef(); } } diff --git a/src/nvcore/StrLib.h b/src/nvcore/StrLib.h index 6f14136..5b1a2dd 100644 --- a/src/nvcore/StrLib.h +++ b/src/nvcore/StrLib.h @@ -89,8 +89,8 @@ namespace nv bool isNull() const { return m_size == 0; } // const char * accessors - operator const char * () const { return m_str; } - operator char * () { return m_str; } + //operator const char * () const { return m_str; } + //operator char * () { return m_str; } const char * str() const { return m_str; } char * str() { return m_str; } diff --git a/src/nvimage/DirectDrawSurface.cpp b/src/nvimage/DirectDrawSurface.cpp index 8edbc16..ae539a1 100644 --- a/src/nvimage/DirectDrawSurface.cpp +++ b/src/nvimage/DirectDrawSurface.cpp @@ -42,6 +42,8 @@ using namespace nv; (uint(uint8(ch2)) << 16) | (uint(uint8(ch3)) << 24 )) #endif +const uint nv::FOURCC_NVTT = MAKEFOURCC('N', 'V', 'T', 'T'); + namespace { static const uint FOURCC_DDS = MAKEFOURCC('D', 'D', 'S', ' '); @@ -58,6 +60,8 @@ namespace static const uint FOURCC_DX10 = MAKEFOURCC('D', 'X', '1', '0'); + static const uint FOURCC_UVER = MAKEFOURCC('U', 'V', 'E', 'R'); + // 32 bit RGB formats. static const uint D3DFMT_R8G8B8 = 20; static const uint D3DFMT_A8R8G8B8 = 21; @@ -480,63 +484,62 @@ namespace nv } // nv namespace -/* Not used! 
namespace { -struct FormatDescriptor -{ -uint format; -uint bitcount; -uint rmask; -uint gmask; -uint bmask; -uint amask; -}; - -static const FormatDescriptor s_d3dFormats[] = -{ -{ D3DFMT_R8G8B8, 24, 0xFF0000, 0xFF00, 0xFF, 0 }, -{ D3DFMT_A8R8G8B8, 32, 0xFF0000, 0xFF00, 0xFF, 0xFF000000 }, // DXGI_FORMAT_B8G8R8A8_UNORM -{ D3DFMT_X8R8G8B8, 32, 0xFF0000, 0xFF00, 0xFF, 0 }, // DXGI_FORMAT_B8G8R8X8_UNORM -{ D3DFMT_R5G6B5, 16, 0xF800, 0x7E0, 0x1F, 0 }, // DXGI_FORMAT_B5G6R5_UNORM -{ D3DFMT_X1R5G5B5, 16, 0x7C00, 0x3E0, 0x1F, 0 }, -{ D3DFMT_A1R5G5B5, 16, 0x7C00, 0x3E0, 0x1F, 0x8000 }, // DXGI_FORMAT_B5G5R5A1_UNORM -{ D3DFMT_A4R4G4B4, 16, 0xF00, 0xF0, 0xF, 0xF000 }, -{ D3DFMT_R3G3B2, 8, 0xE0, 0x1C, 0x3, 0 }, -{ D3DFMT_A8, 8, 0, 0, 0, 8 }, // DXGI_FORMAT_A8_UNORM -{ D3DFMT_A8R3G3B2, 16, 0xE0, 0x1C, 0x3, 0xFF00 }, -{ D3DFMT_X4R4G4B4, 16, 0xF00, 0xF0, 0xF, 0 }, -{ D3DFMT_A2B10G10R10, 32, 0x3FF, 0xFFC00, 0x3FF00000, 0xC0000000 }, // DXGI_FORMAT_R10G10B10A2 -{ D3DFMT_A8B8G8R8, 32, 0xFF, 0xFF00, 0xFF0000, 0xFF000000 }, // DXGI_FORMAT_R8G8B8A8_UNORM -{ D3DFMT_X8B8G8R8, 32, 0xFF, 0xFF00, 0xFF0000, 0 }, -{ D3DFMT_G16R16, 32, 0xFFFF, 0xFFFF0000, 0, 0 }, // DXGI_FORMAT_R16G16_UNORM -{ D3DFMT_A2R10G10B10, 32, 0x3FF00000, 0xFFC00, 0x3FF, 0xC0000000 }, - -{ D3DFMT_L8, 8, 8, 0, 0, 0 }, // DXGI_FORMAT_R8_UNORM -{ D3DFMT_L16, 16, 16, 0, 0, 0 }, // DXGI_FORMAT_R16_UNORM -}; - -static const uint s_d3dFormatCount = sizeof(s_d3dFormats) / sizeof(s_d3dFormats[0]); - -static uint findD3D9Format(uint bitcount, uint rmask, uint gmask, uint bmask, uint amask) -{ -for (int i = 0; i < s_d3dFormatCount; i++) -{ -if (s_d3dFormats[i].bitcount == bitcount && -s_d3dFormats[i].rmask == rmask && -s_d3dFormats[i].gmask == gmask && -s_d3dFormats[i].bmask == bmask && -s_d3dFormats[i].amask == amask) -{ -return s_d3dFormats[i].format; -} -} + struct FormatDescriptor + { + uint format; + uint bitcount; + uint rmask; + uint gmask; + uint bmask; + uint amask; + }; -return 0; -} + static const FormatDescriptor s_d3dFormats[] = + { + { D3DFMT_R8G8B8, 24, 0xFF0000, 0xFF00, 0xFF, 0 }, + { D3DFMT_A8R8G8B8, 32, 0xFF0000, 0xFF00, 0xFF, 0xFF000000 }, // DXGI_FORMAT_B8G8R8A8_UNORM + { D3DFMT_X8R8G8B8, 32, 0xFF0000, 0xFF00, 0xFF, 0 }, // DXGI_FORMAT_B8G8R8X8_UNORM + { D3DFMT_R5G6B5, 16, 0xF800, 0x7E0, 0x1F, 0 }, // DXGI_FORMAT_B5G6R5_UNORM + { D3DFMT_X1R5G5B5, 16, 0x7C00, 0x3E0, 0x1F, 0 }, + { D3DFMT_A1R5G5B5, 16, 0x7C00, 0x3E0, 0x1F, 0x8000 }, // DXGI_FORMAT_B5G5R5A1_UNORM + { D3DFMT_A4R4G4B4, 16, 0xF00, 0xF0, 0xF, 0xF000 }, + { D3DFMT_R3G3B2, 8, 0xE0, 0x1C, 0x3, 0 }, + { D3DFMT_A8, 8, 0, 0, 0, 8 }, // DXGI_FORMAT_A8_UNORM + { D3DFMT_A8R3G3B2, 16, 0xE0, 0x1C, 0x3, 0xFF00 }, + { D3DFMT_X4R4G4B4, 16, 0xF00, 0xF0, 0xF, 0 }, + { D3DFMT_A2B10G10R10, 32, 0x3FF, 0xFFC00, 0x3FF00000, 0xC0000000 }, // DXGI_FORMAT_R10G10B10A2 + { D3DFMT_A8B8G8R8, 32, 0xFF, 0xFF00, 0xFF0000, 0xFF000000 }, // DXGI_FORMAT_R8G8B8A8_UNORM + { D3DFMT_X8B8G8R8, 32, 0xFF, 0xFF00, 0xFF0000, 0 }, + { D3DFMT_G16R16, 32, 0xFFFF, 0xFFFF0000, 0, 0 }, // DXGI_FORMAT_R16G16_UNORM + { D3DFMT_A2R10G10B10, 32, 0x3FF00000, 0xFFC00, 0x3FF, 0xC0000000 }, + { D3DFMT_A2B10G10R10, 32, 0x3FF, 0xFFC00, 0x3FF00000, 0xC0000000 }, + + { D3DFMT_L8, 8, 8, 0, 0, 0 }, // DXGI_FORMAT_R8_UNORM + { D3DFMT_L16, 16, 16, 0, 0, 0 }, // DXGI_FORMAT_R16_UNORM + }; -} // nv namespace -*/ + static const uint s_d3dFormatCount = sizeof(s_d3dFormats) / sizeof(s_d3dFormats[0]); + + static uint findD3D9Format(uint bitcount, uint rmask, uint gmask, uint bmask, uint amask) + { + for (int i = 0; i < s_d3dFormatCount; i++) + 
{ + if (s_d3dFormats[i].bitcount == bitcount && + s_d3dFormats[i].rmask == rmask && + s_d3dFormats[i].gmask == gmask && + s_d3dFormats[i].bmask == bmask && + s_d3dFormats[i].amask == amask) + { + return s_d3dFormats[i].format; + } + } + + return 0; + } + +} // namespace DDSHeader::DDSHeader() { @@ -551,7 +554,7 @@ DDSHeader::DDSHeader() memset(this->reserved, 0, sizeof(this->reserved)); // Store version information on the reserved header attributes. - this->reserved[9] = MAKEFOURCC('N', 'V', 'T', 'T'); + this->reserved[9] = FOURCC_NVTT; this->reserved[10] = (2 << 16) | (1 << 8) | (0); // major.minor.revision this->pf.size = 32; @@ -598,7 +601,7 @@ void DDSHeader::setMipmapCount(uint count) if (count == 0 || count == 1) { this->flags &= ~DDSD_MIPMAPCOUNT; - this->mipmapcount = 0; + this->mipmapcount = 1; if (this->caps.caps2 == 0) { this->caps.caps1 = DDSCAPS_TEXTURE; @@ -725,15 +728,13 @@ void DDSHeader::setPixelFormat(uint bitcount, uint rmask, uint gmask, uint bmask } } - nvCheck(bitcount > 0 && bitcount <= 32); - - // Align to 8. - if (bitcount <= 8) bitcount = 8; - else if (bitcount <= 16) bitcount = 16; - else if (bitcount <= 24) bitcount = 24; - else bitcount = 32; - + // D3DX functions do not like this: this->pf.fourcc = 0; //findD3D9Format(bitcount, rmask, gmask, bmask, amask); + /*if (this->pf.fourcc) { + this->pf.flags |= DDPF_FOURCC; + }*/ + + nvCheck(bitcount > 0 && bitcount <= 32); this->pf.bitcount = bitcount; this->pf.rmask = rmask; this->pf.gmask = gmask; @@ -760,6 +761,12 @@ void DDSHeader::setHasAlphaFlag(bool b) else this->pf.flags &= ~DDPF_ALPHAPIXELS; } +void DDSHeader::setUserVersion(int version) +{ + this->reserved[7] = FOURCC_UVER; + this->reserved[8] = version; +} + void DDSHeader::swapBytes() { this->fourcc = POSH_LittleU32(this->fourcc); @@ -798,10 +805,48 @@ void DDSHeader::swapBytes() bool DDSHeader::hasDX10Header() const { - return this->pf.fourcc == FOURCC_DX10; // @@ This is according to AMD - //return this->pf.flags == 0; // @@ This is according to MS + return this->pf.fourcc == FOURCC_DX10; +} + +uint DDSHeader::signature() const +{ + return this->reserved[9]; +} + +uint DDSHeader::toolVersion() const +{ + return this->reserved[10]; +} + +uint DDSHeader::userVersion() const +{ + if (this->reserved[7] == FOURCC_UVER) { + return this->reserved[8]; + } + return 0; } +bool DDSHeader::isNormalMap() const +{ + return (pf.flags & DDPF_NORMAL) != 0; +} + +bool DDSHeader::hasAlpha() const +{ + return (pf.flags & DDPF_ALPHAPIXELS) != 0; +} + +uint DDSHeader::d3d9Format() const +{ + if (pf.flags & DDPF_FOURCC) { + return pf.fourcc; + } + else { + return findD3D9Format(pf.bitcount, pf.rmask, pf.gmask, pf.bmask, pf.amask); + } +} + + DirectDrawSurface::DirectDrawSurface() : stream(NULL) { @@ -955,7 +1000,7 @@ bool DirectDrawSurface::hasAlpha() const } else { - // @@ Here we could check the ALPHA_PIXELS flag, but nobody sets it. + // @@ Here we could check the ALPHA_PIXELS flag, but nobody sets it. (except us?) 
return true; } } @@ -1047,6 +1092,11 @@ void DirectDrawSurface::setHasAlphaFlag(bool b) header.setHasAlphaFlag(b); } +void DirectDrawSurface::setUserVersion(int version) +{ + nvDebugCheck(isValid()); + header.setUserVersion(version); +} void DirectDrawSurface::mipmap(Image * img, uint face, uint mipmap) { @@ -1093,6 +1143,28 @@ void DirectDrawSurface::mipmap(Image * img, uint face, uint mipmap) } } +void * DirectDrawSurface::readData(uint * sizePtr) +{ + uint header_size = 128; // sizeof(DDSHeader); + + if (header.hasDX10Header()) + { + header_size += 20; // sizeof(DDSHeader10); + } + + stream->seek(header_size); + + int size = stream->size() - header_size; + *sizePtr = size; + + void * data = new unsigned char [size]; + + size = stream->serialize(data, size); + nvDebugCheck(size == *sizePtr); + + return data; +} + void DirectDrawSurface::readLinearImage(Image * img) { nvDebugCheck(stream != NULL); @@ -1411,26 +1483,33 @@ void DirectDrawSurface::printInfo() const if (header.pf.flags & DDPF_ALPHAPREMULT) printf("\t\tDDPF_ALPHAPREMULT\n"); if (header.pf.flags & DDPF_NORMAL) printf("\t\tDDPF_NORMAL\n"); - printf("\tFourCC: '%c%c%c%c'\n", - ((header.pf.fourcc >> 0) & 0xFF), - ((header.pf.fourcc >> 8) & 0xFF), - ((header.pf.fourcc >> 16) & 0xFF), - ((header.pf.fourcc >> 24) & 0xFF)); - if ((header.pf.fourcc & DDPF_FOURCC) && (header.pf.bitcount != 0)) + if (header.pf.fourcc != 0) { + // Display fourcc code even when DDPF_FOURCC flag not set. + printf("\tFourCC: '%c%c%c%c' (0x%.8X)\n", + ((header.pf.fourcc >> 0) & 0xFF), + ((header.pf.fourcc >> 8) & 0xFF), + ((header.pf.fourcc >> 16) & 0xFF), + ((header.pf.fourcc >> 24) & 0xFF), + header.pf.fourcc); + } + + if ((header.pf.flags & DDPF_FOURCC) && (header.pf.bitcount != 0)) { - printf("\tSwizzle: '%c%c%c%c'\n", + printf("\tSwizzle: '%c%c%c%c' (0x%.8X)\n", (header.pf.bitcount >> 0) & 0xFF, (header.pf.bitcount >> 8) & 0xFF, (header.pf.bitcount >> 16) & 0xFF, - (header.pf.bitcount >> 24) & 0xFF); + (header.pf.bitcount >> 24) & 0xFF, + header.pf.bitcount); } else { printf("\tBit count: %d\n", header.pf.bitcount); } - printf("\tRed mask: 0x%.8X\n", header.pf.rmask); + + printf("\tRed mask: 0x%.8X\n", header.pf.rmask); printf("\tGreen mask: 0x%.8X\n", header.pf.gmask); - printf("\tBlue mask: 0x%.8X\n", header.pf.bmask); + printf("\tBlue mask: 0x%.8X\n", header.pf.bmask); printf("\tAlpha mask: 0x%.8X\n", header.pf.amask); printf("Caps:\n"); @@ -1467,7 +1546,7 @@ void DirectDrawSurface::printInfo() const printf("\tArray size: %u\n", header.header10.arraySize); } - if (header.reserved[9] == MAKEFOURCC('N', 'V', 'T', 'T')) + if (header.reserved[9] == FOURCC_NVTT) { int major = (header.reserved[10] >> 16) & 0xFF; int minor = (header.reserved[10] >> 8) & 0xFF; @@ -1476,5 +1555,10 @@ void DirectDrawSurface::printInfo() const printf("Version:\n"); printf("\tNVIDIA Texture Tools %d.%d.%d\n", major, minor, revision); } + + if (header.reserved[7] == FOURCC_UVER) + { + printf("User Version: %d\n", header.reserved[8]); + } } diff --git a/src/nvimage/DirectDrawSurface.h b/src/nvimage/DirectDrawSurface.h index 502aa1f..3b294c9 100644 --- a/src/nvimage/DirectDrawSurface.h +++ b/src/nvimage/DirectDrawSurface.h @@ -33,6 +33,8 @@ namespace nv class Stream; struct ColorBlock; + extern const uint FOURCC_NVTT; + struct NVIMAGE_CLASS DDSPixelFormat { uint size; @@ -100,10 +102,17 @@ namespace nv void setDX10Format(uint format); void setNormalFlag(bool b); void setHasAlphaFlag(bool b); + void setUserVersion(int version); void swapBytes(); bool hasDX10Header() const; + uint 
signature() const; + uint toolVersion() const; + uint userVersion() const; + bool isNormalMap() const; + bool hasAlpha() const; + uint d3d9Format() const; }; NVIMAGE_API Stream & operator<< (Stream & s, DDSHeader & header); @@ -137,9 +146,11 @@ namespace nv void setNormalFlag(bool b); void setHasAlphaFlag(bool b); + void setUserVersion(int version); void mipmap(Image * img, uint f, uint m); // void mipmap(FloatImage * img, uint f, uint m); + void * readData(uint * sizePtr); void printInfo() const; diff --git a/src/nvimage/FloatImage.cpp b/src/nvimage/FloatImage.cpp index 7fabfc5..f2bf44f 100644 --- a/src/nvimage/FloatImage.cpp +++ b/src/nvimage/FloatImage.cpp @@ -199,12 +199,12 @@ void FloatImage::normalize(uint base_component) void FloatImage::packNormals(uint base_component) { - scaleBias(base_component, 3, 0.5f, 1.0f); + scaleBias(base_component, 3, 0.5f, 0.5f); } void FloatImage::expandNormals(uint base_component) { - scaleBias(base_component, 3, 2, -0.5); + scaleBias(base_component, 3, 2, -1.0); } void FloatImage::scaleBias(uint base_component, uint num, float scale, float bias) @@ -215,7 +215,7 @@ void FloatImage::scaleBias(uint base_component, uint num, float scale, float bia float * ptr = this->channel(base_component + c); for(uint i = 0; i < size; i++) { - ptr[i] = scale * (ptr[i] + bias); + ptr[i] = scale * ptr[i] + bias; } } } diff --git a/src/nvimage/ImageIO.cpp b/src/nvimage/ImageIO.cpp index 5023f91..8c10934 100644 --- a/src/nvimage/ImageIO.cpp +++ b/src/nvimage/ImageIO.cpp @@ -5,8 +5,10 @@ #include "FloatImage.h" #include "TgaFile.h" #include "PsdFile.h" +#include "DirectDrawSurface.h" #include "nvmath/Color.h" +#include "nvmath/Half.h" #include "nvcore/Ptr.h" #include "nvcore/Utils.h" @@ -93,6 +95,8 @@ namespace nv #endif // defined(HAVE_FREEIMAGE) + static FloatImage * loadFloatDDS(Stream & s); + } // ImageIO namespace } // nv namespace @@ -207,12 +211,18 @@ FloatImage * nv::ImageIO::loadFloat(const char * fileName, Stream & s) const uint spos = s.tell(); // Save stream position. + if (strCaseCmp(extension, ".dds") == 0) { + floatImage = loadFloatDDS(s); + } + // Try to load as a floating point image. #if defined(HAVE_FREEIMAGE) - FREE_IMAGE_FORMAT fif = FreeImage_GetFIFFromFilename(fileName); - if (fif != FIF_UNKNOWN && FreeImage_FIFSupportsReading(fif)) { - floatImage = loadFloatFreeImage(fif, s); - } + if (floatImage == NULL) { + FREE_IMAGE_FORMAT fif = FreeImage_GetFIFFromFilename(fileName); + if (fif != FIF_UNKNOWN && FreeImage_FIFSupportsReading(fif)) { + floatImage = loadFloatFreeImage(fif, s); + } + } #else // defined(HAVE_FREEIMAGE) #pragma message(NV_FILE_LINE "TODO: Load TIFF and EXR files from stream.") #if defined(HAVE_TIFF) @@ -1697,3 +1707,44 @@ bool nv::ImageIO::saveFloatEXR(const char * fileName, const FloatImage * fimage, #endif // defined(HAVE_OPENEXR) #endif // defined(HAVE_FREEIMAGE) + +FloatImage * nv::ImageIO::loadFloatDDS(Stream & s) +{ + nvCheck(s.isLoading()); + nvCheck(!s.isError()); + + DDSHeader header; + s << header; + + static const uint D3DFMT_A16B16G16R16F = 113; + + // @@ We only support RGBA16F for now. 
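+    // Note: the source data is interleaved RGBA16F. half_to_float() returns the
+    // bit pattern of the widened 32-bit float, which is stored through uint32
+    // pointers aliasing the FloatImage's planar channels (r, g, b, a below).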
+ if (header.pf.fourcc == D3DFMT_A16B16G16R16F) { + const int size = header.width * header.height; + uint16 * const data = new uint16[size * 4]; + + s.serialize(data, size * 4 * sizeof(uint16)); + + FloatImage * img = new FloatImage; + img->allocate(4, header.width, header.height); + + uint32 * r = (uint32 *)img->channel(0); + uint32 * g = (uint32 *)img->channel(1); + uint32 * b = (uint32 *)img->channel(2); + uint32 * a = (uint32 *)img->channel(3); + + uint16 * ptr = data; + for (int i = 0; i < size; i++) { + *r++ = half_to_float( *ptr++ ); + *g++ = half_to_float( *ptr++ ); + *b++ = half_to_float( *ptr++ ); + *a++ = half_to_float( *ptr++ ); + } + + delete [] data; + + return img; + } + + return NULL; +} diff --git a/src/nvtt/CompressionOptions.cpp b/src/nvtt/CompressionOptions.cpp index c86b162..e59a95d 100644 --- a/src/nvtt/CompressionOptions.cpp +++ b/src/nvtt/CompressionOptions.cpp @@ -59,7 +59,9 @@ void CompressionOptions::reset() m.gsize = 8; m.bsize = 8; m.asize = 8; - m.pixelType = PixelType_UnsignedNorm; + + m.pixelType = PixelType_UnsignedNorm; + m.pitchAlignment = 1; m.enableColorDithering = false; m.enableAlphaDithering = false; @@ -98,10 +100,10 @@ void CompressionOptions::setColorWeights(float red, float green, float blue, flo /// Set color mask to describe the RGB/RGBA format. -void CompressionOptions::setPixelFormat(uint bitcount, uint rmask, uint gmask, uint bmask, uint amask) +void CompressionOptions::setPixelFormat(uint bitCount, uint rmask, uint gmask, uint bmask, uint amask) { // Validate arguments. - nvCheck(bitcount == 8 || bitcount == 16 || bitcount == 24 || bitcount == 32); + nvCheck(bitCount <= 32); nvCheck((rmask & gmask) == 0); nvCheck((rmask & bmask) == 0); nvCheck((rmask & amask) == 0); @@ -109,16 +111,16 @@ void CompressionOptions::setPixelFormat(uint bitcount, uint rmask, uint gmask, u nvCheck((gmask & amask) == 0); nvCheck((bmask & amask) == 0); - if (bitcount != 32) + if (bitCount != 32) { - uint maxMask = (1 << bitcount); + uint maxMask = (1 << bitCount); nvCheck(maxMask > rmask); nvCheck(maxMask > gmask); nvCheck(maxMask > bmask); nvCheck(maxMask > amask); } - m.bitcount = bitcount; + m.bitcount = bitCount; m.rmask = rmask; m.gmask = gmask; m.bmask = bmask; @@ -153,6 +155,14 @@ void CompressionOptions::setPixelType(PixelType pixelType) } +/// Set pitch alignment in bytes. +void CompressionOptions::setPitchAlignment(int pitchAlignment) +{ + nvDebugCheck(pitchAlignment > 0 && isPowerOfTwo(pitchAlignment)); + m.pitchAlignment = pitchAlignment; +} + + /// Use external compressor. void CompressionOptions::setExternalCompressor(const char * name) { diff --git a/src/nvtt/CompressionOptions.h b/src/nvtt/CompressionOptions.h index ba69bfb..08420c6 100644 --- a/src/nvtt/CompressionOptions.h +++ b/src/nvtt/CompressionOptions.h @@ -51,7 +51,8 @@ namespace nvtt uint8 asize; PixelType pixelType; - + uint pitchAlignment; + nv::String externalCompressor; // Quantization. diff --git a/src/nvtt/CompressorRGB.cpp b/src/nvtt/CompressorRGB.cpp index e1d75d6..eb64b79 100644 --- a/src/nvtt/CompressorRGB.cpp +++ b/src/nvtt/CompressorRGB.cpp @@ -40,12 +40,9 @@ using namespace nvtt; namespace { - inline uint computePitch(uint w, uint bitsize) + inline uint computePitch(uint w, uint bitsize, uint alignment) { - uint p = w * ((bitsize + 7) / 8); - - // Align to 32 bits. 
- return ((p + 3) / 4) * 4; + return ((w * bitsize + 8 * alignment - 1) / (8 * alignment)) * alignment; } inline void convert_to_a8r8g8b8(const void * src, void * dst, uint w) @@ -65,6 +62,67 @@ namespace return half_from_float(c.u); } + struct BitStream + { + BitStream(uint8 * ptr) : ptr(ptr), buffer(0), bits(0) { + } + + void putBits(uint p, int bitCount) + { + nvDebugCheck(bits < 8); + nvDebugCheck(bitCount <= 32); + + uint64 buffer = (this->buffer << bitCount) | p; + uint bits = this->bits + bitCount; + + while (bits >= 8) + { + *ptr++ = (buffer & 0xFF); + + buffer >>= 8; + bits -= 8; + } + + this->buffer = (uint8)buffer; + this->bits = bits; + } + + void putFloat(float f) + { + nvDebugCheck(bits == 0); + *((float *)ptr) = f; + ptr += 4; + } + + void putHalf(float f) + { + nvDebugCheck(bits == 0); + *((uint16 *)ptr) = to_half(f); + ptr += 2; + } + + void flush() + { + nvDebugCheck(bits < 8); + if (bits) { + *ptr++ = buffer; + buffer = 0; + bits = 0; + } + } + + void align(int alignment) + { + nvDebugCheck(alignment >= 1); + flush(); + putBits(0, ((size_t)ptr % alignment) * 8); + } + + uint8 * ptr; + uint8 buffer; + uint8 bits; + }; + } // namespace @@ -96,7 +154,7 @@ void PixelFormatConverter::compress(nvtt::InputFormat inputFormat, nvtt::AlphaMo if (compressionOptions.bitcount != 0) { bitCount = compressionOptions.bitcount; - nvCheck(bitCount == 8 || bitCount == 16 || bitCount == 24 || bitCount == 32); + nvCheck(bitCount <= 32); rmask = compressionOptions.rmask; gmask = compressionOptions.gmask; @@ -130,20 +188,16 @@ void PixelFormatConverter::compress(nvtt::InputFormat inputFormat, nvtt::AlphaMo } } - uint byteCount = (bitCount + 7) / 8; - uint pitch = computePitch(w, bitCount); - - uint srcPitch = w; - uint srcPlane = w * h; - + const uint pitch = computePitch(w, bitCount, compressionOptions.pitchAlignment); + const uint wh = w * h; // Allocate output scanline. 
- uint8 * dst = (uint8 *)mem::malloc(pitch + 4); + uint8 * const dst = (uint8 *)mem::malloc(pitch); for (uint y = 0; y < h; y++) { - const uint * src = (const uint *)data + y * srcPitch; - const float * fsrc = (const float *)data + y * srcPitch; + const uint * src = (const uint *)data + y * w; + const float * fsrc = (const float *)data + y * w; if (inputFormat == nvtt::InputFormat_BGRA_8UB && compressionOptions.pixelType == nvtt::PixelType_UnsignedNorm && bitCount == 32 && rmask == 0xFF0000 && gmask == 0xFF00 && bmask == 0xFF && amask == 0xFF000000) { @@ -151,7 +205,7 @@ void PixelFormatConverter::compress(nvtt::InputFormat inputFormat, nvtt::AlphaMo } else { - uint8 * ptr = dst; + BitStream stream(dst); for (uint x = 0; x < w; x++) { @@ -171,29 +225,25 @@ void PixelFormatConverter::compress(nvtt::InputFormat inputFormat, nvtt::AlphaMo //g = ((float *)src)[4 * x + 1]; //b = ((float *)src)[4 * x + 2]; //a = ((float *)src)[4 * x + 3]; - r = fsrc[x + 0 * srcPlane]; - g = fsrc[x + 1 * srcPlane]; - b = fsrc[x + 2 * srcPlane]; - a = fsrc[x + 3 * srcPlane]; + r = fsrc[x + 0 * wh]; + g = fsrc[x + 1 * wh]; + b = fsrc[x + 2 * wh]; + a = fsrc[x + 3 * wh]; } if (compressionOptions.pixelType == nvtt::PixelType_Float) { - if (rsize == 32) *((float *)ptr) = r; - else if (rsize == 16) *((uint16 *)ptr) = to_half(r); - ptr += rsize / 8; + if (rsize == 32) stream.putFloat(r); + else if (rsize == 16) stream.putHalf(r); - if (gsize == 32) *((float *)ptr) = g; - else if (gsize == 16) *((uint16 *)ptr) = to_half(g); - ptr += gsize / 8; + if (gsize == 32) stream.putFloat(g); + else if (gsize == 16) stream.putHalf(g); - if (bsize == 32) *((float *)ptr) = b; - else if (bsize == 16) *((uint16 *)ptr) = to_half(b); - ptr += bsize / 8; + if (bsize == 32) stream.putFloat(b); + else if (bsize == 16) stream.putHalf(b); - if (asize == 32) *((float *)ptr) = a; - else if (asize == 16) *((uint16 *)ptr) = to_half(a); - ptr += asize / 8; + if (asize == 32) stream.putFloat(a); + else if (asize == 16) stream.putHalf(a); } else { @@ -212,25 +262,27 @@ void PixelFormatConverter::compress(nvtt::InputFormat inputFormat, nvtt::AlphaMo p |= PixelFormat::convert(c.b, 8, bsize) << bshift; p |= PixelFormat::convert(c.a, 8, asize) << ashift; + stream.putBits(p, bitCount); + // Output one byte at a time. - for (uint i = 0; i < byteCount; i++) + /*for (uint i = 0; i < byteCount; i++) { *(dst + x * byteCount + i) = (p >> (i * 8)) & 0xFF; - } + }*/ } } // Zero padding. - for (uint x = w * byteCount; x < pitch; x++) + stream.align(compressionOptions.pitchAlignment); + nvDebugCheck(stream.ptr == dst + pitch); + + /*for (uint x = w * byteCount; x < pitch; x++) { *(dst + x) = 0; - } + }*/ } - if (outputOptions.outputHandler != NULL) - { - outputOptions.outputHandler->writeData(dst, pitch); - } + outputOptions.writeData(dst, pitch); } mem::free(dst); diff --git a/src/nvtt/Context.cpp b/src/nvtt/Context.cpp index 7a60314..667bc7c 100644 --- a/src/nvtt/Context.cpp +++ b/src/nvtt/Context.cpp @@ -89,18 +89,15 @@ namespace return 0; } - inline uint computePitch(uint w, uint bitsize) - { - uint p = w * ((bitsize + 7) / 8); - - // Align to 32 bits. 
- return ((p + 3) / 4) * 4; - } + inline uint computePitch(uint w, uint bitsize, uint alignment) + { + return ((w * bitsize + 8 * alignment - 1) / (8 * alignment)) * alignment; + } - static int computeImageSize(uint w, uint h, uint d, uint bitCount, Format format) + static int computeImageSize(uint w, uint h, uint d, uint bitCount, uint alignment, Format format) { if (format == Format_RGBA) { - return d * h * computePitch(w, bitCount); + return d * h * computePitch(w, bitCount, alignment); } else { // @@ Handle 3D textures. DXT and VTC have different behaviors. @@ -324,7 +321,7 @@ int Compressor::estimateSize(int w, int h, int d, const CompressionOptions & com uint bitCount = co.getBitCount(); - return computeImageSize(w, h, d, bitCount, format); + return computeImageSize(w, h, d, bitCount, co.pitchAlignment, format); } @@ -413,7 +410,20 @@ bool Compressor::Private::outputHeader(const InputOptions::Private & inputOption if (outputOptions.container == Container_DDS || outputOptions.container == Container_DDS10) { DDSHeader header; - + + header.setUserVersion(outputOptions.version); + + if (inputOptions.textureType == TextureType_2D) { + header.setTexture2D(); + } + else if (inputOptions.textureType == TextureType_Cube) { + header.setTextureCube(); + } + /*else if (inputOptions.textureType == TextureType_3D) { + header.setTexture3D(); + header.setDepth(inputOptions.targetDepth); + }*/ + header.setWidth(inputOptions.targetWidth); header.setHeight(inputOptions.targetHeight); @@ -499,7 +509,7 @@ bool Compressor::Private::outputHeader(const InputOptions::Private & inputOption if (compressionOptions.format == Format_RGBA) { // Get output bit count. - header.setPitch(computePitch(inputOptions.targetWidth, compressionOptions.getBitCount())); + header.setPitch(computePitch(inputOptions.targetWidth, compressionOptions.getBitCount(), compressionOptions.pitchAlignment)); if (compressionOptions.pixelType == PixelType_Float) { @@ -564,7 +574,7 @@ bool Compressor::Private::outputHeader(const InputOptions::Private & inputOption } else { - header.setLinearSize(computeImageSize(inputOptions.targetWidth, inputOptions.targetHeight, inputOptions.targetDepth, compressionOptions.bitcount, compressionOptions.format)); + header.setLinearSize(computeImageSize(inputOptions.targetWidth, inputOptions.targetHeight, inputOptions.targetDepth, compressionOptions.bitcount, compressionOptions.pitchAlignment, compressionOptions.format)); if (compressionOptions.format == Format_DXT1 || compressionOptions.format == Format_DXT1a || compressionOptions.format == Format_DXT1n) { header.setFourCC('D', 'X', 'T', '1'); @@ -618,17 +628,6 @@ bool Compressor::Private::outputHeader(const InputOptions::Private & inputOption return false; } - if (inputOptions.textureType == TextureType_2D) { - header.setTexture2D(); - } - else if (inputOptions.textureType == TextureType_Cube) { - header.setTextureCube(); - } - /*else if (inputOptions.textureType == TextureType_3D) { - header.setTexture3D(); - header.setDepth(inputOptions.targetDepth); - }*/ - // Swap bytes if necessary. 
header.swapBytes(); @@ -669,6 +668,19 @@ bool Compressor::Private::outputHeader(const TexImage & tex, int mipmapCount, co { DDSHeader header; + header.setUserVersion(outputOptions.version); + + if (tex.textureType() == TextureType_2D) { + header.setTexture2D(); + } + else if (tex.textureType() == TextureType_Cube) { + header.setTextureCube(); + } + /*else if (tex.textureType() == TextureType_3D) { + header.setTexture3D(); + header.setDepth(tex.depth()); + }*/ + header.setWidth(tex.width()); header.setHeight(tex.height()); header.setMipmapCount(mipmapCount); @@ -750,7 +762,7 @@ bool Compressor::Private::outputHeader(const TexImage & tex, int mipmapCount, co if (compressionOptions.format == Format_RGBA) { // Get output bit count. - header.setPitch(computePitch(tex.width(), compressionOptions.getBitCount())); + header.setPitch(computePitch(tex.width(), compressionOptions.getBitCount(), compressionOptions.pitchAlignment)); if (compressionOptions.pixelType == PixelType_Float) { @@ -815,7 +827,7 @@ bool Compressor::Private::outputHeader(const TexImage & tex, int mipmapCount, co } else { - header.setLinearSize(computeImageSize(tex.width(), tex.height(), tex.depth(), compressionOptions.bitcount, compressionOptions.format)); + header.setLinearSize(computeImageSize(tex.width(), tex.height(), tex.depth(), compressionOptions.bitcount, compressionOptions.pitchAlignment, compressionOptions.format)); if (compressionOptions.format == Format_DXT1 || compressionOptions.format == Format_DXT1a || compressionOptions.format == Format_DXT1n) { header.setFourCC('D', 'X', 'T', '1'); @@ -869,17 +881,6 @@ bool Compressor::Private::outputHeader(const TexImage & tex, int mipmapCount, co return false; } - if (tex.textureType() == TextureType_2D) { - header.setTexture2D(); - } - else if (tex.textureType() == TextureType_Cube) { - header.setTextureCube(); - } - /*else if (tex.textureType() == TextureType_3D) { - header.setTexture3D(); - header.setDepth(tex.depth()); - }*/ - // Swap bytes if necessary. 
header.swapBytes(); @@ -890,7 +891,7 @@ bool Compressor::Private::outputHeader(const TexImage & tex, int mipmapCount, co headerSize = 128 + 20; } - bool writeSucceed = outputOptions.outputHandler->writeData(&header, headerSize); + bool writeSucceed = outputOptions.writeData(&header, headerSize); if (!writeSucceed) { outputOptions.error(Error_FileWrite); @@ -916,7 +917,7 @@ bool Compressor::Private::compressMipmaps(uint f, const InputOptions::Private & for (uint m = 0; m < mipmapCount; m++) { - int size = computeImageSize(w, h, d, compressionOptions.getBitCount(), compressionOptions.format); + int size = computeImageSize(w, h, d, compressionOptions.getBitCount(), compressionOptions.pitchAlignment, compressionOptions.format); outputOptions.beginImage(size, w, h, d, f, m); if (!initMipmap(mipmap, inputOptions, w, h, d, f, m)) @@ -1553,12 +1554,12 @@ int Compressor::Private::estimateSize(const InputOptions::Private & inputOptions { const Format format = compressionOptions.format; - uint bitCount = compressionOptions.bitcount; - if (format == Format_RGBA && bitCount == 0) bitCount = compressionOptions.rsize + compressionOptions.gsize + compressionOptions.bsize + compressionOptions.asize; + const uint bitCount = compressionOptions.getBitCount(); + const uint pitchAlignment = compressionOptions.pitchAlignment; inputOptions.computeTargetExtents(); - uint mipmapCount = inputOptions.realMipmapCount(); + const uint mipmapCount = inputOptions.realMipmapCount(); int size = 0; @@ -1570,7 +1571,7 @@ int Compressor::Private::estimateSize(const InputOptions::Private & inputOptions for (uint m = 0; m < mipmapCount; m++) { - size += computeImageSize(w, h, d, bitCount, format); + size += computeImageSize(w, h, d, bitCount, pitchAlignment, format); // Compute extents of next mipmap: w = max(1U, w / 2); diff --git a/src/nvtt/OutputOptions.cpp b/src/nvtt/OutputOptions.cpp index 05c18ef..38ea7ed 100644 --- a/src/nvtt/OutputOptions.cpp +++ b/src/nvtt/OutputOptions.cpp @@ -49,13 +49,20 @@ void OutputOptions::reset() m.outputHeader = true; m.container = Container_DDS; + m.version = 0; } /// Set output file name. void OutputOptions::setFileName(const char * fileName) { - m.fileName = fileName; // @@ Do we need to record filename? + if (!m.fileName.isNull()) + { + // To close the file and avoid leak. + delete m.outputHandler; + } + + m.fileName = fileName; m.outputHandler = NULL; DefaultOutputHandler * oh = new DefaultOutputHandler(fileName); @@ -94,6 +101,11 @@ void OutputOptions::setContainer(Container container) m.container = container; } +/// Set user version. 
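+/// The value is stored in the reserved words of the DDS header, tagged with the
+/// 'UVER' fourcc, and can be read back later through DDSHeader::userVersion().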
diff --git a/src/nvtt/OutputOptions.cpp b/src/nvtt/OutputOptions.cpp
index 05c18ef..38ea7ed 100644
--- a/src/nvtt/OutputOptions.cpp
+++ b/src/nvtt/OutputOptions.cpp
@@ -49,13 +49,20 @@ void OutputOptions::reset()
 	m.outputHeader = true;
 	m.container = Container_DDS;
+	m.version = 0;
 }
 
 
 /// Set output file name.
 void OutputOptions::setFileName(const char * fileName)
 {
-	m.fileName = fileName; // @@ Do we need to record filename?
+	if (!m.fileName.isNull())
+	{
+		// To close the file and avoid leak.
+		delete m.outputHandler;
+	}
+
+	m.fileName = fileName;
 	m.outputHandler = NULL;
 
 	DefaultOutputHandler * oh = new DefaultOutputHandler(fileName);
@@ -94,6 +101,11 @@ void OutputOptions::setContainer(Container container)
 	m.container = container;
 }
 
+/// Set user version.
+void OutputOptions::setUserVersion(int version)
+{
+	m.version = version;
+}
 
 bool OutputOptions::Private::hasValidOutputHandler() const
 {
diff --git a/src/nvtt/OutputOptions.h b/src/nvtt/OutputOptions.h
index 84121af..619ad7c 100644
--- a/src/nvtt/OutputOptions.h
+++ b/src/nvtt/OutputOptions.h
@@ -35,9 +35,7 @@ namespace nvtt
 	{
 		DefaultOutputHandler(const char * fileName) : stream(fileName) {}
 
-		virtual ~DefaultOutputHandler()
-		{
-		}
+		virtual ~DefaultOutputHandler() {}
 
 		virtual void beginImage(int size, int width, int height, int depth, int face, int miplevel)
 		{
@@ -66,6 +64,7 @@ namespace nvtt
 
 		bool outputHeader;
 		Container container;
+		int version;
 
 		bool hasValidOutputHandler() const;
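OutputOptions::setUserVersion() stores an application-defined integer (default 0) that Compressor::Private::outputHeader() forwards into the DDS header via DDSHeader::setUserVersion(). A short usage sketch against the public API as declared in this patch; the file name and version value are illustrative:

    #include <nvtt/nvtt.h>

    // Sketch only: tag the emitted DDS file with an application-defined version number.
    void configureOutput(nvtt::OutputOptions & outputOptions)
    {
        outputOptions.setFileName("texture.dds");        // illustrative output path
        outputOptions.setContainer(nvtt::Container_DDS); // the user version is written into the DDS header
        outputOptions.setUserVersion(3);                 // stays 0 if never called
    }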
diff --git a/src/nvtt/TexImage.cpp b/src/nvtt/TexImage.cpp
index 87f9ff3..4c1f8dd 100644
--- a/src/nvtt/TexImage.cpp
+++ b/src/nvtt/TexImage.cpp
@@ -271,23 +271,19 @@ float TexImage::alphaTestCoverage(float alphaRef/*= 0.5*/) const
 
 bool TexImage::load(const char * fileName)
 {
-#pragma message(NV_FILE_LINE "TODO: Add support for DDS textures in TexImage::load().")
-
-	AutoPtr<FloatImage> img(ImageIO::loadFloat(fileName));
-
-	if (img == NULL)
-	{
-		return false;
-	}
+	AutoPtr<FloatImage> img(ImageIO::loadFloat(fileName));
+	if (img == NULL) {
+		return false;
+	}
 
-	detach();
+	detach();
 
-	img->resizeChannelCount(4);
+	img->resizeChannelCount(4);
 
-	m->imageArray.resize(1);
-	m->imageArray[0] = img.release();
+	m->imageArray.resize(1);
+	m->imageArray[0] = img.release();
 
-	return true;
+	return true;
 }
 
 bool TexImage::save(const char * fileName) const
@@ -560,25 +556,25 @@ void TexImage::resize(int w, int h, ResizeFilter filter)
 			if (filter == ResizeFilter_Box)
 			{
 				BoxFilter filter;
-				m->imageArray[i]->resize(filter, w, h, wrapMode, 3);
+				img = img->resize(filter, w, h, wrapMode, 3);
 			}
 			else if (filter == ResizeFilter_Triangle)
 			{
 				TriangleFilter filter;
-				m->imageArray[i]->resize(filter, w, h, wrapMode, 3);
+				img = img->resize(filter, w, h, wrapMode, 3);
 			}
 			else if (filter == ResizeFilter_Kaiser)
 			{
 				//KaiserFilter filter(inputOptions.kaiserWidth);
 				//filter.setParameters(inputOptions.kaiserAlpha, inputOptions.kaiserStretch);
 				KaiserFilter filter(3);
-				m->imageArray[i]->resize(filter, w, h, wrapMode, 3);
+				img = img->resize(filter, w, h, wrapMode, 3);
 			}
 			else //if (filter == ResizeFilter_Mitchell)
 			{
 				nvDebugCheck(filter == ResizeFilter_Mitchell);
 				MitchellFilter filter;
-				m->imageArray[i]->resize(filter, w, h, wrapMode, 3);
+				img = img->resize(filter, w, h, wrapMode, 3);
 			}
 		}
 		else
@@ -586,27 +582,30 @@ void TexImage::resize(int w, int h, ResizeFilter filter)
 			if (filter == ResizeFilter_Box)
 			{
 				BoxFilter filter;
-				m->imageArray[i]->resize(filter, w, h, wrapMode);
+				img = img->resize(filter, w, h, wrapMode);
 			}
 			else if (filter == ResizeFilter_Triangle)
 			{
 				TriangleFilter filter;
-				m->imageArray[i]->resize(filter, w, h, wrapMode);
+				img = img->resize(filter, w, h, wrapMode);
 			}
 			else if (filter == ResizeFilter_Kaiser)
 			{
 				//KaiserFilter filter(inputOptions.kaiserWidth);
 				//filter.setParameters(inputOptions.kaiserAlpha, inputOptions.kaiserStretch);
 				KaiserFilter filter(3);
-				m->imageArray[i]->resize(filter, w, h, wrapMode);
+				img = img->resize(filter, w, h, wrapMode);
 			}
 			else //if (filter == ResizeFilter_Mitchell)
 			{
 				nvDebugCheck(filter == ResizeFilter_Mitchell);
 				MitchellFilter filter;
-				m->imageArray[i]->resize(filter, w, h, wrapMode);
+				img = img->resize(filter, w, h, wrapMode);
 			}
 		}
+
+		delete m->imageArray[i];
+		m->imageArray[i] = img;
 	}
 }
 
@@ -813,6 +812,18 @@ void TexImage::scaleBias(int channel, float scale, float bias)
 	}
 }
 
+void TexImage::clamp(int channel, float low, float high)
+{
+	detach();
+
+	foreach (i, m->imageArray)
+	{
+		if (m->imageArray[i] == NULL) continue;
+
+		m->imageArray[i]->clamp(channel, 1, low, high);
+	}
+}
+
 void TexImage::packNormal()
 {
 	scaleBias(0, 0.5f, 0.5f);
diff --git a/src/nvtt/nvtt.h b/src/nvtt/nvtt.h
index ca61ffe..3fbc1dc 100644
--- a/src/nvtt/nvtt.h
+++ b/src/nvtt/nvtt.h
@@ -48,7 +48,7 @@
 #	define NVTT_API
 #endif
 
-#define NVTT_VERSION 020100
+#define NVTT_VERSION 20100
 
 #define NVTT_FORBID_COPY(Class) \
 	private: \
@@ -144,6 +144,8 @@ namespace nvtt
 
 		NVTT_API void setPixelType(PixelType pixelType);
 
+		NVTT_API void setPitchAlignment(int pitchAlignment);
+
 		NVTT_API void setQuantization(bool colorDithering, bool alphaDithering, bool binaryAlpha, int alphaThreshold = 127);
 	};
 
@@ -345,6 +347,7 @@ namespace nvtt
 
 		NVTT_API void setOutputHeader(bool outputHeader);
 		NVTT_API void setContainer(Container container);
+		NVTT_API void setUserVersion(int version);
 	};
 
 
@@ -432,6 +435,7 @@ namespace nvtt
 		NVTT_API void transform(const float w0[4], const float w1[4], const float w2[4], const float w3[4], const float offset[4]);
 		NVTT_API void swizzle(int r, int g, int b, int a);
 		NVTT_API void scaleBias(int channel, float scale, float bias);
+		NVTT_API void clamp(int channel, float low = 0.0f, float high = 1.0f);
 		NVTT_API void packNormal();
 		NVTT_API void expandNormal();
 		NVTT_API void blend(float r, float g, float b, float a, float t);
diff --git a/src/nvtt/nvtt_wrapper.h b/src/nvtt/nvtt_wrapper.h
index 63c9b5a..01ccc0d 100644
--- a/src/nvtt/nvtt_wrapper.h
+++ b/src/nvtt/nvtt_wrapper.h
@@ -47,7 +47,7 @@
 #	define NVTT_API
 #endif
 
-#define NVTT_VERSION 020100
+#define NVTT_VERSION 20100
 
 #ifdef __cplusplus
 typedef struct nvtt::InputOptions NvttInputOptions;
diff --git a/src/nvtt/tests/imperativeapi.cpp b/src/nvtt/tests/imperativeapi.cpp
index 4791e41..5e9589c 100644
--- a/src/nvtt/tests/imperativeapi.cpp
+++ b/src/nvtt/tests/imperativeapi.cpp
@@ -55,7 +55,7 @@ int main(int argc, char *argv[])
 	outputFileName.stripExtension();
 	outputFileName.append(".dds");
 
-	outputOptions.setFileName(outputFileName);
+	outputOptions.setFileName(outputFileName.str());
 
 	// Output compressed image.
 	context.outputHeader(image, image.countMipmaps(), compressionOptions, outputOptions);
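nvtt.h now declares TexImage::clamp() with defaults of low = 0.0f and high = 1.0f, and TexImage.cpp implements it by clamping the chosen channel of every image in the array. A brief usage sketch; the input file and surrounding setup are illustrative, not from the patch:

    #include <nvtt/nvtt.h>

    // Sketch only: clamp floating-point channels into [0, 1] before compression.
    bool clampChannels(nvtt::TexImage & image)
    {
        if (!image.load("input.hdr")) {  // illustrative input file
            return false;
        }

        image.clamp(0);                  // red, uses the default [0, 1] range
        image.clamp(1);                  // green
        image.clamp(2);                  // blue
        image.clamp(3, 0.0f, 1.0f);      // alpha, range given explicitly
        return true;
    }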
diff --git a/src/nvtt/tests/testsuite.cpp b/src/nvtt/tests/testsuite.cpp
index 713b074..09475bb 100644
--- a/src/nvtt/tests/testsuite.cpp
+++ b/src/nvtt/tests/testsuite.cpp
@@ -423,7 +423,7 @@ int main(int argc, char *argv[])
 
 	Path csvFileName;
 	csvFileName.format("%s/result.csv", outPath);
-	StdOutputStream csvStream(csvFileName);
+	StdOutputStream csvStream(csvFileName.str());
 	TextWriter csvWriter(&csvStream);
 
 	float totalTime = 0;
@@ -465,7 +465,7 @@ int main(int argc, char *argv[])
 			outputFileName.format("%s/%s", outPath, fileNames[i]);
 			outputFileName.stripExtension();
 			outputFileName.append(".png");
-			if (!ImageIO::save(outputFileName, img_out.ptr()))
+			if (!ImageIO::save(outputFileName.str(), img_out.ptr()))
 			{
 				printf("Error saving file '%s'.\n", outputFileName.str());
 			}
diff --git a/src/nvtt/tools/assemble.cpp b/src/nvtt/tools/assemble.cpp
index 2f8afff..c3086e2 100644
--- a/src/nvtt/tools/assemble.cpp
+++ b/src/nvtt/tools/assemble.cpp
@@ -114,7 +114,7 @@ int main(int argc, char *argv[])
 
 	for (uint i = 0; i < imageCount; i++)
 	{
-		if (!images[i].load(files[i]))
+		if (!images[i].load(files[i].str()))
 		{
 			printf("*** error loading file\n");
 			return 1;
@@ -138,7 +138,7 @@ int main(int argc, char *argv[])
 	}
 
 
-	nv::StdOutputStream stream(output);
+	nv::StdOutputStream stream(output.str());
 	if (stream.isError()) {
 		printf("Error opening '%s' for writting\n", output.str());
 		return 1;
diff --git a/src/nvtt/tools/compress.cpp b/src/nvtt/tools/compress.cpp
index caeedfb..f21afb8 100644
--- a/src/nvtt/tools/compress.cpp
+++ b/src/nvtt/tools/compress.cpp
@@ -295,11 +295,12 @@ int main(int argc, char *argv[])
 	}
 
 	const uint version = nvtt::version();
-	const uint major = version / 100;
-	const uint minor = version % 100;
+	const uint major = version / 100 / 100;
+	const uint minor = (version / 100) % 100;
+	const uint rev = version % 100;
 
-	printf("NVIDIA Texture Tools %u.%u - Copyright NVIDIA Corporation 2007\n\n", major, minor);
+	printf("NVIDIA Texture Tools %u.%u.%u - Copyright NVIDIA Corporation 2007\n\n", major, minor, rev);
 
 	if (input.isNull())
 	{
@@ -351,7 +352,7 @@ int main(int argc, char *argv[])
 	if (nv::strCaseCmp(input.extension(), ".dds") == 0)
 	{
 		// Load surface.
-		nv::DirectDrawSurface dds(input);
+		nv::DirectDrawSurface dds(input.str());
 		if (!dds.isValid())
 		{
 			fprintf(stderr, "The file '%s' is not a valid DDS file.\n", input.str());
@@ -400,7 +401,7 @@ int main(int argc, char *argv[])
 
 		if (loadAsFloat)
 		{
-			nv::AutoPtr<nv::FloatImage> image(nv::ImageIO::loadFloat(input));
+			nv::AutoPtr<nv::FloatImage> image(nv::ImageIO::loadFloat(input.str()));
 
 			if (image == NULL)
 			{
@@ -420,7 +421,7 @@ int main(int argc, char *argv[])
 		{
 			// Regular image.
 			nv::Image image;
-			if (!image.load(input))
+			if (!image.load(input.str()))
 			{
 				fprintf(stderr, "The file '%s' is not a supported image type.\n", input.str());
 				return 1;
@@ -449,6 +450,9 @@ int main(int argc, char *argv[])
 		inputOptions.setAlphaMode(nvtt::AlphaMode_None);
 	}
 
+	inputOptions.setRoundMode(nvtt::RoundMode_ToNearestPowerOfTwo);
+
+
 	if (normal)
 	{
 		setNormalMap(inputOptions);
@@ -524,7 +528,7 @@ int main(int argc, char *argv[])
 
 	MyErrorHandler errorHandler;
-	MyOutputHandler outputHandler(output);
+	MyOutputHandler outputHandler(output.str());
 	if (outputHandler.stream->isError())
 	{
 		fprintf(stderr, "Error opening '%s' for writting\n", output.str());
diff --git a/src/nvtt/tools/decompress.cpp b/src/nvtt/tools/decompress.cpp
index b51df5c..9a7b11a 100644
--- a/src/nvtt/tools/decompress.cpp
+++ b/src/nvtt/tools/decompress.cpp
@@ -112,7 +112,7 @@ int main(int argc, char *argv[])
 	}
 
 	// Load surface.
-	nv::DirectDrawSurface dds(input);
+	nv::DirectDrawSurface dds(input.str());
 	if (!dds.isValid())
 	{
 		fprintf(stderr, "The file '%s' is not a valid DDS file.\n", input.str());
@@ -179,7 +179,7 @@ int main(int argc, char *argv[])
 					return 1;
 				}
 
-				nv::ImageIO::save(name, stream, &mipmap);
+				nv::ImageIO::save(name.str(), stream, &mipmap);
 			}
 		}
diff --git a/src/nvtt/tools/imgdiff.cpp b/src/nvtt/tools/imgdiff.cpp
index 60e3f4d..bc9d98b 100644
--- a/src/nvtt/tools/imgdiff.cpp
+++ b/src/nvtt/tools/imgdiff.cpp
@@ -204,8 +204,8 @@ int main(int argc, char *argv[])
 	}
 
 	nv::Image image0, image1;
-	if (!loadImage(image0, input0)) return 0;
-	if (!loadImage(image1, input1)) return 0;
+	if (!loadImage(image0, input0.str())) return 0;
+	if (!loadImage(image1, input1.str())) return 0;
 
 	const uint w0 = image0.width();
 	const uint h0 = image0.height();
diff --git a/src/nvtt/tools/resize.cpp b/src/nvtt/tools/resize.cpp
index d72fbdb..6d74403 100644
--- a/src/nvtt/tools/resize.cpp
+++ b/src/nvtt/tools/resize.cpp
@@ -164,7 +164,7 @@ int main(int argc, char *argv[])
 	}
 
 	nv::Image image;
-	if (!loadImage(image, input)) return 0;
+	if (!loadImage(image, input.str())) return 0;
 
 	nv::FloatImage fimage(&image);
 	fimage.toLinear(0, 3, gamma);
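For reference, NVTT_VERSION is now the decimal literal 20100; the old spelling 020100 is an octal literal in C++ (8256), which is presumably why both headers changed. The updated compress.cpp splits the value base-100 into major, minor and revision; a worked check of that arithmetic:

    #include <cstdio>

    // Sketch reproducing the version split used in compress.cpp above.
    int main()
    {
        const unsigned int version = 20100;                // NVTT_VERSION as defined in nvtt.h
        const unsigned int major = version / 100 / 100;    // 20100 / 10000 -> 2
        const unsigned int minor = (version / 100) % 100;  // 201 % 100     -> 1
        const unsigned int rev   = version % 100;          // 20100 % 100   -> 0
        printf("NVIDIA Texture Tools %u.%u.%u\n", major, minor, rev);  // prints 2.1.0
        return 0;
    }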