First Push

2024-10-30 03:27:58 -04:00
parent ba4d678cdd
commit dc910d651e
1066 changed files with 1899832 additions and 0 deletions

@@ -0,0 +1,34 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _CONFTEST_H
#define _CONFTEST_H
#include "conftest/headers.h"
#include "conftest/functions.h"
#include "conftest/generic.h"
#include "conftest/macros.h"
#include "conftest/symbols.h"
#include "conftest/types.h"
#endif

@@ -0,0 +1,437 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*! \brief
* Define compile time symbols for CPU type and operating system type.
* This file should only contain preprocessor commands so that
* there are no dependencies on other files.
*
* cpuopsys.h
*
* Copyright (c) 2001, Nvidia Corporation. All rights reserved.
*/
/*!
* Uniform names are defined for compile time options to distinguish
* CPU types and Operating systems.
* Distinctions between CPU and OpSys should be orthogonal.
*
* These uniform names have initially been defined by keying off the
* makefile/build names defined for builds in the OpenGL group.
* Getting the uniform names defined for other builds may require
* different qualifications.
*
* The file is placed here to allow for the possibility of all driver
* components using the same naming convention for conditional compilation.
*/
#ifndef CPUOPSYS_H
#define CPUOPSYS_H
/*****************************************************************************/
/* Define all OS/CPU-Chip related symbols */
/* ***** WINDOWS variations */
#if defined(_WIN32) || defined(_WIN16)
# define NV_WINDOWS
# if defined(_WIN32_WINNT)
# define NV_WINDOWS_NT
# elif defined(_WIN32_WCE)
# define NV_WINDOWS_CE
# elif !defined(NV_MODS)
# define NV_WINDOWS_9X
# endif
#endif /* defined(_WIN32) || defined(_WIN16) */
/* ***** Unix variations */
#if defined(__linux__) && !defined(NV_LINUX) && !defined(NV_VMWARE)
# define NV_LINUX
#endif /* defined(__linux__) */
#if defined(__VMWARE__) && !defined(NV_VMWARE)
# define NV_VMWARE
#endif /* defined(__VMWARE__) */
/* SunOS + gcc */
#if defined(__sun__) && defined(__svr4__) && !defined(NV_SUNOS)
# define NV_SUNOS
#endif /* defined(__sun__) && defined(__svr4__) */
/* SunOS + Sun Compiler (named SunPro, Studio or Forte) */
#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
# define NV_SUNPRO_C
# define NV_SUNOS
#endif /* defined(__SUNPRO_C) || defined(__SUNPRO_CC) */
#if defined(__FreeBSD__) && !defined(NV_BSD)
# define NV_BSD
#endif /* defined(__FreeBSD__) */
/* XXXar don't define NV_UNIX on MacOSX or vxworks or QNX */
#if (defined(__unix__) || defined(__unix) || defined(__INTEGRITY) ) && !defined(nvmacosx) && !defined(vxworks) && !defined(NV_UNIX) && !defined(__QNX__) && !defined(__QNXNTO__)/* XXX until removed from Makefiles */
# define NV_UNIX
#endif /* defined(__unix__) */
#if (defined(__QNX__) || defined(__QNXNTO__)) && !defined(NV_QNX)
# define NV_QNX
#endif
#if (defined(__ANDROID__) || defined(ANDROID)) && !defined(NV_ANDROID)
# define NV_ANDROID
#endif
#if defined(DceCore) && !defined(NV_DCECORE)
# define NV_DCECORE
#endif
/* ***** Apple variations */
#if defined(macintosh) || defined(__APPLE__)
# define NV_MACINTOSH
# if defined(__MACH__)
# define NV_MACINTOSH_OSX
# else
# define NV_MACINTOSH_OS9
# endif
# if defined(__LP64__)
# define NV_MACINTOSH_64
# endif
#endif /* defined(macintosh) */
/* ***** VxWorks */
/* Tornado 2.21 is gcc 2.96 and #defines __vxworks. */
/* Tornado 2.02 is gcc 2.7.2 and doesn't define any OS symbol, so we rely on */
/* the build system #defining vxworks. */
#if defined(__vxworks) || defined(vxworks)
# define NV_VXWORKS
#endif
/* ***** Integrity OS */
#if defined(__INTEGRITY)
# if !defined(NV_INTEGRITY)
# define NV_INTEGRITY
# endif
#endif
/* ***** Processor type variations */
/* Note: The prefix NV_CPU_* is taken by Nvcm.h */
#if ((defined(_M_IX86) || defined(__i386__) || defined(__i386)) && !defined(NVCPU_X86)) /* XXX until removed from Makefiles */
/* _M_IX86 for windows, __i386__ for Linux (or any x86 using gcc) */
/* __i386 for Studio compiler on Solaris x86 */
# define NVCPU_X86 /* any IA32 machine (not x86-64) */
# define NVCPU_MIN_PAGE_SHIFT 12
#endif
#if defined(_WIN32) && defined(_M_IA64)
# define NVCPU_IA64_WINDOWS /* any IA64 for Windows opsys */
#endif
#if defined(NV_LINUX) && defined(__ia64__)
# define NVCPU_IA64_LINUX /* any IA64 for Linux opsys */
#endif
#if defined(NVCPU_IA64_WINDOWS) || defined(NVCPU_IA64_LINUX) || defined(IA64)
# define NVCPU_IA64 /* any IA64 for any opsys */
#endif
#if (defined(NV_MACINTOSH) && !(defined(__i386__) || defined(__x86_64__))) || defined(__PPC__) || defined(__ppc)
# if defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
# ifndef NVCPU_PPC64LE
# define NVCPU_PPC64LE /* PPC 64-bit little endian */
# endif
# else
# ifndef NVCPU_PPC
# define NVCPU_PPC /* any non-PPC64LE PowerPC architecture */
# endif
# ifndef NV_BIG_ENDIAN
# define NV_BIG_ENDIAN
# endif
# endif
# define NVCPU_FAMILY_PPC
#endif
#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
# define NVCPU_X86_64 /* any x86-64 for any opsys */
#endif
#if defined(NVCPU_X86) || defined(NVCPU_X86_64)
# define NVCPU_FAMILY_X86
#endif
#if defined(__riscv) && (__riscv_xlen==64)
# define NVCPU_RISCV64
# if defined(__nvriscv)
# define NVCPU_NVRISCV64
# endif
#endif
#if defined(__arm__) || defined(_M_ARM)
/*
* 32-bit instruction set on, e.g., ARMv7 or AArch32 execution state
* on ARMv8
*/
# define NVCPU_ARM
# define NVCPU_MIN_PAGE_SHIFT 12
#endif
#if defined(__aarch64__) || defined(__ARM64__) || defined(_M_ARM64)
# define NVCPU_AARCH64 /* 64-bit A64 instruction set on ARMv8 */
# define NVCPU_MIN_PAGE_SHIFT 12
#endif
#if defined(NVCPU_ARM) || defined(NVCPU_AARCH64)
# define NVCPU_FAMILY_ARM
#endif
#if defined(__SH4__)
# ifndef NVCPU_SH4
# define NVCPU_SH4 /* Renesas (formerly Hitachi) SH4 */
# endif
# if defined NV_WINDOWS_CE
# define NVCPU_MIN_PAGE_SHIFT 12
# endif
#endif
/* For Xtensa processors */
#if defined(__XTENSA__)
# define NVCPU_XTENSA
# if defined(__XTENSA_EB__)
# define NV_BIG_ENDIAN
# endif
#endif
/*
* Other flavors of CPU type should be determined at run-time.
* For example, an x86 architecture with/without SSE.
* If it can compile, then there's no need for a compile time option.
* For some current GCC limitations, these may be fixed by using the Intel
* compiler for certain files in a Linux build.
*/
/* The minimum page size can be determined from the minimum page shift */
#if defined(NVCPU_MIN_PAGE_SHIFT)
#define NVCPU_MIN_PAGE_SIZE (1 << NVCPU_MIN_PAGE_SHIFT)
#endif
#if defined(NVCPU_IA64) || defined(NVCPU_X86_64) || \
defined(NV_MACINTOSH_64) || defined(NVCPU_AARCH64) || \
defined(NVCPU_PPC64LE) || defined(NVCPU_RISCV64)
# define NV_64_BITS /* all architectures where pointers are 64 bits */
#else
/* we assume 32 bits. I don't see a need for NV_16_BITS. */
#endif
/* For verification-only features not intended to be included in normal drivers */
#if defined(ENABLE_VERIF_FEATURES)
#define NV_VERIF_FEATURES
#endif
/*
* New, safer family of #define's -- these ones use 0 vs. 1 rather than
* defined/!defined. This is advantageous because if you make a typo,
* say misspelled ENDIAN:
*
* #if NVCPU_IS_BIG_ENDAIN
*
* ...some compilers can give you a warning telling you that you screwed up.
* The compiler can also give you a warning if you forget to #include
* "cpuopsys.h" in your code before the point where you try to use these
* conditionals.
*
* Also, the names have been prefixed in more cases with "CPU" or "OS" for
* increased clarity. You can tell the names apart from the old ones because
* they all use "_IS_" in the name.
*
* Finally, these can be used in "if" statements and not just in #if's. For
* example:
*
* if (NVCPU_IS_BIG_ENDIAN) x = Swap32(x);
*
* Maybe some day in the far-off future these can replace the old #define's.
*/
#if defined(NV_MODS)
#define NV_IS_MODS 1
#else
#define NV_IS_MODS 0
#endif
#if defined(NV_WINDOWS)
#define NVOS_IS_WINDOWS 1
#else
#define NVOS_IS_WINDOWS 0
#endif
#if defined(NV_WINDOWS_CE)
#define NVOS_IS_WINDOWS_CE 1
#else
#define NVOS_IS_WINDOWS_CE 0
#endif
#if defined(NV_LINUX)
#define NVOS_IS_LINUX 1
#else
#define NVOS_IS_LINUX 0
#endif
#if defined(NV_UNIX)
#define NVOS_IS_UNIX 1
#else
#define NVOS_IS_UNIX 0
#endif
#if defined(NV_BSD)
#define NVOS_IS_FREEBSD 1
#else
#define NVOS_IS_FREEBSD 0
#endif
#if defined(NV_SUNOS)
#define NVOS_IS_SOLARIS 1
#else
#define NVOS_IS_SOLARIS 0
#endif
#if defined(NV_VMWARE)
#define NVOS_IS_VMWARE 1
#else
#define NVOS_IS_VMWARE 0
#endif
#if defined(NV_QNX)
#define NVOS_IS_QNX 1
#else
#define NVOS_IS_QNX 0
#endif
#if defined(NV_ANDROID)
#define NVOS_IS_ANDROID 1
#else
#define NVOS_IS_ANDROID 0
#endif
#if defined(NV_MACINTOSH)
#define NVOS_IS_MACINTOSH 1
#else
#define NVOS_IS_MACINTOSH 0
#endif
#if defined(NV_VXWORKS)
#define NVOS_IS_VXWORKS 1
#else
#define NVOS_IS_VXWORKS 0
#endif
#if defined(NV_LIBOS)
#define NVOS_IS_LIBOS 1
#else
#define NVOS_IS_LIBOS 0
#endif
#if defined(NV_INTEGRITY)
#define NVOS_IS_INTEGRITY 1
#else
#define NVOS_IS_INTEGRITY 0
#endif
#if defined(NVCPU_X86)
#define NVCPU_IS_X86 1
#else
#define NVCPU_IS_X86 0
#endif
#if defined(NVCPU_RISCV64)
#define NVCPU_IS_RISCV64 1
#else
#define NVCPU_IS_RISCV64 0
#endif
#if defined(NVCPU_NVRISCV64)
#define NVCPU_IS_NVRISCV64 1
#else
#define NVCPU_IS_NVRISCV64 0
#endif
#if defined(NVCPU_IA64)
#define NVCPU_IS_IA64 1
#else
#define NVCPU_IS_IA64 0
#endif
#if defined(NVCPU_X86_64)
#define NVCPU_IS_X86_64 1
#else
#define NVCPU_IS_X86_64 0
#endif
#if defined(NVCPU_FAMILY_X86)
#define NVCPU_IS_FAMILY_X86 1
#else
#define NVCPU_IS_FAMILY_X86 0
#endif
#if defined(NVCPU_PPC)
#define NVCPU_IS_PPC 1
#else
#define NVCPU_IS_PPC 0
#endif
#if defined(NVCPU_PPC64LE)
#define NVCPU_IS_PPC64LE 1
#else
#define NVCPU_IS_PPC64LE 0
#endif
#if defined(NVCPU_FAMILY_PPC)
#define NVCPU_IS_FAMILY_PPC 1
#else
#define NVCPU_IS_FAMILY_PPC 0
#endif
#if defined(NVCPU_ARM)
#define NVCPU_IS_ARM 1
#else
#define NVCPU_IS_ARM 0
#endif
#if defined(NVCPU_AARCH64)
#define NVCPU_IS_AARCH64 1
#else
#define NVCPU_IS_AARCH64 0
#endif
#if defined(NVCPU_FAMILY_ARM)
#define NVCPU_IS_FAMILY_ARM 1
#else
#define NVCPU_IS_FAMILY_ARM 0
#endif
#if defined(NVCPU_SH4)
#define NVCPU_IS_SH4 1
#else
#define NVCPU_IS_SH4 0
#endif
#if defined(NVCPU_XTENSA)
#define NVCPU_IS_XTENSA 1
#else
#define NVCPU_IS_XTENSA 0
#endif
#if defined(NV_BIG_ENDIAN)
#define NVCPU_IS_BIG_ENDIAN 1
#else
#define NVCPU_IS_BIG_ENDIAN 0
#endif
#if defined(NV_64_BITS)
#define NVCPU_IS_64_BITS 1
#else
#define NVCPU_IS_64_BITS 0
#endif
#if defined(NVCPU_FAMILY_ARM)
#define NVCPU_IS_PCIE_CACHE_COHERENT 0
#else
#define NVCPU_IS_PCIE_CACHE_COHERENT 1
#endif
#if defined(NV_DCECORE)
#define NVOS_IS_DCECORE 1
#else
#define NVOS_IS_DCECORE 0
#endif
/*****************************************************************************/
#endif /* CPUOPSYS_H */
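
A minimal sketch of the "safer" macro family in action (not from this commit; swap32 and to_little_endian are hypothetical helpers). Because NVCPU_IS_BIG_ENDIAN is always defined to 0 or 1, it can be used in a plain C conditional, a misspelling becomes a compile error, and the dead branch is still type-checked before being optimized away:

#include "cpuopsys.h"

/* Hypothetical byte-swap helper, for illustration only. */
static unsigned int swap32(unsigned int x)
{
    return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) <<  8) |
           ((x & 0x00ff0000u) >>  8) | ((x & 0xff000000u) >> 24);
}

static unsigned int to_little_endian(unsigned int x)
{
    if (NVCPU_IS_BIG_ENDIAN)   /* a typo here fails to compile; */
        x = swap32(x);         /* "#if NVCPU_IS_BIG_ENDAIN" might not */
    return x;
}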

@@ -0,0 +1,94 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CAPS_H_
#define _NV_CAPS_H_
#include <nv-kernel-interface-api.h>
/*
* Opaque OS-specific struct; on Linux, this has member
* 'struct proc_dir_entry'.
*/
typedef struct nv_cap nv_cap_t;
/*
* Creates directory named "capabilities" under the provided path.
*
* @param[in] path Absolute path
*
* Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
*/
nv_cap_t* NV_API_CALL nv_cap_init(const char *path);
/*
* Creates capability directory entry
*
* @param[in] parent_cap Parent capability directory
* @param[in] name Capability directory's name
* @param[in] mode Capability directory's access mode
*
* Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
*/
nv_cap_t* NV_API_CALL nv_cap_create_dir_entry(nv_cap_t *parent_cap, const char *name, int mode);
/*
* Creates capability file entry
*
* @param[in] parent_cap Parent capability directory
* @param[in] name Capability file's name
* @param[in] mode Capability file's access mode
*
* Returns a valid nv_cap_t upon success. Otherwise, returns NULL.
*/
nv_cap_t* NV_API_CALL nv_cap_create_file_entry(nv_cap_t *parent_cap, const char *name, int mode);
/*
* Destroys capability entry
*
* @param[in] cap Capability entry
*/
void NV_API_CALL nv_cap_destroy_entry(nv_cap_t *cap);
/*
* Validates and duplicates the provided file descriptor
*
* @param[in] cap Capability entry
* @param[in] fd File descriptor to be validated
*
* Returns duplicate fd upon success. Otherwise, returns -1.
*/
int NV_API_CALL nv_cap_validate_and_dup_fd(const nv_cap_t *cap, int fd);
/*
* Closes file descriptor
*
* This function should be used to close duplicate file descriptors
* returned by nv_cap_validate_and_dup_fd.
*
 * @param[in] fd File descriptor to be closed
*
*/
void NV_API_CALL nv_cap_close_fd(int fd);
#endif /* _NV_CAPS_H_ */
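
A hedged usage sketch of this capability API (the path, entry names, and modes below are hypothetical, not taken from the commit):

static void caps_example(void)
{
    nv_cap_t *root, *dir, *file;

    /* creates "<path>/capabilities" */
    root = nv_cap_init("/driver/example");
    if (root == NULL)
        return;

    dir = nv_cap_create_dir_entry(root, "example-dir", 0555);
    if (dir == NULL)
        return;

    file = nv_cap_create_file_entry(dir, "example-file", 0640);

    /* fds presented by clients are checked with
     * nv_cap_validate_and_dup_fd(); the duplicate is later
     * released with nv_cap_close_fd(). */

    nv_cap_destroy_entry(file);
}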

@@ -0,0 +1,31 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_DMABUF_H_
#define _NV_DMABUF_H_
#include "nv-linux.h"
NV_STATUS nv_dma_buf_export(nv_state_t *, nv_ioctl_export_to_dma_buf_fd_t *);
#endif // _NV_DMABUF_H_

@@ -0,0 +1,83 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
//
// This file holds GPU firmware related registry key definitions that are
// shared between Windows and Unix
//
#ifndef NV_FIRMWARE_REGISTRY_H
#define NV_FIRMWARE_REGISTRY_H
//
// Registry key that when enabled, will enable use of GPU firmware.
//
// Possible mode values:
// 0 - Do not enable GPU firmware
// 1 - Enable GPU firmware
// 2 - (Default) Use the default enablement policy for GPU firmware
//
// Setting this to anything other than 2 will alter driver firmware-
// enablement policies, possibly disabling GPU firmware where it would
// have otherwise been enabled by default.
//
// Policy bits:
//
// POLICY_ALLOW_FALLBACK:
// As the normal behavior is to fail GPU initialization if this registry
// entry is set in such a way that results in an invalid configuration, if
// instead the user would like the driver to automatically try to fallback
// to initializing the failing GPU with firmware disabled, then this bit can
// be set (ex: 0x11 means try to enable GPU firmware but fall back if needed).
// Note that this can result in a mixed mode configuration (ex: GPU0 has
// firmware enabled, but GPU1 does not).
//
#define NV_REG_STR_ENABLE_GPU_FIRMWARE "EnableGpuFirmware"
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK 0x0000000F
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DISABLED 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_MODE_DEFAULT 0x00000002
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK 0x000000F0
#define NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK 0x00000010
#define NV_REG_ENABLE_GPU_FIRMWARE_DEFAULT_VALUE 0x00000012
//
// Registry key that when enabled, will send GPU firmware logs
// to the system log, when possible.
//
// Possible values:
// 0 - Do not send GPU firmware logs to the system log
// 1 - Enable sending of GPU firmware logs to the system log
// 2 - (Default) Enable sending of GPU firmware logs to the system log for
// the debug kernel driver build only
//
#define NV_REG_STR_ENABLE_GPU_FIRMWARE_LOGS "EnableGpuFirmwareLogs"
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_DISABLE 0x00000000
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE 0x00000001
#define NV_REG_ENABLE_GPU_FIRMWARE_LOGS_ENABLE_ON_DEBUG 0x00000002
#endif // NV_FIRMWARE_REGISTRY_H
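
To make the bit layout concrete: the default value 0x12 is just MODE_DEFAULT | POLICY_ALLOW_FALLBACK, and the 0x11 from the comment above is MODE_ENABLED | POLICY_ALLOW_FALLBACK. A hedged sketch of decoding a key value (the function itself is hypothetical):

static void decode_firmware_key(unsigned int regval)  /* e.g. 0x11 */
{
    unsigned int mode   = regval & NV_REG_ENABLE_GPU_FIRMWARE_MODE_MASK;
    unsigned int policy = regval & NV_REG_ENABLE_GPU_FIRMWARE_POLICY_MASK;

    if (mode == NV_REG_ENABLE_GPU_FIRMWARE_MODE_ENABLED &&
        (policy & NV_REG_ENABLE_GPU_FIRMWARE_POLICY_ALLOW_FALLBACK) != 0)
    {
        /* try firmware first; on failure, retry this GPU with
         * firmware disabled instead of failing initialization */
    }
}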

@@ -0,0 +1,132 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_FIRMWARE_H
#define NV_FIRMWARE_H
#include <nvtypes.h>
#include <nvmisc.h>
typedef enum
{
NV_FIRMWARE_TYPE_GSP,
NV_FIRMWARE_TYPE_GSP_LOG
} nv_firmware_type_t;
typedef enum
{
NV_FIRMWARE_CHIP_FAMILY_NULL = 0,
NV_FIRMWARE_CHIP_FAMILY_TU10X = 1,
NV_FIRMWARE_CHIP_FAMILY_TU11X = 2,
NV_FIRMWARE_CHIP_FAMILY_GA100 = 3,
NV_FIRMWARE_CHIP_FAMILY_GA10X = 4,
NV_FIRMWARE_CHIP_FAMILY_AD10X = 5,
NV_FIRMWARE_CHIP_FAMILY_GH100 = 6,
NV_FIRMWARE_CHIP_FAMILY_END,
} nv_firmware_chip_family_t;
static inline const char *nv_firmware_chip_family_to_string(
nv_firmware_chip_family_t fw_chip_family
)
{
switch (fw_chip_family) {
case NV_FIRMWARE_CHIP_FAMILY_GH100: return "gh100";
case NV_FIRMWARE_CHIP_FAMILY_AD10X: return "ad10x";
case NV_FIRMWARE_CHIP_FAMILY_GA10X: return "ga10x";
case NV_FIRMWARE_CHIP_FAMILY_GA100: return "ga100";
case NV_FIRMWARE_CHIP_FAMILY_TU11X: return "tu11x";
case NV_FIRMWARE_CHIP_FAMILY_TU10X: return "tu10x";
case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
case NV_FIRMWARE_CHIP_FAMILY_NULL:
return NULL;
}
return NULL;
}
// The includer (presumably nv.c) may optionally define
// NV_FIRMWARE_PATH_FOR_FILENAME(filename)
// to return a string "path" given a gsp_*.bin or gsp_log_*.bin filename.
//
// The function nv_firmware_path will then be available.
#if defined(NV_FIRMWARE_PATH_FOR_FILENAME)
static inline const char *nv_firmware_path(
nv_firmware_type_t fw_type,
nv_firmware_chip_family_t fw_chip_family
)
{
if (fw_type == NV_FIRMWARE_TYPE_GSP)
{
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_ga10x.bin");
case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_tu10x.bin");
case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
case NV_FIRMWARE_CHIP_FAMILY_NULL:
return "";
}
}
else if (fw_type == NV_FIRMWARE_TYPE_GSP_LOG)
{
switch (fw_chip_family)
{
case NV_FIRMWARE_CHIP_FAMILY_GH100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_AD10X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_GA10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_ga10x.bin");
case NV_FIRMWARE_CHIP_FAMILY_GA100: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU11X: // fall through
case NV_FIRMWARE_CHIP_FAMILY_TU10X:
return NV_FIRMWARE_PATH_FOR_FILENAME("gsp_log_tu10x.bin");
case NV_FIRMWARE_CHIP_FAMILY_END: // fall through
case NV_FIRMWARE_CHIP_FAMILY_NULL:
return "";
}
}
return "";
}
#endif // defined(NV_FIRMWARE_PATH_FOR_FILENAME)
// The includer (presumably nv.c) may optionally define
// NV_FIRMWARE_DECLARE_GSP_FILENAME(filename)
// which will then be invoked (at the top-level) for each
// gsp_*.bin (but not gsp_log_*.bin)
#if defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_ga10x.bin")
NV_FIRMWARE_DECLARE_GSP_FILENAME("gsp_tu10x.bin")
#endif // defined(NV_FIRMWARE_DECLARE_GSP_FILENAME)
#endif // NV_FIRMWARE_H
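
A sketch of the includer pattern described in the comments above. The path prefix is a placeholder and the header file name is assumed; MODULE_FIRMWARE is the standard kernel macro for recording a module's firmware dependencies:

/* In the includer (nv.c-style), before including this header: */
#define NV_FIRMWARE_PATH_FOR_FILENAME(filename)  "nvidia/<version>/" filename
#define NV_FIRMWARE_DECLARE_GSP_FILENAME(filename) \
    MODULE_FIRMWARE("nvidia/<version>/" filename);
#include "nv_firmware.h"   /* assumed name of this header */

/* nv_firmware_path() is now defined: */
const char *p = nv_firmware_path(NV_FIRMWARE_TYPE_GSP,
                                 NV_FIRMWARE_CHIP_FAMILY_GA10X);
/* p == "nvidia/<version>/gsp_ga10x.bin" under the placeholder prefix */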

@@ -0,0 +1,44 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_GPU_INFO_H_
#define _NV_GPU_INFO_H_
typedef struct {
NvU32 gpu_id;
struct {
NvU32 domain;
NvU8 bus, slot, function;
} pci_info;
/*
* opaque OS-specific pointer; on Linux, this is a pointer to the
* 'struct device' for the GPU.
*/
void *os_device_ptr;
} nv_gpu_info_t;
#define NV_MAX_GPUS 32
#endif /* _NV_GPU_INFO_H_ */

@@ -0,0 +1,96 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-22 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_HASH_H__
#define __NV_HASH_H__
#include "conftest.h"
#include "nv-list-helpers.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#if defined(NV_LINUX_STRINGHASH_H_PRESENT)
#include <linux/stringhash.h> /* full_name_hash() */
#else
#include <linux/dcache.h>
#endif
#if (NV_FULL_NAME_HASH_ARGUMENT_COUNT == 3)
#define nv_string_hash(_str) full_name_hash(NULL, _str, strlen(_str))
#else
#define nv_string_hash(_str) full_name_hash(_str, strlen(_str))
#endif
/**
* This naive hashtable was introduced by commit d9b482c8ba19 (v3.7, 2012-10-31).
 * To support older kernels, import the necessary functionality from
* <linux/hashtable.h>.
*/
#define NV_HASH_SIZE(name) (ARRAY_SIZE(name))
#define NV_HASH_BITS(name) ilog2(NV_HASH_SIZE(name))
/* Use hash_32 when possible to allow for fast 32-bit hashing in 64-bit kernels. */
#define NV_HASH_MIN(val, bits) \
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
#define NV_DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
static inline void _nv_hash_init(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
{
INIT_HLIST_HEAD(&ht[i]);
}
}
/**
* nv_hash_init - initialize a hash table
* @hashtable: hashtable to be initialized
*/
#define nv_hash_init(hashtable) _nv_hash_init(hashtable, NV_HASH_SIZE(hashtable))
/**
* nv_hash_add - add an object to a hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define nv_hash_add(hashtable, node, key) \
hlist_add_head(node, &hashtable[NV_HASH_MIN(key, NV_HASH_BITS(hashtable))])
/**
* nv_hash_for_each_possible - iterate over all possible objects hashing to the
* same bucket
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define nv_hash_for_each_possible(name, obj, member, key) \
hlist_for_each_entry(obj, &name[NV_HASH_MIN(key, NV_HASH_BITS(name))], member)
#endif // __NV_HASH_H__
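
A hedged usage sketch of these wrappers (the struct, table size, and key are hypothetical):

struct tracked
{
    unsigned long key;
    struct hlist_node node;
};

/* 1 << 4 = 16 buckets; call nv_hash_init(table) once before use */
static NV_DECLARE_HASHTABLE(table, 4);

static struct tracked *lookup(unsigned long key)
{
    struct tracked *cur;

    /* walk only the bucket that this key hashes to */
    nv_hash_for_each_possible(table, cur, node, key)
    {
        if (cur->key == key)
            return cur;
    }
    return NULL;
}

static void insert(struct tracked *t)
{
    nv_hash_add(table, &t->node, t->key);
}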

@@ -0,0 +1,101 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_HYPERVISOR_H_
#define _NV_HYPERVISOR_H_
#include <nv-kernel-interface-api.h>
// Enums for supported hypervisor types.
// New hypervisor type should be added before OS_HYPERVISOR_UNKNOWN
typedef enum _HYPERVISOR_TYPE
{
OS_HYPERVISOR_XEN = 0,
OS_HYPERVISOR_VMWARE,
OS_HYPERVISOR_HYPERV,
OS_HYPERVISOR_KVM,
OS_HYPERVISOR_UNKNOWN
} HYPERVISOR_TYPE;
#define CMD_VGPU_VFIO_WAKE_WAIT_QUEUE 0
#define CMD_VGPU_VFIO_INJECT_INTERRUPT 1
#define CMD_VGPU_VFIO_REGISTER_MDEV 2
#define CMD_VGPU_VFIO_PRESENT 3
#define CMD_VFIO_PCI_CORE_PRESENT 4
#define MAX_VF_COUNT_PER_GPU 64
typedef enum _VGPU_TYPE_INFO
{
VGPU_TYPE_NAME = 0,
VGPU_TYPE_DESCRIPTION,
VGPU_TYPE_INSTANCES,
} VGPU_TYPE_INFO;
typedef struct
{
void *vgpuVfioRef;
void *waitQueue;
void *nv;
NvU32 *vgpuTypeIds;
NvU8 **vgpuNames;
NvU32 numVgpuTypes;
NvU32 domain;
NvU8 bus;
NvU8 slot;
NvU8 function;
NvBool is_virtfn;
} vgpu_vfio_info;
typedef struct
{
NvU32 domain;
NvU8 bus;
NvU8 slot;
NvU8 function;
NvBool isNvidiaAttached;
NvBool isMdevAttached;
} vgpu_vf_pci_info;
typedef enum VGPU_CMD_PROCESS_VF_INFO_E
{
NV_VGPU_SAVE_VF_INFO = 0,
NV_VGPU_REMOVE_VF_PCI_INFO = 1,
NV_VGPU_REMOVE_VF_MDEV_INFO = 2,
NV_VGPU_GET_VF_INFO = 3
} VGPU_CMD_PROCESS_VF_INFO;
typedef enum VGPU_DEVICE_STATE_E
{
NV_VGPU_DEV_UNUSED = 0,
NV_VGPU_DEV_OPENED = 1,
NV_VGPU_DEV_IN_USE = 2
} VGPU_DEVICE_STATE;
/*
* Function prototypes
*/
HYPERVISOR_TYPE NV_API_CALL nv_get_hypervisor_type(void);
#endif // _NV_HYPERVISOR_H_

@@ -0,0 +1,85 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_IOCTL_NUMA_H
#define NV_IOCTL_NUMA_H
#if defined(NV_LINUX)
#include <nv-ioctl-numbers.h>
#if defined(NV_KERNEL_INTERFACE_LAYER)
#include <linux/types.h>
#else
#include <stdint.h>
#if !defined(__aligned)
#define __aligned(n) __attribute__((aligned(n)))
#endif
#endif
#define NV_ESC_NUMA_INFO (NV_IOCTL_BASE + 15)
#define NV_ESC_SET_NUMA_STATUS (NV_IOCTL_BASE + 16)
#define NV_IOCTL_NUMA_INFO_MAX_OFFLINE_ADDRESSES 64
typedef struct offline_addresses
{
uint64_t addresses[NV_IOCTL_NUMA_INFO_MAX_OFFLINE_ADDRESSES] __aligned(8);
uint32_t numEntries;
} nv_offline_addresses_t;
/* per-device NUMA memory info as assigned by the system */
typedef struct nv_ioctl_numa_info
{
int32_t nid;
int32_t status;
uint64_t memblock_size __aligned(8);
uint64_t numa_mem_addr __aligned(8);
uint64_t numa_mem_size __aligned(8);
uint8_t use_auto_online;
nv_offline_addresses_t offline_addresses __aligned(8);
} nv_ioctl_numa_info_t;
/* set the status of the device NUMA memory */
typedef struct nv_ioctl_set_numa_status
{
int32_t status;
} nv_ioctl_set_numa_status_t;
#define NV_IOCTL_NUMA_STATUS_DISABLED 0
#define NV_IOCTL_NUMA_STATUS_OFFLINE 1
#define NV_IOCTL_NUMA_STATUS_ONLINE_IN_PROGRESS 2
#define NV_IOCTL_NUMA_STATUS_ONLINE 3
#define NV_IOCTL_NUMA_STATUS_ONLINE_FAILED 4
#define NV_IOCTL_NUMA_STATUS_OFFLINE_IN_PROGRESS 5
#define NV_IOCTL_NUMA_STATUS_OFFLINE_FAILED 6
#endif
#endif

@@ -0,0 +1,43 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_IOCTL_NUMBERS_H
#define NV_IOCTL_NUMBERS_H
/* NOTE: using an ioctl() number > 55 will overflow! */
#define NV_IOCTL_MAGIC 'F'
#define NV_IOCTL_BASE 200
#define NV_ESC_CARD_INFO (NV_IOCTL_BASE + 0)
#define NV_ESC_REGISTER_FD (NV_IOCTL_BASE + 1)
#define NV_ESC_ALLOC_OS_EVENT (NV_IOCTL_BASE + 6)
#define NV_ESC_FREE_OS_EVENT (NV_IOCTL_BASE + 7)
#define NV_ESC_STATUS_CODE (NV_IOCTL_BASE + 9)
#define NV_ESC_CHECK_VERSION_STR (NV_IOCTL_BASE + 10)
#define NV_ESC_IOCTL_XFER_CMD (NV_IOCTL_BASE + 11)
#define NV_ESC_ATTACH_GPUS_TO_FD (NV_IOCTL_BASE + 12)
#define NV_ESC_QUERY_DEVICE_INTR (NV_IOCTL_BASE + 13)
#define NV_ESC_SYS_PARAMS (NV_IOCTL_BASE + 14)
#define NV_ESC_EXPORT_TO_DMABUF_FD (NV_IOCTL_BASE + 17)
#endif
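
The overflow warning above follows from the _IOC() encoding used by ioctl request numbers: the "nr" field is 8 bits wide, and NV_IOCTL_BASE (200) + 55 = 255 is the largest offset that fits. A user-space sketch (the parameter struct is hypothetical, and that the driver dispatches on this encoding is an assumption here):

#include <sys/ioctl.h>

struct example_params { unsigned int value; };  /* hypothetical payload */

#define EXAMPLE_REQUEST \
    _IOWR(NV_IOCTL_MAGIC, NV_ESC_CARD_INFO, struct example_params)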

@@ -0,0 +1,145 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_IOCTL_H
#define NV_IOCTL_H
#include <nv-ioctl-numbers.h>
#include <nvtypes.h>
typedef struct {
NvU32 domain; /* PCI domain number */
NvU8 bus; /* PCI bus number */
NvU8 slot; /* PCI slot number */
NvU8 function; /* PCI function number */
NvU16 vendor_id; /* PCI vendor ID */
NvU16 device_id; /* PCI device ID */
} nv_pci_info_t;
/*
* ioctl()'s with parameter structures too large for the
* _IOC cmd layout use the nv_ioctl_xfer_t structure
* and the NV_ESC_IOCTL_XFER_CMD ioctl() to pass the actual
* size and user argument pointer into the RM, which
* will then copy it to/from kernel space in separate steps.
*/
typedef struct nv_ioctl_xfer
{
NvU32 cmd;
NvU32 size;
NvP64 ptr NV_ALIGN_BYTES(8);
} nv_ioctl_xfer_t;
typedef struct nv_ioctl_card_info
{
NvBool valid;
nv_pci_info_t pci_info; /* PCI config information */
NvU32 gpu_id;
NvU16 interrupt_line;
NvU64 reg_address NV_ALIGN_BYTES(8);
NvU64 reg_size NV_ALIGN_BYTES(8);
NvU64 fb_address NV_ALIGN_BYTES(8);
NvU64 fb_size NV_ALIGN_BYTES(8);
NvU32 minor_number;
NvU8 dev_name[10]; /* device names such as vmgfx[0-32] for vmkernel */
} nv_ioctl_card_info_t;
/* alloc event */
typedef struct nv_ioctl_alloc_os_event
{
NvHandle hClient;
NvHandle hDevice;
NvU32 fd;
NvU32 Status;
} nv_ioctl_alloc_os_event_t;
/* free event */
typedef struct nv_ioctl_free_os_event
{
NvHandle hClient;
NvHandle hDevice;
NvU32 fd;
NvU32 Status;
} nv_ioctl_free_os_event_t;
/* status code */
typedef struct nv_ioctl_status_code
{
NvU32 domain;
NvU8 bus;
NvU8 slot;
NvU32 status;
} nv_ioctl_status_code_t;
/* check version string */
#define NV_RM_API_VERSION_STRING_LENGTH 64
typedef struct nv_ioctl_rm_api_version
{
NvU32 cmd;
NvU32 reply;
char versionString[NV_RM_API_VERSION_STRING_LENGTH];
} nv_ioctl_rm_api_version_t;
#define NV_RM_API_VERSION_CMD_STRICT 0
#define NV_RM_API_VERSION_CMD_RELAXED '1'
#define NV_RM_API_VERSION_CMD_QUERY '2'
#define NV_RM_API_VERSION_REPLY_UNRECOGNIZED 0
#define NV_RM_API_VERSION_REPLY_RECOGNIZED 1
typedef struct nv_ioctl_query_device_intr
{
NvU32 intrStatus NV_ALIGN_BYTES(4);
NvU32 status;
} nv_ioctl_query_device_intr;
/* system parameters that the kernel driver may use for configuration */
typedef struct nv_ioctl_sys_params
{
NvU64 memblock_size NV_ALIGN_BYTES(8);
} nv_ioctl_sys_params_t;
typedef struct nv_ioctl_register_fd
{
int ctl_fd;
} nv_ioctl_register_fd_t;
#define NV_DMABUF_EXPORT_MAX_HANDLES 128
typedef struct nv_ioctl_export_to_dma_buf_fd
{
int fd;
NvHandle hClient;
NvU32 totalObjects;
NvU32 numObjects;
NvU32 index;
NvU64 totalSize NV_ALIGN_BYTES(8);
NvHandle handles[NV_DMABUF_EXPORT_MAX_HANDLES];
NvU64 offsets[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU64 sizes[NV_DMABUF_EXPORT_MAX_HANDLES] NV_ALIGN_BYTES(8);
NvU32 status;
} nv_ioctl_export_to_dma_buf_fd_t;
#endif
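
A user-space sketch of the xfer indirection described above: _IOC() reserves only 14 bits for the parameter size, so larger structures are passed by pointer through NV_ESC_IOCTL_XFER_CMD. The command number and oversized struct here are hypothetical:

#include <stdint.h>
#include <sys/ioctl.h>

typedef struct { NvU8 payload[1 << 14]; } big_params_t;  /* > 14-bit size */
#define EXAMPLE_BIG_CMD 0x1234                           /* hypothetical */

static int send_big_cmd(int fd)
{
    big_params_t params = {{0}};
    nv_ioctl_xfer_t xfer;

    xfer.cmd  = EXAMPLE_BIG_CMD;
    xfer.size = sizeof(params);             /* the real parameter size */
    xfer.ptr  = (NvP64)(uintptr_t)&params;  /* user pointer for the RM */

    return ioctl(fd, _IOWR(NV_IOCTL_MAGIC, NV_ESC_IOCTL_XFER_CMD,
                           nv_ioctl_xfer_t), &xfer);
}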

@@ -0,0 +1,41 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018-2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_KERNEL_INTERFACE_API_H
#define _NV_KERNEL_INTERFACE_API_H
/**************************************************************************************************************
*
* File: nv-kernel-interface-api.h
*
* Description:
* Defines the NV API related macros.
*
**************************************************************************************************************/
#if NVOS_IS_UNIX && NVCPU_IS_X86_64 && defined(__use_altstack__)
#define NV_API_CALL __attribute__((altstack(0)))
#else
#define NV_API_CALL
#endif
#endif /* _NV_KERNEL_INTERFACE_API_H */

@@ -0,0 +1,61 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_KREF_H__
#define __NV_KREF_H__
#include <asm/atomic.h>
typedef struct nv_kref
{
atomic_t refcount;
} nv_kref_t;
static inline void nv_kref_init(nv_kref_t *nv_kref)
{
atomic_set(&nv_kref->refcount, 1);
}
static inline void nv_kref_get(nv_kref_t *nv_kref)
{
atomic_inc(&nv_kref->refcount);
}
static inline int nv_kref_put(nv_kref_t *nv_kref,
void (*release)(nv_kref_t *nv_kref))
{
if (atomic_dec_and_test(&nv_kref->refcount))
{
release(nv_kref);
return 1;
}
return 0;
}
static inline unsigned int nv_kref_read(const nv_kref_t *nv_kref)
{
return atomic_read(&nv_kref->refcount);
}
#endif // __NV_KREF_H__
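
A usage sketch (the struct, release callback, and kfree-based cleanup are illustrative assumptions):

#include <linux/kernel.h>  /* container_of */
#include <linux/slab.h>    /* kfree */

struct foo
{
    nv_kref_t ref;
    /* ... payload ... */
};

static void foo_release(nv_kref_t *ref)
{
    struct foo *f = container_of(ref, struct foo, ref);
    kfree(f);
}

static void foo_example(struct foo *f)
{
    nv_kref_init(&f->ref);              /* refcount = 1 */
    nv_kref_get(&f->ref);               /* refcount = 2 */
    nv_kref_put(&f->ref, foo_release);  /* 2 -> 1, returns 0 */
    nv_kref_put(&f->ref, foo_release);  /* 1 -> 0, runs release, returns 1 */
}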

@@ -0,0 +1,239 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_KTHREAD_QUEUE_H__
#define __NV_KTHREAD_QUEUE_H__
#include <linux/types.h> // atomic_t
#include <linux/list.h> // list
#include <linux/sched.h> // task_struct
#include <linux/numa.h> // NUMA_NO_NODE
#include <linux/semaphore.h>
#include "conftest.h"
////////////////////////////////////////////////////////////////////////////////
// nv_kthread_q:
//
// 1. API and overview
//
// This "nv_kthread_q" system implements a simple queuing system for deferred
// work. The nv_kthread_q system has goals and use cases that are similar to
// the named workqueues in the Linux kernel, but nv_kthread_q is much (10x or
// so) smaller, simpler--and correspondingly less general. Deferred work
// items are put into a queue, and run within the context of a dedicated set
// of kernel threads (kthread).
//
// In order to avoid confusion with the Linux workqueue system, I have
// avoided using the term "work", and instead refer to "queues" (also called
// "q's") and "queue items" (also called "q_items"), in both variable names
// and comments.
//
// This module depends only upon the Linux kernel.
//
// Queue items that are submitted to separate nv_kthread_q instances are
// guaranteed to be run in different kthreads.
//
// Queue items that are submitted to the same nv_kthread_q are not guaranteed
// to be serialized, nor are they guaranteed to run concurrently.
//
// 2. Allocations
//
// The caller allocates queues and queue items. The nv_kthread_q APIs do
// the initialization (zeroing and setup) of queues and queue items.
// Allocation is handled that way, because one of the first use cases is a
// bottom half interrupt handler, and for that, queue items should be
// pre-allocated (for example, one per GPU), so that no allocation is
// required in the top-half interrupt handler. Relevant API calls:
//
// 3. Queue initialization
//
// nv_kthread_q_init() initializes a queue on the current NUMA node.
//
// or
//
// nv_kthread_q_init_on_node() initializes a queue on a specific NUMA node.
//
// 4. Scheduling things for the queue to run
//
// The nv_kthread_q_schedule_q_item() routine will schedule a q_item to run.
//
// 5. Stopping the queue(s)
//
// The nv_kthread_q_stop() routine will flush the queue, and safely stop
// the kthread, before returning.
//
////////////////////////////////////////////////////////////////////////////////
typedef struct nv_kthread_q nv_kthread_q_t;
typedef struct nv_kthread_q_item nv_kthread_q_item_t;
typedef void (*nv_q_func_t)(void *args);
struct nv_kthread_q
{
struct list_head q_list_head;
spinlock_t q_lock;
// This is a counting semaphore. It gets incremented and decremented
// exactly once for each item that is added to the queue.
struct semaphore q_sem;
atomic_t main_loop_should_exit;
struct task_struct *q_kthread;
};
struct nv_kthread_q_item
{
struct list_head q_list_node;
nv_q_func_t function_to_run;
void *function_args;
};
#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE (-1)
#endif
#define NV_KTHREAD_NO_NODE NUMA_NO_NODE
//
// The queue must not be used before calling this routine.
//
// The caller allocates an nv_kthread_q_t item. This routine initializes
// the queue, and starts up a kernel thread ("kthread") to service the queue.
// The queue will initially be empty; there is intentionally no way to
// pre-initialize the queue with items to run.
//
// In order to avoid external dependencies (specifically, NV_STATUS codes), this
// returns a Linux kernel (negative) errno on failure, and zero on success. It
// is safe to call nv_kthread_q_stop() on a queue that nv_kthread_q_init()
// failed for.
//
// A short prefix of the qname arg will show up in []'s, via the ps(1) utility.
//
// The kernel thread stack is preferably allocated on the specified NUMA node,
// but fallback to another node is possible because kernel allocators do not
// guarantee affinity. Note that NUMA-affinity applies only to
// the kthread stack. This API does not do anything about limiting the CPU
// affinity of the kthread. That is left to the caller.
//
// Reusing a queue: once a queue is initialized, it must be safely shut down
// (see "Stopping the queue(s)", below), before it can be reused. So, for
// a simple queue use case, the following will work:
//
// nv_kthread_q_init_on_node(&some_q, "display_name", preferred_node);
// nv_kthread_q_stop(&some_q);
// nv_kthread_q_init_on_node(&some_q, "reincarnated", preferred_node);
// nv_kthread_q_stop(&some_q);
//
int nv_kthread_q_init_on_node(nv_kthread_q_t *q,
const char *qname,
int preferred_node);
//
// This routine is the same as nv_kthread_q_init_on_node() with the exception
// that the queue stack will be allocated on the NUMA node of the caller.
//
static inline int nv_kthread_q_init(nv_kthread_q_t *q, const char *qname)
{
return nv_kthread_q_init_on_node(q, qname, NV_KTHREAD_NO_NODE);
}
//
// The caller is responsible for stopping all queues, by calling this routine
// before, for example, kernel module unloading. This nv_kthread_q_stop()
// routine will flush the queue, and safely stop the kthread, before returning.
//
// You may ONLY call nv_kthread_q_stop() once, unless you reinitialize the
// queue in between, as shown in the nv_kthread_q_init() documentation, above.
//
// Do not add any more items to the queue after calling nv_kthread_q_stop.
//
// Calling nv_kthread_q_stop() on a queue which has been zero-initialized or
// for which nv_kthread_q_init() failed, is a no-op.
//
void nv_kthread_q_stop(nv_kthread_q_t *q);
//
// All items that were in the queue before nv_kthread_q_flush was called, and
// all items scheduled by those items, will get run before this function
// returns.
//
// You may NOT call nv_kthread_q_flush() after having called nv_kthread_q_stop.
//
// This actually flushes the queue twice. That ensures that the queue is fully
// flushed, for an important use case: rescheduling from within one's own
// callback. In order to do that safely, you need to:
//
// -- set a flag that tells the callback to stop rescheduling itself.
//
// -- call either nv_kthread_q_flush or nv_kthread_q_stop (which internally
// calls nv_kthread_q_flush). The nv_kthread_q_flush, in turn, actually
// flushes the queue *twice*. The first flush waits for any callbacks
// to finish, that missed seeing the "stop_rescheduling" flag. The
// second flush waits for callbacks that were already scheduled when the
// first flush finished.
//
void nv_kthread_q_flush(nv_kthread_q_t *q);
// Assigns function_to_run and function_args to the q_item.
//
// This must be called before calling nv_kthread_q_schedule_q_item.
void nv_kthread_q_item_init(nv_kthread_q_item_t *q_item,
nv_q_func_t function_to_run,
void *function_args);
//
// The caller must have already set up the queue, via nv_kthread_q_init().
// The caller owns the lifetime of the q_item. The nv_kthread_q system runs
// q_items, and adds or removes them from the queue. However, due to the first
// law of q-dynamics, it neither creates nor destroys q_items.
//
// When the callback (the function_to_run argument) is actually run, it is OK
// to free the q_item from within that routine. The nv_kthread_q system
// promises to be done with the q_item before that point.
//
// nv_kthread_q_schedule_q_item may be called from multiple threads at once,
// without danger of corrupting anything. This routine may also be safely
// called from interrupt context, including top-half ISRs.
//
// It is OK to reschedule the same q_item from within its own callback function.
//
// It is also OK to attempt to reschedule the same q_item, if that q_item is
// already pending in the queue. The q_item will not be rescheduled if it is
// already pending.
//
// Returns true (non-zero) if the item was actually scheduled. Returns false if
// the item was not scheduled, which can happen if:
//
// -- The q_item was already pending in a queue, or
// -- The queue is shutting down (or not yet started up).
//
int nv_kthread_q_schedule_q_item(nv_kthread_q_t *q,
nv_kthread_q_item_t *q_item);
// Built-in test. Returns -1 if any subtest failed, or 0 upon success.
int nv_kthread_q_run_self_test(void);
#endif // __NV_KTHREAD_QUEUE_H__
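
A minimal end-to-end sketch of the API above (all names are hypothetical):

static nv_kthread_q_t deferred_q;
static nv_kthread_q_item_t deferred_item;

static void deferred_work(void *args)
{
    /* runs in deferred_q's dedicated kthread */
}

static int deferred_setup(void)
{
    /* returns a negative errno on failure, zero on success */
    int ret = nv_kthread_q_init(&deferred_q, "nv_example");
    if (ret != 0)
        return ret;

    nv_kthread_q_item_init(&deferred_item, deferred_work, NULL);
    nv_kthread_q_schedule_q_item(&deferred_q, &deferred_item);
    return 0;
}

static void deferred_teardown(void)
{
    nv_kthread_q_stop(&deferred_q);  /* flushes, then stops the kthread */
}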

File diff suppressed because it is too large

@@ -0,0 +1,76 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2013-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_LIST_HELPERS_H__
#define __NV_LIST_HELPERS_H__
#include <linux/list.h>
#include "conftest.h"
/*
* list_first_entry_or_null added by commit 6d7581e62f8b ("list: introduce
* list_first_entry_or_null") in v3.10 (2013-05-29).
*/
#if !defined(list_first_entry_or_null)
#define list_first_entry_or_null(ptr, type, member) \
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
#endif
/*
* Added by commit 93be3c2eb337 ("list: introduce list_last_entry(), use
* list_{first,last}_entry()") in v3.13 (2013-11-12).
*/
#if !defined(list_last_entry)
#define list_last_entry(ptr, type, member) \
list_entry((ptr)->prev, type, member)
#endif
/* list_last_entry_or_null() doesn't actually exist in the kernel */
#if !defined(list_last_entry_or_null)
#define list_last_entry_or_null(ptr, type, member) \
(!list_empty(ptr) ? list_last_entry(ptr, type, member) : NULL)
#endif
/*
* list_prev_entry() and list_next_entry added by commit 008208c6b26f
* ("list: introduce list_next_entry() and list_prev_entry()") in
* v3.13 (2013-11-12).
*/
#if !defined(list_prev_entry)
#define list_prev_entry(pos, member) \
list_entry((pos)->member.prev, typeof(*(pos)), member)
#endif
#if !defined(list_next_entry)
#define list_next_entry(pos, member) \
list_entry((pos)->member.next, typeof(*(pos)), member)
#endif
#if !defined(NV_LIST_IS_FIRST_PRESENT)
static inline int list_is_first(const struct list_head *list,
const struct list_head *head)
{
return list->prev == head;
}
#endif
#endif // __NV_LIST_HELPERS_H__

View File

@@ -0,0 +1,74 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_LOCK_H_
#define _NV_LOCK_H_
#include "conftest.h"
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/sched.h> /* signal_pending, cond_resched */
#include <linux/semaphore.h>
#if defined(NV_LINUX_SCHED_SIGNAL_H_PRESENT)
#include <linux/sched/signal.h> /* signal_pending for kernels >= 4.11 */
#endif
#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_PREEMPT_RT_FULL)
typedef raw_spinlock_t nv_spinlock_t;
#define NV_SPIN_LOCK_INIT(lock) raw_spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock) raw_spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock) raw_spin_unlock_irq(lock)
#define NV_SPIN_LOCK_IRQSAVE(lock,flags) raw_spin_lock_irqsave(lock,flags)
#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) raw_spin_unlock_irqrestore(lock,flags)
#define NV_SPIN_LOCK(lock) raw_spin_lock(lock)
#define NV_SPIN_UNLOCK(lock) raw_spin_unlock(lock)
#define NV_SPIN_UNLOCK_WAIT(lock) raw_spin_unlock_wait(lock)
#else
typedef spinlock_t nv_spinlock_t;
#define NV_SPIN_LOCK_INIT(lock) spin_lock_init(lock)
#define NV_SPIN_LOCK_IRQ(lock) spin_lock_irq(lock)
#define NV_SPIN_UNLOCK_IRQ(lock) spin_unlock_irq(lock)
#define NV_SPIN_LOCK_IRQSAVE(lock,flags) spin_lock_irqsave(lock,flags)
#define NV_SPIN_UNLOCK_IRQRESTORE(lock,flags) spin_unlock_irqrestore(lock,flags)
#define NV_SPIN_LOCK(lock) spin_lock(lock)
#define NV_SPIN_UNLOCK(lock) spin_unlock(lock)
#define NV_SPIN_UNLOCK_WAIT(lock) spin_unlock_wait(lock)
#endif
#define NV_INIT_MUTEX(mutex) sema_init(mutex, 1)
static inline int nv_down_read_interruptible(struct rw_semaphore *lock)
{
while (!down_read_trylock(lock))
{
if (signal_pending(current))
return -EINTR;
cond_resched();
}
return 0;
}
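/*
 * Illustrative sketch only: taking the read side interruptibly with the
 * helper above; the semaphore itself is hypothetical.
 */
#if 0
static int example_read_shared_state(struct rw_semaphore *sem)
{
    int ret = nv_down_read_interruptible(sem);
    if (ret < 0)
        return ret;         /* -EINTR: interrupted by a signal */

    /* ... read state protected by sem ... */

    up_read(sem);
    return 0;
}
#endif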
#endif /* _NV_LOCK_H_ */

View File

@@ -0,0 +1,49 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NVMEMDBG_H_
#define _NVMEMDBG_H_
#include <nvtypes.h>
void nv_memdbg_init(void);
void nv_memdbg_add(void *addr, NvU64 size, const char *file, int line);
void nv_memdbg_remove(void *addr, NvU64 size, const char *file, int line);
void nv_memdbg_exit(void);
#if defined(NV_MEM_LOGGER)
#define NV_MEMDBG_ADD(ptr, size) \
nv_memdbg_add(ptr, size, __FILE__, __LINE__)
#define NV_MEMDBG_REMOVE(ptr, size) \
nv_memdbg_remove(ptr, size, __FILE__, __LINE__)
#else
#define NV_MEMDBG_ADD(ptr, size)
#define NV_MEMDBG_REMOVE(ptr, size)
#endif /* NV_MEM_LOGGER */
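/*
 * Illustrative sketch only: pairing the hooks above around an allocation
 * (assumes kmalloc/kfree from <linux/slab.h>).
 */
#if 0
static void *example_alloc(NvU64 size)
{
    void *ptr = kmalloc(size, GFP_KERNEL);
    if (ptr != NULL)
        NV_MEMDBG_ADD(ptr, size);    /* no-op unless NV_MEM_LOGGER is set */
    return ptr;
}

static void example_free(void *ptr, NvU64 size)
{
    NV_MEMDBG_REMOVE(ptr, size);
    kfree(ptr);
}
#endif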
#endif /* _NVMEMDBG_H_ */

View File

@@ -0,0 +1,313 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_MM_H__
#define __NV_MM_H__
#include "conftest.h"
#if !defined(NV_VM_FAULT_T_IS_PRESENT)
typedef int vm_fault_t;
#endif
/* pin_user_pages
 * Presence of pin_user_pages() also implies the presence of unpin_user_page().
 * Both were added in v5.6-rc1.
*
* pin_user_pages() was added by commit eddb1c228f7951d399240
* ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6-rc1 (2020-01-30)
*
* Removed vmas parameter from pin_user_pages() by commit 40896a02751
* ("mm/gup: remove vmas parameter from pin_user_pages()")
* in linux-next, expected in v6.5-rc1 (2023-05-17)
*
*/
#include <linux/mm.h>
#include <linux/sched.h>
#if defined(NV_PIN_USER_PAGES_PRESENT)
#if defined(NV_PIN_USER_PAGES_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES pin_user_pages
#else
#define NV_PIN_USER_PAGES(start, nr_pages, gup_flags, pages, vmas) \
pin_user_pages(start, nr_pages, gup_flags, pages)
#endif // NV_PIN_USER_PAGES_HAS_ARGS_VMAS
#define NV_UNPIN_USER_PAGE unpin_user_page
#else
#define NV_PIN_USER_PAGES NV_GET_USER_PAGES
#define NV_UNPIN_USER_PAGE put_page
#endif // NV_PIN_USER_PAGES_PRESENT
/* get_user_pages
*
 * The 8-argument version of get_user_pages() was deprecated by commit
 * cde70140fed8429acf7a14e2e2cbd3e329036653 (2016 Feb 12) for the non-remote
 * case (calling get_user_pages() with current and current->mm).
*
* Completely moved to the 6 argument version of get_user_pages -
* 2016 Apr 4: c12d2da56d0e07d230968ee2305aaa86b93a6832
*
* write and force parameters were replaced with gup_flags by -
* 2016 Oct 12: 768ae309a96103ed02eb1e111e838c87854d8b51
*
* A 7-argument version of get_user_pages was introduced into linux-4.4.y by
* commit 8e50b8b07f462ab4b91bc1491b1c91bd75e4ad40 which cherry-picked the
replacement of the write and force parameters with gup_flags.
*
* Removed vmas parameter from get_user_pages() by commit 7bbf9c8c99
* ("mm/gup: remove unused vmas parameter from get_user_pages()")
* in linux-next, expected in v6.5-rc1 (2023-05-17)
*
*/
#if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
get_user_pages(start, nr_pages, flags, pages)
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS_VMAS)
#define NV_GET_USER_PAGES get_user_pages
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
get_user_pages(current, current->mm, start, nr_pages, flags, pages, vmas)
#else
static inline long NV_GET_USER_PAGES(unsigned long start,
unsigned long nr_pages,
unsigned int flags,
struct page **pages,
struct vm_area_struct **vmas)
{
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;
#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS)
return get_user_pages(start, nr_pages, write, force, pages, vmas);
#else
// NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
return get_user_pages(current, current->mm, start, nr_pages, write,
force, pages, vmas);
#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE_VMAS
}
#endif // NV_GET_USER_PAGES_HAS_ARGS_FLAGS
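/*
 * Illustrative sketch only: pinning one user page through the wrapper
 * above. Callers must hold the mmap lock; the nv_mmap_read_lock()
 * helpers defined later in this header are used here.
 */
#if 0
static long example_get_one_page(unsigned long uaddr, struct page **page)
{
    long ret;

    nv_mmap_read_lock(current->mm);
    ret = NV_GET_USER_PAGES(uaddr & PAGE_MASK, 1, FOLL_WRITE, page, NULL);
    nv_mmap_read_unlock(current->mm);

    return ret;   /* 1 on success; release with put_page() */
}
#endif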
/* pin_user_pages_remote
*
* pin_user_pages_remote() was added by commit eddb1c228f7951d399240
* ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6 (2020-01-30)
*
* pin_user_pages_remote() removed 'tsk' parameter by commit
* 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code")
in v5.9-rc1 (2020-08-11).
*
* Removed unused vmas parameter from pin_user_pages_remote() by commit
83bcc2e132 ("mm/gup: remove unused vmas parameter from pin_user_pages_remote()")
* in linux-next, expected in v6.5-rc1 (2023-05-14)
*
*/
#if defined(NV_PIN_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#elif defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_VMAS)
#define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
#else
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
pin_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
#endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK_VMAS
#else
#define NV_PIN_USER_PAGES_REMOTE NV_GET_USER_PAGES_REMOTE
#endif // NV_PIN_USER_PAGES_REMOTE_PRESENT
/*
* get_user_pages_remote() was added by commit 1e9877902dc7
* ("mm/gup: Introduce get_user_pages_remote()") in v4.6 (2016-02-12).
*
* Note that get_user_pages_remote() requires the caller to hold a reference on
* the task_struct (if non-NULL and if this API has tsk argument) and the mm_struct.
* This will always be true when using current and current->mm. If the kernel passes
* the driver a vma via driver callback, the kernel holds a reference on vma->vm_mm
* over that callback.
*
* get_user_pages_remote() write/force parameters were replaced
* with gup_flags by commit 9beae1ea8930 ("mm: replace get_user_pages_remote()
* write/force parameters with gup_flags") in v4.9 (2016-10-13).
*
* get_user_pages_remote() added 'locked' parameter by commit 5b56d49fc31d
* ("mm: add locked parameter to get_user_pages_remote()") in
* v4.10 (2016-12-14).
*
* get_user_pages_remote() removed 'tsk' parameter by
* commit 64019a2e467a ("mm/gup: remove task_struct pointer for
* all gup code") in v5.9-rc1 (2020-08-11).
*
* Removed vmas parameter from get_user_pages_remote() by commit a4bde14d549
* ("mm/gup: remove vmas parameter from get_user_pages_remote()")
* in linux-next, expected in v6.5-rc1 (2023-05-14)
*
*/
#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages_remote(mm, start, nr_pages, flags, pages, locked)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE get_user_pages_remote
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_VMAS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas)
#else
// NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE_VMAS
static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
unsigned int flags,
struct page **pages,
struct vm_area_struct **vmas,
int *locked)
{
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;
return get_user_pages_remote(NULL, mm, start, nr_pages, write, force,
pages, vmas);
}
#endif // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED
#else
#if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS)
static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
unsigned int flags,
struct page **pages,
struct vm_area_struct **vmas,
int *locked)
{
int write = flags & FOLL_WRITE;
int force = flags & FOLL_FORCE;
return get_user_pages(NULL, mm, start, nr_pages, write, force, pages, vmas);
}
#else
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
get_user_pages(NULL, mm, start, nr_pages, flags, pages, vmas)
#endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE_VMAS
#endif // NV_GET_USER_PAGES_REMOTE_PRESENT
/*
* The .virtual_address field was effectively renamed to .address, by these
* two commits:
*
* struct vm_fault: .address was added by:
* 2016-12-14 82b0f8c39a3869b6fd2a10e180a862248736ec6f
*
* struct vm_fault: .virtual_address was removed by:
* 2016-12-14 1a29d85eb0f19b7d8271923d8917d7b4f5540b3e
*/
static inline unsigned long nv_page_fault_va(struct vm_fault *vmf)
{
#if defined(NV_VM_FAULT_HAS_ADDRESS)
return vmf->address;
#else
return (unsigned long)(vmf->virtual_address);
#endif
}
static inline void nv_mmap_read_lock(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
mmap_read_lock(mm);
#else
down_read(&mm->mmap_sem);
#endif
}
static inline void nv_mmap_read_unlock(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
mmap_read_unlock(mm);
#else
up_read(&mm->mmap_sem);
#endif
}
static inline void nv_mmap_write_lock(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
mmap_write_lock(mm);
#else
down_write(&mm->mmap_sem);
#endif
}
static inline void nv_mmap_write_unlock(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
mmap_write_unlock(mm);
#else
up_write(&mm->mmap_sem);
#endif
}
static inline int nv_mm_rwsem_is_locked(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
return rwsem_is_locked(&mm->mmap_lock);
#else
return rwsem_is_locked(&mm->mmap_sem);
#endif
}
static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
return &mm->mmap_lock;
#else
return &mm->mmap_sem;
#endif
}
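/*
 * Illustrative sketch only: the lock helpers above guarding a VMA
 * lookup; find_vma() is the standard kernel primitive.
 */
#if 0
static int example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
    struct vm_area_struct *vma;
    int mapped;

    nv_mmap_read_lock(mm);
    vma = find_vma(mm, addr);
    mapped = (vma != NULL) && (addr >= vma->vm_start);
    nv_mmap_read_unlock(mm);

    return mapped;
}
#endif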
static inline void nv_vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
{
#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
vm_flags_set(vma, flags);
#else
vma->vm_flags |= flags;
#endif
}
static inline void nv_vm_flags_clear(struct vm_area_struct *vma, vm_flags_t flags)
{
#if defined(NV_VM_AREA_STRUCT_HAS_CONST_VM_FLAGS)
vm_flags_clear(vma, flags);
#else
vma->vm_flags &= ~flags;
#endif
}
#endif // __NV_MM_H__

View File

@@ -0,0 +1,122 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_MODESET_INTERFACE_H_
#define _NV_MODESET_INTERFACE_H_
/*
* This file defines the interface between the nvidia and
* nvidia-modeset UNIX kernel modules.
*
* The nvidia-modeset kernel module calls the nvidia kernel module's
* nvidia_get_rm_ops() function to get the RM API function pointers
* which it will need.
*/
#include "nvstatus.h"
#include "nv-gpu-info.h"
/*
* nvidia_stack_s is defined in nv.h, which pulls in a lot of other
* dependencies. The nvidia-modeset kernel module doesn't need to
 * dereference the nvidia_stack_s pointer, so just treat it as an
* opaque pointer for purposes of this API definition.
*/
typedef struct nvidia_stack_s *nvidia_modeset_stack_ptr;
/*
* Callback functions from the RM OS interface layer into the NVKMS OS interface
* layer.
*
* These functions should be called without the RM lock held, using the kernel's
* native calling convention.
*/
typedef struct {
/*
* Suspend & resume callbacks. Note that these are called once per GPU.
*/
void (*suspend)(NvU32 gpu_id);
void (*resume)(NvU32 gpu_id);
} nvidia_modeset_callbacks_t;
/*
* The RM API entry points which the nvidia-modeset kernel module should
* call in the nvidia kernel module.
*/
typedef struct {
/*
* The nvidia-modeset kernel module should assign version_string
* before passing the structure to the nvidia kernel module, so
* that a version match can be confirmed: it is not supported to
* mix nvidia and nvidia-modeset kernel modules from different
* releases.
*/
const char *version_string;
/*
* Return system information.
*/
struct {
/* Availability of write combining support for video memory */
NvBool allow_write_combining;
} system_info;
/*
* Allocate and free an nvidia_stack_t to pass into
* nvidia_modeset_rm_ops_t::op(). An nvidia_stack_t must only be
* used by one thread at a time.
*
* Note that on architectures where an alternate stack is not
* used, alloc_stack() will set sp=NULL even when it returns 0
* (success). I.e., check the return value, not the sp value.
*/
int (*alloc_stack)(nvidia_modeset_stack_ptr *sp);
void (*free_stack)(nvidia_modeset_stack_ptr sp);
/*
* Enumerate list of gpus probed by nvidia driver.
*
* gpu_info is an array of NVIDIA_MAX_GPUS elements. The number of GPUs
* in the system is returned.
*/
NvU32 (*enumerate_gpus)(nv_gpu_info_t *gpu_info);
/*
* {open,close}_gpu() raise and lower the reference count of the
* specified GPU. This is equivalent to opening and closing a
* /dev/nvidiaN device file from user-space.
*/
int (*open_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp);
void (*close_gpu)(NvU32 gpu_id, nvidia_modeset_stack_ptr sp);
void (*op)(nvidia_modeset_stack_ptr sp, void *ops_cmd);
int (*set_callbacks)(const nvidia_modeset_callbacks_t *cb);
} nvidia_modeset_rm_ops_t;
NV_STATUS nvidia_get_rm_ops(nvidia_modeset_rm_ops_t *rm_ops);
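/*
 * Illustrative sketch only: how a client module might request the RM ops
 * table. NV_VERSION_STRING is a hypothetical version define.
 */
#if 0
static int example_bind_rm_ops(nvidia_modeset_rm_ops_t *rm_ops)
{
    rm_ops->version_string = NV_VERSION_STRING;

    if (nvidia_get_rm_ops(rm_ops) != NV_OK)
    {
        return -1;  /* version mismatch, or nvidia.ko unavailable */
    }
    return 0;
}
#endif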
#endif /* _NV_MODESET_INTERFACE_H_ */

View File

@@ -0,0 +1,115 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_MSI_H_
#define _NV_MSI_H_
#include "nv-linux.h"
#if (defined(CONFIG_X86_LOCAL_APIC) || defined(NVCPU_AARCH64) || \
defined(NVCPU_PPC64LE)) && \
(defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR))
#define NV_LINUX_PCIE_MSI_SUPPORTED
#endif
#if !defined(NV_LINUX_PCIE_MSI_SUPPORTED) || !defined(CONFIG_PCI_MSI)
#define NV_PCI_DISABLE_MSI(pci_dev)
#else
#define NV_PCI_DISABLE_MSI(pci_dev) pci_disable_msi(pci_dev)
#endif
irqreturn_t nvidia_isr (int, void *);
irqreturn_t nvidia_isr_msix (int, void *);
irqreturn_t nvidia_isr_kthread_bh (int, void *);
irqreturn_t nvidia_isr_msix_kthread_bh(int, void *);
#if defined(NV_LINUX_PCIE_MSI_SUPPORTED)
void NV_API_CALL nv_init_msi (nv_state_t *);
void NV_API_CALL nv_init_msix (nv_state_t *);
NvS32 NV_API_CALL nv_request_msix_irq (nv_linux_state_t *);
#define NV_PCI_MSIX_FLAGS 2
#define NV_PCI_MSIX_FLAGS_QSIZE 0x7FF
static inline void nv_free_msix_irq(nv_linux_state_t *nvl)
{
int i;
for (i = 0; i < nvl->num_intr; i++)
{
free_irq(nvl->msix_entries[i].vector, (void *)nvl);
}
}
static inline int nv_get_max_irq(struct pci_dev *pci_dev)
{
int nvec;
int cap_ptr;
NvU16 ctrl;
cap_ptr = pci_find_capability(pci_dev, PCI_CAP_ID_MSIX);
/*
* The 'PCI_MSIX_FLAGS' was added in 2.6.21-rc3 by:
* 2007-03-05 f5f2b13129a6541debf8851bae843cbbf48298b7
*/
#if defined(PCI_MSIX_FLAGS)
pci_read_config_word(pci_dev, cap_ptr + PCI_MSIX_FLAGS, &ctrl);
nvec = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
#else
pci_read_config_word(pci_dev, cap_ptr + NV_PCI_MSIX_FLAGS, &ctrl);
nvec = (ctrl & NV_PCI_MSIX_FLAGS_QSIZE) + 1;
#endif
return nvec;
}
static inline int nv_pci_enable_msix(nv_linux_state_t *nvl, int nvec)
{
int rc = 0;
/*
* pci_enable_msix_range() replaced pci_enable_msix() in 3.14-rc1:
* 2014-01-03 302a2523c277bea0bbe8340312b09507905849ed
*/
#if defined(NV_PCI_ENABLE_MSIX_RANGE_PRESENT)
// We require all the vectors we are requesting so use the same min and max
rc = pci_enable_msix_range(nvl->pci_dev, nvl->msix_entries, nvec, nvec);
if (rc < 0)
{
return NV_ERR_OPERATING_SYSTEM;
}
WARN_ON(nvec != rc);
#else
rc = pci_enable_msix(nvl->pci_dev, nvl->msix_entries, nvec);
if (rc != 0)
{
return NV_ERR_OPERATING_SYSTEM;
}
#endif
nvl->num_intr = nvec;
return NV_OK;
}
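/*
 * Illustrative sketch only: sizing and enabling MSI-X with the helpers
 * above; assumes nvl->msix_entries has already been allocated.
 */
#if 0
static int example_setup_msix(nv_linux_state_t *nvl)
{
    int nvec = nv_get_max_irq(nvl->pci_dev);

    if (nv_pci_enable_msix(nvl, nvec) != NV_OK)
        return -EIO;

    return nv_request_msix_irq(nvl);
}
#endif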
#endif
#endif /* _NV_MSI_H_ */

View File

@@ -0,0 +1,36 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PCI_TYPES_H_
#define _NV_PCI_TYPES_H_
#include <linux/pci.h>
#include "conftest.h"
#if defined(NV_PCI_CHANNEL_STATE_PRESENT)
typedef enum pci_channel_state nv_pci_channel_state_t;
#else
typedef pci_channel_state_t nv_pci_channel_state_t;
#endif
#endif

View File

@@ -0,0 +1,40 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PCI_H_
#define _NV_PCI_H_
#include <linux/pci.h>
#include "nv-linux.h"
#define NV_GPU_BAR1 1
#define NV_GPU_BAR3 3
int nv_pci_register_driver(void);
void nv_pci_unregister_driver(void);
int nv_pci_count_devices(void);
NvU8 nv_find_pci_capability(struct pci_dev *, NvU8);
int nvidia_dev_get_pci_info(const NvU8 *, struct pci_dev **, NvU64 *, NvU64 *);
nv_linux_state_t * find_pci(NvU32, NvU8, NvU8, NvU8);
#endif

View File

@@ -0,0 +1,129 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_PGPROT_H__
#define __NV_PGPROT_H__
#include "cpuopsys.h"
#include <linux/mm.h>
#if !defined(NV_VMWARE)
#if defined(NVCPU_X86_64)
/* mark memory UC-, rather than UC (don't use _PAGE_PWT) */
static inline pgprot_t pgprot_noncached_weak(pgprot_t old_prot)
{
pgprot_t new_prot = old_prot;
if (boot_cpu_data.x86 > 3)
new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);
return new_prot;
}
#if !defined (pgprot_noncached)
static inline pgprot_t pgprot_noncached(pgprot_t old_prot)
{
pgprot_t new_prot = old_prot;
if (boot_cpu_data.x86 > 3)
new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD | _PAGE_PWT);
return new_prot;
}
#endif
static inline pgprot_t pgprot_modify_writecombine(pgprot_t old_prot)
{
pgprot_t new_prot = old_prot;
pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_PCD | _PAGE_PWT);
new_prot = __pgprot(pgprot_val(new_prot) | _PAGE_PWT);
return new_prot;
}
#endif /* defined(NVCPU_X86_64) */
#endif /* !defined(NV_VMWARE) */
#if defined(NVCPU_AARCH64)
/*
* Don't rely on the kernel's definition of pgprot_noncached(), as on 64-bit
* ARM that's not for system memory, but device memory instead. For I/O cache
* coherent systems, use cached mappings instead of uncached.
*/
#define NV_PGPROT_UNCACHED(old_prot) \
((nvos_is_chipset_io_coherent()) ? \
(old_prot) : \
__pgprot_modify((old_prot), PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC)))
#elif defined(NVCPU_PPC64LE)
/* Don't attempt to mark sysmem pages as uncached on ppc64le */
#define NV_PGPROT_UNCACHED(old_prot) old_prot
#else
#define NV_PGPROT_UNCACHED(old_prot) pgprot_noncached(old_prot)
#endif
#define NV_PGPROT_UNCACHED_DEVICE(old_prot) pgprot_noncached(old_prot)
#if defined(NVCPU_AARCH64)
#define NV_PROT_WRITE_COMBINED_DEVICE (PROT_DEFAULT | PTE_PXN | PTE_UXN | \
PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
__pgprot_modify(old_prot, PTE_ATTRINDX_MASK, NV_PROT_WRITE_COMBINED_DEVICE)
#define NV_PGPROT_WRITE_COMBINED(old_prot) NV_PGPROT_UNCACHED(old_prot)
#define NV_PGPROT_READ_ONLY(old_prot) \
__pgprot_modify(old_prot, 0, PTE_RDONLY)
#elif defined(NVCPU_X86_64)
#define NV_PGPROT_UNCACHED_WEAK(old_prot) pgprot_noncached_weak(old_prot)
#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
pgprot_modify_writecombine(old_prot)
#define NV_PGPROT_WRITE_COMBINED(old_prot) \
NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot)
#define NV_PGPROT_READ_ONLY(old_prot) \
__pgprot(pgprot_val((old_prot)) & ~_PAGE_RW)
#elif defined(NVCPU_PPC64LE)
/*
* Some kernels use H_PAGE instead of _PAGE
*/
#if defined(_PAGE_RW)
#define NV_PAGE_RW _PAGE_RW
#elif defined(H_PAGE_RW)
#define NV_PAGE_RW H_PAGE_RW
#else
#warning "The kernel does not provide page protection defines!"
#endif
#if defined(_PAGE_4K_PFN)
#define NV_PAGE_4K_PFN _PAGE_4K_PFN
#elif defined(H_PAGE_4K_PFN)
#define NV_PAGE_4K_PFN H_PAGE_4K_PFN
#else
#undef NV_PAGE_4K_PFN
#endif
#define NV_PGPROT_WRITE_COMBINED_DEVICE(old_prot) \
pgprot_writecombine(old_prot)
/* Don't attempt to mark sysmem pages as write combined on ppc64le */
#define NV_PGPROT_WRITE_COMBINED(old_prot) old_prot
#define NV_PGPROT_READ_ONLY(old_prot) \
__pgprot(pgprot_val((old_prot)) & ~NV_PAGE_RW)
#else
/* Writecombine is not supported */
#undef NV_PGPROT_WRITE_COMBINED_DEVICE
#undef NV_PGPROT_WRITE_COMBINED
#define NV_PGPROT_READ_ONLY(old_prot)
#endif
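/*
 * Illustrative sketch only: applying the write-combining helper above to
 * a VMA's page protection before mapping device memory (only meaningful
 * on architectures where the helper is defined).
 */
#if 0
static void example_set_write_combined(struct vm_area_struct *vma)
{
    vma->vm_page_prot = NV_PGPROT_WRITE_COMBINED_DEVICE(vma->vm_page_prot);
}
#endif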
#endif /* __NV_PGPROT_H__ */

View File

@@ -0,0 +1,204 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PROCFS_UTILS_H
#define _NV_PROCFS_UTILS_H
#include "conftest.h"
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
/*
 * Allow procfs to create a file to exercise error forwarding.
* This is supported by CRAY platforms.
*/
#if defined(CONFIG_CRAY_XT)
#define EXERCISE_ERROR_FORWARDING NV_TRUE
#else
#define EXERCISE_ERROR_FORWARDING NV_FALSE
#endif
#define IS_EXERCISE_ERROR_FORWARDING_ENABLED() (EXERCISE_ERROR_FORWARDING)
#if defined(NV_PROC_OPS_PRESENT)
typedef struct proc_ops nv_proc_ops_t;
#define NV_PROC_OPS_SET_OWNER()
#define NV_PROC_OPS_OPEN proc_open
#define NV_PROC_OPS_READ proc_read
#define NV_PROC_OPS_WRITE proc_write
#define NV_PROC_OPS_LSEEK proc_lseek
#define NV_PROC_OPS_RELEASE proc_release
#else
typedef struct file_operations nv_proc_ops_t;
#define NV_PROC_OPS_SET_OWNER() .owner = THIS_MODULE,
#define NV_PROC_OPS_OPEN open
#define NV_PROC_OPS_READ read
#define NV_PROC_OPS_WRITE write
#define NV_PROC_OPS_LSEEK llseek
#define NV_PROC_OPS_RELEASE release
#endif
#define NV_CREATE_PROC_FILE(filename,parent,__name,__data) \
({ \
struct proc_dir_entry *__entry; \
int mode = (S_IFREG | S_IRUGO); \
const nv_proc_ops_t *fops = &nv_procfs_##__name##_fops; \
if (fops->NV_PROC_OPS_WRITE != 0) \
mode |= S_IWUSR; \
__entry = proc_create_data(filename, mode, parent, fops, __data);\
__entry; \
})
#define NV_PROC_MKDIR_MODE(name, mode, parent) \
proc_mkdir_mode(name, mode, parent)
#define NV_CREATE_PROC_DIR(name,parent) \
({ \
struct proc_dir_entry *__entry; \
int mode = (S_IFDIR | S_IRUGO | S_IXUGO); \
__entry = NV_PROC_MKDIR_MODE(name, mode, parent); \
__entry; \
})
#if defined(NV_PDE_DATA_LOWER_CASE_PRESENT)
#define NV_PDE_DATA(inode) pde_data(inode)
#else
#define NV_PDE_DATA(inode) PDE_DATA(inode)
#endif
#define NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \
static int nv_procfs_open_##name( \
struct inode *inode, \
struct file *filep \
) \
{ \
int ret; \
ret = single_open(filep, nv_procfs_read_##name, \
NV_PDE_DATA(inode)); \
if (ret < 0) \
{ \
return ret; \
} \
ret = nv_down_read_interruptible(&lock); \
if (ret < 0) \
{ \
single_release(inode, filep); \
} \
return ret; \
} \
\
static int nv_procfs_release_##name( \
struct inode *inode, \
struct file *filep \
) \
{ \
up_read(&lock); \
return single_release(inode, filep); \
}
#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(name, lock) \
NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \
\
static const nv_proc_ops_t nv_procfs_##name##_fops = { \
NV_PROC_OPS_SET_OWNER() \
.NV_PROC_OPS_OPEN = nv_procfs_open_##name, \
.NV_PROC_OPS_READ = seq_read, \
.NV_PROC_OPS_LSEEK = seq_lseek, \
.NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \
};
#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_WRITE(name, lock, \
write_callback) \
NV_DEFINE_SINGLE_PROCFS_FILE_HELPER(name, lock) \
\
static ssize_t nv_procfs_write_##name( \
struct file *file, \
const char __user *buf, \
size_t size, \
loff_t *ppos \
) \
{ \
ssize_t ret; \
struct seq_file *s; \
\
s = file->private_data; \
if (s == NULL) \
{ \
return -EIO; \
} \
\
ret = write_callback(s, buf + *ppos, size - *ppos); \
if (ret == 0) \
{ \
/* avoid infinite loop */ \
ret = -EIO; \
} \
return ret; \
} \
\
static const nv_proc_ops_t nv_procfs_##name##_fops = { \
NV_PROC_OPS_SET_OWNER() \
.NV_PROC_OPS_OPEN = nv_procfs_open_##name, \
.NV_PROC_OPS_READ = seq_read, \
.NV_PROC_OPS_WRITE = nv_procfs_write_##name, \
.NV_PROC_OPS_LSEEK = seq_lseek, \
.NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \
};
#define NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY_WITHOUT_LOCK(name) \
static int nv_procfs_open_##name( \
struct inode *inode, \
struct file *filep \
) \
{ \
int ret; \
ret = single_open(filep, nv_procfs_read_##name, \
NV_PDE_DATA(inode)); \
return ret; \
} \
\
static int nv_procfs_release_##name( \
struct inode *inode, \
struct file *filep \
) \
{ \
return single_release(inode, filep); \
} \
\
static const nv_proc_ops_t nv_procfs_##name##_fops = { \
NV_PROC_OPS_SET_OWNER() \
.NV_PROC_OPS_OPEN = nv_procfs_open_##name, \
.NV_PROC_OPS_READ = seq_read, \
.NV_PROC_OPS_LSEEK = seq_lseek, \
.NV_PROC_OPS_RELEASE = nv_procfs_release_##name, \
};
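/*
 * Illustrative sketch only: defining a read-only procfs file with the
 * macros above. The read callback, lock, and parent directory are
 * hypothetical.
 */
#if 0
static DECLARE_RWSEM(example_lock);

static int nv_procfs_read_example(struct seq_file *s, void *v)
{
    seq_printf(s, "example\n");
    return 0;
}

NV_DEFINE_SINGLE_PROCFS_FILE_READ_ONLY(example, example_lock)

/* At registration time: NV_CREATE_PROC_FILE("example", parent, example, NULL); */
#endif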
#endif /* CONFIG_PROC_FS */
#endif /* _NV_PROCFS_UTILS_H */

View File

@@ -0,0 +1,28 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PROCFS_H
#define _NV_PROCFS_H
#include "nv-procfs-utils.h"
#endif /* _NV_PROCFS_H */

View File

@@ -0,0 +1,93 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_PROTO_H_
#define _NV_PROTO_H_
#include "nv-pci.h"
#include "nv-register-module.h"
extern const char *nv_device_name;
extern nvidia_module_t nv_fops;
void nv_acpi_register_notifier (nv_linux_state_t *);
void nv_acpi_unregister_notifier (nv_linux_state_t *);
NvU8 nv_find_pci_capability (struct pci_dev *, NvU8);
int nv_procfs_init (void);
void nv_procfs_exit (void);
void nv_procfs_add_warning (const char *, const char *);
int nv_procfs_add_gpu (nv_linux_state_t *);
void nv_procfs_remove_gpu (nv_linux_state_t *);
int nvidia_mmap (struct file *, struct vm_area_struct *);
int nvidia_mmap_helper (nv_state_t *, nv_linux_file_private_t *, nvidia_stack_t *, struct vm_area_struct *, void *);
int nv_encode_caching (pgprot_t *, NvU32, NvU32);
void nv_revoke_gpu_mappings_locked(nv_state_t *);
NvUPtr nv_vm_map_pages (struct page **, NvU32, NvBool, NvBool);
void nv_vm_unmap_pages (NvUPtr, NvU32);
NV_STATUS nv_alloc_contig_pages (nv_state_t *, nv_alloc_t *);
void nv_free_contig_pages (nv_alloc_t *);
NV_STATUS nv_alloc_system_pages (nv_state_t *, nv_alloc_t *);
void nv_free_system_pages (nv_alloc_t *);
int nv_uvm_init (void);
void nv_uvm_exit (void);
NV_STATUS nv_uvm_suspend (void);
NV_STATUS nv_uvm_resume (void);
void nv_uvm_notify_start_device (const NvU8 *uuid);
void nv_uvm_notify_stop_device (const NvU8 *uuid);
NV_STATUS nv_uvm_event_interrupt (const NvU8 *uuid);
/* Move these to nv.h once implemented by other UNIX platforms */
NvBool nvidia_get_gpuid_list (NvU32 *gpu_ids, NvU32 *gpu_count);
int nvidia_dev_get (NvU32, nvidia_stack_t *);
void nvidia_dev_put (NvU32, nvidia_stack_t *);
int nvidia_dev_get_uuid (const NvU8 *, nvidia_stack_t *);
void nvidia_dev_put_uuid (const NvU8 *, nvidia_stack_t *);
int nvidia_dev_block_gc6 (const NvU8 *, nvidia_stack_t *);
int nvidia_dev_unblock_gc6 (const NvU8 *, nvidia_stack_t *);
#if defined(CONFIG_PM)
NV_STATUS nv_set_system_power_state (nv_power_state_t, nv_pm_action_depth_t);
#endif
void nvidia_modeset_suspend (NvU32 gpuId);
void nvidia_modeset_resume (NvU32 gpuId);
NvBool nv_is_uuid_in_gpu_exclusion_list (const char *);
NV_STATUS nv_parse_per_device_option_string(nvidia_stack_t *sp);
nv_linux_state_t * find_uuid(const NvU8 *uuid);
void nv_report_error(struct pci_dev *dev, NvU32 error_number, const char *format, va_list ap);
void nv_shutdown_adapter(nvidia_stack_t *, nv_state_t *, nv_linux_state_t *);
void nv_dev_free_stacks(nv_linux_state_t *);
NvBool nv_lock_init_locks(nvidia_stack_t *, nv_state_t *);
void nv_lock_destroy_locks(nvidia_stack_t *, nv_state_t *);
void nv_linux_add_device_locked(nv_linux_state_t *);
void nv_linux_remove_device_locked(nv_linux_state_t *);
NvBool nv_acpi_power_resource_method_present(struct pci_dev *);
#endif /* _NV_PROTO_H_ */

View File

@@ -0,0 +1,55 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_REGISTER_MODULE_H_
#define _NV_REGISTER_MODULE_H_
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include "nvtypes.h"
typedef struct nvidia_module_s {
struct module *owner;
/* nvidia0, nvidia1, ... */
const char *module_name;
/* module instance */
NvU32 instance;
/* file operations */
int (*open)(struct inode *, struct file *filp);
int (*close)(struct inode *, struct file *filp);
int (*mmap)(struct file *filp, struct vm_area_struct *vma);
int (*ioctl)(struct inode *, struct file * file, unsigned int cmd, unsigned long arg);
unsigned int (*poll)(struct file * file, poll_table *wait);
} nvidia_module_t;
int nvidia_register_module(nvidia_module_t *);
int nvidia_unregister_module(nvidia_module_t *);
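/*
 * Illustrative sketch only: filling in a module descriptor and
 * registering it at load time. The open/close callbacks are hypothetical.
 */
#if 0
static nvidia_module_t example_module = {
    .owner       = THIS_MODULE,
    .module_name = "nvidia",
    .instance    = 0,
    .open        = example_open,
    .close       = example_close,
};

static int __init example_init(void)
{
    return nvidia_register_module(&example_module);
}

static void __exit example_exit(void)
{
    nvidia_unregister_module(&example_module);
}
#endif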
#endif

View File

@@ -0,0 +1,82 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_RETPOLINE_H_
#define _NV_RETPOLINE_H_
#include "cpuopsys.h"
#if (NV_SPECTRE_V2 == 0)
#define NV_RETPOLINE_THUNK NV_SPEC_THUNK
#else
#define NV_RETPOLINE_THUNK NV_NOSPEC_THUNK
#endif
#if defined(NVCPU_X86_64)
#define NV_SPEC_THUNK(REG) \
__asm__( \
".weak __x86_indirect_thunk_" #REG ";" \
".type __x86_indirect_thunk_" #REG ", @function;" \
"__x86_indirect_thunk_" #REG ":" \
" .cfi_startproc;" \
" jmp *%" #REG ";" \
" .cfi_endproc;" \
".size __x86_indirect_thunk_" #REG ", .-__x86_indirect_thunk_" #REG)
#define NV_NOSPEC_THUNK(REG) \
__asm__( \
".weak __x86_indirect_thunk_" #REG ";" \
".type __x86_indirect_thunk_" #REG ", @function;" \
"__x86_indirect_thunk_" #REG ":" \
" .cfi_startproc;" \
" call .Lnv_no_fence_" #REG ";" \
".Lnv_fence_" #REG ":" \
" pause;" \
" lfence;" \
" jmp .Lnv_fence_" #REG ";" \
".Lnv_no_fence_" #REG ":" \
" mov %" #REG ", (%rsp);" \
" ret;" \
" .cfi_endproc;" \
".size __x86_indirect_thunk_" #REG ", .-__x86_indirect_thunk_" #REG)
__asm__(".pushsection .text");
NV_RETPOLINE_THUNK(rax);
NV_RETPOLINE_THUNK(rbx);
NV_RETPOLINE_THUNK(rcx);
NV_RETPOLINE_THUNK(rdx);
NV_RETPOLINE_THUNK(rsi);
NV_RETPOLINE_THUNK(rdi);
NV_RETPOLINE_THUNK(rbp);
NV_RETPOLINE_THUNK(r8);
NV_RETPOLINE_THUNK(r9);
NV_RETPOLINE_THUNK(r10);
NV_RETPOLINE_THUNK(r11);
NV_RETPOLINE_THUNK(r12);
NV_RETPOLINE_THUNK(r13);
NV_RETPOLINE_THUNK(r14);
NV_RETPOLINE_THUNK(r15);
__asm__(".popsection");
#endif
#endif /* _NV_RETPOLINE_H_ */

View File

@@ -0,0 +1,251 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_TIME_H__
#define __NV_TIME_H__
#include "conftest.h"
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <nvstatus.h>
#define NV_MAX_ISR_DELAY_US 20000
#define NV_MAX_ISR_DELAY_MS (NV_MAX_ISR_DELAY_US / 1000)
#define NV_NSECS_TO_JIFFIES(nsec) ((nsec) * HZ / 1000000000)
#if !defined(NV_TIMESPEC64_PRESENT)
struct timespec64 {
__s64 tv_sec;
long tv_nsec;
};
#endif
#if !defined(NV_KTIME_GET_RAW_TS64_PRESENT)
static inline void ktime_get_raw_ts64(struct timespec64 *ts64)
{
struct timespec ts;
getrawmonotonic(&ts);
ts64->tv_sec = ts.tv_sec;
ts64->tv_nsec = ts.tv_nsec;
}
#endif
#if !defined(NV_KTIME_GET_REAL_TS64_PRESENT)
static inline void ktime_get_real_ts64(struct timespec64 *ts64)
{
struct timeval tv;
do_gettimeofday(&tv);
ts64->tv_sec = tv.tv_sec;
ts64->tv_nsec = tv.tv_usec * (NvU64) NSEC_PER_USEC;
}
#endif
static inline NvBool nv_timer_less_than
(
const struct timespec64 *a,
const struct timespec64 *b
)
{
return (a->tv_sec == b->tv_sec) ? (a->tv_nsec < b->tv_nsec)
: (a->tv_sec < b->tv_sec);
}
#if !defined(NV_TIMESPEC64_PRESENT)
static inline struct timespec64 timespec64_add
(
const struct timespec64 a,
const struct timespec64 b
)
{
struct timespec64 result;
result.tv_sec = a.tv_sec + b.tv_sec;
result.tv_nsec = a.tv_nsec + b.tv_nsec;
while (result.tv_nsec >= NSEC_PER_SEC)
{
++result.tv_sec;
result.tv_nsec -= NSEC_PER_SEC;
}
return result;
}
static inline struct timespec64 timespec64_sub
(
const struct timespec64 a,
const struct timespec64 b
)
{
struct timespec64 result;
result.tv_sec = a.tv_sec - b.tv_sec;
result.tv_nsec = a.tv_nsec - b.tv_nsec;
while (result.tv_nsec < 0)
{
--(result.tv_sec);
result.tv_nsec += NSEC_PER_SEC;
}
return result;
}
static inline s64 timespec64_to_ns(struct timespec64 *ts)
{
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
}
#endif
static inline NvU64 nv_ktime_get_raw_ns(void)
{
struct timespec64 ts;
ktime_get_raw_ts64(&ts);
return (NvU64)timespec64_to_ns(&ts);
}
// #define NV_CHECK_DELAY_ACCURACY 1
/*
* It is generally a bad idea to use udelay() to wait for more than
* a few milliseconds. Since the caller is most likely not aware of
* this, we use mdelay() for any full millisecond to be safe.
*/
static inline NV_STATUS nv_sleep_us(unsigned int us)
{
unsigned long mdelay_safe_msec;
unsigned long usec;
#ifdef NV_CHECK_DELAY_ACCURACY
struct timespec64 tm1, tm2, tm_diff;
ktime_get_raw_ts64(&tm1);
#endif
if (in_irq() && (us > NV_MAX_ISR_DELAY_US))
return NV_ERR_GENERIC;
mdelay_safe_msec = us / 1000;
if (mdelay_safe_msec)
mdelay(mdelay_safe_msec);
usec = us % 1000;
if (usec)
udelay(usec);
#ifdef NV_CHECK_DELAY_ACCURACY
ktime_get_raw_ts64(&tm2);
tm_diff = timespec64_sub(tm2, tm1);
pr_info("NVRM: delay of %d usec results in actual delay of 0x%llu nsec\n",
us, timespec64_to_ns(&tm_diff));
#endif
return NV_OK;
}
/*
* Sleep for specified milliseconds. Yields the CPU to scheduler.
*
 * On Linux, a jiffy represents the time that passes between two timer
 * interrupts. The number of jiffies per second (HZ) varies across the
 * supported platforms. On i386, where HZ is 100, a timer interrupt is
 * generated every 10ms. NV_NSECS_TO_JIFFIES should be accurate independent of
* the actual value of HZ; any partial jiffies will be 'floor'ed, the
* remainder will be accounted for with mdelay().
*/
static inline NV_STATUS nv_sleep_ms(unsigned int ms)
{
NvU64 ns;
unsigned long jiffies;
unsigned long mdelay_safe_msec;
struct timespec64 tm_end, tm_aux;
#ifdef NV_CHECK_DELAY_ACCURACY
struct timespec64 tm_start;
#endif
ktime_get_raw_ts64(&tm_aux);
#ifdef NV_CHECK_DELAY_ACCURACY
tm_start = tm_aux;
#endif
if (in_irq() && (ms > NV_MAX_ISR_DELAY_MS))
{
return NV_ERR_GENERIC;
}
if (irqs_disabled() || in_interrupt() || in_atomic())
{
mdelay(ms);
return NV_OK;
}
ns = ms * (NvU64) NSEC_PER_MSEC;
tm_end.tv_nsec = ns;
tm_end.tv_sec = 0;
tm_end = timespec64_add(tm_aux, tm_end);
/* do we have a full jiffy to wait? */
jiffies = NV_NSECS_TO_JIFFIES(ns);
if (jiffies)
{
//
// If we have at least one full jiffy to wait, give up
// the CPU; since we may be rescheduled before
// the requested timeout has expired, loop until less
// than a jiffy of the desired delay remains.
//
set_current_state(TASK_INTERRUPTIBLE);
do
{
schedule_timeout(jiffies);
ktime_get_raw_ts64(&tm_aux);
if (nv_timer_less_than(&tm_aux, &tm_end))
{
tm_aux = timespec64_sub(tm_end, tm_aux);
ns = (NvU64) timespec64_to_ns(&tm_aux);
}
else
ns = 0;
} while ((jiffies = NV_NSECS_TO_JIFFIES(ns)) != 0);
}
if (ns > (NvU64) NSEC_PER_MSEC)
{
mdelay_safe_msec = ns / (NvU64) NSEC_PER_MSEC;
mdelay(mdelay_safe_msec);
ns %= (NvU64) NSEC_PER_MSEC;
}
if (ns)
{
ndelay(ns);
}
#ifdef NV_CHECK_DELAY_ACCURACY
ktime_get_raw_ts64(&tm_aux);
tm_aux = timespec64_sub(tm_aux, tm_start);
pr_info("NVRM: delay of %d msec results in actual delay of %lld.%09ld sec\n",
ms, tm_aux.tv_sec, tm_aux.tv_nsec);
#endif
return NV_OK;
}
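/*
 * Illustrative sketch only: a bounded polling loop built on the sleep
 * helper above. The ready() predicate is hypothetical.
 */
#if 0
static NV_STATUS example_wait_until_ready(NvBool (*ready)(void))
{
    unsigned int i;

    for (i = 0; i < 100; i++)
    {
        if (ready())
            return NV_OK;
        nv_sleep_ms(10);  /* yields the CPU when not in atomic context */
    }

    return NV_ERR_GENERIC;
}
#endif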
#endif // __NV_TIME_H__

View File

@@ -0,0 +1,66 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef __NV_TIMER_H__
#define __NV_TIMER_H__
#include <linux/timer.h>
#include <linux/kernel.h> // For container_of
#include "conftest.h"
struct nv_timer
{
struct timer_list kernel_timer;
void (*nv_timer_callback)(struct nv_timer *nv_timer);
};
static inline void nv_timer_callback_typed_data(struct timer_list *timer)
{
struct nv_timer *nv_timer =
container_of(timer, struct nv_timer, kernel_timer);
nv_timer->nv_timer_callback(nv_timer);
}
static inline void nv_timer_callback_anon_data(unsigned long arg)
{
struct nv_timer *nv_timer = (struct nv_timer *)arg;
nv_timer->nv_timer_callback(nv_timer);
}
static inline void nv_timer_setup(struct nv_timer *nv_timer,
void (*callback)(struct nv_timer *nv_timer))
{
nv_timer->nv_timer_callback = callback;
#if defined(NV_TIMER_SETUP_PRESENT)
timer_setup(&nv_timer->kernel_timer, nv_timer_callback_typed_data, 0);
#else
init_timer(&nv_timer->kernel_timer);
nv_timer->kernel_timer.function = nv_timer_callback_anon_data;
nv_timer->kernel_timer.data = (unsigned long)nv_timer;
#endif
}
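/*
 * Illustrative sketch only: arming a one-shot timer with the wrapper
 * above; mod_timer() is the standard kernel interface for (re)arming.
 */
#if 0
static void example_timer_fired(struct nv_timer *t)
{
    /* Runs in timer (softirq) context. */
}

static void example_arm_timer(struct nv_timer *t)
{
    nv_timer_setup(t, example_timer_fired);
    mod_timer(&t->kernel_timer, jiffies + HZ);  /* fire in ~1 second */
}
#endif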
#endif // __NV_TIMER_H__

File diff suppressed because it is too large

View File

@@ -0,0 +1,44 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2015-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_CPU_UUID_H_
#define _NV_CPU_UUID_H_
#define NV_UUID_LEN 16
typedef struct nv_uuid
{
NvU8 uuid[NV_UUID_LEN];
} NvUuid;
#define NV_UUID_HI(pUuid) (*((NvU64*)((pUuid)->uuid + (NV_UUID_LEN >> 1))))
#define NV_UUID_LO(pUuid) (*((NvU64*)((pUuid)->uuid + 0)))
typedef NvUuid NvSystemUuid;
typedef NvUuid NvProcessorUuid;
extern const NvProcessorUuid NV_PROCESSOR_UUID_CPU_DEFAULT;
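/*
 * Illustrative sketch only: comparing two UUIDs via the 64-bit views
 * above (assumes the NvU8/NvU64/NvBool types from nvtypes.h).
 */
#if 0
static NvBool example_uuid_equal(const NvUuid *a, const NvUuid *b)
{
    return (NV_UUID_HI(a) == NV_UUID_HI(b)) &&
           (NV_UUID_LO(a) == NV_UUID_LO(b));
}
#endif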
#endif // _NV_CPU_UUID_H_

View File

@@ -0,0 +1,227 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2018 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* NVIDIA GPZ vulnerability mitigation definitions.
*/
/*
* There are two copies of this file for legacy reasons:
*
* P4: <$NV_SOURCE/>drivers/common/inc/nv_speculation_barrier.h
* Git: <tegra/core/>include/nv_speculation_barrier.h
*
* Both files need to be kept in sync if any changes are required.
*/
#ifndef _NV_SPECULATION_BARRIER_H_
#define _NV_SPECULATION_BARRIER_H_
#define NV_SPECULATION_BARRIER_VERSION 2
/*
* GNU-C/MSC/clang - x86/x86_64 : x86_64, __i386, __i386__
* GNU-C - THUMB mode : __GNUC__, __thumb__
* GNU-C - ARM modes : __GNUC__, __arm__, __aarch64__
* armclang - THUMB mode : __ARMCC_VERSION, __thumb__
* armclang - ARM modes : __ARMCC_VERSION, __arm__, __aarch64__
* GHS - THUMB mode : __ghs__, __THUMB__
* GHS - ARM modes : __ghs__, __ARM__, __ARM64__
*/
#if defined(_M_IX86) || defined(__i386__) || defined(__i386) \
|| defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
/* All x86 */
#define NV_SPECULATION_BARRIER_x86
#elif defined(macintosh) || defined(__APPLE__) \
|| defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) \
|| defined(__POWERPC__) || defined(__ppc) || defined(__ppc__) \
|| defined(__ppc64__) || defined(__PPC__) \
|| defined(__PPC64__) || defined(_ARCH_PPC) || defined(_ARCH_PPC64)
/* All PowerPC */
#define NV_SPECULATION_BARRIER_PPC
#elif (defined(__GNUC__) && defined(__thumb__)) \
|| (defined(__ARMCC_VERSION) && defined(__thumb__)) \
|| (defined(__ghs__) && defined(__THUMB__))
/* ARM-thumb mode(<=ARMv7)/T32 (ARMv8) */
#define NV_SPECULATION_BARRIER_ARM_COMMON
#define NV_SPEC_BARRIER_CSDB ".inst.w 0xf3af8014\n"
#elif (defined(__GNUC__) && defined(__arm__)) \
|| (defined(__ARMCC_VERSION) && defined(__arm__)) \
|| (defined(__ghs__) && defined(__ARM__))
/* aarch32(ARMv8) / arm(<=ARMv7) mode */
#define NV_SPECULATION_BARRIER_ARM_COMMON
#define NV_SPEC_BARRIER_CSDB ".inst 0xe320f014\n"
#elif (defined(__GNUC__) && defined(__aarch64__)) \
|| (defined(__ARMCC_VERSION) && defined(__aarch64__)) \
|| (defined(__ghs__) && defined(__ARM64__))
/* aarch64(ARMv8) mode */
#define NV_SPECULATION_BARRIER_ARM_COMMON
#define NV_SPEC_BARRIER_CSDB "HINT #20\n"
#elif (defined(_MSC_VER) && ( defined(_M_ARM64) || defined(_M_ARM)) )
/* Not currently implemented for MSVC/ARM64. See bug 3366890. */
# define nv_speculation_barrier()
# define speculation_barrier() nv_speculation_barrier()
#elif defined(NVCPU_NVRISCV64) && NVOS_IS_LIBOS
# define nv_speculation_barrier()
#else
#error "Unknown compiler/chip family"
#endif
/*
* nv_speculation_barrier -- General-purpose speculation barrier
*
* This approach provides full protection against variant-1 vulnerability.
* However, the recommended approach is detailed below (See:
* nv_array_index_no_speculate)
*
* Semantics:
* Any memory read that is sequenced after a nv_speculation_barrier(),
* and contained directly within the scope of nv_speculation_barrier() or
* directly within a nested scope, will not speculatively execute until all
* conditions for entering that scope have been architecturally resolved.
*
* Example:
* if (untrusted_index_from_user < bound) {
* ...
* nv_speculation_barrier();
* ...
* x = array1[untrusted_index_from_user];
* bit = x & 1;
* y = array2[0x100 * bit];
* }
*/
#if defined(NV_SPECULATION_BARRIER_x86)
// Delete after all references are changed to nv_speculation_barrier
#define speculation_barrier() nv_speculation_barrier()
static inline void nv_speculation_barrier(void)
{
#if defined(_MSC_VER) && !defined(__clang__)
_mm_lfence();
#endif
#if defined(__GNUC__) || defined(__clang__)
__asm__ __volatile__ ("lfence" : : : "memory");
#endif
}
#elif defined(NV_SPECULATION_BARRIER_PPC)
static inline void nv_speculation_barrier(void)
{
asm volatile("ori 31,31,0");
}
#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON)
/* Note: Cortex-A9 GNU-assembler seems to complain about DSB SY */
#define nv_speculation_barrier() \
asm volatile \
( \
"DSB sy\n" \
"ISB\n" \
: : : "memory" \
)
#endif
/*
* nv_array_index_no_speculate -- Recommended variant-1 mitigation approach
*
* The array-index-no-speculate approach "de-speculates" an array index that
* has already been bounds-checked.
*
* This approach is preferred over nv_speculation_barrier due to the following
* reasons:
* - It is just as effective as the general-purpose speculation barrier.
* - It clearly identifies what array index is being de-speculated and is thus
* self-commenting, whereas the general-purpose speculation barrier requires
* an explanation of what array index is being de-speculated.
* - It performs substantially better than the general-purpose speculation
* barrier on ARM Cortex-A cores (the difference is expected to be tens of
* cycles per invocation). Within tight loops, this difference may become
* noticeable.
*
* Semantics:
* Provided count is non-zero and the caller has already validated or otherwise
* established that index < count, any speculative use of the return value will
* use a speculative value that is less than count.
*
* Example:
* if (untrusted_index_from_user < bound) {
* untrusted_index_from_user = nv_array_index_no_speculate(
* untrusted_index_from_user, bound);
* ...
* x = array1[untrusted_index_from_user];
* ...
* }
*
* The use of nv_array_index_no_speculate() in the above example ensures that
* subsequent uses of untrusted_index_from_user will not execute speculatively
* (they will wait for the bounds check to complete).
*/
static inline unsigned long nv_array_index_no_speculate(unsigned long index,
unsigned long count)
{
#if defined(NV_SPECULATION_BARRIER_x86) && (defined(__GNUC__) || defined(__clang__))
unsigned long mask;
__asm__ __volatile__
(
"CMP %2, %1 \n"
"SBB %0, %0 \n"
: "=r"(mask) : "r"(index), "r"(count) : "cc"
);
return (index & mask);
#elif defined(NV_SPECULATION_BARRIER_ARM_COMMON)
unsigned long mask;
asm volatile
(
"CMP %[ind], %[cnt] \n"
"SBC %[res], %[cnt], %[cnt] \n"
NV_SPEC_BARRIER_CSDB
: [res] "=r" (mask) : [ind] "r" (index), [cnt] "r" (count): "cc"
);
return (index & mask);
/* Fallback to generic speculation barrier for unsupported platforms */
#else
nv_speculation_barrier();
return index;
#endif
}
#endif //_NV_SPECULATION_BARRIER_H_

View File

@@ -0,0 +1,39 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _NV_STDARG_H_
#define _NV_STDARG_H_
#if defined(NV_KERNEL_INTERFACE_LAYER) && defined(NV_LINUX)
#include "conftest.h"
#if defined(NV_LINUX_STDARG_H_PRESENT)
#include <linux/stdarg.h>
#else
#include <stdarg.h>
#endif
#else
#include <stdarg.h>
#endif
#endif // _NV_STDARG_H_

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,177 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2006 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/***************************************************************************\
|* *|
|* NV GPU Types *|
|* *|
|* This header contains definitions describing NVIDIA's GPU hardware state. *|
|* *|
\***************************************************************************/
#ifndef NVGPUTYPES_INCLUDED
#define NVGPUTYPES_INCLUDED
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
/***************************************************************************\
|* NvNotification *|
\***************************************************************************/
/***** NvNotification Structure *****/
/*
* NV objects return information about method completion to clients via an
* array of notification structures in main memory.
*
* The client sets the status field to NV???_NOTIFICATION_STATUS_IN_PROGRESS.
* NV fills in the NvNotification[] data structure in the following order:
* timeStamp, info32, info16, and then status.
*/
/* memory data structures */
typedef volatile struct NvNotificationRec {
struct { /* 0000- */
NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/
} timeStamp; /* -0007*/
NvV32 info32; /* info returned depends on method 0008-000b*/
NvV16 info16; /* info returned depends on method 000c-000d*/
NvV16 status; /* user sets bit 15, NV sets status 000e-000f*/
} NvNotification;
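/*
 * Illustrative polling sketch (not part of the original header). The 0x8000
 * "in progress" status (bit 15 set by the user, per the comment above) is an
 * assumption here; the real NV???_NOTIFICATION_STATUS_* constants come from
 * the per-class headers.
 */
static inline void nvExampleWaitForNotification(NvNotification *pNotify)
{
    pNotify->status = (NvV16)0x8000; /* user sets bit 15: in progress */
    /* ... submit the method that completes this notifier ... */
    while (((NvU16)pNotify->status & 0x8000U) != 0U)
    {
        /* spin (or yield); NV writes status last, after timeStamp/info */
    }
}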
/***************************************************************************\
|* NvGpuSemaphore *|
\***************************************************************************/
/***** NvGpuSemaphore Structure *****/
/*
* NvGpuSemaphore objects are used by the GPU to synchronize multiple
* command-streams.
*
* Please refer to class documentation for details regarding the content of
* the data[] field.
*/
/* memory data structures */
typedef volatile struct NvGpuSemaphoreRec {
NvV32 data[2]; /* Payload/Report data 0000-0007*/
struct { /* 0008- */
NvV32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 8- f*/
} timeStamp; /* -000f*/
} NvGpuSemaphore;
/***************************************************************************\
|* NvGetReport *|
\***************************************************************************/
/*
* NV objects, starting with Kelvin, return information such as pixel counts to
* the user via the NV*_GET_REPORT method.
*
* The client sets the "zero" field to any nonzero value and waits until it
* becomes zero. NV fills in the timeStamp, value, and zero fields.
*/
typedef volatile struct NVGetReportRec {
struct { /* 0000- */
NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/
} timeStamp; /* -0007*/
NvU32 value; /* info returned depends on method 0008-000b*/
NvU32 zero; /* always written to zero 000c-000f*/
} NvGetReport;
/***************************************************************************\
|* NvRcNotification *|
\***************************************************************************/
/*
* NV robust channel notification information is reported to clients via
* standard NV01_EVENT objects bound to instance of the NV*_CHANNEL_DMA and
* NV*_CHANNEL_GPFIFO objects.
*/
typedef struct NvRcNotificationRec {
struct {
NvU32 nanoseconds[2]; /* nanoseconds since Jan. 1, 1970 0- 7*/
} timeStamp; /* -0007*/
NvU32 exceptLevel; /* exception level 0008-000b*/
NvU32 exceptType; /* exception type 000c-000f*/
} NvRcNotification;
/***************************************************************************\
|* NvSyncPointFence *|
\***************************************************************************/
/***** NvSyncPointFence Structure *****/
/*
* NvSyncPointFence objects represent a syncpoint event. The syncPointID
* identifies the syncpoint register and the value is the value that the
* register will contain right after the event occurs.
*
* If syncPointID contains NV_INVALID_SYNCPOINT_ID then this is an invalid
* event. This is often used to indicate an event in the past (i.e. no need to
* wait).
*
* For more info on syncpoints refer to Mobile channel and syncpoint
* documentation.
*/
typedef struct NvSyncPointFenceRec {
NvU32 syncPointID;
NvU32 value;
} NvSyncPointFence;
#define NV_INVALID_SYNCPOINT_ID ((NvU32)-1)
/***************************************************************************\
|* *|
|* 64 bit type definitions for use in interface structures. *|
|* *|
\***************************************************************************/
typedef NvU64 NvOffset; /* GPU address */
#define NvOffset_HI32(n) ((NvU32)(((NvU64)(n)) >> 32))
#define NvOffset_LO32(n) ((NvU32)((NvU64)(n)))
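/*
 * Worked example (illustrative): for the GPU address 0x123456789ULL,
 * NvOffset_HI32() yields 0x1 and NvOffset_LO32() yields 0x23456789.
 */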
/*
* There are two types of GPU-UUIDs available:
*
* (1) a SHA-256 based 32 byte ID, formatted as a 64 character
* hexadecimal string as "GPU-%16x-%08x-%08x-%08x-%024x"; this is
* deprecated.
*
* (2) a SHA-1 based 16 byte ID, formatted as a 32 character
* hexadecimal string as "GPU-%08x-%04x-%04x-%04x-%012x" (the
* canonical format of a UUID); this is the default.
*/
#define NV_GPU_UUID_SHA1_LEN (16)
#define NV_GPU_UUID_SHA256_LEN (32)
#define NV_GPU_UUID_LEN NV_GPU_UUID_SHA1_LEN
#ifdef __cplusplus
};
#endif
#endif /* NVGPUTYPES_INCLUDED */

View File

@@ -0,0 +1,608 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if !defined(NVKMS_API_TYPES_H)
#define NVKMS_API_TYPES_H
#include <nvtypes.h>
#include <nvmisc.h>
#include <nvlimits.h>
#define NVKMS_MAX_SUBDEVICES NV_MAX_SUBDEVICES
#define NVKMS_MAX_HEADS_PER_DISP NV_MAX_HEADS
#define NVKMS_LEFT 0
#define NVKMS_RIGHT 1
#define NVKMS_MAX_EYES 2
#define NVKMS_MAIN_LAYER 0
#define NVKMS_OVERLAY_LAYER 1
#define NVKMS_MAX_LAYERS_PER_HEAD 8
#define NVKMS_MAX_PLANES_PER_SURFACE 3
#define NVKMS_DP_ADDRESS_STRING_LENGTH 64
#define NVKMS_DEVICE_ID_TEGRA 0x0000ffff
typedef NvU32 NvKmsDeviceHandle;
typedef NvU32 NvKmsDispHandle;
typedef NvU32 NvKmsConnectorHandle;
typedef NvU32 NvKmsSurfaceHandle;
typedef NvU32 NvKmsFrameLockHandle;
typedef NvU32 NvKmsDeferredRequestFifoHandle;
typedef NvU32 NvKmsSwapGroupHandle;
typedef NvU32 NvKmsVblankSyncObjectHandle;
struct NvKmsSize {
NvU16 width;
NvU16 height;
};
struct NvKmsPoint {
NvU16 x;
NvU16 y;
};
struct NvKmsSignedPoint {
NvS16 x;
NvS16 y;
};
struct NvKmsRect {
NvU16 x;
NvU16 y;
NvU16 width;
NvU16 height;
};
/*
* A 3x3 row-major matrix.
*
* The elements are 32-bit single-precision IEEE floating point values. The
* floating point bit pattern should be stored in NvU32s to be passed into the
* kernel.
*/
struct NvKmsMatrix {
NvU32 m[3][3];
};
typedef enum {
NVKMS_CONNECTOR_TYPE_DP = 0,
NVKMS_CONNECTOR_TYPE_VGA = 1,
NVKMS_CONNECTOR_TYPE_DVI_I = 2,
NVKMS_CONNECTOR_TYPE_DVI_D = 3,
NVKMS_CONNECTOR_TYPE_ADC = 4,
NVKMS_CONNECTOR_TYPE_LVDS = 5,
NVKMS_CONNECTOR_TYPE_HDMI = 6,
NVKMS_CONNECTOR_TYPE_USBC = 7,
NVKMS_CONNECTOR_TYPE_DSI = 8,
NVKMS_CONNECTOR_TYPE_DP_SERIALIZER = 9,
NVKMS_CONNECTOR_TYPE_UNKNOWN = 10,
NVKMS_CONNECTOR_TYPE_MAX = NVKMS_CONNECTOR_TYPE_UNKNOWN,
} NvKmsConnectorType;
static inline
const char *NvKmsConnectorTypeString(const NvKmsConnectorType connectorType)
{
switch (connectorType) {
case NVKMS_CONNECTOR_TYPE_DP: return "DP";
case NVKMS_CONNECTOR_TYPE_VGA: return "VGA";
case NVKMS_CONNECTOR_TYPE_DVI_I: return "DVI-I";
case NVKMS_CONNECTOR_TYPE_DVI_D: return "DVI-D";
case NVKMS_CONNECTOR_TYPE_ADC: return "ADC";
case NVKMS_CONNECTOR_TYPE_LVDS: return "LVDS";
case NVKMS_CONNECTOR_TYPE_HDMI: return "HDMI";
case NVKMS_CONNECTOR_TYPE_USBC: return "USB-C";
case NVKMS_CONNECTOR_TYPE_DSI: return "DSI";
case NVKMS_CONNECTOR_TYPE_DP_SERIALIZER: return "DP-SERIALIZER";
default: break;
}
return "Unknown";
}
typedef enum {
NVKMS_CONNECTOR_SIGNAL_FORMAT_VGA = 0,
NVKMS_CONNECTOR_SIGNAL_FORMAT_LVDS = 1,
NVKMS_CONNECTOR_SIGNAL_FORMAT_TMDS = 2,
NVKMS_CONNECTOR_SIGNAL_FORMAT_DP = 3,
NVKMS_CONNECTOR_SIGNAL_FORMAT_DSI = 4,
NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN = 5,
NVKMS_CONNECTOR_SIGNAL_FORMAT_MAX =
NVKMS_CONNECTOR_SIGNAL_FORMAT_UNKNOWN,
} NvKmsConnectorSignalFormat;
/*!
* Description of Notifiers and Semaphores (Non-isochronous (NISO) surfaces).
*
* When flipping, the client can optionally specify a notifier and/or
* a semaphore to use with the flip. The surfaces used for these
* should be registered with NVKMS to get an NvKmsSurfaceHandle.
*
* NvKmsNIsoSurface::offsetInWords indicates the starting location, in
* 32-bit words, within the surface where EVO should write the
* notifier or semaphore. Note that only the first 4096 bytes of a
* surface can be used by semaphores or notifiers; offsetInWords must
* allow for the semaphore or notifier to be written within the first
* 4096 bytes of the surface. I.e., this must be satisfied:
*
* ((offsetInWords * 4) + elementSizeInBytes) <= 4096
*
* Where elementSizeInBytes is:
*
* if NISO_FORMAT_FOUR_WORD*, elementSizeInBytes = 16
* if NISO_FORMAT_LEGACY,
* if overlay && notifier, elementSizeInBytes = 16
* else, elementSizeInBytes = 4
*
* Note that different GPUs support different semaphore and notifier formats.
* Check NvKmsAllocDeviceReply::validNIsoFormatMask to determine which are
* valid for the given device.
*
* Note also that FOUR_WORD and FOUR_WORD_NVDISPLAY are the same size, but
* FOUR_WORD uses a format compatible with display class 907[ce], and
* FOUR_WORD_NVDISPLAY uses a format compatible with c37e (actually defined by
* the NV_DISP_NOTIFIER definition in clc37d.h).
*/
enum NvKmsNIsoFormat {
NVKMS_NISO_FORMAT_LEGACY,
NVKMS_NISO_FORMAT_FOUR_WORD,
NVKMS_NISO_FORMAT_FOUR_WORD_NVDISPLAY,
};
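/*
 * Illustrative helper (not part of the original header): validating an
 * offsetInWords against the rules quoted above. 'isOverlay' and 'isNotifier'
 * are hypothetical flags describing how the surface will be used.
 */
static inline NvBool NvKmsExampleNIsoOffsetValid(NvU32 offsetInWords,
                                                 enum NvKmsNIsoFormat format,
                                                 NvBool isOverlay,
                                                 NvBool isNotifier)
{
    NvU32 elementSizeInBytes;
    if (format == NVKMS_NISO_FORMAT_LEGACY) {
        elementSizeInBytes = (isOverlay && isNotifier) ? 16 : 4;
    } else {
        elementSizeInBytes = 16; /* FOUR_WORD and FOUR_WORD_NVDISPLAY */
    }
    return ((offsetInWords * 4) + elementSizeInBytes) <= 4096;
}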
enum NvKmsEventType {
NVKMS_EVENT_TYPE_DPY_CHANGED,
NVKMS_EVENT_TYPE_DYNAMIC_DPY_CONNECTED,
NVKMS_EVENT_TYPE_DYNAMIC_DPY_DISCONNECTED,
NVKMS_EVENT_TYPE_DPY_ATTRIBUTE_CHANGED,
NVKMS_EVENT_TYPE_FRAMELOCK_ATTRIBUTE_CHANGED,
NVKMS_EVENT_TYPE_FLIP_OCCURRED,
};
typedef enum {
NV_EVO_SCALER_1TAP = 0,
NV_EVO_SCALER_2TAPS = 1,
NV_EVO_SCALER_3TAPS = 2,
NV_EVO_SCALER_5TAPS = 3,
NV_EVO_SCALER_8TAPS = 4,
NV_EVO_SCALER_TAPS_MIN = NV_EVO_SCALER_1TAP,
NV_EVO_SCALER_TAPS_MAX = NV_EVO_SCALER_8TAPS,
} NVEvoScalerTaps;
/* This structure describes the scaling bounds for a given layer. */
struct NvKmsScalingUsageBounds {
/*
* Maximum vertical downscale factor (scaled by 1024)
*
* For example, if the downscale factor is 1.5, then maxVDownscaleFactor
* would be 1.5 x 1024 = 1536.
*/
NvU16 maxVDownscaleFactor;
/*
* Maximum horizontal downscale factor (scaled by 1024)
*
* See the example above for maxVDownscaleFactor.
*/
NvU16 maxHDownscaleFactor;
/* Maximum vertical taps allowed */
NVEvoScalerTaps vTaps;
/* Whether vertical upscaling is allowed */
NvBool vUpscalingAllowed;
};
struct NvKmsUsageBounds {
struct {
NvBool usable;
struct NvKmsScalingUsageBounds scaling;
NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8);
} layer[NVKMS_MAX_LAYERS_PER_HEAD];
};
/*
* A 3x4 row-major colorspace conversion matrix.
*
* The output color C' is the CSC matrix M times the column vector
* [ R, G, B, 1 ].
*
* Each entry in the matrix is a signed 2's-complement fixed-point number with
* 3 integer bits and 16 fractional bits.
*/
struct NvKmsCscMatrix {
NvS32 m[3][4];
};
#define NVKMS_IDENTITY_CSC_MATRIX \
(struct NvKmsCscMatrix){{ \
{ 0x10000, 0, 0, 0 }, \
{ 0, 0x10000, 0, 0 }, \
{ 0, 0, 0x10000, 0 } \
}}
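/*
 * Worked example (illustrative): in this S3.16 fixed-point encoding, 1.0 is
 * 1 << 16 == 0x10000, as used by the identity matrix above; -0.5 would be
 * -(1 << 15) == -0x8000, stored in two's complement.
 */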
/*!
 * A color key match bit is used in the blend equations; one can select the
 * src or dst color key when blending. An asserted key bit means "match";
 * a de-asserted key bit means "nomatch".
 *
 * The src color key uses the key bit from the current layer; the dst color
 * key uses the key bit from the previous layer composition stage. The
 * blended pixel inherits the src or dst key bit, as its dst color key, in
 * preparation for the next blending stage.
 *
 * src: Forward the color key match bit from the current layer pixel to the
 * next layer composition stage.
 *
 * dst: Forward the color key match bit from the previous composition stage
 * pixel to the next layer composition stage.
 *
 * disable: Forward "1" to the next layer composition stage as the color key.
 */
enum NvKmsCompositionColorKeySelect {
NVKMS_COMPOSITION_COLOR_KEY_SELECT_DISABLE = 0,
NVKMS_COMPOSITION_COLOR_KEY_SELECT_SRC,
NVKMS_COMPOSITION_COLOR_KEY_SELECT_DST,
};
#define NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS 3
/*!
* Composition modes used for surfaces in general.
* The various types of composition are:
*
* Opaque: source pixels are opaque regardless of alpha,
* and will occlude the destination pixel.
*
* Alpha blending: aka opacity, which could be specified
* for a surface in its entirety, or on a per-pixel basis.
*
* Non-premultiplied: alpha value applies to source pixel,
* and also counter-weighs the destination pixel.
* Premultiplied: alpha already applied to source pixel,
* so it only counter-weighs the destination pixel.
*
* Color keying: use a color key structure to decide
* the criteria for matching and compositing.
* (See NVColorKey below.)
*/
enum NvKmsCompositionBlendingMode {
/*!
* Modes that use no other parameters.
*/
NVKMS_COMPOSITION_BLENDING_MODE_OPAQUE,
/*!
 * Mode that ignores both the per-pixel alpha provided by the client and
 * the surfaceAlpha, making the source pixel totally transparent.
 */
NVKMS_COMPOSITION_BLENDING_MODE_TRANSPARENT,
/*!
 * Modes that use the per-pixel alpha provided by the client;
 * surfaceAlpha must be set to 0.
 */
NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA,
NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA,
/*!
* These use both the surface-wide and per-pixel alpha values.
* surfaceAlpha is treated as the numerator (ranging from 0 to 255)
* of a fraction whose denominator is 255.
*/
NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA,
NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA,
};
static inline NvBool
NvKmsIsCompositionModeUseAlpha(enum NvKmsCompositionBlendingMode mode)
{
return mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_ALPHA ||
mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_ALPHA ||
mode == NVKMS_COMPOSITION_BLENDING_MODE_PREMULT_SURFACE_ALPHA ||
mode == NVKMS_COMPOSITION_BLENDING_MODE_NON_PREMULT_SURFACE_ALPHA;
}
/*!
* Abstract description of a color key.
*
* a, r, g, and b are component values in the same width as the framebuffer
* values being scanned out.
*
* match[ARGB] defines whether that component is considered when matching the
* color key -- TRUE means that the value of the corresponding component must
* match the given value for the given pixel to be considered a 'key match';
* FALSE means that the value of that component is not a key match criterion.
*/
typedef struct {
NvU16 a, r, g, b;
NvBool matchA, matchR, matchG, matchB;
} NVColorKey;
/*!
* Describes the composition parameters for the single layer.
*/
struct NvKmsCompositionParams {
enum NvKmsCompositionColorKeySelect colorKeySelect;
NVColorKey colorKey;
/*
* It is possible to assign different blending modes to match and nomatch
* pixels. blendingMode[0] is used to blend a pixel whose color key match
* bit is "0", and blendingMode[1] is used to blend a pixel whose color key
* match bit is "1".
*
* However, because of hardware restrictions, match and nomatch pixels
* cannot use the blending modes PREMULT_ALPHA, NON_PREMULT_ALPHA,
* PREMULT_SURFACE_ALPHA, and NON_PREMULT_SURFACE_ALPHA at once.
*/
enum NvKmsCompositionBlendingMode blendingMode[2];
NvU8 surfaceAlpha; /* Applies to all pixels of entire surface */
/*
* Defines the composition order. A smaller value moves the layer closer to
* the top (away from the background). Values need not be consecutive, but
* each layer owned by the head must use a distinct value, and the main
* layer must use the greatest one.
*
* The cursor always remains on top of all other layers; this parameter has
* no effect on the cursor. NVKMS assigns a default depth to each supported
* layer: by default, a layer's depth is calculated as
* (NVKMS_MAX_LAYERS_PER_HEAD - index of the layer). If depth is set to
* '0', the default depth value is used.
*/
NvU8 depth;
};
/*!
* Describes the composition capabilities supported by the hardware for the
* cursor or a layer. It describes the supported color key selects and, for
* each supported color key select, the supported blending modes for match
* and nomatch pixels.
*/
struct NvKmsCompositionCapabilities {
struct {
/*
* A bitmask of the supported blending modes for match and nomatch
* pixels. It should be the bitwise 'or' of one or more
* NVBIT(NVKMS_COMPOSITION_BLENDING_MODE_*) values.
*/
NvU32 supportedBlendModes[2];
} colorKeySelect[NVKMS_COMPOSITION_NUMBER_OF_COLOR_KEY_SELECTS];
/*
* A bitmask of the supported color key selects.
*
* It should be the bitwise 'or' of one or more
* NVBIT(NVKMS_COMPOSITION_COLOR_KEY_SELECT_*)
* values.
*/
NvU32 supportedColorKeySelects;
};
struct NvKmsLayerCapabilities {
/*!
* Whether Layer supports the window mode. If window mode is supported,
* then clients can set the layer's dimensions so that they're smaller than
* the viewport, and can also change the output position of the layer to a
* non-(0, 0) position.
*
* NOTE: Dimension changes are currently unsupported for the main layer,
* and output position changes for the main layer are currently only
* supported via IOCTL_SET_LAYER_POSITION but not via flips. Support for
* these is coming soon, via changes to flip code.
*/
NvBool supportsWindowMode :1;
/*!
* Whether layer supports HDR pipe.
*/
NvBool supportsHDR :1;
/*!
* Describes the supported Color Key selects and blending modes for
* match and nomatch layer pixels.
*/
struct NvKmsCompositionCapabilities composition;
/*!
* Which NvKmsSurfaceMemoryFormat enum values are supported by the NVKMS
* device on the given scanout surface layer.
*
* Iff a particular enum NvKmsSurfaceMemoryFormat 'value' is supported,
* then (1 << value) will be set in the appropriate bitmask.
*
* Note that these bitmasks just report the static SW/HW capabilities,
* and are a superset of the formats that IMP may allow. Clients are
* still expected to honor the NvKmsUsageBounds for each head.
*/
NvU64 supportedSurfaceMemoryFormats NV_ALIGN_BYTES(8);
};
/*!
* Surface layouts.
*
* BlockLinear is the NVIDIA GPU native tiling format, arranging pixels into
* blocks or tiles for better locality during common GPU operations.
*
* Pitch is the naive "linear" surface layout with pixels laid out sequentially
* in memory line-by-line, optionally with some padding at the end of each line
* for alignment purposes.
*/
enum NvKmsSurfaceMemoryLayout {
NvKmsSurfaceMemoryLayoutBlockLinear = 0,
NvKmsSurfaceMemoryLayoutPitch = 1,
};
static inline const char *NvKmsSurfaceMemoryLayoutToString(
enum NvKmsSurfaceMemoryLayout layout)
{
switch (layout) {
default:
return "Unknown";
case NvKmsSurfaceMemoryLayoutBlockLinear:
return "BlockLinear";
case NvKmsSurfaceMemoryLayoutPitch:
return "Pitch";
}
}
typedef enum {
MUX_STATE_GET = 0,
MUX_STATE_INTEGRATED = 1,
MUX_STATE_DISCRETE = 2,
MUX_STATE_UNKNOWN = 3,
} NvMuxState;
enum NvKmsRotation {
NVKMS_ROTATION_0 = 0,
NVKMS_ROTATION_90 = 1,
NVKMS_ROTATION_180 = 2,
NVKMS_ROTATION_270 = 3,
NVKMS_ROTATION_MIN = NVKMS_ROTATION_0,
NVKMS_ROTATION_MAX = NVKMS_ROTATION_270,
};
struct NvKmsRRParams {
enum NvKmsRotation rotation;
NvBool reflectionX;
NvBool reflectionY;
};
/*!
* Convert each possible NvKmsRRParams to a unique integer [0..15],
* so that we can describe possible NvKmsRRParams with an NvU16 bitmask.
*
* E.g.
* rotation = 0, reflectionX = F, reflectionY = F == 0|0|0 == 0
* ...
* rotation = 270, reflectionX = T, reflectionY = T == 3|4|8 == 15
*/
static inline NvU8 NvKmsRRParamsToCapBit(const struct NvKmsRRParams *rrParams)
{
NvU8 bitPosition = (NvU8)rrParams->rotation;
if (rrParams->reflectionX) {
bitPosition |= NVBIT(2);
}
if (rrParams->reflectionY) {
bitPosition |= NVBIT(3);
}
return bitPosition;
}
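/*
 * Illustrative use (not part of the original header): testing whether a
 * hypothetical NvU16 capability bitmask covers a given rotation/reflection
 * combination.
 */
static inline NvBool NvKmsExampleRRParamsSupported(NvU16 capMask,
                                                   const struct NvKmsRRParams *pRRParams)
{
    return (capMask & NVBIT(NvKmsRRParamsToCapBit(pRRParams))) != 0;
}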
/*
* NVKMS_MEMORY_ISO is used to tag surface memory that will be accessed via
* display's isochronous interface. Examples of this type of memory are pixel
* data and LUT entries.
*
* NVKMS_MEMORY_NISO is used to tag surface memory that will be accessed via
* display's non-isochronous interface. Examples of this type of memory are
* semaphores and notifiers.
*/
typedef enum {
NVKMS_MEMORY_ISO = 0,
NVKMS_MEMORY_NISO = 1,
} NvKmsMemoryIsoType;
typedef struct {
NvBool coherent;
NvBool noncoherent;
} NvKmsDispIOCoherencyModes;
enum NvKmsInputColorSpace {
/* Unknown colorspace; no de-gamma will be applied */
NVKMS_INPUT_COLORSPACE_NONE = 0,
/* Linear, Rec.709 [-0.5, 7.5) */
NVKMS_INPUT_COLORSPACE_SCRGB_LINEAR = 1,
/* PQ, Rec.2020 unity */
NVKMS_INPUT_COLORSPACE_BT2100_PQ = 2,
};
enum NvKmsOutputTf {
/*
* NVKMS itself won't apply any OETF (clients are still
* free to provide a custom OLUT)
*/
NVKMS_OUTPUT_TF_NONE = 0,
NVKMS_OUTPUT_TF_TRADITIONAL_GAMMA_SDR = 1,
NVKMS_OUTPUT_TF_PQ = 2,
};
/*!
* HDR Static Metadata Type1 Descriptor as per CEA-861.3 spec.
* This is expected to match the spec exactly.
*/
struct NvKmsHDRStaticMetadata {
/*!
* Color primaries of the data.
* These are coded as unsigned 16-bit values in units of 0.00002,
* where 0x0000 represents zero and 0xC350 represents 1.0000.
*/
struct {
NvU16 x, y;
} displayPrimaries[3];
/*!
* White point of colorspace data.
* These are coded as unsigned 16-bit values in units of 0.00002,
* where 0x0000 represents zero and 0xC350 represents 1.0000.
*/
struct {
NvU16 x, y;
} whitePoint;
/*!
* Maximum mastering display luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
NvU16 maxDisplayMasteringLuminance;
/*!
* Minimum mastering display luminance.
* This value is coded as an unsigned 16-bit value in units of
* 0.0001 cd/m2, where 0x0001 represents 0.0001 cd/m2 and 0xFFFF
* represents 6.5535 cd/m2.
*/
NvU16 minDisplayMasteringLuminance;
/*!
* Maximum content light level.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
NvU16 maxCLL;
/*!
* Maximum frame-average light level.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
* where 0x0001 represents 1 cd/m2 and 0xFFFF represents 65535 cd/m2.
*/
NvU16 maxFALL;
};
#endif /* NVKMS_API_TYPES_H */

View File

@@ -0,0 +1,126 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if !defined(NVKMS_FORMAT_H)
#define NVKMS_FORMAT_H
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
/*
* In order to interpret these pixel format namings, please take note of these
* conventions:
* - The Y8_U8__Y8_V8_N422 and U8_Y8__V8_Y8_N422 formats are both packed formats
* that have an interleaved chroma component across every two pixels. The
* double-underscore is a separator between these two pixel groups.
* - The triple-underscore is a separator between planes.
* - The 'N' suffix is a delimiter for the chroma decimation factor.
*
* As examples of the above rules:
* - The Y8_U8__Y8_V8_N422 format has one 8-bit luma component (Y8) and one
* 8-bit chroma component (U8) in pixel N, and one 8-bit luma component (Y8)
* and one 8-bit chroma component (V8) in pixel (N + 1). This format is
* 422-decimated since the U and V chroma samples are shared between each
* pair of adjacent pixels per line.
* - The Y10___U10V10_N444 format has one plane of 10-bit luma (Y10) components,
* and another plane of 10-bit chroma components (U10V10). This format has no
* chroma decimation since the luma and chroma components are sampled at the
* same rate.
*/
enum NvKmsSurfaceMemoryFormat {
NvKmsSurfaceMemoryFormatI8 = 0,
NvKmsSurfaceMemoryFormatA1R5G5B5 = 1,
NvKmsSurfaceMemoryFormatX1R5G5B5 = 2,
NvKmsSurfaceMemoryFormatR5G6B5 = 3,
NvKmsSurfaceMemoryFormatA8R8G8B8 = 4,
NvKmsSurfaceMemoryFormatX8R8G8B8 = 5,
NvKmsSurfaceMemoryFormatA2B10G10R10 = 6,
NvKmsSurfaceMemoryFormatX2B10G10R10 = 7,
NvKmsSurfaceMemoryFormatA8B8G8R8 = 8,
NvKmsSurfaceMemoryFormatX8B8G8R8 = 9,
NvKmsSurfaceMemoryFormatRF16GF16BF16AF16 = 10,
NvKmsSurfaceMemoryFormatR16G16B16A16 = 11,
NvKmsSurfaceMemoryFormatRF32GF32BF32AF32 = 12,
NvKmsSurfaceMemoryFormatY8_U8__Y8_V8_N422 = 13,
NvKmsSurfaceMemoryFormatU8_Y8__V8_Y8_N422 = 14,
NvKmsSurfaceMemoryFormatY8___U8V8_N444 = 15,
NvKmsSurfaceMemoryFormatY8___V8U8_N444 = 16,
NvKmsSurfaceMemoryFormatY8___U8V8_N422 = 17,
NvKmsSurfaceMemoryFormatY8___V8U8_N422 = 18,
NvKmsSurfaceMemoryFormatY8___U8V8_N420 = 19,
NvKmsSurfaceMemoryFormatY8___V8U8_N420 = 20,
NvKmsSurfaceMemoryFormatY10___U10V10_N444 = 21,
NvKmsSurfaceMemoryFormatY10___V10U10_N444 = 22,
NvKmsSurfaceMemoryFormatY10___U10V10_N422 = 23,
NvKmsSurfaceMemoryFormatY10___V10U10_N422 = 24,
NvKmsSurfaceMemoryFormatY10___U10V10_N420 = 25,
NvKmsSurfaceMemoryFormatY10___V10U10_N420 = 26,
NvKmsSurfaceMemoryFormatY12___U12V12_N444 = 27,
NvKmsSurfaceMemoryFormatY12___V12U12_N444 = 28,
NvKmsSurfaceMemoryFormatY12___U12V12_N422 = 29,
NvKmsSurfaceMemoryFormatY12___V12U12_N422 = 30,
NvKmsSurfaceMemoryFormatY12___U12V12_N420 = 31,
NvKmsSurfaceMemoryFormatY12___V12U12_N420 = 32,
NvKmsSurfaceMemoryFormatY8___U8___V8_N444 = 33,
NvKmsSurfaceMemoryFormatY8___U8___V8_N420 = 34,
NvKmsSurfaceMemoryFormatRF16GF16BF16XF16 = 35,
NvKmsSurfaceMemoryFormatMin = NvKmsSurfaceMemoryFormatI8,
NvKmsSurfaceMemoryFormatMax = NvKmsSurfaceMemoryFormatRF16GF16BF16XF16,
};
typedef struct NvKmsSurfaceMemoryFormatInfo {
enum NvKmsSurfaceMemoryFormat format;
const char *name;
NvU8 depth;
NvBool isYUV;
NvU8 numPlanes;
union {
struct {
NvU8 bytesPerPixel;
NvU8 bitsPerPixel;
} rgb;
struct {
NvU8 depthPerComponent;
NvU8 storageBitsPerComponent;
NvU8 horizChromaDecimationFactor;
NvU8 vertChromaDecimationFactor;
} yuv;
};
} NvKmsSurfaceMemoryFormatInfo;
const NvKmsSurfaceMemoryFormatInfo *nvKmsGetSurfaceMemoryFormatInfo(
const enum NvKmsSurfaceMemoryFormat format);
const char *nvKmsSurfaceMemoryFormatToString(
const enum NvKmsSurfaceMemoryFormat format);
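/*
 * Illustrative lookup sketch (not part of the original header), assuming
 * nvKmsGetSurfaceMemoryFormatInfo() returns a valid info pointer for any
 * in-range format:
 *
 * const NvKmsSurfaceMemoryFormatInfo *info =
 *     nvKmsGetSurfaceMemoryFormatInfo(NvKmsSurfaceMemoryFormatA8R8G8B8);
 * // info->isYUV == NV_FALSE, so info->rgb.bytesPerPixel gives the pixel size.
 */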
#ifdef __cplusplus
};
#endif
#endif /* NVKMS_FORMAT_H */

File diff suppressed because it is too large

View File

@@ -0,0 +1,52 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#pragma once
//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: nvlimits.finn
//
/*
* This is the maximum number of GPUs supported in a single system.
*/
#define NV_MAX_DEVICES 32
/*
* This is the maximum number of subdevices within a single device.
*/
#define NV_MAX_SUBDEVICES 8
/*
* This is the maximum length of the process name string.
*/
#define NV_PROC_NAME_MAX_LENGTH 100U
/*
* This is the maximum number of heads per GPU.
*/
#define NV_MAX_HEADS 4

View File

@@ -0,0 +1,927 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
* nvmisc.h
*/
#ifndef __NV_MISC_H
#define __NV_MISC_H
#ifdef __cplusplus
extern "C" {
#endif //__cplusplus
#include "nvtypes.h"
#if !defined(NVIDIA_UNDEF_LEGACY_BIT_MACROS)
//
// Miscellaneous macros useful for bit field manipulations
//
// STUPID HACK FOR CL 19434692. Will revert when fix CL is delivered bfm -> chips_a.
#ifndef BIT
#define BIT(b) (1U<<(b))
#endif
#ifndef BIT32
#define BIT32(b) ((NvU32)1U<<(b))
#endif
#ifndef BIT64
#define BIT64(b) ((NvU64)1U<<(b))
#endif
#endif
//
// It is recommended to use the following bit macros to avoid macro name
// collisions with other src code bases.
//
#ifndef NVBIT
#define NVBIT(b) (1U<<(b))
#endif
#ifndef NVBIT_TYPE
#define NVBIT_TYPE(b, t) (((t)1U)<<(b))
#endif
#ifndef NVBIT32
#define NVBIT32(b) NVBIT_TYPE(b, NvU32)
#endif
#ifndef NVBIT64
#define NVBIT64(b) NVBIT_TYPE(b, NvU64)
#endif
// Helper macros for 32-bit bitmasks
#define NV_BITMASK32_ELEMENT_SIZE (sizeof(NvU32) << 3)
#define NV_BITMASK32_IDX(chId) (((chId) & ~(0x1F)) >> 5)
#define NV_BITMASK32_OFFSET(chId) ((chId) & (0x1F))
#define NV_BITMASK32_SET(pChannelMask, chId) \
(pChannelMask)[NV_BITMASK32_IDX(chId)] |= NVBIT(NV_BITMASK32_OFFSET(chId))
#define NV_BITMASK32_GET(pChannelMask, chId) \
((pChannelMask)[NV_BITMASK32_IDX(chId)] & NVBIT(NV_BITMASK32_OFFSET(chId)))
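// Worked example (illustrative): with NvU32 mask[2] = {0, 0},
// NV_BITMASK32_SET(mask, 37) sets bit 5 of mask[1] (37 >> 5 == 1,
// 37 & 0x1F == 5); NV_BITMASK32_GET(mask, 37) then returns nonzero.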
// Index of the 'on' bit (assuming that there is only one).
// Even if multiple bits are 'on', result is in range of 0-31.
#define BIT_IDX_32(n) \
(((((n) & 0xFFFF0000U) != 0U) ? 0x10U: 0U) | \
((((n) & 0xFF00FF00U) != 0U) ? 0x08U: 0U) | \
((((n) & 0xF0F0F0F0U) != 0U) ? 0x04U: 0U) | \
((((n) & 0xCCCCCCCCU) != 0U) ? 0x02U: 0U) | \
((((n) & 0xAAAAAAAAU) != 0U) ? 0x01U: 0U) )
// Index of the 'on' bit (assuming that there is only one).
// Even if multiple bits are 'on', result is in range of 0-63.
#define BIT_IDX_64(n) \
(((((n) & 0xFFFFFFFF00000000ULL) != 0U) ? 0x20U: 0U) | \
((((n) & 0xFFFF0000FFFF0000ULL) != 0U) ? 0x10U: 0U) | \
((((n) & 0xFF00FF00FF00FF00ULL) != 0U) ? 0x08U: 0U) | \
((((n) & 0xF0F0F0F0F0F0F0F0ULL) != 0U) ? 0x04U: 0U) | \
((((n) & 0xCCCCCCCCCCCCCCCCULL) != 0U) ? 0x02U: 0U) | \
((((n) & 0xAAAAAAAAAAAAAAAAULL) != 0U) ? 0x01U: 0U) )
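// Worked example (illustrative): BIT_IDX_32(0x00000100U) == 8, since only
// bit 8 is set; BIT_IDX_64 behaves the same way over 64-bit masks.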
/*!
* DRF MACRO README:
*
* Glossary:
* DRF: Device, Register, Field
* FLD: Field
* REF: Reference
*
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA 0xDEADBEEF
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_GAMMA 27:0
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA 31:28
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ZERO 0x00000000
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_ONE 0x00000001
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_TWO 0x00000002
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_THREE 0x00000003
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FOUR 0x00000004
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_FIVE 0x00000005
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SIX 0x00000006
* #define NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA_SEVEN 0x00000007
*
*
* Device = _DEVICE_OMEGA
* This is the common "base" that a group of registers in a manual share
*
* Register = _REGISTER_ALPHA
* Register for a given block of defines is the common root for one or more fields and constants
*
* Field(s) = _FIELD_GAMMA, _FIELD_ZETA
* These are the bit ranges for a given field within the register
* Fields are not required to have defined constant values (enumerations)
*
* Constant(s) = _ZERO, _ONE, _TWO, ...
* These are named values (enums) a field can contain; the width of the constants should not be larger than the field width
*
* MACROS:
*
* DRF_SHIFT:
* Bit index of the lower bound of a field
* DRF_SHIFT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 28
*
* DRF_SHIFT_RT:
* Bit index of the higher bound of a field
* DRF_SHIFT_RT(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 31
*
* DRF_MASK:
* Produces a mask of 1-s equal to the width of a field
* DRF_MASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF (four 1s starting at bit 0)
*
* DRF_SHIFTMASK:
* Produces a mask of 1s equal to the width of a field at the location of the field
* DRF_SHIFTMASK(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 0xF0000000
*
* DRF_DEF:
* Shifts a field constant's value to the correct field offset
* DRF_DEF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE) == 0x30000000
*
* DRF_NUM:
* Shifts a number to the location of a particular field
* DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 3) == 0x30000000
* NOTE: If the value passed in is wider than the field, the value's high bits will be truncated
*
* DRF_SIZE:
* Provides the width of the field in bits
* DRF_SIZE(NV_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA) == 4
*
* DRF_VAL:
* Provides the value of an input within the field specified
* DRF_VAL(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xABCD1234) == 0xA
* This is sort of like the inverse of DRF_NUM
*
* DRF_IDX...:
* These macros are similar to the above but for fields that accept an index argument
*
* FLD_SET_DRF:
* Set the field bits in a given value with the given field constant
* NvU32 x = 0x00001234;
* x = FLD_SET_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, x);
* x == 0x30001234;
*
* FLD_SET_DRF_NUM:
* Same as FLD_SET_DRF but instead of using a field constant a literal/variable is passed in
* NvU32 x = 0x00001234;
* x = FLD_SET_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0xF, x);
* x == 0xF0001234;
*
* FLD_IDX...:
* These macros are similar to the above but for fields that accept an index argument
*
* FLD_TEST_DRF:
* Test if location specified by drf in 'v' has the same value as NV_drfc
* FLD_TEST_DRF(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE
*
* FLD_TEST_DRF_NUM:
* Test if locations specified by drf in 'v' have the same value as n
* FLD_TEST_DRF_NUM(_DEVICE_OMEGA, _REGISTER_ALPHA, _FIELD_ZETA, 0x3, 0x3000ABCD) == NV_TRUE
*
* REF_DEF:
* Like DRF_DEF but maintains full symbol name (use in cases where "NV" is not prefixed to the field)
* REF_DEF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE) == 0x30000000
*
* REF_VAL:
* Like DRF_VAL but maintains full symbol name (use in cases where "NV" is not prefixed to the field)
* REF_VAL(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xABCD1234) == 0xA
*
* REF_NUM:
* Like DRF_NUM but maintains full symbol name (use in cases where "NV" is not prefixed to the field)
* REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xA) == 0xA0000000
*
* FLD_SET_REF_NUM:
* Like FLD_SET_DRF_NUM but maintains full symbol name (use in cases where "NV" is not prefixed to the field)
* NvU32 x = 0x00001234;
* x = FLD_SET_REF_NUM(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, 0xF, x);
* x == 0xF0001234;
*
* FLD_TEST_REF:
* Like FLD_TEST_DRF but maintains full symbol name (use in cases where "NV" is not prefixed to the field)
* FLD_TEST_REF(SOME_OTHER_PREFIX_DEVICE_OMEGA_REGISTER_ALPHA_FIELD_ZETA, _THREE, 0x3000ABCD) == NV_TRUE
*
* Other macros:
* There is a plethora of other macros below that extend the above (notably Multi-Word (MW), 64-bit, and some
* reg read/write variations). I hope these are self-explanatory. If you have a need to use them, you
* probably have some knowledge of how they work.
*/
// tegra mobile uses nvmisc_macros.h and can't access nvmisc.h... and sometimes both get included.
#ifndef _NVMISC_MACROS_H
// Use Coverity Annotation to mark issues as false positives/ignore when using single bit defines.
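// Note: 'drf' expands to a bit range of the form HIGH:LOW, so the expression
// below forms a complete ternary: (bitval != 0) ? HIGH : LOW selects one bound.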
#define DRF_ISBIT(bitval,drf) \
( /* coverity[identical_branches] */ \
(bitval != 0) ? drf )
#define DEVICE_BASE(d) (0?d) // what's up with this name? totally non-parallel to the macros below
#define DEVICE_EXTENT(d) (1?d) // what's up with this name? totally non-parallel to the macros below
#ifdef NV_MISRA_COMPLIANCE_REQUIRED
#ifdef MISRA_14_3
#define DRF_BASE(drf) (drf##_LOW_FIELD)
#define DRF_EXTENT(drf) (drf##_HIGH_FIELD)
#define DRF_SHIFT(drf) ((drf##_LOW_FIELD) % 32U)
#define DRF_SHIFT_RT(drf) ((drf##_HIGH_FIELD) % 32U)
#define DRF_SIZE(drf) ((drf##_HIGH_FIELD)-(drf##_LOW_FIELD)+1U)
#define DRF_MASK(drf) (0xFFFFFFFFU >> (31U - ((drf##_HIGH_FIELD) % 32U) + ((drf##_LOW_FIELD) % 32U)))
#else
#define DRF_BASE(drf) (NV_FALSE?drf) // much better
#define DRF_EXTENT(drf) (NV_TRUE?drf) // much better
#define DRF_SHIFT(drf) (((NvU32)DRF_BASE(drf)) % 32U)
#define DRF_SHIFT_RT(drf) (((NvU32)DRF_EXTENT(drf)) % 32U)
#define DRF_SIZE(drf) (DRF_EXTENT(drf)-DRF_BASE(drf)+1U)
#define DRF_MASK(drf) (0xFFFFFFFFU>>(31U - DRF_SHIFT_RT(drf) + DRF_SHIFT(drf)))
#endif
#define DRF_DEF(d,r,f,c) (((NvU32)(NV ## d ## r ## f ## c))<<DRF_SHIFT(NV ## d ## r ## f))
#define DRF_NUM(d,r,f,n) ((((NvU32)(n))&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f))
#else
#define DRF_BASE(drf) (0?drf) // much better
#define DRF_EXTENT(drf) (1?drf) // much better
#define DRF_SHIFT(drf) ((DRF_ISBIT(0,drf)) % 32)
#define DRF_SHIFT_RT(drf) ((DRF_ISBIT(1,drf)) % 32)
#define DRF_SIZE(drf) (DRF_EXTENT(drf)-DRF_BASE(drf)+1U)
#define DRF_MASK(drf) (0xFFFFFFFFU>>(31-((DRF_ISBIT(1,drf)) % 32)+((DRF_ISBIT(0,drf)) % 32)))
#define DRF_DEF(d,r,f,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV ## d ## r ## f))
#define DRF_NUM(d,r,f,n) (((n)&DRF_MASK(NV ## d ## r ## f))<<DRF_SHIFT(NV ## d ## r ## f))
#endif
#define DRF_SHIFTMASK(drf) (DRF_MASK(drf)<<(DRF_SHIFT(drf)))
#define DRF_VAL(d,r,f,v) (((v)>>DRF_SHIFT(NV ## d ## r ## f))&DRF_MASK(NV ## d ## r ## f))
#endif
// Signed version of DRF_VAL, which takes care of extending sign bit.
#define DRF_VAL_SIGNED(d,r,f,v) (((DRF_VAL(d,r,f,(v)) ^ (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))) - (NVBIT(DRF_SIZE(NV ## d ## r ## f)-1U)))
#define DRF_IDX_DEF(d,r,f,i,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i)))
#define DRF_IDX_OFFSET_DEF(d,r,f,i,o,c) ((NV ## d ## r ## f ## c)<<DRF_SHIFT(NV##d##r##f(i,o)))
#define DRF_IDX_NUM(d,r,f,i,n) (((n)&DRF_MASK(NV##d##r##f(i)))<<DRF_SHIFT(NV##d##r##f(i)))
#define DRF_IDX_VAL(d,r,f,i,v) (((v)>>DRF_SHIFT(NV##d##r##f(i)))&DRF_MASK(NV##d##r##f(i)))
#define DRF_IDX_OFFSET_VAL(d,r,f,i,o,v) (((v)>>DRF_SHIFT(NV##d##r##f(i,o)))&DRF_MASK(NV##d##r##f(i,o)))
// Fractional version of DRF_VAL which reads Fx.y fixed point number (x.y)*z
#define DRF_VAL_FRAC(d,r,x,y,v,z) ((DRF_VAL(d,r,x,(v))*z) + ((DRF_VAL(d,r,y,v)*z) / (1<<DRF_SIZE(NV##d##r##y))))
//
// 64 Bit Versions
//
#define DRF_SHIFT64(drf) ((DRF_ISBIT(0,drf)) % 64)
#define DRF_MASK64(drf) (NV_U64_MAX>>(63-((DRF_ISBIT(1,drf)) % 64)+((DRF_ISBIT(0,drf)) % 64)))
#define DRF_SHIFTMASK64(drf) (DRF_MASK64(drf)<<(DRF_SHIFT64(drf)))
#define DRF_DEF64(d,r,f,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV ## d ## r ## f))
#define DRF_NUM64(d,r,f,n) ((((NvU64)(n))&DRF_MASK64(NV ## d ## r ## f))<<DRF_SHIFT64(NV ## d ## r ## f))
#define DRF_VAL64(d,r,f,v) ((((NvU64)(v))>>DRF_SHIFT64(NV ## d ## r ## f))&DRF_MASK64(NV ## d ## r ## f))
#define DRF_VAL_SIGNED64(d,r,f,v) (((DRF_VAL64(d,r,f,(v)) ^ (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))) - (NVBIT64(DRF_SIZE(NV ## d ## r ## f)-1)))
#define DRF_IDX_DEF64(d,r,f,i,c) (((NvU64)(NV ## d ## r ## f ## c))<<DRF_SHIFT64(NV##d##r##f(i)))
#define DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c) ((NvU64)(NV ## d ## r ## f ## c)<<DRF_SHIFT64(NV##d##r##f(i,o)))
#define DRF_IDX_NUM64(d,r,f,i,n) ((((NvU64)(n))&DRF_MASK64(NV##d##r##f(i)))<<DRF_SHIFT64(NV##d##r##f(i)))
#define DRF_IDX_VAL64(d,r,f,i,v) ((((NvU64)(v))>>DRF_SHIFT64(NV##d##r##f(i)))&DRF_MASK64(NV##d##r##f(i)))
#define DRF_IDX_OFFSET_VAL64(d,r,f,i,o,v) (((NvU64)(v)>>DRF_SHIFT64(NV##d##r##f(i,o)))&DRF_MASK64(NV##d##r##f(i,o)))
#define FLD_SET_DRF64(d,r,f,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c))
#define FLD_SET_DRF_NUM64(d,r,f,n,v) ((((NvU64)(v)) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_NUM64(d,r,f,n))
#define FLD_IDX_SET_DRF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c))
#define FLD_IDX_OFFSET_SET_DRF64(d,r,f,i,o,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF64(d,r,f,i,o,c))
#define FLD_IDX_SET_DRF_DEF64(d,r,f,i,c,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_DEF64(d,r,f,i,c))
#define FLD_IDX_SET_DRF_NUM64(d,r,f,i,n,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f(i))) | DRF_IDX_NUM64(d,r,f,i,n))
#define FLD_SET_DRF_IDX64(d,r,f,c,i,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(NV##d##r##f)) | DRF_DEF64(d,r,f,c(i)))
#define FLD_TEST_DRF64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) == NV##d##r##f##c)
#define FLD_TEST_DRF_AND64(d,r,f,c,v) (DRF_VAL64(d, r, f, (v)) & NV##d##r##f##c)
#define FLD_TEST_DRF_NUM64(d,r,f,n,v) (DRF_VAL64(d, r, f, (v)) == (n))
#define FLD_IDX_TEST_DRF64(d,r,f,i,c,v) (DRF_IDX_VAL64(d, r, f, i, (v)) == NV##d##r##f##c)
#define FLD_IDX_OFFSET_TEST_DRF64(d,r,f,i,o,c,v) (DRF_IDX_OFFSET_VAL64(d, r, f, i, o, (v)) == NV##d##r##f##c)
#define REF_DEF64(drf,d) (((drf ## d)&DRF_MASK64(drf))<<DRF_SHIFT64(drf))
#define REF_VAL64(drf,v) (((NvU64)(v)>>DRF_SHIFT64(drf))&DRF_MASK64(drf))
#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3)
#define REF_NUM64(drf,n) (((NvU64)(n)&(0xFFFFFFFFFFFFFFFFU>>(63U-((drf##_HIGH_FIELD) % 63U)+((drf##_LOW_FIELD) % 63U)))) << ((drf##_LOW_FIELD) % 63U))
#else
#define REF_NUM64(drf,n) (((NvU64)(n)&DRF_MASK64(drf))<<DRF_SHIFT64(drf))
#endif
#define FLD_TEST_REF64(drf,c,v) (REF_VAL64(drf, v) == drf##c)
#define FLD_TEST_REF_AND64(drf,c,v) (REF_VAL64(drf, v) & drf##c)
#define FLD_SET_REF_NUM64(drf,n,v) (((NvU64)(v) & ~DRF_SHIFTMASK64(drf)) | REF_NUM64(drf,n))
//
// 32 Bit Versions
//
#ifdef NV_MISRA_COMPLIANCE_REQUIRED
#define FLD_SET_DRF(d,r,f,c,v) (((NvU32)(v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c))
#define FLD_SET_DRF_NUM(d,r,f,n,v) (((NvU32)(v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_NUM(d,r,f,n))
#define FLD_IDX_SET_DRF(d,r,f,i,c,v) (((NvU32)(v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_DEF(d,r,f,i,c))
#define FLD_IDX_OFFSET_SET_DRF(d,r,f,i,o,c,v) (((NvU32)(v) & ~DRF_SHIFTMASK(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF(d,r,f,i,o,c))
#define FLD_IDX_SET_DRF_DEF(d,r,f,i,c,v) (((NvU32)(v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_DEF(d,r,f,i,c))
#define FLD_IDX_SET_DRF_NUM(d,r,f,i,n,v) (((NvU32)(v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_NUM(d,r,f,i,n))
#define FLD_SET_DRF_IDX(d,r,f,c,i,v) (((NvU32)(v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c(i)))
#define FLD_TEST_DRF(d,r,f,c,v) ((DRF_VAL(d, r, f, (v)) == (NvU32)(NV##d##r##f##c)))
#define FLD_TEST_DRF_AND(d,r,f,c,v) ((DRF_VAL(d, r, f, (v)) & (NvU32)(NV##d##r##f##c)) != 0U)
#define FLD_TEST_DRF_NUM(d,r,f,n,v) ((DRF_VAL(d, r, f, (v)) == (NvU32)(n)))
#define FLD_IDX_TEST_DRF(d,r,f,i,c,v) ((DRF_IDX_VAL(d, r, f, i, (v)) == (NvU32)(NV##d##r##f##c)))
#define FLD_IDX_OFFSET_TEST_DRF(d,r,f,i,o,c,v) ((DRF_IDX_OFFSET_VAL(d, r, f, i, o, (v)) == (NvU32)(NV##d##r##f##c)))
#else
#define FLD_SET_DRF(d,r,f,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c))
#define FLD_SET_DRF_NUM(d,r,f,n,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_NUM(d,r,f,n))
#define FLD_IDX_SET_DRF(d,r,f,i,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_DEF(d,r,f,i,c))
#define FLD_IDX_OFFSET_SET_DRF(d,r,f,i,o,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i,o))) | DRF_IDX_OFFSET_DEF(d,r,f,i,o,c))
#define FLD_IDX_SET_DRF_DEF(d,r,f,i,c,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_DEF(d,r,f,i,c))
#define FLD_IDX_SET_DRF_NUM(d,r,f,i,n,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f(i))) | DRF_IDX_NUM(d,r,f,i,n))
#define FLD_SET_DRF_IDX(d,r,f,c,i,v) (((v) & ~DRF_SHIFTMASK(NV##d##r##f)) | DRF_DEF(d,r,f,c(i)))
#define FLD_TEST_DRF(d,r,f,c,v) ((DRF_VAL(d, r, f, (v)) == NV##d##r##f##c))
#define FLD_TEST_DRF_AND(d,r,f,c,v) ((DRF_VAL(d, r, f, (v)) & NV##d##r##f##c))
#define FLD_TEST_DRF_NUM(d,r,f,n,v) ((DRF_VAL(d, r, f, (v)) == (n)))
#define FLD_IDX_TEST_DRF(d,r,f,i,c,v) ((DRF_IDX_VAL(d, r, f, i, (v)) == NV##d##r##f##c))
#define FLD_IDX_OFFSET_TEST_DRF(d,r,f,i,o,c,v) ((DRF_IDX_OFFSET_VAL(d, r, f, i, o, (v)) == NV##d##r##f##c))
#endif
#define REF_DEF(drf,d) (((drf ## d)&DRF_MASK(drf))<<DRF_SHIFT(drf))
#define REF_VAL(drf,v) (((v)>>DRF_SHIFT(drf))&DRF_MASK(drf))
#if defined(NV_MISRA_COMPLIANCE_REQUIRED) && defined(MISRA_14_3)
#define REF_NUM(drf,n) (((n)&(0xFFFFFFFFU>>(31U-((drf##_HIGH_FIELD) % 32U)+((drf##_LOW_FIELD) % 32U)))) << ((drf##_LOW_FIELD) % 32U))
#else
#define REF_NUM(drf,n) (((n)&DRF_MASK(drf))<<DRF_SHIFT(drf))
#endif
#define FLD_TEST_REF(drf,c,v) (REF_VAL(drf, (v)) == drf##c)
#define FLD_TEST_REF_AND(drf,c,v) (REF_VAL(drf, (v)) & drf##c)
#define FLD_SET_REF_NUM(drf,n,v) (((v) & ~DRF_SHIFTMASK(drf)) | REF_NUM(drf,n))
#define CR_DRF_DEF(d,r,f,c) ((CR ## d ## r ## f ## c)<<DRF_SHIFT(CR ## d ## r ## f))
#define CR_DRF_NUM(d,r,f,n) (((n)&DRF_MASK(CR ## d ## r ## f))<<DRF_SHIFT(CR ## d ## r ## f))
#define CR_DRF_VAL(d,r,f,v) (((v)>>DRF_SHIFT(CR ## d ## r ## f))&DRF_MASK(CR ## d ## r ## f))
// Multi-word (MW) field manipulations. For multi-word structures (e.g., Fermi SPH),
// fields may have bit numbers beyond 32. To avoid errors using "classic" multi-word macros,
// all the field extents are defined as "MW(X)". For example, MW(127:96) means
// the field is in bits 0-31 of word number 3 of the structure.
//
// DRF_VAL_MW() macro is meant to be used for native endian 32-bit aligned 32-bit word data,
// not for byte stream data.
//
// DRF_VAL_BS() macro is for byte stream data used in fbQueryBIOS_XXX().
//
#define DRF_EXPAND_MW(drf) drf // used to turn "MW(a:b)" into "a:b"
#define DRF_PICK_MW(drf,v) ((v)? DRF_EXPAND_##drf) // picks low or high bits
#define DRF_WORD_MW(drf) (DRF_PICK_MW(drf,0)/32) // which word in a multi-word array
#define DRF_BASE_MW(drf) (DRF_PICK_MW(drf,0)%32) // which start bit in the selected word?
#define DRF_EXTENT_MW(drf) (DRF_PICK_MW(drf,1)%32) // which end bit in the selected word
#define DRF_SHIFT_MW(drf) (DRF_PICK_MW(drf,0)%32)
#define DRF_MASK_MW(drf) (0xFFFFFFFFU>>((31-(DRF_EXTENT_MW(drf))+(DRF_BASE_MW(drf)))%32))
#define DRF_SHIFTMASK_MW(drf) ((DRF_MASK_MW(drf))<<(DRF_SHIFT_MW(drf)))
#define DRF_SIZE_MW(drf) (DRF_EXTENT_MW(drf)-DRF_BASE_MW(drf)+1)
#define DRF_DEF_MW(d,r,f,c) ((NV##d##r##f##c) << DRF_SHIFT_MW(NV##d##r##f))
#define DRF_NUM_MW(d,r,f,n) (((n)&DRF_MASK_MW(NV##d##r##f))<<DRF_SHIFT_MW(NV##d##r##f))
//
// DRF_VAL_MW is the ONLY multi-word macro which supports spanning. No other MW macro supports spanning currently
//
#define DRF_VAL_MW_1WORD(d,r,f,v) ((((v)[DRF_WORD_MW(NV##d##r##f)])>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f))
#define DRF_SPANS(drf) ((DRF_PICK_MW(drf,0)/32) != (DRF_PICK_MW(drf,1)/32))
#define DRF_WORD_MW_LOW(drf) (DRF_PICK_MW(drf,0)/32)
#define DRF_WORD_MW_HIGH(drf) (DRF_PICK_MW(drf,1)/32)
#define DRF_MASK_MW_LOW(drf) (0xFFFFFFFFU)
#define DRF_MASK_MW_HIGH(drf) (0xFFFFFFFFU>>(31-(DRF_EXTENT_MW(drf))))
#define DRF_SHIFT_MW_LOW(drf) (DRF_PICK_MW(drf,0)%32)
#define DRF_SHIFT_MW_HIGH(drf) (0)
#define DRF_MERGE_SHIFT(drf) ((32-((DRF_PICK_MW(drf,0)%32)))%32)
#define DRF_VAL_MW_2WORD(d,r,f,v) (((((v)[DRF_WORD_MW_LOW(NV##d##r##f)])>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \
(((((v)[DRF_WORD_MW_HIGH(NV##d##r##f)])>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f)))
#define DRF_VAL_MW(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_MW_2WORD(d,r,f,v) : DRF_VAL_MW_1WORD(d,r,f,v) )
#define DRF_IDX_DEF_MW(d,r,f,i,c) ((NV##d##r##f##c)<<DRF_SHIFT_MW(NV##d##r##f(i)))
#define DRF_IDX_NUM_MW(d,r,f,i,n) (((n)&DRF_MASK_MW(NV##d##r##f(i)))<<DRF_SHIFT_MW(NV##d##r##f(i)))
#define DRF_IDX_VAL_MW(d,r,f,i,v) ((((v)[DRF_WORD_MW(NV##d##r##f(i))])>>DRF_SHIFT_MW(NV##d##r##f(i)))&DRF_MASK_MW(NV##d##r##f(i)))
//
// Logically OR all DRF_DEF constants indexed from zero to s (semiinclusive).
// Caution: Target variable v must be pre-initialized.
//
#define FLD_IDX_OR_DRF_DEF(d,r,f,c,s,v) \
do \
{ NvU32 idx; \
for (idx = 0; idx < (NV ## d ## r ## f ## s); ++idx)\
{ \
v |= DRF_IDX_DEF(d,r,f,idx,c); \
} \
} while(0)
#define FLD_MERGE_MW(drf,n,v) (((v)[DRF_WORD_MW(drf)] & ~DRF_SHIFTMASK_MW(drf)) | n)
#define FLD_ASSIGN_MW(drf,n,v) ((v)[DRF_WORD_MW(drf)] = FLD_MERGE_MW(drf, n, v))
#define FLD_IDX_MERGE_MW(drf,i,n,v) (((v)[DRF_WORD_MW(drf(i))] & ~DRF_SHIFTMASK_MW(drf(i))) | n)
#define FLD_IDX_ASSIGN_MW(drf,i,n,v) ((v)[DRF_WORD_MW(drf(i))] = FLD_MERGE_MW(drf(i), n, v))
#define FLD_SET_DRF_MW(d,r,f,c,v) FLD_MERGE_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v)
#define FLD_SET_DRF_NUM_MW(d,r,f,n,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_NUM_MW(d,r,f,n), v)
#define FLD_SET_DRF_DEF_MW(d,r,f,c,v) FLD_ASSIGN_MW(NV##d##r##f, DRF_DEF_MW(d,r,f,c), v)
#define FLD_IDX_SET_DRF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v)
#define FLD_IDX_SET_DRF_DEF_MW(d,r,f,i,c,v) FLD_IDX_MERGE_MW(NV##d##r##f, i, DRF_IDX_DEF_MW(d,r,f,i,c), v)
#define FLD_IDX_SET_DRF_NUM_MW(d,r,f,i,n,v) FLD_IDX_ASSIGN_MW(NV##d##r##f, i, DRF_IDX_NUM_MW(d,r,f,i,n), v)
#define FLD_TEST_DRF_MW(d,r,f,c,v) ((DRF_VAL_MW(d, r, f, (v)) == NV##d##r##f##c))
#define FLD_TEST_DRF_NUM_MW(d,r,f,n,v) ((DRF_VAL_MW(d, r, f, (v)) == n))
#define FLD_IDX_TEST_DRF_MW(d,r,f,i,c,v) ((DRF_IDX_VAL_MW(d, r, f, i, (v)) == NV##d##r##f##c))
#define DRF_VAL_BS(d,r,f,v) ( DRF_SPANS(NV##d##r##f) ? DRF_VAL_BS_2WORD(d,r,f,(v)) : DRF_VAL_BS_1WORD(d,r,f,(v)) )
//------------------------------------------------------------------------//
// //
// Common defines for engine register reference wrappers //
// //
// New engine addressing can be created like: //
// \#define ENG_REG_PMC(o,d,r) NV##d##r //
// \#define ENG_IDX_REG_CE(o,d,i,r) CE_MAP(o,r,i) //
// //
// See FB_FBPA* for more examples //
//------------------------------------------------------------------------//
#define ENG_RD_REG(g,o,d,r) GPU_REG_RD32(g, ENG_REG##d(o,d,r))
#define ENG_WR_REG(g,o,d,r,v) GPU_REG_WR32(g, ENG_REG##d(o,d,r), (v))
#define ENG_RD_DRF(g,o,d,r,f) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
#define ENG_WR_DRF_DEF(g,o,d,r,f,c) GPU_REG_WR32(g, ENG_REG##d(o,d,r),(GPU_REG_RD32(g,ENG_REG##d(o,d,r))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_DEF(d,r,f,c))
#define ENG_WR_DRF_NUM(g,o,d,r,f,n) GPU_REG_WR32(g, ENG_REG##d(o,d,r),(GPU_REG_RD32(g,ENG_REG##d(o,d,r))&~(GPU_DRF_MASK(NV##d##r##f)<<GPU_DRF_SHIFT(NV##d##r##f)))|GPU_DRF_NUM(d,r,f,n))
#define ENG_TEST_DRF_DEF(g,o,d,r,f,c) (ENG_RD_DRF(g, o, d, r, f) == NV##d##r##f##c)
#define ENG_RD_IDX_DRF(g,o,d,r,f,i) ((GPU_REG_RD32(g, ENG_REG##d(o,d,r(i)))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
#define ENG_TEST_IDX_DRF_DEF(g,o,d,r,f,c,i) (ENG_RD_IDX_DRF(g, o, d, r, f, (i)) == NV##d##r##f##c)
#define ENG_IDX_RD_REG(g,o,d,i,r) GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))
#define ENG_IDX_WR_REG(g,o,d,i,r,v) GPU_REG_WR32(g, ENG_IDX_REG##d(o,d,i,r), (v))
#define ENG_IDX_RD_DRF(g,o,d,i,r,f) ((GPU_REG_RD32(g, ENG_IDX_REG##d(o,d,i,r))>>GPU_DRF_SHIFT(NV ## d ## r ## f))&GPU_DRF_MASK(NV ## d ## r ## f))
//
// DRF_READ_1WORD_BS() and DRF_READ_1WORD_BS_HIGH() do not read beyond the bytes that contain
// the requested value. Reading beyond the actual data causes a page fault panic when the
// immediately following page happened to be protected or not mapped.
//
#define DRF_VAL_BS_1WORD(d,r,f,v) ((DRF_READ_1WORD_BS(d,r,f,v)>>DRF_SHIFT_MW(NV##d##r##f))&DRF_MASK_MW(NV##d##r##f))
#define DRF_VAL_BS_2WORD(d,r,f,v) (((DRF_READ_4BYTE_BS(NV##d##r##f,v)>>DRF_SHIFT_MW_LOW(NV##d##r##f))&DRF_MASK_MW_LOW(NV##d##r##f)) | \
(((DRF_READ_1WORD_BS_HIGH(d,r,f,v)>>DRF_SHIFT_MW_HIGH(NV##d##r##f))&DRF_MASK_MW_HIGH(NV##d##r##f)) << DRF_MERGE_SHIFT(NV##d##r##f)))
#define DRF_READ_1BYTE_BS(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4]))
#define DRF_READ_2BYTE_BS(drf,v) (DRF_READ_1BYTE_BS(drf,v)| \
((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+1])<<8))
#define DRF_READ_3BYTE_BS(drf,v) (DRF_READ_2BYTE_BS(drf,v)| \
((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+2])<<16))
#define DRF_READ_4BYTE_BS(drf,v) (DRF_READ_3BYTE_BS(drf,v)| \
((NvU32)(((const NvU8*)(v))[DRF_WORD_MW(drf)*4+3])<<24))
#define DRF_READ_1BYTE_BS_HIGH(drf,v) ((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4]))
#define DRF_READ_2BYTE_BS_HIGH(drf,v) (DRF_READ_1BYTE_BS_HIGH(drf,v)| \
((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+1])<<8))
#define DRF_READ_3BYTE_BS_HIGH(drf,v) (DRF_READ_2BYTE_BS_HIGH(drf,v)| \
((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+2])<<16))
#define DRF_READ_4BYTE_BS_HIGH(drf,v) (DRF_READ_3BYTE_BS_HIGH(drf,v)| \
((NvU32)(((const NvU8*)(v))[DRF_WORD_MW_HIGH(drf)*4+3])<<24))
// Calculate 2^n - 1 and avoid shift counter overflow
//
// On Windows amd64, 64 << 64 => 1
//
#define NV_TWO_N_MINUS_ONE(n) (((1ULL<<(n/2))<<((n+1)/2))-1)
#define DRF_READ_1WORD_BS(d,r,f,v) \
((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS(NV##d##r##f,(v)): \
((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS(NV##d##r##f,(v)): \
((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS(NV##d##r##f,(v)): \
DRF_READ_4BYTE_BS(NV##d##r##f,(v)))))
#define DRF_READ_1WORD_BS_HIGH(d,r,f,v) \
((DRF_EXTENT_MW(NV##d##r##f)<8)?DRF_READ_1BYTE_BS_HIGH(NV##d##r##f,(v)): \
((DRF_EXTENT_MW(NV##d##r##f)<16)?DRF_READ_2BYTE_BS_HIGH(NV##d##r##f,(v)): \
((DRF_EXTENT_MW(NV##d##r##f)<24)?DRF_READ_3BYTE_BS_HIGH(NV##d##r##f,(v)): \
DRF_READ_4BYTE_BS_HIGH(NV##d##r##f,(v)))))
#define LOWESTBIT(x) ( (x) & (((x) - 1U) ^ (x)) )
// Destructive operation on n32
#define HIGHESTBIT(n32) \
{ \
HIGHESTBITIDX_32(n32); \
n32 = NVBIT(n32); \
}
#define ONEBITSET(x) ( ((x) != 0U) && (((x) & ((x) - 1U)) == 0U) )
// Destructive operation on n32
#define NUMSETBITS_32(n32) \
{ \
n32 = n32 - ((n32 >> 1) & 0x55555555); \
n32 = (n32 & 0x33333333) + ((n32 >> 2) & 0x33333333); \
n32 = (((n32 + (n32 >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; \
}
/*!
* Calculate number of bits set in a 32-bit unsigned integer.
* Pure typesafe alternative to @ref NUMSETBITS_32.
*/
static NV_FORCEINLINE NvU32
nvPopCount32(const NvU32 x)
{
NvU32 temp = x;
temp = temp - ((temp >> 1) & 0x55555555U);
temp = (temp & 0x33333333U) + ((temp >> 2) & 0x33333333U);
temp = (((temp + (temp >> 4)) & 0x0F0F0F0FU) * 0x01010101U) >> 24;
return temp;
}
/*!
* Calculate number of bits set in a 64-bit unsigned integer.
*/
static NV_FORCEINLINE NvU32
nvPopCount64(const NvU64 x)
{
NvU64 temp = x;
temp = temp - ((temp >> 1) & 0x5555555555555555ULL);
temp = (temp & 0x3333333333333333ULL) + ((temp >> 2) & 0x3333333333333333ULL);
temp = (temp + (temp >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
temp = (temp * 0x0101010101010101ULL) >> 56;
return (NvU32)temp;
}
/*!
* Determine how many bits are set below a bit index within a mask.
* This assigns a dense ordering to the set bits in the mask.
*
* For example the mask 0xCD contains 5 set bits:
* nvMaskPos32(0xCD, 0) == 0
* nvMaskPos32(0xCD, 2) == 1
* nvMaskPos32(0xCD, 3) == 2
* nvMaskPos32(0xCD, 6) == 3
* nvMaskPos32(0xCD, 7) == 4
*/
static NV_FORCEINLINE NvU32
nvMaskPos32(const NvU32 mask, const NvU32 bitIdx)
{
return nvPopCount32(mask & (NVBIT32(bitIdx) - 1U));
}
// Destructive operation on n32
#define LOWESTBITIDX_32(n32) \
{ \
n32 = BIT_IDX_32(LOWESTBIT(n32));\
}
// Destructive operation on n32
#define HIGHESTBITIDX_32(n32) \
{ \
NvU32 count = 0; \
while (n32 >>= 1) \
{ \
count++; \
} \
n32 = count; \
}
// Destructive operation on n32
#define ROUNDUP_POW2(n32) \
{ \
n32--; \
n32 |= n32 >> 1; \
n32 |= n32 >> 2; \
n32 |= n32 >> 4; \
n32 |= n32 >> 8; \
n32 |= n32 >> 16; \
n32++; \
}
/*!
* Round up a 32-bit unsigned integer to the next power of 2.
* Pure typesafe alternative to @ref ROUNDUP_POW2.
*
* param[in] x must be in range [0, 2^31] to avoid overflow.
*/
static NV_FORCEINLINE NvU32
nvNextPow2_U32(const NvU32 x)
{
NvU32 y = x;
y--;
y |= y >> 1;
y |= y >> 2;
y |= y >> 4;
y |= y >> 8;
y |= y >> 16;
y++;
return y;
}
static NV_FORCEINLINE NvU32
nvPrevPow2_U32(const NvU32 x )
{
NvU32 y = x;
y |= (y >> 1);
y |= (y >> 2);
y |= (y >> 4);
y |= (y >> 8);
y |= (y >> 16);
return y - (y >> 1);
}
static NV_FORCEINLINE NvU64
nvPrevPow2_U64(const NvU64 x )
{
NvU64 y = x;
y |= (y >> 1);
y |= (y >> 2);
y |= (y >> 4);
y |= (y >> 8);
y |= (y >> 16);
y |= (y >> 32);
return y - (y >> 1);
}
// Destructive operation on n64
#define ROUNDUP_POW2_U64(n64) \
{ \
n64--; \
n64 |= n64 >> 1; \
n64 |= n64 >> 2; \
n64 |= n64 >> 4; \
n64 |= n64 >> 8; \
n64 |= n64 >> 16; \
n64 |= n64 >> 32; \
n64++; \
}
#define NV_SWAP_U8(a,b) \
{ \
NvU8 temp; \
temp = a; \
a = b; \
b = temp; \
}
#define NV_SWAP_U32(a,b) \
{ \
NvU32 temp; \
temp = a; \
a = b; \
b = temp; \
}
/*!
* @brief Macros allowing simple iteration over bits set in a given mask.
*
* @param[in] maskWidth bit-width of the mask (allowed: 8, 16, 32, 64)
*
* @param[in,out] index lvalue that is used as a bit index in the loop
* (can be declared as any NvU* or NvS* variable)
* @param[in] mask expression, loop will iterate over set bits only
*/
#define FOR_EACH_INDEX_IN_MASK(maskWidth,index,mask) \
{ \
NvU##maskWidth lclMsk = (NvU##maskWidth)(mask); \
for ((index) = 0U; lclMsk != 0U; (index)++, lclMsk >>= 1U)\
{ \
if (((NvU##maskWidth)NVBIT64(0) & lclMsk) == 0U) \
{ \
continue; \
}
#define FOR_EACH_INDEX_IN_MASK_END \
} \
}
//
// Size to use when declaring variable-sized arrays
//
#define NV_ANYSIZE_ARRAY 1
//
// Returns ceil(a/b)
//
#define NV_CEIL(a,b) (((a)+(b)-1)/(b))
// Clearer name for NV_CEIL
#ifndef NV_DIV_AND_CEIL
#define NV_DIV_AND_CEIL(a, b) NV_CEIL(a,b)
#endif
#ifndef NV_MIN
#define NV_MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
#ifndef NV_MAX
#define NV_MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif
//
// Returns absolute value of provided integer expression
//
#define NV_ABS(a) ((a)>=0?(a):(-(a)))
//
// Returns 1 if input number is positive, 0 if 0 and -1 if negative. Avoid
// macro parameter as function call which will have side effects.
//
#define NV_SIGN(s) ((NvS8)(((s) > 0) - ((s) < 0)))
//
// Returns 1 if input number is >= 0 or -1 otherwise. This assumes 0 has a
// positive sign.
//
#define NV_ZERO_SIGN(s) ((NvS8)((((s) >= 0) * 2) - 1))
// Returns the offset (in bytes) of 'member' in struct 'type'.
#ifndef NV_OFFSETOF
#if defined(__GNUC__) && (__GNUC__ > 3)
#define NV_OFFSETOF(type, member) ((NvU32)__builtin_offsetof(type, member))
#else
#define NV_OFFSETOF(type, member) ((NvU32)(NvU64)&(((type *)0)->member)) // shouldn't we use PtrToUlong? But will need to include windows header.
#endif
#endif
//
// Performs a rounded division of b into a (unsigned). For SIGNED version of
// NV_ROUNDED_DIV() macro check the comments in bug 769777.
//
#define NV_UNSIGNED_ROUNDED_DIV(a,b) (((a) + ((b) / 2U)) / (b))
/*!
* Performs a ceiling division of b into a (unsigned). A "ceiling" division is
* a division is one with rounds up result up if a % b != 0.
*
* @param[in] a Numerator
* @param[in] b Denominator
*
* @return a / b + a % b != 0 ? 1 : 0.
*/
#define NV_UNSIGNED_DIV_CEIL(a, b) (((a) + (b - 1)) / (b))
/*!
* Performs subtraction where a negative difference is raised to zero.
* Can be used to avoid underflowing an unsigned subtraction.
*
* @param[in] a Minuend
* @param[in] b Subtrahend
*
* @return a > b ? a - b : 0.
*/
#define NV_SUBTRACT_NO_UNDERFLOW(a, b) ((a)>(b) ? (a)-(b) : 0)
/*!
* Performs a rounded right-shift of 32-bit unsigned value "a" by "shift" bits.
* Will round result away from zero.
*
* @param[in] a 32-bit unsigned value to shift.
* @param[in] shift Number of bits by which to shift.
*
* @return Resulting shifted value rounded away from zero.
*/
#define NV_RIGHT_SHIFT_ROUNDED(a, shift) \
(((a) >> (shift)) + !!((NVBIT((shift) - 1) & (a)) == NVBIT((shift) - 1)))
//
// Power of 2 alignment.
// (Will give unexpected results if 'gran' is not a power of 2.)
//
#ifndef NV_ALIGN_DOWN
//
// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type.
// Otherwise, if aligning a NVU64 with NVU32 granularity, the top 4 bytes get zeroed.
//
#define NV_ALIGN_DOWN(v, gran) ((v) & ~((v) - (v) + (gran) - 1))
#endif
#ifndef NV_ALIGN_UP
//
// Notably using v - v + gran ensures gran gets promoted to the same type as v if gran has a smaller type.
// Otherwise, if aligning a NVU64 with NVU32 granularity, the top 4 bytes get zeroed.
//
#define NV_ALIGN_UP(v, gran) (((v) + ((gran) - 1)) & ~((v) - (v) + (gran) - 1))
#endif
#ifndef NV_ALIGN_DOWN64
#define NV_ALIGN_DOWN64(v, gran) ((v) & ~(((NvU64)gran) - 1))
#endif
#ifndef NV_ALIGN_UP64
#define NV_ALIGN_UP64(v, gran) (((v) + ((gran) - 1)) & ~(((NvU64)gran)-1))
#endif
#ifndef NV_IS_ALIGNED
#define NV_IS_ALIGNED(v, gran) (0U == ((v) & ((gran) - 1U)))
#endif
#ifndef NV_IS_ALIGNED64
#define NV_IS_ALIGNED64(v, gran) (0U == ((v) & (((NvU64)gran) - 1U)))
#endif
#ifndef NVMISC_MEMSET
static NV_FORCEINLINE void *NVMISC_MEMSET(void *s, NvU8 c, NvLength n)
{
NvU8 *b = (NvU8 *) s;
NvLength i;
for (i = 0; i < n; i++)
{
b[i] = c;
}
return s;
}
#endif
#ifndef NVMISC_MEMCPY
static NV_FORCEINLINE void *NVMISC_MEMCPY(void *dest, const void *src, NvLength n)
{
NvU8 *destByte = (NvU8 *) dest;
const NvU8 *srcByte = (const NvU8 *) src;
NvLength i;
for (i = 0; i < n; i++)
{
destByte[i] = srcByte[i];
}
return dest;
}
#endif
static NV_FORCEINLINE char *NVMISC_STRNCPY(char *dest, const char *src, NvLength n)
{
NvLength i;
for (i = 0; i < n; i++)
{
dest[i] = src[i];
if (src[i] == '\0')
{
break;
}
}
for (; i < n; i++)
{
dest[i] = '\0';
}
return dest;
}
/*!
* Convert a void* to an NvUPtr. This is used when MISRA forbids us from doing a direct cast.
*
* @param[in] ptr Pointer to be converted
*
* @return Resulting NvUPtr
*/
static NV_FORCEINLINE NvUPtr NV_PTR_TO_NVUPTR(void *ptr)
{
union
{
NvUPtr v;
void *p;
} uAddr;
uAddr.p = ptr;
return uAddr.v;
}
/*!
* Convert an NvUPtr to a void*. This is used when MISRA forbids us from doing a direct cast.
*
* @param[in] ptr Pointer to be converted
*
* @return Resulting void *
*/
static NV_FORCEINLINE void *NV_NVUPTR_TO_PTR(NvUPtr address)
{
union
{
NvUPtr v;
void *p;
} uAddr;
uAddr.v = address;
return uAddr.p;
}
// Get bit at pos (k) from x
#define NV_BIT_GET(k, x) (((x) >> (k)) & 1)
// Get bit at pos (n) from (hi) if >= 64, otherwise from (lo). This is paired with NV_BIT_SET_128 which sets the bit.
#define NV_BIT_GET_128(n, lo, hi) (((n) < 64) ? NV_BIT_GET((n), (lo)) : NV_BIT_GET((n) - 64, (hi)))
//
// Set the bit at pos (b) for U64 which is < 128. Since the (b) can be >= 64, we need 2 U64 to store this.
// Use (lo) if (b) is less than 64, and (hi) if >= 64.
//
#define NV_BIT_SET_128(b, lo, hi) { nvAssert( (b) < 128 ); if ( (b) < 64 ) (lo) |= NVBIT64(b); else (hi) |= NVBIT64( b & 0x3F ); }
#ifdef __cplusplus
}
#endif //__cplusplus
#endif // __NV_MISC_H

View File

@@ -0,0 +1,123 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef SDK_NVSTATUS_H
#define SDK_NVSTATUS_H
#ifdef __cplusplus
extern "C" {
#endif
#include "nvtypes.h"
typedef NvU32 NV_STATUS;
#define NV_STATUS_CODE( name, code, string ) name = (code),
enum
{
#include "nvstatuscodes.h"
};
#undef NV_STATUS_CODE
/*!
* @def NV_STATUS_LEVEL_OK
* @see NV_STATUS_LEVEL
* @brief Success: No error or special condition
*/
#define NV_STATUS_LEVEL_OK 0
/*!
* @def NV_STATUS_LEVEL_WARN
* @see NV_STATUS_LEVEL
* @brief Success, but there is an special condition
*
* @details In general, NV_STATUS_LEVEL_WARN status codes are handled the
* same as NV_STATUS_LEVEL_OK, but are usefil to indicate that
* there is a condition that may be specially handled.
*
* Therefore, in most cases, client function should test for
* status <= NV_STATUS_LEVEL_WARN or status > NV_STATUS_LEVEL_WARN
* to determine success v. failure of a call.
*/
#define NV_STATUS_LEVEL_WARN 1
/*!
* @def NV_STATUS_LEVEL_ERR
* @see NV_STATUS_LEVEL
* @brief Unrecoverable error condition
*/
#define NV_STATUS_LEVEL_ERR 3
/*!
* @def NV_STATUS_LEVEL
* @see NV_STATUS_LEVEL_OK
* @see NV_STATUS_LEVEL_WARN
* @see NV_STATUS_LEVEL_ERR
* @brief Level of the status code
*
* @warning IMPORTANT: When comparing NV_STATUS_LEVEL(_S) against one of
* these constants, it is important to use '<=' or '>' (rather
* than '<' or '>=').
*
* For example. do:
* if (NV_STATUS_LEVEL(status) <= NV_STATUS_LEVEL_WARN)
* rather than:
* if (NV_STATUS_LEVEL(status) < NV_STATUS_LEVEL_ERR)
*
* By being consistent in this manner, it is easier to systematically
* add additional level constants. New levels are likely to lower
* (rather than raise) the severity of _ERR codes. For example,
* if we were to add NV_STATUS_LEVEL_RETRY to indicate hardware
* failures that may be recoverable (e.g. RM_ERR_TIMEOUT_RETRY
* or RM_ERR_BUSY_RETRY), it would be less severe than
* NV_STATUS_LEVEL_ERR the level to which these status codes now
* belong. Using '<=' and '>' ensures your code is not broken in
* cases like this.
*/
#define NV_STATUS_LEVEL(_S) \
((_S) == NV_OK? NV_STATUS_LEVEL_OK: \
((_S) != NV_ERR_GENERIC && (_S) & 0x00010000? NV_STATUS_LEVEL_WARN: \
NV_STATUS_LEVEL_ERR))
/*!
* @def NV_STATUS_LEVEL
* @see NV_STATUS_LEVEL_OK
* @see NV_STATUS_LEVEL_WARN
* @see NV_STATUS_LEVEL_ERR
* @brief Character representing status code level
*/
#define NV_STATUS_LEVEL_CHAR(_S) \
((_S) == NV_OK? '0': \
((_S) != NV_ERR_GENERIC && (_S) & 0x00010000? 'W': \
'E'))
// Function definitions
const char *nvstatusToString(NV_STATUS nvStatusIn);
#ifdef __cplusplus
}
#endif
#endif /* SDK_NVSTATUS_H */

View File

@@ -0,0 +1,164 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2014-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef SDK_NVSTATUSCODES_H
#define SDK_NVSTATUSCODES_H
NV_STATUS_CODE(NV_OK, 0x00000000, "Success")
NV_STATUS_CODE(NV_ERR_GENERIC, 0x0000FFFF, "Failure: Generic Error")
NV_STATUS_CODE(NV_ERR_BROKEN_FB, 0x00000001, "Frame-Buffer broken")
NV_STATUS_CODE(NV_ERR_BUFFER_TOO_SMALL, 0x00000002, "Buffer passed in is too small")
NV_STATUS_CODE(NV_ERR_BUSY_RETRY, 0x00000003, "System is busy, retry later")
NV_STATUS_CODE(NV_ERR_CALLBACK_NOT_SCHEDULED, 0x00000004, "The requested callback API not scheduled")
NV_STATUS_CODE(NV_ERR_CARD_NOT_PRESENT, 0x00000005, "Card not detected")
NV_STATUS_CODE(NV_ERR_CYCLE_DETECTED, 0x00000006, "Call cycle detected")
NV_STATUS_CODE(NV_ERR_DMA_IN_USE, 0x00000007, "Requested DMA is in use")
NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_LOCKED, 0x00000008, "Requested DMA memory is not locked")
NV_STATUS_CODE(NV_ERR_DMA_MEM_NOT_UNLOCKED, 0x00000009, "Requested DMA memory is not unlocked")
NV_STATUS_CODE(NV_ERR_DUAL_LINK_INUSE, 0x0000000A, "Dual-Link is in use")
NV_STATUS_CODE(NV_ERR_ECC_ERROR, 0x0000000B, "Generic ECC error")
NV_STATUS_CODE(NV_ERR_FIFO_BAD_ACCESS, 0x0000000C, "FIFO: Invalid access")
NV_STATUS_CODE(NV_ERR_FREQ_NOT_SUPPORTED, 0x0000000D, "Requested frequency is not supported")
NV_STATUS_CODE(NV_ERR_GPU_DMA_NOT_INITIALIZED, 0x0000000E, "Requested DMA not initialized")
NV_STATUS_CODE(NV_ERR_GPU_IS_LOST, 0x0000000F, "GPU lost from the bus")
NV_STATUS_CODE(NV_ERR_GPU_IN_FULLCHIP_RESET, 0x00000010, "GPU currently in full-chip reset")
NV_STATUS_CODE(NV_ERR_GPU_NOT_FULL_POWER, 0x00000011, "GPU not in full power")
NV_STATUS_CODE(NV_ERR_GPU_UUID_NOT_FOUND, 0x00000012, "GPU UUID not found")
NV_STATUS_CODE(NV_ERR_HOT_SWITCH, 0x00000013, "System in hot switch")
NV_STATUS_CODE(NV_ERR_I2C_ERROR, 0x00000014, "I2C Error")
NV_STATUS_CODE(NV_ERR_I2C_SPEED_TOO_HIGH, 0x00000015, "I2C Error: Speed too high")
NV_STATUS_CODE(NV_ERR_ILLEGAL_ACTION, 0x00000016, "Current action is not allowed")
NV_STATUS_CODE(NV_ERR_IN_USE, 0x00000017, "Generic busy error")
NV_STATUS_CODE(NV_ERR_INFLATE_COMPRESSED_DATA_FAILED, 0x00000018, "Failed to inflate compressed data")
NV_STATUS_CODE(NV_ERR_INSERT_DUPLICATE_NAME, 0x00000019, "Found a duplicate entry in the requested btree")
NV_STATUS_CODE(NV_ERR_INSUFFICIENT_RESOURCES, 0x0000001A, "Ran out of a critical resource, other than memory")
NV_STATUS_CODE(NV_ERR_INSUFFICIENT_PERMISSIONS, 0x0000001B, "The requester does not have sufficient permissions")
NV_STATUS_CODE(NV_ERR_INSUFFICIENT_POWER, 0x0000001C, "Generic Error: Low power")
NV_STATUS_CODE(NV_ERR_INVALID_ACCESS_TYPE, 0x0000001D, "This type of access is not allowed")
NV_STATUS_CODE(NV_ERR_INVALID_ADDRESS, 0x0000001E, "Address not valid")
NV_STATUS_CODE(NV_ERR_INVALID_ARGUMENT, 0x0000001F, "Invalid argument to call")
NV_STATUS_CODE(NV_ERR_INVALID_BASE, 0x00000020, "Invalid base")
NV_STATUS_CODE(NV_ERR_INVALID_CHANNEL, 0x00000021, "Given channel-id not valid")
NV_STATUS_CODE(NV_ERR_INVALID_CLASS, 0x00000022, "Given class-id not valid")
NV_STATUS_CODE(NV_ERR_INVALID_CLIENT, 0x00000023, "Given client not valid")
NV_STATUS_CODE(NV_ERR_INVALID_COMMAND, 0x00000024, "Command passed is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_DATA, 0x00000025, "Invalid data passed")
NV_STATUS_CODE(NV_ERR_INVALID_DEVICE, 0x00000026, "Current device is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_DMA_SPECIFIER, 0x00000027, "The requested DMA specifier is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_EVENT, 0x00000028, "Invalid event occurred")
NV_STATUS_CODE(NV_ERR_INVALID_FLAGS, 0x00000029, "Invalid flags passed")
NV_STATUS_CODE(NV_ERR_INVALID_FUNCTION, 0x0000002A, "Called function is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_HEAP, 0x0000002B, "Heap corrupted")
NV_STATUS_CODE(NV_ERR_INVALID_INDEX, 0x0000002C, "Index invalid")
NV_STATUS_CODE(NV_ERR_INVALID_IRQ_LEVEL, 0x0000002D, "Requested IRQ level is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_LIMIT, 0x0000002E, "Generic Error: Invalid limit")
NV_STATUS_CODE(NV_ERR_INVALID_LOCK_STATE, 0x0000002F, "Requested lock state not valid")
NV_STATUS_CODE(NV_ERR_INVALID_METHOD, 0x00000030, "Requested method not valid")
NV_STATUS_CODE(NV_ERR_INVALID_OBJECT, 0x00000031, "Object not valid")
NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_BUFFER, 0x00000032, "Object buffer passed is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_HANDLE, 0x00000033, "Object handle is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_NEW, 0x00000034, "New object is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_OLD, 0x00000035, "Old object is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_OBJECT_PARENT, 0x00000036, "Object parent is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_OFFSET, 0x00000037, "The offset passed is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_OPERATION, 0x00000038, "Requested operation is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_OWNER, 0x00000039, "Owner not valid")
NV_STATUS_CODE(NV_ERR_INVALID_PARAM_STRUCT, 0x0000003A, "Invalid structure parameter")
NV_STATUS_CODE(NV_ERR_INVALID_PARAMETER, 0x0000003B, "At least one of the parameters passed is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_PATH, 0x0000003C, "The requested path is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_POINTER, 0x0000003D, "Pointer not valid")
NV_STATUS_CODE(NV_ERR_INVALID_REGISTRY_KEY, 0x0000003E, "Found an invalid registry key")
NV_STATUS_CODE(NV_ERR_INVALID_REQUEST, 0x0000003F, "Generic Error: Invalid request")
NV_STATUS_CODE(NV_ERR_INVALID_STATE, 0x00000040, "Generic Error: Invalid state")
NV_STATUS_CODE(NV_ERR_INVALID_STRING_LENGTH, 0x00000041, "The string length is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_READ, 0x00000042, "The requested read operation is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_WRITE, 0x00000043, "The requested write operation is not valid")
NV_STATUS_CODE(NV_ERR_INVALID_XLATE, 0x00000044, "The requested translate operation is not valid")
NV_STATUS_CODE(NV_ERR_IRQ_NOT_FIRING, 0x00000045, "Requested IRQ is not firing")
NV_STATUS_CODE(NV_ERR_IRQ_EDGE_TRIGGERED, 0x00000046, "IRQ is edge triggered")
NV_STATUS_CODE(NV_ERR_MEMORY_TRAINING_FAILED, 0x00000047, "Failed memory training sequence")
NV_STATUS_CODE(NV_ERR_MISMATCHED_SLAVE, 0x00000048, "Slave mismatch")
NV_STATUS_CODE(NV_ERR_MISMATCHED_TARGET, 0x00000049, "Target mismatch")
NV_STATUS_CODE(NV_ERR_MISSING_TABLE_ENTRY, 0x0000004A, "Requested entry missing not found in the table")
NV_STATUS_CODE(NV_ERR_MODULE_LOAD_FAILED, 0x0000004B, "Failed to load the requested module")
NV_STATUS_CODE(NV_ERR_MORE_DATA_AVAILABLE, 0x0000004C, "There is more data available")
NV_STATUS_CODE(NV_ERR_MORE_PROCESSING_REQUIRED, 0x0000004D, "More processing required for the given call")
NV_STATUS_CODE(NV_ERR_MULTIPLE_MEMORY_TYPES, 0x0000004E, "Multiple memory types found")
NV_STATUS_CODE(NV_ERR_NO_FREE_FIFOS, 0x0000004F, "No more free FIFOs found")
NV_STATUS_CODE(NV_ERR_NO_INTR_PENDING, 0x00000050, "No interrupt pending")
NV_STATUS_CODE(NV_ERR_NO_MEMORY, 0x00000051, "Out of memory")
NV_STATUS_CODE(NV_ERR_NO_SUCH_DOMAIN, 0x00000052, "Requested domain does not exist")
NV_STATUS_CODE(NV_ERR_NO_VALID_PATH, 0x00000053, "Caller did not specify a valid path")
NV_STATUS_CODE(NV_ERR_NOT_COMPATIBLE, 0x00000054, "Generic Error: Incompatible types")
NV_STATUS_CODE(NV_ERR_NOT_READY, 0x00000055, "Generic Error: Not ready")
NV_STATUS_CODE(NV_ERR_NOT_SUPPORTED, 0x00000056, "Call not supported")
NV_STATUS_CODE(NV_ERR_OBJECT_NOT_FOUND, 0x00000057, "Requested object not found")
NV_STATUS_CODE(NV_ERR_OBJECT_TYPE_MISMATCH, 0x00000058, "Specified objects do not match")
NV_STATUS_CODE(NV_ERR_OPERATING_SYSTEM, 0x00000059, "Generic operating system error")
NV_STATUS_CODE(NV_ERR_OTHER_DEVICE_FOUND, 0x0000005A, "Found other device instead of the requested one")
NV_STATUS_CODE(NV_ERR_OUT_OF_RANGE, 0x0000005B, "The specified value is out of bounds")
NV_STATUS_CODE(NV_ERR_OVERLAPPING_UVM_COMMIT, 0x0000005C, "Overlapping unified virtual memory commit")
NV_STATUS_CODE(NV_ERR_PAGE_TABLE_NOT_AVAIL, 0x0000005D, "Requested page table not available")
NV_STATUS_CODE(NV_ERR_PID_NOT_FOUND, 0x0000005E, "Process-Id not found")
NV_STATUS_CODE(NV_ERR_PROTECTION_FAULT, 0x0000005F, "Protection fault")
NV_STATUS_CODE(NV_ERR_RC_ERROR, 0x00000060, "Generic RC error")
NV_STATUS_CODE(NV_ERR_REJECTED_VBIOS, 0x00000061, "Given Video BIOS rejected/invalid")
NV_STATUS_CODE(NV_ERR_RESET_REQUIRED, 0x00000062, "Reset required")
NV_STATUS_CODE(NV_ERR_STATE_IN_USE, 0x00000063, "State in use")
NV_STATUS_CODE(NV_ERR_SIGNAL_PENDING, 0x00000064, "Signal pending")
NV_STATUS_CODE(NV_ERR_TIMEOUT, 0x00000065, "Call timed out")
NV_STATUS_CODE(NV_ERR_TIMEOUT_RETRY, 0x00000066, "Call timed out, please retry later")
NV_STATUS_CODE(NV_ERR_TOO_MANY_PRIMARIES, 0x00000067, "Too many primaries")
NV_STATUS_CODE(NV_ERR_UVM_ADDRESS_IN_USE, 0x00000068, "Unified virtual memory requested address already in use")
NV_STATUS_CODE(NV_ERR_MAX_SESSION_LIMIT_REACHED, 0x00000069, "Maximum number of sessions reached")
NV_STATUS_CODE(NV_ERR_LIB_RM_VERSION_MISMATCH, 0x0000006A, "Library version doesn't match driver version") //Contained within the RMAPI library
NV_STATUS_CODE(NV_ERR_PRIV_SEC_VIOLATION, 0x0000006B, "Priv security violation")
NV_STATUS_CODE(NV_ERR_GPU_IN_DEBUG_MODE, 0x0000006C, "GPU currently in debug mode")
NV_STATUS_CODE(NV_ERR_FEATURE_NOT_ENABLED, 0x0000006D, "Requested Feature functionality is not enabled")
NV_STATUS_CODE(NV_ERR_RESOURCE_LOST, 0x0000006E, "Requested resource has been destroyed")
NV_STATUS_CODE(NV_ERR_PMU_NOT_READY, 0x0000006F, "PMU is not ready or has not yet been initialized")
NV_STATUS_CODE(NV_ERR_FLCN_ERROR, 0x00000070, "Generic falcon assert or halt")
NV_STATUS_CODE(NV_ERR_FATAL_ERROR, 0x00000071, "Fatal/unrecoverable error")
NV_STATUS_CODE(NV_ERR_MEMORY_ERROR, 0x00000072, "Generic memory error")
NV_STATUS_CODE(NV_ERR_INVALID_LICENSE, 0x00000073, "License provided is rejected or invalid")
NV_STATUS_CODE(NV_ERR_NVLINK_INIT_ERROR, 0x00000074, "Nvlink Init Error")
NV_STATUS_CODE(NV_ERR_NVLINK_MINION_ERROR, 0x00000075, "Nvlink Minion Error")
NV_STATUS_CODE(NV_ERR_NVLINK_CLOCK_ERROR, 0x00000076, "Nvlink Clock Error")
NV_STATUS_CODE(NV_ERR_NVLINK_TRAINING_ERROR, 0x00000077, "Nvlink Training Error")
NV_STATUS_CODE(NV_ERR_NVLINK_CONFIGURATION_ERROR, 0x00000078, "Nvlink Configuration Error")
NV_STATUS_CODE(NV_ERR_RISCV_ERROR, 0x00000079, "Generic RISC-V assert or halt")
NV_STATUS_CODE(NV_ERR_FABRIC_MANAGER_NOT_PRESENT, 0x0000007A, "Fabric Manager is not loaded")
NV_STATUS_CODE(NV_ERR_ALREADY_SIGNALLED, 0x0000007B, "Semaphore Surface value already >= requested wait value")
// Warnings:
NV_STATUS_CODE(NV_WARN_HOT_SWITCH, 0x00010001, "WARNING Hot switch")
NV_STATUS_CODE(NV_WARN_INCORRECT_PERFMON_DATA, 0x00010002, "WARNING Incorrect performance monitor data")
NV_STATUS_CODE(NV_WARN_MISMATCHED_SLAVE, 0x00010003, "WARNING Slave mismatch")
NV_STATUS_CODE(NV_WARN_MISMATCHED_TARGET, 0x00010004, "WARNING Target mismatch")
NV_STATUS_CODE(NV_WARN_MORE_PROCESSING_REQUIRED, 0x00010005, "WARNING More processing required for the call")
NV_STATUS_CODE(NV_WARN_NOTHING_TO_DO, 0x00010006, "WARNING Nothing to do")
NV_STATUS_CODE(NV_WARN_NULL_OBJECT, 0x00010007, "WARNING NULL object found")
NV_STATUS_CODE(NV_WARN_OUT_OF_RANGE, 0x00010008, "WARNING value out of range")
#endif /* SDK_NVSTATUSCODES_H */

View File

@@ -0,0 +1,660 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVTYPES_INCLUDED
#define NVTYPES_INCLUDED
#ifdef __cplusplus
extern "C" {
#endif
#include "cpuopsys.h"
#ifndef NVTYPES_USE_STDINT
#define NVTYPES_USE_STDINT 0
#endif
#if NVTYPES_USE_STDINT
#ifdef __cplusplus
#include <cstdint>
#include <cinttypes>
#else
#include <stdint.h>
#include <inttypes.h>
#endif // __cplusplus
#endif // NVTYPES_USE_STDINT
#ifndef __cplusplus
// Header includes to make sure wchar_t is defined for C-file compilation
// (C++ is not affected as it is a fundamental type there)
// _MSC_VER is a hack to avoid failures for old setup of UEFI builds which are
// currently set to msvc100 but do not properly set the include paths
#if defined(NV_WINDOWS) && (!defined(_MSC_VER) || (_MSC_VER > 1600))
#include <stddef.h>
#define NV_HAS_WCHAR_T_TYPEDEF 1
#endif
#endif // __cplusplus
#if defined(MAKE_NV64TYPES_8BYTES_ALIGNED) && defined(__i386__)
// ensure or force 8-bytes alignment of NV 64-bit types
#define OPTIONAL_ALIGN8_ATTR __attribute__((aligned(8)))
#else
// nothing needed
#define OPTIONAL_ALIGN8_ATTR
#endif // MAKE_NV64TYPES_8BYTES_ALIGNED && i386
/***************************************************************************\
|* Typedefs *|
\***************************************************************************/
#ifdef NV_MISRA_COMPLIANCE_REQUIRED
//Typedefs for MISRA COMPLIANCE
typedef unsigned long long UInt64;
typedef signed long long Int64;
typedef unsigned int UInt32;
typedef signed int Int32;
typedef unsigned short UInt16;
typedef signed short Int16;
typedef unsigned char UInt8 ;
typedef signed char Int8 ;
typedef void Void;
typedef float float32_t;
typedef double float64_t;
#endif
// Floating point types
#ifdef NV_MISRA_COMPLIANCE_REQUIRED
typedef float32_t NvF32; /* IEEE Single Precision (S1E8M23) */
typedef float64_t NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */
#else
typedef float NvF32; /* IEEE Single Precision (S1E8M23) */
typedef double NvF64 OPTIONAL_ALIGN8_ATTR; /* IEEE Double Precision (S1E11M52) */
#endif
// 8-bit: 'char' is the only 8-bit in the C89 standard and after.
#if NVTYPES_USE_STDINT
typedef uint8_t NvV8; /* "void": enumerated or multiple fields */
typedef uint8_t NvU8; /* 0 to 255 */
typedef int8_t NvS8; /* -128 to 127 */
#else
#ifdef NV_MISRA_COMPLIANCE_REQUIRED
typedef UInt8 NvV8; /* "void": enumerated or multiple fields */
typedef UInt8 NvU8; /* 0 to 255 */
typedef Int8 NvS8; /* -128 to 127 */
#else
typedef unsigned char NvV8; /* "void": enumerated or multiple fields */
typedef unsigned char NvU8; /* 0 to 255 */
typedef signed char NvS8; /* -128 to 127 */
#endif
#endif // NVTYPES_USE_STDINT
#if NVTYPES_USE_STDINT
typedef uint16_t NvV16; /* "void": enumerated or multiple fields */
typedef uint16_t NvU16; /* 0 to 65535 */
typedef int16_t NvS16; /* -32768 to 32767 */
#else
// 16-bit: If the compiler tells us what we can use, then use it.
#ifdef __INT16_TYPE__
typedef unsigned __INT16_TYPE__ NvV16; /* "void": enumerated or multiple fields */
typedef unsigned __INT16_TYPE__ NvU16; /* 0 to 65535 */
typedef signed __INT16_TYPE__ NvS16; /* -32768 to 32767 */
// The minimal standard for C89 and after
#else // __INT16_TYPE__
#ifdef NV_MISRA_COMPLIANCE_REQUIRED
typedef UInt16 NvV16; /* "void": enumerated or multiple fields */
typedef UInt16 NvU16; /* 0 to 65535 */
typedef Int16 NvS16; /* -32768 to 32767 */
#else
typedef unsigned short NvV16; /* "void": enumerated or multiple fields */
typedef unsigned short NvU16; /* 0 to 65535 */
typedef signed short NvS16; /* -32768 to 32767 */
#endif
#endif // __INT16_TYPE__
#endif // NVTYPES_USE_STDINT
// wchar type (fixed size types consistent across Linux/Windows boundaries)
#if defined(NV_HAS_WCHAR_T_TYPEDEF)
typedef wchar_t NvWchar;
#else
typedef NvV16 NvWchar;
#endif
// Macro to build an NvU32 from four bytes, listed from msb to lsb
#define NvU32_BUILD(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
#if NVTYPES_USE_STDINT
typedef uint32_t NvV32; /* "void": enumerated or multiple fields */
typedef uint32_t NvU32; /* 0 to 4294967295 */
typedef int32_t NvS32; /* -2147483648 to 2147483647 */
#else
// 32-bit: If the compiler tells us what we can use, then use it.
#ifdef __INT32_TYPE__
typedef unsigned __INT32_TYPE__ NvV32; /* "void": enumerated or multiple fields */
typedef unsigned __INT32_TYPE__ NvU32; /* 0 to 4294967295 */
typedef signed __INT32_TYPE__ NvS32; /* -2147483648 to 2147483647 */
// Older compilers
#else // __INT32_TYPE__
// For historical reasons, NvU32/NvV32 are defined to different base intrinsic
// types than NvS32 on some platforms.
// Mainly for 64-bit linux, where long is 64 bits and win9x, where int is 16 bit.
#if (defined(NV_UNIX) || defined(vxworks) || defined(NV_WINDOWS_CE) || \
defined(__arm) || defined(__IAR_SYSTEMS_ICC__) || defined(NV_QNX) || \
defined(NV_INTEGRITY) || defined(NV_MODS) || \
defined(__GNUC__) || defined(__clang__) || defined(NV_MACINTOSH_64)) && \
(!defined(NV_MACINTOSH) || defined(NV_MACINTOSH_64))
#ifdef NV_MISRA_COMPLIANCE_REQUIRED
typedef UInt32 NvV32; /* "void": enumerated or multiple fields */
typedef UInt32 NvU32; /* 0 to 4294967295 */
#else
typedef unsigned int NvV32; /* "void": enumerated or multiple fields */
typedef unsigned int NvU32; /* 0 to 4294967295 */
#endif
// The minimal standard for C89 and after
#else // (defined(NV_UNIX) || defined(vxworks) || ...
typedef unsigned long NvV32; /* "void": enumerated or multiple fields */
typedef unsigned long NvU32; /* 0 to 4294967295 */
#endif // (defined(NV_UNIX) || defined(vxworks) || ...
// Mac OS 32-bit still needs this
#if defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64)
typedef signed long NvS32; /* -2147483648 to 2147483647 */
#else
#ifdef NV_MISRA_COMPLIANCE_REQUIRED
typedef Int32 NvS32; /* -2147483648 to 2147483647 */
#else
typedef signed int NvS32; /* -2147483648 to 2147483647 */
#endif
#endif // defined(NV_MACINTOSH) && !defined(NV_MACINTOSH_64)
#endif // __INT32_TYPE__
#endif // NVTYPES_USE_STDINT
#if NVTYPES_USE_STDINT
typedef uint64_t NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */
typedef int64_t NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
#define NvU64_fmtX PRIX64
#define NvU64_fmtx PRIx64
#define NvU64_fmtu PRIu64
#define NvU64_fmto PRIo64
#define NvS64_fmtd PRId64
#define NvS64_fmti PRIi64
#else
// 64-bit types for compilers that support them, plus some obsolete variants
#if defined(__GNUC__) || defined(__clang__) || defined(__arm) || \
defined(__IAR_SYSTEMS_ICC__) || defined(__ghs__) || defined(_WIN64) || \
defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined (__xlC__)
#ifdef NV_MISRA_COMPLIANCE_REQUIRED
typedef UInt64 NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */
typedef Int64 NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
#else
typedef unsigned long long NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */
typedef long long NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
#endif
#define NvU64_fmtX "llX"
#define NvU64_fmtx "llx"
#define NvU64_fmtu "llu"
#define NvU64_fmto "llo"
#define NvS64_fmtd "lld"
#define NvS64_fmti "lli"
// Microsoft since 2003 -- https://msdn.microsoft.com/en-us/library/29dh1w7z.aspx
#else
typedef unsigned __int64 NvU64 OPTIONAL_ALIGN8_ATTR; /* 0 to 18446744073709551615 */
typedef __int64 NvS64 OPTIONAL_ALIGN8_ATTR; /* -9223372036854775808 to 9223372036854775807 */
#define NvU64_fmtX "I64X"
#define NvU64_fmtx "I64x"
#define NvU64_fmtu "I64u"
#define NvU64_fmto "I64o"
#define NvS64_fmtd "I64d"
#define NvS64_fmti "I64i"
#endif
#endif // NVTYPES_USE_STDINT
#ifdef NV_TYPESAFE_HANDLES
/*
* Can't use opaque pointer as clients might be compiled with mismatched
* pointer sizes. TYPESAFE check will eventually be removed once all clients
* have transistioned safely to NvHandle.
* The plan is to then eventually scale up the handle to be 64-bits.
*/
typedef struct
{
NvU32 val;
} NvHandle;
#else
/*
* For compatibility with modules that haven't moved typesafe handles.
*/
typedef NvU32 NvHandle;
#endif // NV_TYPESAFE_HANDLES
/* Boolean type */
typedef NvU8 NvBool;
#define NV_TRUE ((NvBool)(0 == 0))
#define NV_FALSE ((NvBool)(0 != 0))
/* Tristate type: NV_TRISTATE_FALSE, NV_TRISTATE_TRUE, NV_TRISTATE_INDETERMINATE */
typedef NvU8 NvTristate;
#define NV_TRISTATE_FALSE ((NvTristate) 0)
#define NV_TRISTATE_TRUE ((NvTristate) 1)
#define NV_TRISTATE_INDETERMINATE ((NvTristate) 2)
/* Macros to extract the low and high parts of a 64-bit unsigned integer */
/* Also designed to work if someone happens to pass in a 32-bit integer */
#ifdef NV_MISRA_COMPLIANCE_REQUIRED
#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffffU))
#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffffU))
#else
#define NvU64_HI32(n) ((NvU32)((((NvU64)(n)) >> 32) & 0xffffffff))
#define NvU64_LO32(n) ((NvU32)(( (NvU64)(n)) & 0xffffffff))
#endif
#define NvU40_HI32(n) ((NvU32)((((NvU64)(n)) >> 8) & 0xffffffffU))
#define NvU40_HI24of32(n) ((NvU32)( (NvU64)(n) & 0xffffff00U))
/* Macros to get the MSB and LSB of a 32 bit unsigned number */
#define NvU32_HI16(n) ((NvU16)((((NvU32)(n)) >> 16) & 0xffffU))
#define NvU32_LO16(n) ((NvU16)(( (NvU32)(n)) & 0xffffU))
/***************************************************************************\
|* *|
|* 64 bit type definitions for use in interface structures. *|
|* *|
\***************************************************************************/
#if defined(NV_64_BITS)
typedef void* NvP64; /* 64 bit void pointer */
typedef NvU64 NvUPtr; /* pointer sized unsigned int */
typedef NvS64 NvSPtr; /* pointer sized signed int */
typedef NvU64 NvLength; /* length to agree with sizeof */
#define NvP64_VALUE(n) (n)
#define NvP64_fmt "%p"
#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(v))
#define NvP64_PLUS_OFFSET(p,o) (NvP64)((NvU64)(p) + (NvU64)(o))
#define NvUPtr_fmtX NvU64_fmtX
#define NvUPtr_fmtx NvU64_fmtx
#define NvUPtr_fmtu NvU64_fmtu
#define NvUPtr_fmto NvU64_fmto
#define NvSPtr_fmtd NvS64_fmtd
#define NvSPtr_fmti NvS64_fmti
#else
typedef NvU64 NvP64; /* 64 bit void pointer */
typedef NvU32 NvUPtr; /* pointer sized unsigned int */
typedef NvS32 NvSPtr; /* pointer sized signed int */
typedef NvU32 NvLength; /* length to agree with sizeof */
#define NvP64_VALUE(n) ((void *)(NvUPtr)(n))
#define NvP64_fmt "0x%llx"
#define KERNEL_POINTER_FROM_NvP64(p,v) ((p)(NvUPtr)(v))
#define NvP64_PLUS_OFFSET(p,o) ((p) + (NvU64)(o))
#define NvUPtr_fmtX "X"
#define NvUPtr_fmtx "x"
#define NvUPtr_fmtu "u"
#define NvUPtr_fmto "o"
#define NvSPtr_fmtd "d"
#define NvSPtr_fmti "i"
#endif
#define NvP64_NULL (NvP64)0
/*!
* Helper macro to pack an @ref NvU64_ALIGN32 structure from a @ref NvU64.
*
* @param[out] pDst Pointer to NvU64_ALIGN32 structure to pack
* @param[in] pSrc Pointer to NvU64 with which to pack
*/
#define NvU64_ALIGN32_PACK(pDst, pSrc) \
do { \
(pDst)->lo = NvU64_LO32(*(pSrc)); \
(pDst)->hi = NvU64_HI32(*(pSrc)); \
} while (NV_FALSE)
/*!
* Helper macro to unpack a @ref NvU64_ALIGN32 structure into a @ref NvU64.
*
* @param[out] pDst Pointer to NvU64 in which to unpack
* @param[in] pSrc Pointer to NvU64_ALIGN32 structure from which to unpack
*/
#define NvU64_ALIGN32_UNPACK(pDst, pSrc) \
do { \
(*(pDst)) = NvU64_ALIGN32_VAL(pSrc); \
} while (NV_FALSE)
/*!
* Helper macro to unpack a @ref NvU64_ALIGN32 structure as a @ref NvU64.
*
* @param[in] pSrc Pointer to NvU64_ALIGN32 structure to unpack
*/
#define NvU64_ALIGN32_VAL(pSrc) \
((NvU64) ((NvU64)((pSrc)->lo) | (((NvU64)(pSrc)->hi) << 32U)))
/*!
* Helper macro to check whether the 32 bit aligned 64 bit number is zero.
*
* @param[in] _pU64 Pointer to NvU64_ALIGN32 structure.
*
* @return
* NV_TRUE _pU64 is zero.
* NV_FALSE otherwise.
*/
#define NvU64_ALIGN32_IS_ZERO(_pU64) \
(((_pU64)->lo == 0U) && ((_pU64)->hi == 0U))
/*!
* Helper macro to sub two 32 aligned 64 bit numbers on 64 bit processor.
*
* @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure.
* @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure.
* @param[in/out] pDst Pointer to NvU64_ALIGN32 dest. structure.
*/
#define NvU64_ALIGN32_ADD(pDst, pSrc1, pSrc2) \
do { \
NvU64 __dst, __src1, __scr2; \
\
NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \
NvU64_ALIGN32_UNPACK(&__scr2, (pSrc2)); \
__dst = __src1 + __scr2; \
NvU64_ALIGN32_PACK((pDst), &__dst); \
} while (NV_FALSE)
/*!
* Helper macro to sub two 32 aligned 64 bit numbers on 64 bit processor.
*
* @param[in] pSrc1 Pointer to NvU64_ALIGN32 source 1 structure.
* @param[in] pSrc2 Pointer to NvU64_ALIGN32 source 2 structure.
* @param[in/out] pDst Pointer to NvU64_ALIGN32 dest. structure.
*/
#define NvU64_ALIGN32_SUB(pDst, pSrc1, pSrc2) \
do { \
NvU64 __dst, __src1, __scr2; \
\
NvU64_ALIGN32_UNPACK(&__src1, (pSrc1)); \
NvU64_ALIGN32_UNPACK(&__scr2, (pSrc2)); \
__dst = __src1 - __scr2; \
NvU64_ALIGN32_PACK((pDst), &__dst); \
} while (NV_FALSE)
/*!
* Structure for representing 32 bit aligned NvU64 (64-bit unsigned integer)
* structures. This structure must be used because the 32 bit processor and
* 64 bit processor compilers will pack/align NvU64 differently.
*
* One use case is RM being 64 bit proc whereas PMU being 32 bit proc, this
* alignment difference will result in corrupted transactions between the RM
* and PMU.
*
* See the @ref NvU64_ALIGN32_PACK and @ref NvU64_ALIGN32_UNPACK macros for
* packing and unpacking these structures.
*
* @note The intention of this structure is to provide a datatype which will
* packed/aligned consistently and efficiently across all platforms.
* We don't want to use "NV_DECLARE_ALIGNED(NvU64, 8)" because that
* leads to memory waste on our 32-bit uprocessors (e.g. FALCONs) where
* DMEM efficiency is vital.
*/
typedef struct
{
/*!
* Low 32 bits.
*/
NvU32 lo;
/*!
* High 32 bits.
*/
NvU32 hi;
} NvU64_ALIGN32;
/* Useful macro to hide required double cast */
#define NV_PTR_TO_NvP64(n) (NvP64)(NvUPtr)(n)
#define NV_SIGN_EXT_PTR_TO_NvP64(p) ((NvP64)(NvS64)(NvSPtr)(p))
#define KERNEL_POINTER_TO_NvP64(p) ((NvP64)(uintptr_t)(p))
/***************************************************************************\
|* *|
|* Limits for common types. *|
|* *|
\***************************************************************************/
/* Explanation of the current form of these limits:
*
* - Decimal is used, as hex values are by default positive.
* - Casts are not used, as usage in the preprocessor itself (#if) ends poorly.
* - The subtraction of 1 for some MIN values is used to get around the fact
* that the C syntax actually treats -x as NEGATE(x) instead of a distinct
* number. Since 214748648 isn't a valid positive 32-bit signed value, we
* take the largest valid positive signed number, negate it, and subtract 1.
*/
#define NV_S8_MIN (-128)
#define NV_S8_MAX (+127)
#define NV_U8_MIN (0U)
#define NV_U8_MAX (+255U)
#define NV_S16_MIN (-32768)
#define NV_S16_MAX (+32767)
#define NV_U16_MIN (0U)
#define NV_U16_MAX (+65535U)
#define NV_S32_MIN (-2147483647 - 1)
#define NV_S32_MAX (+2147483647)
#define NV_U32_MIN (0U)
#define NV_U32_MAX (+4294967295U)
#define NV_S64_MIN (-9223372036854775807LL - 1LL)
#define NV_S64_MAX (+9223372036854775807LL)
#define NV_U64_MIN (0ULL)
#define NV_U64_MAX (+18446744073709551615ULL)
/* Aligns fields in structs so they match up between 32 and 64 bit builds */
#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX)
#define NV_ALIGN_BYTES(size) __attribute__ ((aligned (size)))
#elif defined(__arm)
#define NV_ALIGN_BYTES(size) __align(ALIGN)
#else
// XXX This is dangerously nonportable! We really shouldn't provide a default
// version of this that doesn't do anything.
#define NV_ALIGN_BYTES(size)
#endif
// NV_DECLARE_ALIGNED() can be used on all platforms.
// This macro form accounts for the fact that __declspec on Windows is required
// before the variable type,
// and NV_ALIGN_BYTES is required after the variable name.
#if defined(__GNUC__) || defined(__clang__) || defined(NV_QNX)
#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) TYPE_VAR __attribute__ ((aligned (ALIGN)))
#elif defined(_MSC_VER)
#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __declspec(align(ALIGN)) TYPE_VAR
#elif defined(__arm)
#define NV_DECLARE_ALIGNED(TYPE_VAR, ALIGN) __align(ALIGN) TYPE_VAR
#endif
/***************************************************************************\
|* Function Declaration Types *|
\***************************************************************************/
// stretching the meaning of "nvtypes", but this seems to least offensive
// place to re-locate these from nvos.h which cannot be included by a number
// of builds that need them
#if defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)
#define NV_ATTRIBUTE_UNUSED __attribute__((__unused__))
#else
#define NV_ATTRIBUTE_UNUSED
#endif
#if defined(_MSC_VER)
#if _MSC_VER >= 1310
#define NV_NOINLINE __declspec(noinline)
#else
#define NV_NOINLINE
#endif
#define NV_INLINE __inline
#if _MSC_VER >= 1200
#define NV_FORCEINLINE __forceinline
#else
#define NV_FORCEINLINE __inline
#endif
#define NV_APIENTRY __stdcall
#define NV_FASTCALL __fastcall
#define NV_CDECLCALL __cdecl
#define NV_STDCALL __stdcall
#define NV_FORCERESULTCHECK
#define NV_FORMAT_PRINTF(_f, _a)
#else // ! defined(_MSC_VER)
#if defined(__GNUC__)
#if (__GNUC__ > 3) || \
((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) && (__GNUC_PATCHLEVEL__ >= 1))
#define NV_NOINLINE __attribute__((__noinline__))
#endif
#elif defined(__clang__)
#if __has_attribute(noinline)
#define NV_NOINLINE __attribute__((__noinline__))
#endif
#elif defined(__arm) && (__ARMCC_VERSION >= 300000)
#define NV_NOINLINE __attribute__((__noinline__))
#elif (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)) ||\
(defined(__SUNPRO_CC) && (__SUNPRO_CC >= 0x590))
#define NV_NOINLINE __attribute__((__noinline__))
#elif defined (__INTEL_COMPILER)
#define NV_NOINLINE __attribute__((__noinline__))
#endif
#if !defined(NV_NOINLINE)
#define NV_NOINLINE
#endif
/* GreenHills compiler defines __GNUC__, but doesn't support
* __inline__ keyword. */
#if defined(__ghs__)
#define NV_INLINE inline
#elif defined(__GNUC__) || defined(__clang__) || defined(__INTEL_COMPILER)
#define NV_INLINE __inline__
#elif defined (macintosh) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#define NV_INLINE inline
#elif defined(__arm)
#define NV_INLINE __inline
#else
#define NV_INLINE
#endif
/* Don't force inline on DEBUG builds -- it's annoying for debuggers. */
#if !defined(DEBUG)
/* GreenHills compiler defines __GNUC__, but doesn't support
* __attribute__ or __inline__ keyword. */
#if defined(__ghs__)
#define NV_FORCEINLINE inline
#elif defined(__GNUC__)
// GCC 3.1 and beyond support the always_inline function attribute.
#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))
#define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__
#else
#define NV_FORCEINLINE __inline__
#endif
#elif defined(__clang__)
#if __has_attribute(always_inline)
#define NV_FORCEINLINE __attribute__((__always_inline__)) __inline__
#else
#define NV_FORCEINLINE __inline__
#endif
#elif defined(__arm) && (__ARMCC_VERSION >= 220000)
// RVDS 2.2 also supports forceinline, but ADS 1.2 does not
#define NV_FORCEINLINE __forceinline
#else /* defined(__GNUC__) */
#define NV_FORCEINLINE NV_INLINE
#endif
#else
#define NV_FORCEINLINE NV_INLINE
#endif
#define NV_APIENTRY
#define NV_FASTCALL
#define NV_CDECLCALL
#define NV_STDCALL
/*
* The 'warn_unused_result' function attribute prompts GCC to issue a
* warning if the result of a function tagged with this attribute
* is ignored by a caller. In combination with '-Werror', it can be
* used to enforce result checking in RM code; at this point, this
* is only done on UNIX.
*/
#if defined(__GNUC__) && defined(NV_UNIX)
#if (__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))
#define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__))
#else
#define NV_FORCERESULTCHECK
#endif
#elif defined(__clang__)
#if __has_attribute(warn_unused_result)
#define NV_FORCERESULTCHECK __attribute__((__warn_unused_result__))
#else
#define NV_FORCERESULTCHECK
#endif
#else /* defined(__GNUC__) */
#define NV_FORCERESULTCHECK
#endif
/*
* Functions decorated with NV_FORMAT_PRINTF(f, a) have a format string at
* parameter number 'f' and variadic arguments start at parameter number 'a'.
* (Note that for C++ methods, there is an implicit 'this' parameter so
* explicit parameters are numbered from 2.)
*/
#if defined(__GNUC__)
#define NV_FORMAT_PRINTF(_f, _a) __attribute__((format(printf, _f, _a)))
#else
#define NV_FORMAT_PRINTF(_f, _a)
#endif
#endif // defined(_MSC_VER)
#ifdef __cplusplus
}
#endif
#endif /* NVTYPES_INCLUDED */

View File

@@ -0,0 +1,255 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
/*
 * OS interface definitions needed by os-interface.c
*/
#ifndef OS_INTERFACE_H
#define OS_INTERFACE_H
/******************* Operating System Interface Routines *******************\
* *
* Operating system wrapper functions used to abstract the OS. *
* *
\***************************************************************************/
#include <nvtypes.h>
#include <nvstatus.h>
#include "nv_stdarg.h"
#include <nv-kernel-interface-api.h>
#include <os/nv_memory_type.h>
#include <nv-caps.h>
typedef struct
{
NvU32 os_major_version;
NvU32 os_minor_version;
NvU32 os_build_number;
const char * os_build_version_str;
const char * os_build_date_plus_str;
} os_version_info;
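/*
 * Usage sketch (caller-side, hypothetical locals; assumes NV_OK signals
 * success as elsewhere in the RM): os_get_version_info(), declared below,
 * fills in this structure.
 *
 *     os_version_info info = { 0 };
 *     if (os_get_version_info(&info) == NV_OK)
 *     {
 *         // info.os_major_version and info.os_minor_version are now valid
 *     }
 */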
/* Each OS defines its own version of this opaque type */
struct os_work_queue;
/* Each OS defines its own version of this opaque type */
typedef struct os_wait_queue os_wait_queue;
/*
* ---------------------------------------------------------------------------
*
* Function prototypes for OS interface.
*
* ---------------------------------------------------------------------------
*/
NvU64 NV_API_CALL os_get_num_phys_pages (void);
NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *);
NvU64 NV_API_CALL os_get_current_tick (void);
NvU64 NV_API_CALL os_get_current_tick_hr (void);
NvU64 NV_API_CALL os_get_tick_resolution (void);
NV_STATUS NV_API_CALL os_delay (NvU32);
NV_STATUS NV_API_CALL os_delay_us (NvU32);
NvU64 NV_API_CALL os_get_cpu_frequency (void);
NvU32 NV_API_CALL os_get_current_process (void);
void NV_API_CALL os_get_current_process_name (char *, NvU32);
NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *);
char* NV_API_CALL os_string_copy (char *, const char *);
NvU32 NV_API_CALL os_string_length (const char *);
NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32);
NvS32 NV_API_CALL os_string_compare (const char *, const char *);
NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...);
NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list);
void NV_API_CALL os_log_error (const char *, va_list);
void* NV_API_CALL os_mem_copy (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32);
void* NV_API_CALL os_mem_set (void *, NvU8, NvU32);
NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32);
void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *);
NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8);
NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16);
NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32);
NvBool NV_API_CALL os_pci_remove_supported (void);
void NV_API_CALL os_pci_remove (void *);
void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
NV_STATUS NV_API_CALL os_flush_cpu_cache (void);
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
void NV_API_CALL os_flush_cpu_write_combine_buffer(void);
NvU8 NV_API_CALL os_io_read_byte (NvU32);
NvU16 NV_API_CALL os_io_read_word (NvU32);
NvU32 NV_API_CALL os_io_read_dword (NvU32);
void NV_API_CALL os_io_write_byte (NvU32, NvU8);
void NV_API_CALL os_io_write_word (NvU32, NvU16);
void NV_API_CALL os_io_write_dword (NvU32, NvU32);
NvBool NV_API_CALL os_is_administrator (void);
NvBool NV_API_CALL os_allow_priority_override (void);
void NV_API_CALL os_dbg_init (void);
void NV_API_CALL os_dbg_breakpoint (void);
void NV_API_CALL os_dbg_set_level (NvU32);
NvU32 NV_API_CALL os_get_cpu_count (void);
NvU32 NV_API_CALL os_get_cpu_number (void);
void NV_API_CALL os_disable_console_access (void);
void NV_API_CALL os_enable_console_access (void);
NV_STATUS NV_API_CALL os_registry_init (void);
NvU64 NV_API_CALL os_get_max_user_va (void);
NV_STATUS NV_API_CALL os_schedule (void);
NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
void NV_API_CALL os_free_spinlock (void *);
NvU64 NV_API_CALL os_acquire_spinlock (void *);
void NV_API_CALL os_release_spinlock (void *, NvU64);
NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *);
NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *);
NV_STATUS NV_API_CALL os_alloc_mutex (void **);
void NV_API_CALL os_free_mutex (void *);
NV_STATUS NV_API_CALL os_acquire_mutex (void *);
NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *);
void NV_API_CALL os_release_mutex (void *);
void* NV_API_CALL os_alloc_semaphore (NvU32);
void NV_API_CALL os_free_semaphore (void *);
NV_STATUS NV_API_CALL os_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_release_semaphore (void *);
void* NV_API_CALL os_alloc_rwlock (void);
void NV_API_CALL os_free_rwlock (void *);
NV_STATUS NV_API_CALL os_acquire_rwlock_read (void *);
NV_STATUS NV_API_CALL os_acquire_rwlock_write (void *);
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_read (void *);
NV_STATUS NV_API_CALL os_cond_acquire_rwlock_write(void *);
void NV_API_CALL os_release_rwlock_read (void *);
void NV_API_CALL os_release_rwlock_write (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);
NvBool NV_API_CALL os_is_efi_enabled (void);
NvBool NV_API_CALL os_is_xen_dom0 (void);
NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
NvBool NV_API_CALL os_is_nvswitch_present (void);
NV_STATUS NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
void NV_API_CALL os_free_wait_queue (os_wait_queue *);
void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
void NV_API_CALL os_wait_interruptible (os_wait_queue *);
void NV_API_CALL os_wake_up (os_wait_queue *);
nv_cap_t* NV_API_CALL os_nv_cap_init (const char *);
nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int);
nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int);
void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);
enum os_pci_req_atomics_type {
OS_INTF_PCIE_REQ_ATOMICS_32BIT,
OS_INTF_PCIE_REQ_ATOMICS_64BIT,
OS_INTF_PCIE_REQ_ATOMICS_128BIT
};
NV_STATUS NV_API_CALL os_enable_pci_req_atomics (void *, enum os_pci_req_atomics_type);
NV_STATUS NV_API_CALL os_get_numa_node_memory_usage (NvS32, NvU64 *, NvU64 *);
NV_STATUS NV_API_CALL os_numa_add_gpu_memory (void *, NvU64, NvU64, NvU32 *);
NV_STATUS NV_API_CALL os_numa_remove_gpu_memory (void *, NvU64, NvU64, NvU32);
NV_STATUS NV_API_CALL os_offline_page_at_address(NvU64 address);
void* NV_API_CALL os_get_pid_info(void);
void NV_API_CALL os_put_pid_info(void *pid_info);
NV_STATUS NV_API_CALL os_find_ns_pid(void *pid_info, NvU32 *ns_pid);
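/*
 * Usage sketch (hypothetical 'handle'; assumed to be the same OS-specific
 * PCI device handle the os_pci_* routines take): requesting 64-bit PCIe
 * atomics with the enum above.
 *
 *     if (os_enable_pci_req_atomics(handle,
 *                                   OS_INTF_PCIE_REQ_ATOMICS_64BIT) != NV_OK)
 *     {
 *         // fall back to non-atomic code paths
 *     }
 */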
extern NvU32 os_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
extern NvBool os_cc_enabled;
extern NvBool os_cc_tdx_enabled;
extern NvBool os_dma_buf_enabled;
/*
* ---------------------------------------------------------------------------
*
* Debug macros.
*
* ---------------------------------------------------------------------------
*/
#define NV_DBG_INFO 0x0
#define NV_DBG_SETUP 0x1
#define NV_DBG_USERERRORS 0x2
#define NV_DBG_WARNINGS 0x3
#define NV_DBG_ERRORS 0x4
void NV_API_CALL out_string(const char *str);
int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);
#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \
nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__)
#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \
nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status)
/*
* Fields for os_lock_user_pages flags parameter
*/
#define NV_LOCK_USER_PAGES_FLAGS_WRITE 0:0
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO 0x00000000
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001
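/*
 * Usage sketch: the WRITE field occupies bit range 0:0 of the flags word,
 * so a writable lock request can be composed and tested directly.
 *
 *     NvU32 flags = NV_LOCK_USER_PAGES_FLAGS_WRITE_YES; // bit 0 set
 *     NvBool bWritable = ((flags & 0x1) == NV_LOCK_USER_PAGES_FLAGS_WRITE_YES);
 */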
#endif /* OS_INTERFACE_H */

View File

@@ -0,0 +1,41 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NV_MEMORY_TYPE_H
#define NV_MEMORY_TYPE_H
#define NV_MEMORY_NONCONTIGUOUS 0
#define NV_MEMORY_CONTIGUOUS 1
#define NV_MEMORY_CACHED 0
#define NV_MEMORY_UNCACHED 1
#define NV_MEMORY_WRITECOMBINED 2
#define NV_MEMORY_WRITEBACK 5
#define NV_MEMORY_DEFAULT 6
#define NV_MEMORY_UNCACHED_WEAK 7
#define NV_PROTECT_READABLE 1
#define NV_PROTECT_WRITEABLE 2
#define NV_PROTECT_READ_WRITE (NV_PROTECT_READABLE | NV_PROTECT_WRITEABLE)
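/*
 * Usage sketch (hypothetical locals): the cache attributes and the
 * protection bits are independent values, so a typical mapping request
 * pairs one of each, e.g. a write-combined read/write mapping.
 *
 *     NvU32 cacheType = NV_MEMORY_WRITECOMBINED;
 *     NvU32 protect   = NV_PROTECT_READ_WRITE; // readable | writeable
 */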
#endif /* NV_MEMORY_TYPE_H */

View File

@@ -0,0 +1,114 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 1999-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef _RM_GPU_OPS_H_
#define _RM_GPU_OPS_H_
#include <nvtypes.h>
#include <nvCpuUuid.h>
#include "nv_stdarg.h"
#include <nv-ioctl.h>
#include <nvmisc.h>
NV_STATUS NV_API_CALL rm_gpu_ops_create_session (nvidia_stack_t *, nvgpuSessionHandle_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_session (nvidia_stack_t *, nvgpuSessionHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_device_create (nvidia_stack_t *, nvgpuSessionHandle_t, const nvgpuInfo_t *, const NvProcessorUuid *, nvgpuDeviceHandle_t *, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_device_destroy (nvidia_stack_t *, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_create(nvidia_stack_t *, nvgpuDeviceHandle_t, unsigned long long, unsigned long long, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_dup_address_space(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, nvgpuAddressSpaceHandle_t *, nvgpuAddressSpaceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_address_space_destroy(nvidia_stack_t *, nvgpuAddressSpaceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_fb(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_alloc_pages(nvidia_stack_t *, void *, NvLength, NvU32, nvgpuPmaAllocationOptions_t, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_free_pages(nvidia_stack_t *, void *, NvU64 *, NvLength, NvU32, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_pin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength, NvU32, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_unpin_pages(nvidia_stack_t *, void *, NvU64 *, NvLength, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_get_pma_object(nvidia_stack_t *, nvgpuDeviceHandle_t, void **, const nvgpuPmaStatistics_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_pma_register_callbacks(nvidia_stack_t *sp, void *, nvPmaEvictPagesCallback, nvPmaEvictRangeCallback, void *);
void NV_API_CALL rm_gpu_ops_pma_unregister_callbacks(nvidia_stack_t *sp, void *);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_alloc_sys(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvLength, NvU64 *, nvgpuAllocInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_p2p_caps(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, nvgpuP2PCapsParams_t);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, NvLength, void **, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_cpu_ummap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, void*);
NV_STATUS NV_API_CALL rm_gpu_ops_tsg_allocate(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, const nvgpuTsgAllocParams_t *, nvgpuTsgHandle_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_tsg_destroy(nvidia_stack_t *, nvgpuTsgHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_channel_allocate(nvidia_stack_t *, const nvgpuTsgHandle_t, const nvgpuChannelAllocParams_t *, nvgpuChannelHandle_t *, nvgpuChannelInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_channel_destroy(nvidia_stack_t *, nvgpuChannelHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_memory_free(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64);
NV_STATUS NV_API_CALL rm_gpu_ops_query_caps(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuCaps_t);
NV_STATUS NV_API_CALL rm_gpu_ops_query_ces_caps(nvidia_stack_t *sp, nvgpuDeviceHandle_t, nvgpuCesCaps_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_gpu_info(nvidia_stack_t *, const NvProcessorUuid *pUuid, const nvgpuClientInfo_t *, nvgpuInfo_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_service_device_interrupts_rm(nvidia_stack_t *, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_dup_allocation(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuAddressSpaceHandle_t, NvU64, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_dup_memory (nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle, NvHandle, NvHandle *, nvgpuMemoryInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_free_duped_handle(nvidia_stack_t *, nvgpuDeviceHandle_t, NvHandle);
NV_STATUS NV_API_CALL rm_gpu_ops_get_fb_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFbInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_ecc_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuEccInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_own_page_fault_intr(nvidia_stack_t *, nvgpuDeviceHandle_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_init_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_fault_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuFaultInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_get_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, void *, NvU32 *);
NV_STATUS NV_API_CALL rm_gpu_ops_flush_replayable_fault_buffer(nvidia_stack_t *, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_has_pending_non_replayable_faults(nvidia_stack_t *, nvgpuFaultInfo_t, NvBool *);
NV_STATUS NV_API_CALL rm_gpu_ops_init_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_destroy_access_cntr_info(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_own_access_cntr_intr(nvidia_stack_t *, nvgpuSessionHandle_t, nvgpuAccessCntrInfo_t, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_enable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t, nvgpuAccessCntrConfig_t);
NV_STATUS NV_API_CALL rm_gpu_ops_disable_access_cntr(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuAccessCntrInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_set_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, unsigned, NvBool, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_unset_page_directory (nvidia_stack_t *, nvgpuAddressSpaceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_p2p_object_create(nvidia_stack_t *, nvgpuDeviceHandle_t, nvgpuDeviceHandle_t, NvHandle *);
void NV_API_CALL rm_gpu_ops_p2p_object_destroy(nvidia_stack_t *, nvgpuSessionHandle_t, NvHandle);
NV_STATUS NV_API_CALL rm_gpu_ops_get_external_alloc_ptes(nvidia_stack_t*, nvgpuAddressSpaceHandle_t, NvHandle, NvU64, NvU64, nvgpuExternalMappingInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_retain_channel(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvHandle, NvHandle, void **, nvgpuChannelInstanceInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_bind_channel_resources(nvidia_stack_t *, void *, nvgpuChannelResourceBindParams_t);
void NV_API_CALL rm_gpu_ops_release_channel(nvidia_stack_t *, void *);
void NV_API_CALL rm_gpu_ops_stop_channel(nvidia_stack_t *, void *, NvBool);
NV_STATUS NV_API_CALL rm_gpu_ops_get_channel_resource_ptes(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvP64, NvU64, NvU64, nvgpuExternalMappingInfo_t);
NV_STATUS NV_API_CALL rm_gpu_ops_report_non_replayable_fault(nvidia_stack_t *, nvgpuDeviceHandle_t, const void *);
NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_allocate(nvidia_stack_t *, nvgpuDeviceHandle_t, const nvgpuPagingChannelAllocParams_t *, nvgpuPagingChannelHandle_t *, nvgpuPagingChannelInfo_t);
void NV_API_CALL rm_gpu_ops_paging_channel_destroy(nvidia_stack_t *, nvgpuPagingChannelHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_paging_channels_map(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t, NvU64 *);
void NV_API_CALL rm_gpu_ops_paging_channels_unmap(nvidia_stack_t *, nvgpuAddressSpaceHandle_t, NvU64, nvgpuDeviceHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_paging_channel_push_stream(nvidia_stack_t *, nvgpuPagingChannelHandle_t, char *, NvU32);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_init(nvidia_stack_t *, struct ccslContext_t **, nvgpuChannelHandle_t);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_context_clear(nvidia_stack_t *, struct ccslContext_t *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_rotate_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_encrypt_with_iv(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *, NvU8 *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_decrypt(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 const *, NvU8 *, NvU8 const *, NvU32, NvU8 const *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_sign(nvidia_stack_t *, struct ccslContext_t *, NvU32, NvU8 const *, NvU8 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_query_message_pool(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64 *);
NV_STATUS NV_API_CALL rm_gpu_ops_ccsl_increment_iv(nvidia_stack_t *, struct ccslContext_t *, NvU8, NvU64, NvU8 *);
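/*
 * Usage sketch (hypothetical locals; 'sp' is the per-call nvidia_stack_t
 * supplied by the kernel interface layer; error handling elided): the
 * rm_gpu_ops entry points are bracketed by session create/destroy.
 *
 *     nvgpuSessionHandle_t session;
 *     if (rm_gpu_ops_create_session(sp, &session) == NV_OK)
 *     {
 *         // ... device / address-space / channel operations ...
 *         rm_gpu_ops_destroy_session(sp, session);
 *     }
 */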
#endif