2 changes: 2 additions & 0 deletions CMakeLists.txt
@@ -30,6 +30,8 @@ set(src
kernels.cu
memcpy.cpp
nvbandwidth.cpp
nvbandwidth_json.cpp
jsoncpp.cpp
)

execute_process(
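Note on the CMakeLists.txt change: the two new sources compile the JSON output module and the amalgamated JsonCpp translation unit directly into the nvbandwidth binary, so no external JSON library has to be located at configure time (the single-file jsoncpp.cpp is JsonCpp's supported amalgamated distribution). As rough orientation only, the sketch below shows the flavor of JsonCpp serialization this makes available; the field names are hypothetical and the real schema lives in nvbandwidth_json.cpp, which is not part of this excerpt.

// Minimal JsonCpp sketch, assuming the amalgamated header is available as
// "json/json.h". The keys used here are illustrative, not the tool's schema.
#include <iostream>
#include <string>
#include <json/json.h>

int main() {
    Json::Value root;
    root["nvbandwidth"]["version"] = "example";                      // hypothetical field
    root["testcases"][0]["name"] = "host_to_device_memcpy_ce";       // hypothetical field
    root["testcases"][0]["bandwidth_gbps"] = 25.1;                   // hypothetical field

    Json::StreamWriterBuilder writer;
    writer["indentation"] = "  ";  // pretty-print with two-space indent
    std::cout << Json::writeString(writer, root) << std::endl;
    return 0;
}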
63 changes: 63 additions & 0 deletions Licenses.txt
@@ -0,0 +1,63 @@
JsonCpp:
Copyright Baptiste Lepilleur - Public domain and MIT licenses
Attribution statements: Nvidia actively chooses to accept jsoncpp as public domain where acceptable and MIT licensed where public domain is not accepted.
License text ( https://github.com/open-source-parsers/jsoncpp/blob/master/LICENSE )

/*!
* The JsonCpp library's source code, including accompanying documentation,
* tests and demonstration applications, are licensed under the following
* conditions...
*
* Baptiste Lepilleur and The JsonCpp Authors explicitly disclaim copyright in all
* jurisdictions which recognize such a disclaimer. In such jurisdictions,
* this software is released into the Public Domain.
*
* In jurisdictions which do not recognize Public Domain property (e.g. Germany as of
* 2010), this software is Copyright (c) 2007-2010 by Baptiste Lepilleur and
* The JsonCpp Authors, and is released under the terms of the MIT License (see below).
*
* In jurisdictions which recognize Public Domain property, the user of this
* software may choose to accept it either as 1) Public Domain, 2) under the
* conditions of the MIT License (see below), or 3) under the terms of dual
* Public Domain/MIT License conditions described here, as they choose.
*
* The MIT License is about as close to Public Domain as a license can get, and is
* described in clear, concise terms at:
*
* http://en.wikipedia.org/wiki/MIT_License
*
* The full text of the MIT License follows:
*
* ========================================================================
* Copyright (c) 2007-2010 Baptiste Lepilleur and The JsonCpp Authors
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* ========================================================================
* (END LICENSE TEXT)
*
* The MIT license is compatible with both the GPL and commercial
* software, affording one all of the rights of Public Domain with the
* minor nuisance of being required to keep the above copyright notice
* and license text in the source code. Note also that by accepting the
* Public Domain "license" you can re-license your copy using whatever
* license you like.
*/

74 changes: 4 additions & 70 deletions common.h
@@ -48,15 +48,16 @@ extern unsigned int averageLoopCount;
extern bool disableAffinity;
extern bool skipVerification;
extern bool useMean;
extern bool jsonOutput;
// Verbosity
extern bool verbose;
class Verbosity {
public:
Verbosity() = default;
template<typename T>
Verbosity& operator<<(T input) {
if (verbose) std::cout << input;
return *this;
if (!jsonOutput && verbose) std::cout << input;
return *this;
}
};
extern Verbosity VERBOSE;
@@ -196,7 +197,7 @@ template <class T> struct PeerValueMatrix {
};

template <class T>
std::ostream &operator<<(std::ostream &o, const PeerValueMatrix<T> &matrix) {
std::ostream & operator<<(std::ostream &o, const PeerValueMatrix<T> &matrix) {
// This assumes T is numeric
T maxVal = std::numeric_limits<T>::min();
T minVal = std::numeric_limits<T>::max();
@@ -234,71 +235,4 @@ std::ostream &operator<<(std::ostream &o, const PeerValueMatrix<T> &matrix) {
return o;
}

// CUDA Error handling
inline void CU_ASSERT(CUresult cuResult, const char *msg = nullptr) {
if (cuResult != CUDA_SUCCESS) {
const char *errDescStr, *errNameStr;
cuGetErrorString(cuResult, &errDescStr);
cuGetErrorName(cuResult, &errNameStr);
std::cout << "[" << errNameStr << "] " << errDescStr;
if (msg != nullptr) std::cout << ":\n\t" << msg;
std::cout << std::endl;
std::exit(1);
}
}

// NVML Error handling
inline void NVML_ASSERT(nvmlReturn_t nvmlResult, const char *msg = nullptr) {
if (nvmlResult != NVML_SUCCESS) {
std::cout << "NVML_ERROR: [" << nvmlErrorString(nvmlResult) << "]";
if (msg != nullptr) std::cout << ":\n\t" << msg;
std::cout << std::endl;
std::exit(1);
}
}

// NUMA optimal affinity
inline void setOptimalCpuAffinity(int cudaDeviceID) {
#ifdef _WIN32
// NVML doesn't support setting affinity on Windows
return;
#endif
if (disableAffinity) {
return;
}

nvmlDevice_t device;
CUuuid dev_uuid;

std::stringstream s;
std::unordered_set <unsigned char> dashPos {0, 4, 6, 8, 10};

CU_ASSERT(cuDeviceGetUuid(&dev_uuid, cudaDeviceID));

s << "GPU";
for (int i = 0; i < 16; i++) {
if (dashPos.count(i)) {
s << '-';
}
s << std::hex << std::setfill('0') << std::setw(2) << (0xFF & (int)dev_uuid.bytes[i]);
}

NVML_ASSERT(nvmlDeviceGetHandleByUUID(s.str().c_str(), &device));
nvmlReturn_t result = nvmlDeviceSetCpuAffinity(device);
if (result != NVML_ERROR_NOT_SUPPORTED) {
NVML_ASSERT(result);
}
}

inline bool isMemoryOwnedByCUDA(void *memory) {
CUmemorytype memorytype;
CUresult status = cuPointerGetAttribute(&memorytype, CU_POINTER_ATTRIBUTE_MEMORY_TYPE, (CUdeviceptr)memory);
if (status == CUDA_ERROR_INVALID_VALUE) {
return false;
} else {
CU_ASSERT(status);
return true;
}
}

#endif
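Note on the common.h change: the new extern bool jsonOutput; flag gates the VERBOSE logger (if (!jsonOutput && verbose) ...), so verbose human-readable output can never interleave with a machine-readable JSON document; the CUDA/NVML helper functions deleted from this header move into the new inline_common.h. A minimal sketch of the intended effect, assuming option parsing elsewhere sets verbose and jsonOutput:

// Sketch of assumed usage: once jsonOutput is set to true, VERBOSE becomes a
// no-op, so diagnostic text cannot corrupt the JSON document printed at the
// end of the run.
#include "common.h"

static void logTransfer(int srcDevice, int dstDevice) {
    // Emitted only when verbose is set and jsonOutput is not.
    VERBOSE << "measuring copy " << srcDevice << " -> " << dstDevice << "\n";
}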
101 changes: 101 additions & 0 deletions inline_common.h
@@ -0,0 +1,101 @@
/*
* SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#pragma once

#include "common.h"
#include "nvbandwidth_json.h"


// CUDA Error handling
inline void CU_ASSERT(CUresult cuResult, const char *msg = nullptr) {
if (cuResult != CUDA_SUCCESS) {
const char *errDescStr, *errNameStr;
cuGetErrorString(cuResult, &errDescStr);
cuGetErrorName(cuResult, &errNameStr);
std::stringstream errmsg;
errmsg << "[" << errNameStr << "] " << errDescStr;
if (msg != nullptr) errmsg << ":\n\t" << msg;
if (!jsonOutput) {
std::cout << errmsg.str() << std::endl;
} else {
jsonMgr.recordError(errmsg.str());
jsonMgr.printJson();
}
std::exit(1);
}
}

// NVML Error handling
inline void NVML_ASSERT(nvmlReturn_t nvmlResult, const char *msg = nullptr) {
if (nvmlResult != NVML_SUCCESS) {
std::stringstream errmsg;
errmsg << "NVML_ERROR: [" << nvmlErrorString(nvmlResult) << "]";
if (msg != nullptr) errmsg << ":\n\t" << msg;
if (!jsonOutput) {
std::cout << errmsg.str() << std::endl;
} else {
jsonMgr.recordError(errmsg.str());
jsonMgr.printJson();
}
std::exit(1);
}
}

// NUMA optimal affinity
inline void setOptimalCpuAffinity(int cudaDeviceID) {
#ifdef _WIN32
// NVML doesn't support setting affinity on Windows
return;
#endif
if (disableAffinity) {
return;
}

nvmlDevice_t device;
CUuuid dev_uuid;

std::stringstream s;
std::unordered_set <unsigned char> dashPos {0, 4, 6, 8, 10};

CU_ASSERT(cuDeviceGetUuid(&dev_uuid, cudaDeviceID));

s << "GPU";
for (int i = 0; i < 16; i++) {
if (dashPos.count(i)) {
s << '-';
}
s << std::hex << std::setfill('0') << std::setw(2) << (0xFF & (int)dev_uuid.bytes[i]);
}

NVML_ASSERT(nvmlDeviceGetHandleByUUID(s.str().c_str(), &device));
nvmlReturn_t result = nvmlDeviceSetCpuAffinity(device);
if (result != NVML_ERROR_NOT_SUPPORTED) {
NVML_ASSERT(result);
}
}

inline bool isMemoryOwnedByCUDA(void *memory) {
CUmemorytype memorytype;
CUresult status = cuPointerGetAttribute(&memorytype, CU_POINTER_ATTRIBUTE_MEMORY_TYPE, (CUdeviceptr)memory);
if (status == CUDA_ERROR_INVALID_VALUE) {
return false;
} else {
CU_ASSERT(status);
return true;
}
}
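Note on inline_common.h: CU_ASSERT and NVML_ASSERT now assemble the error text in a std::stringstream and then either print it (plain-text mode) or hand it to jsonMgr.recordError()/printJson() (JSON mode, jsonMgr being declared in nvbandwidth_json.h) before exiting; setOptimalCpuAffinity and isMemoryOwnedByCUDA are carried over from common.h. A minimal caller sketch, assuming the standard CUDA driver and NVML entry points used elsewhere in nvbandwidth:

// Minimal caller sketch (assumption: NVML has not yet been initialized). On any
// failure the wrappers exit(1), emitting either plain text or, when jsonOutput
// is set, a JSON error record.
#include "inline_common.h"

static void prepareDevice(int deviceOrdinal) {
    CU_ASSERT(cuInit(0), "cuInit failed");
    NVML_ASSERT(nvmlInit_v2(), "nvmlInit_v2 failed");

    CUdevice dev;
    CU_ASSERT(cuDeviceGet(&dev, deviceOrdinal), "cuDeviceGet failed");

    // Pins the calling thread to the CPUs nearest this GPU; a no-op on Windows
    // or when disableAffinity is set.
    setOptimalCpuAffinity(deviceOrdinal);
}

For reference, the byte loop in setOptimalCpuAffinity renders the device UUID as the NVML-style string GPU-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, which is why dashes are inserted before bytes 0, 4, 6, 8, and 10.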