From c6a56670f26f665c93457edf3e021c6c46bf2331 Mon Sep 17 00:00:00 2001 From: fbusato Date: Mon, 26 Jan 2026 16:48:05 -0800 Subject: [PATCH] Fix CCCL_THROW in dlpack_to_mdspan --- .../include/cuda/__mdspan/dlpack_to_mdspan.h | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/libcudacxx/include/cuda/__mdspan/dlpack_to_mdspan.h b/libcudacxx/include/cuda/__mdspan/dlpack_to_mdspan.h index 565bb9894c0..f04f6d4fa68 100644 --- a/libcudacxx/include/cuda/__mdspan/dlpack_to_mdspan.h +++ b/libcudacxx/include/cuda/__mdspan/dlpack_to_mdspan.h @@ -62,7 +62,7 @@ __get_layout_right_stride(const ::cuda::std::int64_t* __shapes, ::cuda::std::siz // TODO: replace with mul_overflow if (const auto __hi = ::cuda::mul_hi(__stride, __shapes[__i]); __hi != 0 && __hi != -1) { - _CCCL_THROW(::std::invalid_argument{"shape overflow"}); + _CCCL_THROW(std::invalid_argument, "shape overflow"); } __stride *= __shapes[__i]; // TODO: check for overflow } @@ -79,7 +79,7 @@ __get_layout_left_stride(const ::cuda::std::int64_t* __shapes, ::cuda::std::size // TODO: replace with mul_overflow if (const auto __hi = ::cuda::mul_hi(__stride, __shapes[__i]); __hi != 0 && __hi != -1) { - _CCCL_THROW(::std::invalid_argument{"shape overflow"}); + _CCCL_THROW(std::invalid_argument, "shape overflow"); } __stride *= __shapes[__i]; } @@ -97,12 +97,12 @@ _CCCL_HOST_API void __validate_dlpack_strides(const ::DLTensor& __tensor, [[mayb if (__strides_ptr == nullptr) { # if _CCCL_DLPACK_AT_LEAST(1, 2) - _CCCL_THROW(::std::invalid_argument{"strides=nullptr is not supported for DLPack v1.2 and later"}); + _CCCL_THROW(std::invalid_argument, "strides=nullptr is not supported for DLPack v1.2 and later"); # else // strides == nullptr means row-major (C-contiguous) layout if (__is_layout_left && __rank > 1) { - _CCCL_THROW(::std::invalid_argument{"strides must be non-null for layout_left"}); + _CCCL_THROW(std::invalid_argument, "strides must be non-null for layout_left"); } else { @@ -116,21 
+116,21 @@ _CCCL_HOST_API void __validate_dlpack_strides(const ::DLTensor& __tensor, [[mayb { if (__strides_ptr[__pos] != ::cuda::__get_layout_right_stride(__tensor.shape, __pos, __rank)) { - _CCCL_THROW(::std::invalid_argument{"DLTensor strides are not compatible with layout_right"}); + _CCCL_THROW(std::invalid_argument, "DLTensor strides are not compatible with layout_right"); } } else if constexpr (__is_layout_left) { if (__strides_ptr[__pos] != ::cuda::__get_layout_left_stride(__tensor.shape, __pos)) { - _CCCL_THROW(::std::invalid_argument{"DLTensor strides are not compatible with layout_left"}); + _CCCL_THROW(std::invalid_argument, "DLTensor strides are not compatible with layout_left"); } } else if constexpr (__is_layout_stride) { if (__strides_ptr[__pos] <= 0) { - _CCCL_THROW(::std::invalid_argument{"layout_stride requires strictly positive strides"}); + _CCCL_THROW(std::invalid_argument, "layout_stride requires strictly positive strides"); } } } @@ -158,15 +158,15 @@ __to_mdspan(const ::DLTensor& __tensor) { if (cuda::std::cmp_not_equal(__tensor.ndim, _Rank)) { - _CCCL_THROW(::std::invalid_argument{"DLTensor rank does not match expected rank"}); + _CCCL_THROW(std::invalid_argument, "DLTensor rank does not match expected rank"); } if (!::cuda::__validate_dlpack_data_type<__element_type>(__tensor.dtype)) { - _CCCL_THROW(::std::invalid_argument{"DLTensor data type does not match expected type"}); + _CCCL_THROW(std::invalid_argument, "DLTensor data type does not match expected type"); } if (__tensor.data == nullptr) { - _CCCL_THROW(::std::invalid_argument{"DLTensor data must be non-null"}); + _CCCL_THROW(std::invalid_argument, "DLTensor data must be non-null"); } auto __base_data = static_cast<char*>(__tensor.data) + __tensor.byte_offset; auto __data = reinterpret_cast<__element_type*>(__base_data); @@ -175,7 +175,7 @@ __to_mdspan(const ::DLTensor& __tensor) // However, it always works for the supported data types. 
if (__datatype_size > 0 && !::cuda::is_aligned(__data, __datatype_size)) { - _CCCL_THROW(::std::invalid_argument{"DLTensor data must be aligned to the data type"}); + _CCCL_THROW(std::invalid_argument, "DLTensor data must be aligned to the data type"); } if constexpr (_Rank == 0) { @@ -185,14 +185,14 @@ __to_mdspan(const ::DLTensor& __tensor) { if (__tensor.shape == nullptr) { - _CCCL_THROW(::std::invalid_argument{"DLTensor shape must be non-null"}); + _CCCL_THROW(std::invalid_argument, "DLTensor shape must be non-null"); } ::cuda::std::array<::cuda::std::int64_t, _Rank> __extents_array{}; for (::cuda::std::size_t __i = 0; __i < _Rank; ++__i) { if (__tensor.shape[__i] < 0) { - _CCCL_THROW(::std::invalid_argument{"DLTensor shapes must be positive"}); + _CCCL_THROW(std::invalid_argument, "DLTensor shapes must be positive"); } __extents_array[__i] = __tensor.shape[__i]; } @@ -227,7 +227,7 @@ to_host_mdspan(const ::DLTensor& __tensor) { if (__tensor.device.device_type != ::kDLCPU) { - _CCCL_THROW(::std::invalid_argument{"DLTensor device type must be kDLCPU for host_mdspan"}); + _CCCL_THROW(std::invalid_argument, "DLTensor device type must be kDLCPU for host_mdspan"); } using __extents_type = ::cuda::std::dims<_Rank, ::cuda::std::int64_t>; using __mdspan_type = ::cuda::host_mdspan<_ElementType, __extents_type, _LayoutPolicy>; @@ -241,7 +241,7 @@ to_device_mdspan(const ::DLTensor& __tensor) { if (__tensor.device.device_type != ::kDLCUDA) { - _CCCL_THROW(::std::invalid_argument{"DLTensor device type must be kDLCUDA for device_mdspan"}); + _CCCL_THROW(std::invalid_argument, "DLTensor device type must be kDLCUDA for device_mdspan"); } using __extents_type = ::cuda::std::dims<_Rank, ::cuda::std::int64_t>; using __mdspan_type = ::cuda::device_mdspan<_ElementType, __extents_type, _LayoutPolicy>; @@ -255,7 +255,7 @@ to_managed_mdspan(const ::DLTensor& __tensor) { if (__tensor.device.device_type != ::kDLCUDAManaged) { - _CCCL_THROW(::std::invalid_argument{"DLTensor device type 
must be kDLCUDAManaged for managed_mdspan"}); + _CCCL_THROW(std::invalid_argument, "DLTensor device type must be kDLCUDAManaged for managed_mdspan"); } using __extents_type = ::cuda::std::dims<_Rank, ::cuda::std::int64_t>; using __mdspan_type = ::cuda::managed_mdspan<_ElementType, __extents_type, _LayoutPolicy>;