diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..881e56f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+.vscode
+output/
+unique_extraction/tester/input/*
+unique_extraction/tester/output/*
+build
\ No newline at end of file
diff --git a/unique_extraction/CMakeLists.txt b/unique_extraction/CMakeLists.txt
new file mode 100644
index 0000000..b454d8f
--- /dev/null
+++ b/unique_extraction/CMakeLists.txt
@@ -0,0 +1,29 @@
+cmake_minimum_required(VERSION 3.18)
+
+project(unique_extraction LANGUAGES CXX CUDA)
+
+set(CMAKE_CXX_STANDARD 20)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS OFF)
+
+set(CMAKE_CUDA_STANDARD 17)
+set(CMAKE_CUDA_STANDARD_REQUIRED ON)
+set(CMAKE_CUDA_EXTENSIONS OFF)
+
+find_package(CUDAToolkit REQUIRED)
+
+find_package(OpenMP REQUIRED)
+
+add_executable(unique_extraction src/unique_extraction.cu)
+
+if(OpenMP_CXX_FOUND)
+    target_compile_options(unique_extraction PRIVATE
+        $<$<COMPILE_LANGUAGE:CXX>:-fopenmp>
+        $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler=-fopenmp>
+    )
+endif()
+
+target_link_libraries(unique_extraction OpenMP::OpenMP_CXX CUDA::cudart)
+
+add_executable(generate_file tester/generate_file.cpp)
+target_link_libraries(generate_file OpenMP::OpenMP_CXX)
\ No newline at end of file
diff --git "a/unique_extraction/report/picture/5e8\344\270\25230\344\275\215\346\225\260\346\215\256.png" "b/unique_extraction/report/picture/5e8\344\270\25230\344\275\215\346\225\260\346\215\256.png"
new file mode 100644
index 0000000..46401eb
Binary files /dev/null and "b/unique_extraction/report/picture/5e8\344\270\25230\344\275\215\346\225\260\346\215\256.png" differ
diff --git "a/unique_extraction/report/picture/5e8\344\270\25264\344\275\215\346\225\260\346\215\256.png" "b/unique_extraction/report/picture/5e8\344\270\25264\344\275\215\346\225\260\346\215\256.png"
new file mode 100644
index 0000000..0c96ab6
Binary files /dev/null and "b/unique_extraction/report/picture/5e8\344\270\25264\344\275\215\346\225\260\346\215\256.png" differ
diff --git "a/unique_extraction/report/picture/unique\351\203\250\345\210\206.png" "b/unique_extraction/report/picture/unique\351\203\250\345\210\206.png"
new file mode 100644
index 0000000..951d1b6
Binary files /dev/null and "b/unique_extraction/report/picture/unique\351\203\250\345\210\206.png" differ
diff --git "a/unique_extraction/report/picture/\344\274\230\345\214\226\350\276\223\345\205\245.png" "b/unique_extraction/report/picture/\344\274\230\345\214\226\350\276\223\345\205\245.png"
new file mode 100644
index 0000000..d3475ed
Binary files /dev/null and "b/unique_extraction/report/picture/\344\274\230\345\214\226\350\276\223\345\205\245.png" differ
diff --git "a/unique_extraction/report/picture/\344\274\230\345\214\226\350\276\223\345\207\272.png" "b/unique_extraction/report/picture/\344\274\230\345\214\226\350\276\223\345\207\272.png"
new file mode 100644
index 0000000..425969d
Binary files /dev/null and "b/unique_extraction/report/picture/\344\274\230\345\214\226\350\276\223\345\207\272.png" differ
diff --git a/unique_extraction/report/report.md b/unique_extraction/report/report.md
new file mode 100644
index 0000000..51a885c
--- /dev/null
+++ b/unique_extraction/report/report.md
@@ -0,0 +1,124 @@
+# InfiniTensor Large Model & AI Systems Training Camp, CUDA Track: Project Report
+
+- Topic: unique_extraction (deduplicating large-scale 64-bit feature values)
+- Author: JBFYS-XD
+- InfiniTensor profile: https://beta.infinitensor.com/user/jbfysxd
+- GitHub profile: https://github.com/JBFYS-XD
+
+> Preface: this implementation can only handle datasets that fit entirely in host memory, purely a limitation of my own skill (
+
+## Approach
+
+Deduplication usually has two solutions:
+
+1. Use a hash table to detect duplicate features
+2. Sort the data, then run a unique pass to drop repeats (a minimal sketch follows this list)
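+
+A minimal host-side sketch of approach 2, just to fix the idea (plain single-threaded C++ with the standard library, not the CUDA pipeline described below):
+
+```cpp
+#include <algorithm>
+#include <cstdint>
+#include <vector>
+
+// Deduplicate by sorting, then dropping adjacent repeats.
+std::vector<uint64_t> unique_extract(std::vector<uint64_t> data) {
+    std::sort(data.begin(), data.end());                            // sort step
+    data.erase(std::unique(data.begin(), data.end()), data.end());  // unique step
+    return data;
+}
+```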
+
+For approach 1, the feature set is large, so the hash table cannot be loaded into the GPU directly (it would blow up device memory), and splitting it into sub-tables is complicated to implement (I'm too weak to even write a version for comparison (〒︿〒) );
+
+Approach 2, on the other hand, has efficient and simple parallel methods for both the sort and the unique steps, so I decided to solve the problem with it.
+
+## How each part is solved
+
+### Sorting
+
+Let data_tile_size be the largest array length the GPU can hold (i.e. data_tile_size * 8 B of data). Any tile of at most data_tile_size elements can be sorted easily, so the main difficulty is how to merge multiple sorted tiles using limited device memory.
+
+#### Merge sort
+Speaking of merging, the first idea is merge sort: each pair of tiles to be merged is processed in steps of data_tile_size. For each sub-chunk of size data_size (data_size <= data_tile_size), binary search determines how many elements are needed from block1 and from block2 (the two counts should add up to data_size), and only those elements are loaded onto the GPU and merged.
+
+#### Bitonic sort
+There is an even simpler option: bitonic sort. Its key property is that in every step each element is compared only with an element at a fixed position, and consecutive elements map to consecutive, highly predictable partners, so it is easy to implement, memory accesses coalesce well, divergence is low, and it suits parallel hardware extremely well.
+
+The classic bitonic network requires the element count to be a power of two ($2^n$); padding large inputs up to that can, in the worst case, double both memory and time. Fortunately there is an arbitrary-length variant: [see this excellent write-up](https://zhuanlan.zhihu.com/p/707468542)
+
+Comparing the two: merge sort is $O(n \log n)$ while bitonic sort is $O(n \log^2 n)$, so merge sort is clearly the better choice on paper, but I have not fully digested the merge approach, so I went with bitonic sort (understandable, right? ( ˘•ω•˘ ) )
+
+### Unique (deduplication)
+This step is simpler: since the whole array is already sorted, the features can be deduplicated tile by tile.
+
+#### Step 1: mark
+Mark whether the element at the current position is a new feature compared with the one before it: `mark = data[pos] != data[pos - 1]`. For the first element of each tile, it is enough to remember the last element of the previous tile, or to check whether it is the very first feature overall: `first_mark = (global_pos == 0) || (block[block_pos - 1].back() != block[block_pos].first())`.
+
+#### Step 2: scan (prefix sum)
+Take a prefix sum over the marks array to get, for each position, the index (1-based) the element will have in the deduplicated array.
+
+The prefix sum uses the well-known Brent-Kung algorithm. Because synchronization is involved, multiple layers are needed: each pass can only scan independent chunks of BLOCKSIZE elements, then the last element of each scanned chunk (its total) becomes the input of the next layer's scan, and a final pass adds everything back up. This way layer $i$ can cover $BLOCKSIZE^i$ elements.
+
+#### Step 3: write back
+This step writes into the result array using the scan values as indices (since the first valid scan value is 1, elements go to `result[scan[pos] - 1]`; note that `scan[pos] == 0` really can happen, namely when the current element was already written by the previous tile, i.e. the `first_mark == 0` case). The result array is then the deduplicated array.
+
+> Since I started the project rather late, there are no diagrams to walk through the details; please refer to the source code for the exact implementation. Sorry about that <(_ _)>
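+
+To make the three steps concrete, here is a minimal sequential sketch of the mark -> scan -> write compaction for one sorted tile (an illustration of the idea only, not the CUDA kernels; `prev_last` stands for the last element of the previous tile, and `out` must have room for the unique elements):
+
+```cpp
+#include <cstdint>
+#include <vector>
+
+// Compact one sorted tile; returns the number of unique elements written to out.
+uint64_t compact_tile(const uint64_t* data, uint64_t n, uint64_t* out,
+                      bool has_prev, uint64_t prev_last) {
+    if (n == 0) return 0;
+    std::vector<uint64_t> mark(n), scan(n);
+    for (uint64_t i = 0; i < n; ++i)   // step 1: mark new features
+        mark[i] = (i == 0) ? (!has_prev || data[0] != prev_last)
+                           : (data[i] != data[i - 1]);
+    for (uint64_t i = 0; i < n; ++i)   // step 2: inclusive prefix sum (1-based target index)
+        scan[i] = mark[i] + (i ? scan[i - 1] : 0);
+    for (uint64_t i = 0; i < n; ++i)   // step 3: scatter by scan index
+        if (mark[i]) out[scan[i] - 1] = data[i];
+    return scan[n - 1];
+}
+```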
+
+## Testing
+
+### Test environment
+- OS: Debian GNU/Linux 12 (bookworm)
+- CPU: Intel(R) Xeon(R) CPU E5-2673 v3 @ 2.40GHz
+- GPU: NVIDIA GeForce GTX 1060 5GB (yes, a 10-series card, which is why there is no ncu data in the tests below (〒︿〒) )
+- NVIDIA driver version: 535.247.01
+- Test data: 500,000,000 randomly generated 30-bit (4.5 GB) / 64-bit (9.5 GB) feature values
+
+### Nsight Systems results
+![30-bit data](./picture/5e8个30位数据.png)
+![64-bit data](./picture/5e8个64位数据.png)
+
+The choice of test data only affects how fast the files load; GPU time barely changes (everything is stored as uint64_t anyway).
+
+Apart from file I/O, most of the time goes into the first step, the sort, and most of that into `bitonic_sort_tile_kernel` (the kernel that sorts tiles no larger than data_tile_size). In other words, the extra $\log n$ factor over merge sort really shows; with merge sort this part should be much faster, roughly 6x for this dataset.
+
+![](./picture/unique部分.png)
+
+Zooming in on the unique part at the end: it is almost entirely spent writing the output files! The actual GPU time is only about 10 ms x 30 tiles.
+
+
+## Improvements
+
+### File I/O
+
+File reading and writing could be done on multiple cores, or mapped directly with mmap, but unfortunately I know neither technique yet. This is where the biggest gains are; mmap could even lift this project's limitation of only handling datasets that fit entirely in memory.
+
+### Sort algorithm
+
+Replacing bitonic sort with a merge sort should be another big improvement.
+
+### Kernel-level improvements
+
+Without ncu the kernel tuning is limited (which is in fact why I picked this, the third project). Still, thread coarsening and tuning parameters such as data_tile_size should specialize the kernels better for the GPU, and the host-device transfers interleaved between kernel launches could have some of their latency hidden with CUDA streams.
+
+## Closing remarks
+
+For various reasons this project only really got going on 9.28; I had sketched ideas before, but the repository itself was only created on 9.27, so the completeness leaves something to be desired. Please go easy on me, teachers <3.
+
+> Completed: 2025.9.30 23:51
+
+
+## Second pass
+
+I learned some OpenMP and used it to parallelize the file I/O, putting my 22-odd hardware threads to work instead of letting one core struggle while 21 cores watch. Straight to the results:
+
+### Input optimization
+
+The file layout changed here: instead of one big file, the data is split into many small files that are read in parallel. The test file name format is `uint64-${elem_size}-${file_id}.in`, e.g. `uint64-16777216-0.in`. This serves two purposes: the element counts can be extracted from the file names with a regular expression so the buffer can be allocated up front, and the split is exactly what makes parallel reading possible.
+
+Result:
+![](./picture/优化输入.png)
+
+A very solid improvement: reading time dropped from a minute and a half to 12 seconds.
+
+### Output optimization
+
+The same optimization is applied to the output: all deduplicated results are computed first and then written out in parallel. Allocating another large array would be tight on memory, so the input array `data` is reused instead. In the final mark-scan-write pass, a tile's data has served its purpose once it is copied to the GPU during the mark step, so it can safely be overwritten during the write-back step, and since the output is never larger than the input there is no risk of overwriting data that has not been consumed yet.
+
+Result:
+![](./picture/优化输出.png)
+
+Again a clear win: from about a minute down to 10 seconds.
+
+### Closing remarks
+
+With the input and output optimizations, total time went from about 4.5 minutes down to a bit over two minutes and ten seconds, roughly a 2x speedup. Hooray.
+
+As for when I will manage to write a merge sort version to replace the bitonic sort, let me practice a bit more first.
+
+> Second pass: 2025.10.15 22:44
\ No newline at end of file
diff --git a/unique_extraction/src/unique_extraction.cu b/unique_extraction/src/unique_extraction.cu
new file mode 100644
index 0000000..ccfb285
--- /dev/null
+++ b/unique_extraction/src/unique_extraction.cu
@@ -0,0 +1,412 @@
+#include <cuda_runtime.h>
+#include <omp.h>
+#include <algorithm>
+#include <cstdint>
+#include <cstdio>
+#include <filesystem>
+#include <iostream>
+#include <memory>
+#include <regex>
+#include <string>
+#include <vector>
+
+#define FILE_DATA_SIZE (1 << 24)
+#define BLOCKSIZE 256
+
+const uint64_t data_tile_size = 1 << 24;
+
+/**
+ * @author JBFYS-XD
+ */
+
+/**
+ * @brief Kernel for sorting a chunk no larger than one tile on the GPU
+ *
+ * @param d_data array to sort
+ * @param d_data_size number of elements
+ * @param k stage length: the array is sorted in units of length k
+ * @param j sub-step: bitonic compare-exchange with stride j inside each k-unit
+ */
+__global__ void bitonic_sort_tile_kernel(uint64_t* d_data, uint64_t d_data_size, uint64_t k, uint64_t j) {
+    uint64_t x = blockDim.x * blockIdx.x + threadIdx.x;
+    if (x >= d_data_size || x & j) return;
+    uint64_t y = j == (k >> 1) ? x + 2 * (j - (x % j)) - 1 : x ^ j;
+    if (y >= d_data_size) return;
+    uint64_t valx = d_data[x];
+    uint64_t valy = d_data[y];
+    if (valx > valy) {
+        d_data[x] = valy;
+        d_data[y] = valx;
+    }
+}
+
+/**
+ * @brief Sort one tile that fits into device memory (a single-kernel-sized chunk)
+ *
+ * @param data host pointer to the tile to sort
+ * @param d_data device buffer the tile is staged through
+ * @param d_data_size number of elements in the tile
+ */
+void bitonic_sort_tile(uint64_t* data, uint64_t* d_data, uint64_t d_data_size) {
+
+    cudaMemcpy(d_data, data, d_data_size * sizeof(uint64_t), cudaMemcpyHostToDevice);
+
+    int threadsPerBlock = BLOCKSIZE;
+    int blocksPerGrid = (d_data_size + threadsPerBlock - 1) / threadsPerBlock;
+
+    for (uint64_t k = 2; (k >> 1) < d_data_size; k <<= 1) {
+        for (uint64_t j = (k >> 1); j > 0; j >>= 1) {
+            bitonic_sort_tile_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_data, d_data_size, k, j);
+            cudaDeviceSynchronize();
+        }
+    }
+
+    cudaMemcpy(data, d_data, d_data_size * sizeof(uint64_t), cudaMemcpyDeviceToHost);
+}
+
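+/*
+ * Merge stage across tiles (strides j >= data_tile_size). The host picks the
+ * partner window for each data_tile_size-sized window using the same
+ * arbitrary-length bitonic rule as above: in the first sub-step of a stage
+ * (j == k/2) the partner range is the mirrored one, x + 2 * (j - x % j) - 1,
+ * while in later sub-steps it is simply x ^ j; out-of-range partners are
+ * skipped. Inside the kernel, element x of the first window is then compared
+ * with either the mirrored index (data_tile_size - x - 1) or the same index
+ * of the second window.
+ */
+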
+__global__ void bitonic_sort_merge_kernel(
+    uint64_t* data1, uint64_t data1_size,
+    uint64_t* data2, uint64_t data2_size, uint64_t k, uint64_t j
+) {
+    uint64_t x = blockDim.x * blockIdx.x + threadIdx.x;
+    uint64_t y = j == (k >> 1) ? data_tile_size - x - 1 : x;
+    if (x >= data_tile_size || x >= data1_size || y >= data2_size) return;
+
+    uint64_t valx = data1[x];
+    uint64_t valy = data2[y];
+    if (valx > valy) {
+        data1[x] = valy;
+        data2[y] = valx;
+    }
+}
+
+/**
+ * @brief Split each stage whose stride exceeds data_tile_size into window pairs
+ *
+ * @param data full feature array (host)
+ * @param data_size number of elements in data
+ * @param d_data1 device buffer for the first window of each pair
+ * @param d_data2 device buffer for the second window of each pair
+ */
+void bitonic_sort_merge(uint64_t* data, uint64_t data_size, uint64_t* d_data1, uint64_t* d_data2) {
+
+    int threadsPerBlock = BLOCKSIZE;
+    int blocksPerGrid = (data_tile_size + threadsPerBlock - 1) / threadsPerBlock;
+
+    for (uint64_t k = 2 * data_tile_size; (k >> 1) < data_size; k <<= 1) {
+        for (uint64_t j = (k >> 1); j >= data_tile_size; j >>= 1) {
+            for (uint64_t idx = 0; idx < data_size; idx += data_tile_size) {
+                if (idx & j) continue;
+                uint64_t data1_l = idx;
+                uint64_t data1_r = idx + data_tile_size - 1;
+                uint64_t data2_l = j == (k >> 1) ? data1_r + 2 * (j - (data1_r % j)) - 1 : data1_l ^ j;
+                uint64_t data2_r = j == (k >> 1) ? data1_l + 2 * (j - (data1_l % j)) - 1 : data1_r ^ j;
+                if (data2_l >= data_size) continue;
+                data2_r = std::min(data2_r, data_size - 1);
+                uint64_t data1_len = data1_r - data1_l + 1;
+                uint64_t data2_len = data2_r - data2_l + 1;
+                cudaMemcpy(d_data1, data + data1_l, data1_len * sizeof(uint64_t), cudaMemcpyHostToDevice);
+                cudaMemcpy(d_data2, data + data2_l, data2_len * sizeof(uint64_t), cudaMemcpyHostToDevice);
+
+                bitonic_sort_merge_kernel<<<blocksPerGrid, threadsPerBlock>>>(
+                    d_data1, data1_len,
+                    d_data2, data2_len, k, j
+                );
+                cudaDeviceSynchronize();
+
+                cudaMemcpy(data + data1_l, d_data1, data1_len * sizeof(uint64_t), cudaMemcpyDeviceToHost);
+                cudaMemcpy(data + data2_l, d_data2, data2_len * sizeof(uint64_t), cudaMemcpyDeviceToHost);
+
+            }
+        }
+        for (uint64_t i = 0; i < data_size; i += data_tile_size) {
+            uint64_t d_data1_size = std::min(data_tile_size, data_size - i);
+            bitonic_sort_tile(data + i, d_data1, d_data1_size);
+        }
+    }
+}
+
+/**
+ * @brief Sort the full feature array with bitonic sort
+ *
+ * @param data array to sort
+ * @param data_size number of elements
+ * @param d_data1 first device staging buffer
+ * @param d_data2 second device staging buffer
+ *
+ * @note the arbitrary-length variant of bitonic sort is used here
+ */
+void bitonic_sort(uint64_t* data, uint64_t data_size, uint64_t* d_data1, uint64_t* d_data2) {
+
+    // first sort every tile independently
+    for (uint64_t i = 0; i < data_size; i += data_tile_size) {
+        uint64_t d_data1_size = std::min(data_tile_size, data_size - i);
+        bitonic_sort_tile(data + i, d_data1, d_data1_size);
+    }
+
+    // then handle the stages whose stride exceeds data_tile_size, window pair by window pair
+    bitonic_sort_merge(data, data_size, d_data1, d_data2);
+
+}
+
+__global__ void mark_tile_kernel(uint64_t* input, uint64_t data_size, uint64_t* output, uint64_t first_mark) {
+    int idx = blockDim.x * blockIdx.x + threadIdx.x;
+    if (idx >= data_size) return;
+    if (idx == 0)
+        output[idx] = first_mark;
+    else
+        output[idx] = input[idx] != input[idx - 1];
+}
+
+void mark_tile(uint64_t* data, uint64_t data_size, uint64_t* input, uint64_t* marks, uint64_t first_mark) {
+
+    int threadsPerBlock = BLOCKSIZE;
+    int blocksPerGrid = (data_size + threadsPerBlock - 1) / threadsPerBlock;
+
+    cudaMemcpy(input, data, data_size * sizeof(uint64_t), cudaMemcpyHostToDevice);
+
+    mark_tile_kernel<<<blocksPerGrid, threadsPerBlock>>>(input, data_size, marks, first_mark);
+    cudaDeviceSynchronize();
+}
+
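+/*
+ * Layered Brent-Kung scan. Each call to scan_tile_kernel with stride i scans
+ * independent BLOCKSIZE-wide groups of every i-th element of marks in shared
+ * memory (an up-sweep builds partial sums, a down-sweep propagates them), so
+ * after the pass those sampled positions hold inclusive per-group scans and
+ * layer i covers BLOCKSIZE^i elements in total. scan_tile_sum_kernel then
+ * adds, for every position, the totals of the preceding groups of each layer,
+ * yielding the full inclusive prefix sum in scan[].
+ */
+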
+__global__ void scan_tile_kernel(uint64_t* mark, uint64_t data_size, uint64_t i) {
+    __shared__ uint64_t smem[BLOCKSIZE];
+
+    int tid = threadIdx.x;
+    int idx = blockDim.x * blockIdx.x + threadIdx.x;
+
+    uint64_t now = (idx + 1) * i - 1;
+    smem[tid] = now < data_size ? mark[now] : 0ULL;
+
+    for (uint32_t i = 2; i <= BLOCKSIZE; i <<= 1) {
+        __syncthreads();
+        uint64_t pos = (tid + 1) * i - 1;
+        if (pos < BLOCKSIZE)
+            smem[pos] += smem[pos - (i >> 1)];
+    }
+
+    for (uint32_t i = BLOCKSIZE / 2; i > 1; i >>= 1) {
+        __syncthreads();
+        uint64_t pos = (tid + 1) * i - 1;
+        if (pos + (i >> 1) < BLOCKSIZE)
+            smem[pos + (i >> 1)] += smem[pos];
+    }
+
+    __syncthreads();
+    if (now < data_size)
+        mark[now] = smem[tid];
+}
+
+__global__ void scan_tile_sum_kernel(uint64_t* marks, uint64_t data_size, uint64_t* scan) {
+    int idx = blockDim.x * blockIdx.x + threadIdx.x;
+    if (idx >= data_size) return; // guard threads past the end of the tile
+    uint64_t val = marks[idx];
+    for (uint64_t i = BLOCKSIZE; i < data_size; i *= BLOCKSIZE) {
+        uint64_t now = (idx + 1) / i;
+        if (now == 0 || ((idx + 1) % i) == 0) continue;
+        val += marks[now * i - 1];
+    }
+    scan[idx] = val;
+}
+
+void scan_tile(uint64_t* marks, uint64_t data_size, uint64_t* scan) {
+
+    int threadsPerBlock = BLOCKSIZE;
+
+    for (uint64_t i = 1; i < data_size; i *= threadsPerBlock) {
+        uint64_t oper_size = data_size / i;
+        int blocksPerGrid = (oper_size + threadsPerBlock - 1) / threadsPerBlock;
+
+        scan_tile_kernel<<<blocksPerGrid, threadsPerBlock>>>(marks, data_size, i);
+        cudaDeviceSynchronize();
+
+    }
+
+    int blocksPerGrid = (data_size + threadsPerBlock - 1) / threadsPerBlock;
+    scan_tile_sum_kernel<<<blocksPerGrid, threadsPerBlock>>>(marks, data_size, scan);
+    cudaDeviceSynchronize();
+}
+
+__global__ void write_tile_kernel(uint64_t* data, uint64_t* scan, uint64_t data_size, uint64_t* write) {
+    int idx = blockDim.x * blockIdx.x + threadIdx.x;
+    if (idx >= data_size) return;
+    if (scan[idx] >= 1)
+        write[scan[idx] - 1] = data[idx];
+}
+
+void write_tile(
+    uint64_t* data, uint64_t* scan, uint64_t data_size,
+    uint64_t* write, uint64_t* result, uint64_t& result_size
+) {
+    cudaMemcpy(&result_size, scan + data_size - 1, sizeof(uint64_t), cudaMemcpyDeviceToHost);
+
+    int threadsPerBlock = BLOCKSIZE;
+    int blocksPerGrid = (data_size + threadsPerBlock - 1) / threadsPerBlock;
+
+    write_tile_kernel<<<blocksPerGrid, threadsPerBlock>>>(data, scan, data_size, write);
+    cudaDeviceSynchronize();
+
+    cudaMemcpy(result, write, result_size * sizeof(uint64_t), cudaMemcpyDeviceToHost);
+}
+
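+/*
+ * Device memory budget: the whole pipeline runs out of three staging buffers
+ * of data_tile_size (1 << 24) uint64_t each, roughly 3 x 128 MiB, regardless
+ * of the total input size; every tile is streamed through them for both the
+ * sort and the mark/scan/write passes.
+ */
+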
+/**
+ * @brief Deduplicate a large feature dataset; top-level driver
+ *
+ * @param input_path directory containing the feature files
+ * @param output_path directory for the deduplicated output files
+ *
+ * @note pipeline: sort all features --> per tile, mark unique features
+ *       --> per tile, prefix-sum (scan) the marks --> per tile, scatter the
+ *       unique features by their scan index --> write the tiles to output_path
+ */
+void unique_extraction(std::string input_path, std::string output_path = "unique_feature.output") {
+
+    uint64_t data_size = 0;
+    size_t file_num = 0;
+
+    std::string folderPath = input_path;
+    std::regex filePattern(R"(uint64-(\d+)-(\d+)\.in)");
+
+    std::vector<std::string> filename;
+    std::vector<uint64_t> data_len;
+
+    try {
+        for (const auto& entry : std::filesystem::directory_iterator(folderPath)) {
+            if (!entry.is_regular_file()) continue;
+
+            filename.push_back(entry.path().filename().string());
+
+            std::smatch matches;
+
+            if (std::regex_match(filename[file_num], matches, filePattern)) {
+                data_len.push_back(std::stoull(matches[1].str()));
+                data_size += data_len[file_num];
+                file_num ++;
+            } else {
+                // skip files that do not match the expected name pattern,
+                // keeping filename[] and data_len[] aligned
+                filename.pop_back();
+            }
+        }
+    } catch (const std::filesystem::filesystem_error& e) {
+        std::cerr << "Filesystem error: " << e.what() << "\n";
+    }
+
+    std::cout << "number of data: " << data_size << "\n";
+    std::cout << "number of files: " << file_num << "\n";
+
+    std::cout << "Max threads: " << omp_get_max_threads() << "\n";
+
+    std::vector<uint64_t> data(data_size);
+
+    uint64_t file_cnt = 0;
+
+    bool file_check = true;
+
+    #pragma omp parallel for
+    for (int i = 0; i < file_num; i++) {
+        FILE *fp_input;
+        fp_input = std::fopen((folderPath + filename[i]).c_str(), "r");
+
+        if (!fp_input) {
+            std::cout << "failed to open file " << filename[i] << "\n";
+            continue;
+        }
+
+        uint64_t it = 0;
+        #pragma omp atomic capture
+        {
+            it = file_cnt;
+            file_cnt += data_len[i];
+        }
+
+        uint64_t tmp, check = 0;
+        while (std::fscanf(fp_input, "%llu", &tmp) != EOF) {
+            data[it++] = tmp;
+            check ++;
+        }
+        if (check != data_len[i]) {
+            file_check = false;
+        }
+
+        std::fclose(fp_input);
+
+    }
+
+    if (!file_check || file_cnt != data_size) {
+        std::cout << file_check << " " << file_cnt << "\n";
+        std::cerr << "failed to read input files\n";
+    } else {
+        std::cout << "successfully read input files\n";
+    }
+
+    uint64_t *d_data1, *d_data2, *d_data3;
+    cudaMalloc(&d_data1, data_tile_size * sizeof(uint64_t));
+    cudaMalloc(&d_data2, data_tile_size * sizeof(uint64_t));
+    cudaMalloc(&d_data3, data_tile_size * sizeof(uint64_t));
+
+    printf("sort of all elements start\n");
+    // sort the full feature array
+    bitonic_sort(data.data(), data_size, d_data1, d_data2);
+    printf("sort of all elements end\n");
+
+    // auto result = std::make_unique<uint64_t[]>(data_tile_size);
+
+    uint64_t end_size = 0;
+
+    for (uint64_t i = 0; i < data_size; i += data_tile_size) {
+        uint64_t tile_size = std::min(data_tile_size, data_size - i);
+
+        // is the first element of this tile unique?
+        uint64_t first_mark = i == 0 || data[i - 1] != data[i];
+        // mark unique features
+        mark_tile(data.data() + i, tile_size, d_data1, d_data2, first_mark);
+
+        // scan the marks
+        scan_tile(d_data2, tile_size, d_data3);
+
+        uint64_t result_size;
+        // write back, reusing the already-consumed front of data[] as the result buffer
+        write_tile(d_data1, d_data3, tile_size, d_data2, data.data() + end_size, result_size);
+
+        end_size += result_size;
+
+        printf("tile range %llu finished\n", (unsigned long long)(i / data_tile_size));
+    }
+
+    int out_file_num = (end_size + FILE_DATA_SIZE - 1) / FILE_DATA_SIZE;
+
+    std::cout << "number of out elements: " << end_size << "\n";
+    std::cout << "number of out files: " << out_file_num << "\n";
+
+    #pragma omp parallel for
+    for (int i = 0; i < out_file_num; i++) {
+
+        int tid = omp_get_thread_num();
+        uint64_t out_start = i * FILE_DATA_SIZE;
+        int out_len = std::min((uint64_t)FILE_DATA_SIZE, end_size - out_start);
+        char path[100];
+        snprintf(path, sizeof(path), "./tester/output/uint64-%llu-%llu.out",
+                 (unsigned long long)out_len, (unsigned long long)i);
+
+        FILE* fp;
+
+        fp = std::fopen(path, "w");
+
+        if (!fp) {
+            std::cerr << "failed to create file " << path << "\n";
+            continue;
+        }
+
+        for (uint64_t j = 0; j < out_len; j ++)
+            fprintf(fp, "%llu ", data[out_start + j]);
+
+        fclose(fp);
+    }
+
+    cudaFree(d_data1);
+    cudaFree(d_data2);
+    cudaFree(d_data3);
+}
+
+// test
+int main() {
+    unique_extraction("./tester/input/", "./tester/output/");
+    return 0;
+}
+
+// 381344736
\ No newline at end of file
diff --git a/unique_extraction/tester/generate_file.cpp b/unique_extraction/tester/generate_file.cpp
new file mode 100644
index 0000000..0f463e2
--- /dev/null
+++ b/unique_extraction/tester/generate_file.cpp
@@ -0,0 +1,80 @@
+#include <omp.h>
+#include <algorithm>
+#include <cstdint>
+#include <cstdio>
+#include <iostream>
+#include <random>
+#include <vector>
+
+#define FILE_DATA_SIZE (1 << 24)
+
+int main(int argc, char* argv[]) {
+
+    // uint64_t data_size = 1 << 27;
+    uint64_t data_size = 1 << 20;
+
+    if (argc > 2) {
+        std::cerr << "invalid arguments\n";
+        return 1;
+    } else if (argc == 2) {
+        bool ok = true;
+        uint64_t tmp = 0, size = 0;
+        for (int i = 0; argv[1][i] != '\0'; i++) {
+            if ('0' <= argv[1][i] && argv[1][i] <= '9')
+                tmp = tmp * 10 + (argv[1][i] - '0');
+            else {
+                ok = false;
+                std::cerr << "please enter a positive integer\n";
+            }
+            size ++;
+        }
+        if (!ok) return 1;
+        if (size >= 10) {
+            std::cerr << "too large!!!\n";
+            return 1;
+        }
+        data_size = tmp;
+    }
+
+    printf("data_size: %llu\n", (unsigned long long)data_size);
+
+    int n = (data_size + FILE_DATA_SIZE - 1) / FILE_DATA_SIZE;
+
+    const int num_threads = omp_get_max_threads();
+
+    std::vector<uint64_t> seed(num_threads);
+
+    {
+        std::random_device rd;
+        std::mt19937_64 gen(rd());
+        for (int i = 0; i < num_threads; i++)
+            seed[i] = gen();
+    }
+
+    #pragma omp parallel for
+    for (int i = 0; i < n; i++) {
+
+        int tid = omp_get_thread_num();
+        int data_len = std::min((uint64_t)FILE_DATA_SIZE, data_size - i * FILE_DATA_SIZE);
+        char path[100];
+        snprintf(path, sizeof(path), "./tester/input/uint64-%llu-%llu.in",
+                 (unsigned long long)data_len, (unsigned long long)i);
+
+        FILE* fp;
+
+        fp = std::fopen(path, "w");
+
+        if (!fp) {
+            fprintf(stderr, "failed to create file %s\n", path);
+            continue;
+        } else {
+            fprintf(stdout, "created %s\n", path);
+        }
+
+        std::mt19937_64 gen(seed[tid]);
+        std::uniform_int_distribution<uint64_t> dis;
+
+        for (uint64_t j = 0; j < data_len; j ++)
+            fprintf(fp, "%llu ", dis(gen) & 0xFFFFFFFF00000000);
+
+        fclose(fp);
+    }
+
+    return 0;
+}
\ No newline at end of file
diff --git a/unique_extraction/tester/test.cpp b/unique_extraction/tester/test.cpp
new file mode 100644
index 0000000..76f9812
--- /dev/null
+++ b/unique_extraction/tester/test.cpp
@@ -0,0 +1,19 @@
+#include <omp.h>
+#include <iostream>
+#include <vector>
+
+int main() {
+    int n = 100, cnt = 0;
+    std::vector<int> arr(n);
+    #pragma omp parallel for
+    for (int i = 0; i < n; i++) {
+        int now;
+        #pragma omp atomic capture
+        now = cnt ++;
+        arr[i] = now;
+    }
+
+    for (auto x : arr) {
+        std::cout << x << " ";
+    } std::cout << "\n";
+}
\ No newline at end of file