From 1fdcbb003d88b4028dcf189e0b9a71fe0b0fd86e Mon Sep 17 00:00:00 2001 From: Guangzhen Jin Date: Tue, 6 Jan 2026 01:06:24 -0500 Subject: [PATCH 1/5] Update modules. --- .../anvil/gpu/Core/nccl/cuda-12.8_2.26.2.lua | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 modulefiles/anvil/gpu/Core/nccl/cuda-12.8_2.26.2.lua diff --git a/modulefiles/anvil/gpu/Core/nccl/cuda-12.8_2.26.2.lua b/modulefiles/anvil/gpu/Core/nccl/cuda-12.8_2.26.2.lua new file mode 100644 index 00000000..a6297e84 --- /dev/null +++ b/modulefiles/anvil/gpu/Core/nccl/cuda-12.8_2.26.2.lua @@ -0,0 +1,20 @@ + +whatis([[Name : nccl]]) +whatis([[Version : cuda-12.8_2.26.2]]) +whatis([[Short description : Optimized primitives for collective multi-GPU communication.]]) +whatis([[Configure options : unknown, software installed outside of Spack]]) + +help([[Optimized primitives for collective multi-GPU communication.]]) + +depends_on("cuda/12.8.0") + +local modroot="/apps/anvilgpu/external/apps/nccl/cuda12.8/2.26.2" +prepend_path("LIBRARY_PATH", modroot.."/lib", ":") +prepend_path("LD_LIBRARY_PATH", modroot.."/lib", ":") +prepend_path("CPATH", modroot.."/include", ":") +prepend_path("PKG_CONFIG_PATH", modroot.."/lib/pkgconfig", ":") +prepend_path("CMAKE_PREFIX_PATH", modroot.."/", ":") +setenv("NCCL_HOME", modroot) +setenv("RCAC_NCCL_ROOT", modroot) +setenv("RCAC_NCCL_VERSION", "cuda-12.8_2.26.2") + From 9790dfb8d89a62ae823056359aca8666ce7e0404 Mon Sep 17 00:00:00 2001 From: purduercac-docs-bot Date: Tue, 6 Jan 2026 06:06:50 +0000 Subject: [PATCH 2/5] Auto-update app catalog and markdown files --- docs/software/app_catalog.md | 2 +- docs/software/apps_md/nccl.md | 2 +- docs/software/index.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/software/app_catalog.md b/docs/software/app_catalog.md index 6ad59a2a..f0b923a2 100644 --- a/docs/software/app_catalog.md +++ b/docs/software/app_catalog.md @@ -9,7 +9,7 @@ hide: # All software and versions on RCAC clusters 
## Overview -As of **December 17, 2025**, there have been a total of **270** applications with **2469** available versions deployed across **6** RCAC HPC clusters: **ANVIL, BELL, GAUTSCHI, GILBRETH, NEGISHI, SCHOLAR**. +As of **January 06, 2026**, there have been a total of **270** applications with **2470** available versions deployed across **6** RCAC HPC clusters: **ANVIL, BELL, GAUTSCHI, GILBRETH, NEGISHI, SCHOLAR**. ## Applications Catalog diff --git a/docs/software/apps_md/nccl.md b/docs/software/apps_md/nccl.md index 5d11e11d..1b353f89 100644 --- a/docs/software/apps_md/nccl.md +++ b/docs/software/apps_md/nccl.md @@ -14,7 +14,7 @@ Optimized primitives for collective multi-GPU communication. |Cluster|Versions| |---|---| -**ANVIL**|cuda-11.2_2.8.4 (D), cuda-11.0_2.11.4, cuda-11.4_2.11.4 +**ANVIL**|cuda-11.2_2.8.4 (D), cuda-11.0_2.11.4, cuda-11.4_2.11.4, cuda-12.8_2.26.2 (D): Default Module diff --git a/docs/software/index.md b/docs/software/index.md index b59cf932..dd374075 100644 --- a/docs/software/index.md +++ b/docs/software/index.md @@ -9,7 +9,7 @@ hide: # Software and versions on RCAC clusters ## Overview -As of **December 17, 2025**, there have been a total of **270** applications with **2469** available versions deployed across **6** RCAC HPC clusters: **ANVIL, BELL, GAUTSCHI, GILBRETH, NEGISHI, SCHOLAR**. +As of **January 06, 2026**, there have been a total of **270** applications with **2470** available versions deployed across **6** RCAC HPC clusters: **ANVIL, BELL, GAUTSCHI, GILBRETH, NEGISHI, SCHOLAR**. 
You can see [**a full list of all software and version deployed on all RCAC clusters**](app_catalog.md), OR From 66296b9cd6a40432e86dbad3055bbd3339c1f34b Mon Sep 17 00:00:00 2001 From: hkashgar Date: Wed, 7 Jan 2026 18:06:24 -0600 Subject: [PATCH 3/5] LVIS and visualgenome datasets are added --- docs/datasets/ai.md | 4 ++++ docs/datasets/ai/LVIS.md | 23 +++++++++++++++++++++++ docs/datasets/ai/visualgenome.md | 23 +++++++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 docs/datasets/ai/LVIS.md create mode 100644 docs/datasets/ai/visualgenome.md diff --git a/docs/datasets/ai.md b/docs/datasets/ai.md index 79598bce..67f5e561 100644 --- a/docs/datasets/ai.md +++ b/docs/datasets/ai.md @@ -20,7 +20,11 @@ $ module avail datasets/ai ## AI Datasets * [**COCO**](ai/COCO.md) +* [**LVIS**](ai/LVIS.md) * [**PhysicalAI-Robotics-GR00T-Teleop-Sim**](ai/PhysicalAI-Robotics-GR00T-Teleop-Sim.md) * [**PhysicalAI-Robotics-GR00T-X-Embodiment-Sim**](ai/PhysicalAI-Robotics-GR00T-X-Embodiment-Sim.md) * [**PhysicalAI-Robotics-Manipulation-SingleArm**](ai/PhysicalAI-Robotics-Manipulation-SingleArm.md) * [**PhysicalAI-SmartSpaces**](ai/PhysicalAI-SmartSpaces.md) +* [**VisualGenome**](ai/visualgenome.md) + + diff --git a/docs/datasets/ai/LVIS.md b/docs/datasets/ai/LVIS.md new file mode 100644 index 00000000..11c4dd7e --- /dev/null +++ b/docs/datasets/ai/LVIS.md @@ -0,0 +1,23 @@ +# LVIS + +[Back to AI datasets](../ai.md) + +| Field | Value | +|--------|-------| +| **Description** | Progress on object detection is enabled by datasets that focus the research community's attention on open challenges. This process led us from simple images to complex scenes and from bounding boxes to segmentation masks. LVIS is ~2 million high-quality instance segmentation masks for over 1000 entry-level object categories in 164k images. Due to the Zipfian distribution of categories in natural images, LVIS naturally has a long tail of categories with few training samples. 
Given that state-of-the-art deep learning methods for object detection perform poorly in the low-sample regime, we believe that our dataset poses an important and exciting new scientific challenge. | +| **Folder** | `/datasets/ai/LVIS` | +| **Discipline** | AI / computer vision / Segmentation | +| **DOI** | [10.48550/arXiv.1908.03195](https://doi.org/10.48550/arXiv.1908.03195) | +| **Link** | [Access Data](https://www.lvisdataset.org/dataset) | +| **Public** | `True` | +| **Publication Date** | 2019-08-08 | +| **Downloaded** | 2026-01-07 | +| **Data Type** | LMDB, SquashFS, Extracted JPG files on Ceph | +| **Dataset Size** | 33G (extracted) | +| **Number of Files** | 204631 (extracted) | +| **Usage** |
$ module avail
$ module load datasets
$ module load ai/LVIS/2019-08-08
| +| **Usage Policy Link** | https://creativecommons.org/licenses/by/4.0/ | +| **Usage Policy** | The LVIS annotations along with this website are licensed under a Creative Commons Attribution 4.0 License. All LVIS dataset images come from the COCO dataset, which is licensed under the Creative Commons Attribution 4.0 License. | +| **Citation** | Gupta, A., Dollar, P., & Girshick, R. (2019). LVIS: A dataset for large vocabulary instance segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (pp. 5356–5364). | +| **BibTeX** |
📜 View BibTeX citation
@inproceedings{gupta2019lvis,
title={LVIS: A Dataset for Large Vocabulary Instance Segmentation},
author={Gupta, Agrim and Dollar, Piotr and Girshick, Ross},
booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition},
year={2019}
}
+
| diff --git a/docs/datasets/ai/visualgenome.md b/docs/datasets/ai/visualgenome.md new file mode 100644 index 00000000..d5042773 --- /dev/null +++ b/docs/datasets/ai/visualgenome.md @@ -0,0 +1,23 @@ +# visualgenome + +[Back to AI datasets](../ai.md) + +| Field | Value | +|--------|-------| +| **Description** | Visual Genome is a dataset, a knowledge base, an ongoing effort to connect structured image concepts to language. | +| **Folder** | `/datasets/ai/visualgenome` | +| **Discipline** | AI / computer vision / multimodal AI | +| **DOI** | [10.1007/s11263-016-0981-7](https://doi.org/10.1007/s11263-016-0981-7) | +| **Link** | [Access Data](https://homes.cs.washington.edu/~ranjay/visualgenome/api.html) | +| **Public** | `True` | +| **Publication Date** | 2017-02-06 | +| **Downloaded** | 2026-01-07 | +| **Data Type** | LMDB, SquashFS, Extracted JPG files on Ceph | +| **Dataset Size** | 21G (extracted) | +| **Number of Files** | 108,265 (extracted) | +| **Usage** |
$ module avail
$ module load datasets
$ module load ai/visualgenome/2017-02-06
| +| **Usage Policy Link** | http://creativecommons.org/licenses/by/4.0 | +| **Usage Policy** | This dataset is distributed under the Creative Commons Attribution 4.0 International (CC BY 4.0) license. Users may use, share, adapt, and redistribute the data with appropriate credit to the dataset authors and hosting repository. A link to the license must be included (https://creativecommons.org/licenses/by/4.0/), and any modifications should be clearly indicated. Users are responsible for ensuring that the dataset is appropriate for their applications. | +| **Citation** | Krishna, R., Zhu, Y., Groth, O. et al. Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations. Int J Comput Vis 123, 32–73 (2017). https://doi.org/10.1007/s11263-016-0981-7 | +| **BibTeX** |
📜 View BibTeX citation
@article{Krishna2017,
author = {Krishna, Ranjay and Zhu, Yuke and Groth, Oliver and Johnson, Justin and Hata, Kenji and Kravitz, Joshua and Chen, Stephanie and Kalantidis, Yannis and Li, Li-Jia and Shamma, David A. and Bernstein, Michael S. and Fei-Fei, Li},
title = {Visual Genome: Connecting Language and Vision Using Crowdsourced Dense Image Annotations},
journal = {International Journal of Computer Vision},
volume = {123},
number = {1},
pages = {32--73},
year = {2017},
month = {5},
doi = {10.1007/s11263-016-0981-7},
url = {https://doi.org/10.1007/s11263-016-0981-7},
issn = {1573-1405},
abstract = {Despite progress in perceptual tasks such as image classification, computers still perform poorly on cognitive tasks such as image description and question answering. Cognition is core to tasks that involve not just recognizing, but reasoning about our visual world. However, models used to tackle the rich content in images for cognitive tasks are still being trained using the same datasets designed for perceptual tasks. To achieve success at cognitive tasks, models need to understand the interactions and relationships between objects in an image. In this paper, we present the Visual Genome dataset to enable the modeling of such relationships. We collect dense annotations of objects, attributes, and relationships within each image. Specifically, the dataset contains over 108K images where each image has an average of 35 objects, 26 attributes, and 21 pairwise relationships between objects.}
}
+
| From a08037031db262f31039ca0e7611ffd83e4c26b6 Mon Sep 17 00:00:00 2001 From: Guangzhen Jin Date: Thu, 15 Jan 2026 15:09:45 -0500 Subject: [PATCH 4/5] Re-configure giscus discussion. --- overrides/partials/comments.html | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/overrides/partials/comments.html b/overrides/partials/comments.html index c9331417..bc4ae3c7 100644 --- a/overrides/partials/comments.html +++ b/overrides/partials/comments.html @@ -1,15 +1,15 @@ {% if page.file.src_uri.startswith('blog/posts') %}