diff --git a/TAGDS2025.bib b/TAGDS2025.bib new file mode 100644 index 0000000..d9b73b7 --- /dev/null +++ b/TAGDS2025.bib @@ -0,0 +1,295 @@ +@Proceedings{TAGDS2025, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +name = {Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +shortname = {TAGDS2025}, +editor = {Bernardez Gil, Guillermo and Black, Mitchell and Cloninger, Alexander and Doster, Timothy and Emerson, Tegan and Garc{\'\i}a-Redondo, In\'es and Holtz, Chester and Kotak, Mit and Kvinge, Henry and Mishne, Gal and Papillon, Mathilde and Pouplin, Alison and Rainey, Katie and Rieck, Bastian and Telyatnikov, Lev and Yeats, Eric and Wang, Qingsong and Wang, Yusu and Wayland, Jeremy}, +volume = {321}, +year = {2025}, +start = {2025-12-01}, +end = {2025-12-02}, +published = {2026-02-04}, +conference_url = {https://www.tagds.com/events/tag-ds-2025}, +conference_number={1}, +address = {San Diego, California, USA} +} + +@InProceedings{doster25a, +title={1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025): Preface}, +author = {Bernardez Gil, Guillermo and Black, Mitchell and Cloninger, Alexander and Doster, Timothy and Emerson, Tegan and Garc{\'\i}a-Redondo, In\'es and Holtz, Chester and Kotak, Mit and Kvinge, Henry and Mishne, Gal and Papillon, Mathilde and Pouplin, Alison and Rainey, Katie and Rieck, Bastian and Telyatnikov, Lev and Yeats, Eric and Wang, Qingsong and Wang, Yusu and Wayland, Jeremy}, +pages={1-3}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +abstract={The First Annual Topology, Algebra, and Geometry in Data Science (TAG-DS) Conference took place on December 1st and 2nd, 2025 at the University of California, San Diego. The conference featured 4 keynotes, 2 panel discussions, and 35 accepted works (of which 26 were selected for archival publication here), and was attended by over 150 researchers.} +} + +@InProceedings{bernardez25a, +title={Topological Deep Learning Challenge 2025: Expanding the Data Landscape}, +author = {Bern\'ardez, Guillermo and Telyatnikov, Lev and Papillon, Mathilde and Montagna, Marco and Theiler, Raffael and Cornelis, Louisa and Mathe, Johan and Ferriol, Miquel and Vasylenko, Pavlo and Van Looy, Jan-Willem and Testa, Lucia and Neri, Bruno and Genovese, Donatella and Weber, Melanie and Wei, Amaury and Devoto, Alessio and Weers, Alexander and Jankowski, Robert and Cino, Loris and Leko, David and Banf, Michael and M\"uller, Jonas and Grapentin, Thomas and Paik, Taejin and Dutta, Abhijeet and Walter, Hugo and Fontanari, Thomas Vaitses and Ghasemi, Ali and Loi, Dario and S\'aez de Oc\'ariz Borde, Haitz and Aguilar-Arg\"uello, Gabriela and da Rosa, Giovanni B. and Saulus, Th\'eo and Dolores-Cuenca, Eric Rubiel and Di Nino, Leonardo and Leroy, Pierrick and Pandolfo, Mario Edoardo and Cavallo, Andrea and Qin, Yu and Snopov, Pavel and Akbari, Amirreza and Meza-Ch\'avez, Ixchel and Van Langendonck, Louis and Able, Jared and Meshcheryakova, Maria Yuffa and Tsay, Henry and Beni\'c, Luka and Filipiak, Dominik and Liu, Patrick and Liang, Huidong and Santos da Rosa Jr., Alexsandro and Cattai, Tiziana and Borges, Henrique M.
and Grimaldi, Enrico and Lecha, Manuel and Battiloro, Claudio and Liu, Xuan-Chen and Deshpande, Raj and Johnson, Graham and Morgunov, Igor and Micheron, Hugo and Devaux, R\'emi and Jardin, Antoine and Emerson, Tegan and Fink, Olga and Miolane, Nina}, +pages={4-14}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +abstract={This paper describes the 2025 edition of the Topological Deep Learning Challenge: Expanding the Data Landscape, hosted at the first Topology, Algebra, and Geometry in Data Science (TAG-DS) Conference. This year’s challenge aimed to address the data bottleneck in the field by systematically expanding the ecosystem of Topological Deep Learning (TDL). Powered by TopoBench, the challenge was organized into two primary missions: enriching the data landscape with diverse datasets, and advancing core data infrastructure. In particular, participants were invited to contribute to the open-source platform by implementing new dataset loaders, designing new benchmark tasks, or engineering robust, scalable data pipelines. The initiative successfully yielded 44 qualifying submissions. This paper outlines the scope of the competition and summarizes the key results and findings, highlighting the new resources now available to the TDL community.} +} + +@InProceedings{liao25a, +title={LR-RaNN: Lipschitz Regularized Randomized Neural Networks for System Identification}, +author = {Liao, Chunyang}, +openreview = {Ax735X1IqN}, +pages={15-29}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +abstract={Approximating the governing equations from data is of great importance in studying dynamical systems. In this paper, we propose randomized neural networks (RaNN) for approximating the governing equations of systems of ordinary differential equations. In contrast with other neural-network-based methods, training a randomized neural network amounts to solving a least-squares problem, which significantly reduces the computational complexity. Moreover, we introduce a regularization term to the loss function, which improves the generalization ability. We provide an estimate of the Lipschitz constant of our proposed model and analyze its generalization error. Our empirical experiments on synthetic datasets demonstrate that our proposed method achieves good generalization performance and is easy to implement.} +} + +@InProceedings{huntsman25a, +title={Peeling metric spaces of strict negative type}, +author = {Huntsman, Steve}, +openreview = {hNo7q50nVb}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +pages={30-44}, +year = {2025}, +abstract={We describe a unified and computationally tractable framework for finding outliers in, and maximum-diversity subsets of, finite metric spaces of strict negative type. Examples of such spaces include finite subsets of Euclidean space and finite subsets of a sphere without antipodal points.
The latter accounts for state-of-the-art text embeddings, and we apply our framework in this context to sketch a hallucination mitigation strategy and separately to a class of path diversity optimization problems with a real-world example.} +} + +@InProceedings{huang25a, +title={Bilevel Optimization for Hyperparameter Learning in Support Vector Machines}, +author={Huang, Lei and Nie, Jiawang and Wang, Jiajia and Zhong, Suhan}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +pages={45-55}, +year = {2025}, +openreview = {ekmkZ1ekoy}, +abstract={Bilevel optimization is central to many machine learning tasks, including hyperparameter learning and adversarial training. We present a novel single-level +reformulation for bilevel problems with convex lower-level objective functions and linear constraints. Our method eliminates auxiliary Lagrange multiplier variables by expressing them in terms of the original decision variables, which allows the reformulated problem to preserve the same dimension as the original problem. We apply our method to support vector machines (SVMs) and evaluate it on several benchmark tasks, demonstrating its efficiency and scalability.} +} + +@InProceedings{campos25a, +title = {Topological Preservation in Temporal Link Prediction}, +author = {Campos, Marco and Doyle, Casey and Krofcheck, Daniel and Simpson, Sarah and Xi, Michael and Ott, William and Adams, Henry}, +pages={56-78}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {efxWlBNAQa}, +abstract = {Temporal link prediction seeks to model evolving networks to forecast future or missing interactions. Although many methods in this field achieve strong predictive performance, interpretability remains limited, especially in high-stakes domains. We address this by showing how topological data analysis can assess the faithfulness of learned representations to the underlying data, providing a pipeline for comparing temporal topological structure across model outputs. We further introduce a prototypical model that enables this analysis while maintaining predictive power. Taken together, these contributions lay the groundwork for models whose representations are more transparent to end users.} +} + +@InProceedings{girshfeld25a, +title = {Neural Local Wasserstein Regression}, +author = {Girshfeld, Inga and Chen, Xiaohui}, +openreview = {4yxmrrWlzY}, +pages={79-89}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +abstract = {We study the estimation problem of distribution-on-distribution regression, where both predictors and responses are probability measures. Existing approaches typically rely on a global optimal transport map or tangent-space linearization, which can be restrictive in approximation capacity and distort geometry in multivariate underlying domains. In this paper, we propose the \emph{Neural Local Wasserstein Regression}, a flexible nonparametric framework that models regression through locally defined transport maps in Wasserstein space. Our method builds on the analogy with classical kernel regression: kernel weights based on the 2-Wasserstein distance localize estimators around reference measures, while neural networks parameterize transport operators that adapt flexibly to complex data geometries.
This localized perspective broadens the class of admissible transformations and avoids the limitations of global map assumptions and linearization structures. We develop a practical training procedure using DeepSets-style architectures and Sinkhorn-approximated losses, combined with a greedy reference selection strategy for scalability. Through synthetic experiments on Gaussian and mixture models, as well as distributional prediction tasks on MNIST, we demonstrate that our approach effectively captures nonlinear and high-dimensional distributional relationships that elude existing methods.} +} + +@InProceedings{zhang25a, +title = {Learning Polynomial Activation Functions for Deep Neural Networks}, +author = {Zhang, Linghao and Nie, Jiawang and Tang, Tingting}, +pages={90-99}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {E7InqVkZkv}, +abstract = {Activation functions are crucial for deep neural networks. This work frames the problem of training neural networks with learnable polynomial activation functions as a polynomial optimization problem, which is solvable by the Moment-SOS hierarchy. This represents a fundamental departure from the conventional paradigm of training deep neural networks, which relies on local optimization methods like backpropagation and gradient descent. Numerical experiments are presented to demonstrate the accuracy and robustness of optimal parameter recovery in the presence of noise.} +} + +@InProceedings{faldet25a, +title = {Kernel Mean Embeddings of \texttt{[CLS]} Tokens in ViTs}, +author = {Faldet, Mason}, +pages={100-113}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {2vGT4j03GR}, +abstract = {We study the geometry of Vision Transformer (\texttt{ViT}) \texttt{[CLS]} representations across layers through the lens of reproducing kernel Hilbert spaces (RKHS). For each layer and class, we estimate class-conditional kernel mean embeddings (KMEs) and measure separability with maximum mean discrepancy (MMD), tuning the kernel and a per-layer PCA projection via a bootstrap-based signal-to-noise (SNR) objective. We further propose a layer-wise confidence signal by evaluating class mean embeddings along a query’s \texttt{[CLS]} trajectory. On ImageNet-1k subsets, this exploratory, proof-of-concept analysis indicates that the RKHS framework can capture meaningful geometric and semantic signals in \texttt{[CLS]} representations across ViT layers. We make no SOTA claims; our contribution is a unified framework and practical recipe for probing \texttt{[CLS]} geometry.} +} + +@InProceedings{karris25a, +title={Looping back: {C}ircular nodes revisited with novel applications in the radio frequency domain}, +author={Karris, Nicholas and Durell, Luke and Flores, Javier and Emerson, Tegan}, +year = {2025}, +pages={114-125}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {PDlIU7sfqY}, +abstract={It can be shown that Stable Diffusion has a permutation-invariance property with respect to the rows of Contrastive Language-Image Pretraining (CLIP) embedding matrices. This inspired the novel observation that these embeddings can naturally be interpreted as point clouds in a Wasserstein space rather than as matrices in a Euclidean space.
This perspective opens up new possibilities for understanding the geometry of embedding space. For example, when interpolating between embeddings of two distinct prompts, we propose reframing the interpolation problem as an optimal transport problem. By solving this optimal transport problem, we compute a shortest path (or geodesic) between embeddings that captures a more natural and geometrically smooth transition through the embedding space. This results in smoother and more coherent intermediate (interpolated) images when rendered by the Stable Diffusion generative model. We conduct experiments to investigate this effect, comparing the quality of interpolated images produced using optimal transport to those generated by other standard interpolation methods. The novel optimal transport--based approach presented indeed gives smoother image interpolations, suggesting that viewing the embeddings as point clouds (rather than as matrices) better reflects and leverages the geometry of the embedding space.} +} + +@InProceedings{shen25a, +title = {Advancing Local Clustering on Graphs via Compressive Sensing: Semi-supervised and Unsupervised Methods}, +author = {Shen, Zhaiming and Kang, Sung Ha}, +pages={126-146}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {zMRanIHJkT}, +abstract = {Local clustering aims to identify specific substructures within a large graph without any additional structural information about the graph. These substructures are typically small compared to the overall graph, enabling the problem to be approached by finding a sparse solution to a linear system associated with the graph Laplacian. In this work, we first propose a method for identifying specific local clusters when very few labeled data are given, which we term semi-supervised local clustering. We then extend this approach to the unsupervised setting when no prior information on labels is available. The proposed methods involve randomly sampling the graph, applying diffusion-based local cluster extraction, and then examining the overlap among the results to find each cluster. We establish the co-membership conditions for any pair of nodes, and rigorously prove the correctness of our methods. Additionally, we conduct extensive experiments to demonstrate that the proposed methods achieve state-of-the-art results in the low label-rate regime.} +} + +@InProceedings{dey25a, +title={Quasi Zigzag Persistence: A Topological Framework for Analyzing Time-Varying Data}, +author={Dey, Tamal K. and Samaga, Shreyas N.}, +pages={147-165}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {xftVcYSqjV}, +abstract={In this paper, we propose Quasi Zigzag Persistent Homology (QZPH) as a framework for analyzing time-varying data by integrating multiparameter persistence and zigzag persistence. To this end, we introduce a stable topological invariant that captures both static and dynamic features at different scales. We present an algorithm to compute this invariant efficiently.
We show that it enhances machine learning models when applied to tasks such as sleep-stage detection, demonstrating its effectiveness in capturing the evolving patterns in time-varying datasets.} +} + +@InProceedings{jorgenson25a, +title = {Scratching the Surface: Reflections of Training Data Properties in Early CNN Filters}, +author = {Jorgenson, Grayson and Heine, Cassie and Cosbey, Robin and Reynolds, Abby and Brown, Davis and Kvinge, Henry and Doster, Timothy and Emerson, Tegan}, +pages={166-175}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {SJnTlsmP7Y}, +abstract = {The ability to understand deep learning models by analyzing their weights is key to advancing the growing field of model interpretability. In this article, we study information about the training data of convolutional neural network (CNN) models that can be gleaned from analyzing just the first layer of their learned filters. While gradient updates to the model weights during training become increasingly complex in the deeper layers of typical CNNs, the updates to the initial layer can be simple enough that high-level dataset properties such as image sharpness, noisiness, and color distribution are prominently featured. We give a simple mathematical justification for this and demonstrate how training dataset properties appear in this way for several standard CNNs on a number of datasets.} +} + +@InProceedings{marrinan25a, +title={Looping back: {C}ircular nodes revisited with novel applications in the radio frequency domain}, +author={Marrinan, Tim and Kay, Bill and Myers, Audun and Wofford, Rachel and Emerson, Tegan}, +pages={176-190}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {ixu5CAJJAk}, +abstract={In domains with complex-structured data, some relationships cannot be easily modeled using only real-valued Euclidean features. In spite of this misalignment, most modern machine learning methods default to representing data in just that way. By failing to appropriately encode the data structure, the performance and reliability of the resulting machine learning models can be degraded. In prior work, Kirby and Miranda introduced the concept of a circular node, a type of artificial neuron engineered to represent periodic data or angular information [11]. These nodes can be implemented directly in many traditional neural network architectures to more faithfully model periodic relationships. However, since they have garnered relatively little attention compared to their non-circular counterparts, circular nodes have largely been excluded from open-source machine learning libraries. In this paper, we re-investigate circular nodes in the context of modern machine learning libraries, and demonstrate the advantages of circular representations in applications with complex-structured data. Our experiments center around radio frequency signals, which naturally encode circular relationships. We illustrate that a neural network composed of a single circular node can learn the phase offset of a radio frequency signal. We show that a fully-connected neural network made up of multiple layers of circular nodes can successfully classify digital modulation constellation points and achieves accuracy gains over its traditional counterpart when the model size is small.
Finally, we demonstrate notable performance improvements on the task of automatic modulation classification through the integration of a circular node layer into traditional convolutional networks.} +} + +@InProceedings{myers25a, +title = {HAGGLE: Get a better deal using a Hierarchical Autoencoder for Graph Generation and Latent-space Expressivity}, +author = {Myers, Audun and Young, Stephen J. and Emerson, Tegan}, +pages={191-202}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {FP7p1mBssP}, +abstract = {Generating realistic and diverse graph structures is a challenge with broad applications across various scientific and engineering disciplines. A common approach involves learning a compressed latent space where graphs are represented by a collection of node-level embeddings, often via methods such as a Graph Autoencoder (GAE). A fundamental challenge arises when we try to generate new graphs by sampling from this space. While many deep learning methods like diffusion models, Variational Autoencoders (VAEs), and Generative Adversarial Networks (GANs) can successfully generate new points in the latent space, they fail to capture the inherent relational dependencies between the node embeddings. This leads to decoded graphs that lack structural coherence and fail to replicate essential real-world properties. Alternatively, generating a single graph-level embedding and then decoding it to new node embeddings is also fundamentally limited, as the pooling methods needed to create the graph-level embedding are inherently lossy and discard crucial local structural information. We present a three-stage hierarchical framework called Hierarchical Autoencoder for Graph Generation and Latent-space Expressivity (HAGGLE) that addresses these limitations through systematic bridging of node-level representations with graph-level generation. The framework trains a Graph Autoencoder for node embeddings, employs a Pooling Autoencoder for graph-level compression, and utilizes a size-conditioned GAN for new graph generation. This approach generates structurally coherent graphs while providing useful graph-level embeddings for downstream tasks.} +} + +@InProceedings{chen25a, +title = {Multi-View Graph Learning with Graph-Tuple}, +author = {Chen, Shiyu and Huang, Ningyuan (Teresa) and Villar, Soledad}, +pages={203-216}, +year = {2025}, +openreview = {s4ezAuj5xM}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +abstract = {Graph Neural Networks (GNNs) typically scale with the number of graph edges, making them well suited for sparse graphs but less efficient on dense graphs, such as point clouds or molecular interactions. A common remedy is to sparsify the graph via similarity thresholding or distance pruning, but this forces an arbitrary choice of a single interaction scale and discards crucial information from other scales. To overcome this limitation, we introduce a multi-view graph-tuple framework. Instead of a single graph, our graph-tuple framework partitions the graph into disjoint subgraphs, capturing primary local interactions and weaker, long-range connections. We then learn multi-view representations from the graph-tuple via a heterogeneous message-passing architecture inspired by the theory of non-commuting operators, which we formally prove is strictly more expressive and guarantees a lower oracle risk compared to single-graph message-passing models.
We instantiate our framework on two scientific domains: molecular property prediction from feature-scarce Coulomb matrices and cosmological parameter inference from geometric point clouds. On both applications, our multi-view graph-tuple models demonstrate better performance than single-graph baselines, highlighting the power and versatility of our multi-view approach.} +} + +@InProceedings{boufalis25a, +title = {Symmetry-Aware Graph Metanetwork Autoencoders: Model Merging through Parameter Canonicalization}, +author = {Boufalis, Odysseas and Carrasco-Pollo, Jorge and Rosenthal, Joshua and Terres-Caballero, Eduardo and Garc{\'\i}a-Castellanos, Alejandro}, +pages={217-235}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {tA5AOPHvrM}, +abstract = {Neural network parameterizations exhibit inherent symmetries that yield multiple equivalent minima within the loss landscape. Scale Graph Metanetworks (ScaleGMNs) explicitly leverage these symmetries by proposing an architecture equivariant to both permutation and parameter scaling transformations. Previous work by Ainsworth et al. (2023) addressed permutation symmetries through a computationally intensive combinatorial assignment problem, demonstrating that leveraging permutation symmetries alone can map networks into a shared loss basin. In this work, we extend their approach by also incorporating scaling symmetries, presenting an autoencoder framework utilizing ScaleGMNs as invariant encoders. Experimental results demonstrate that our method aligns Implicit Neural Representations (INRs) and Convolutional Neural Networks (CNNs) under both permutation and scaling symmetries without explicitly solving the assignment problem. This approach ensures that similar networks naturally converge within the same basin, facilitating model merging, i.e., smooth linear interpolation while avoiding regions of high loss. The code is publicly available on our GitHub repository.} +} + +@InProceedings{bhaskar25a, +title = {DYMAG: Rethinking Message Passing Using Dynamical-systems-based Waveforms}, +author = {Bhaskar, Dhananjay and Sun, Xingzhi and Zhang, Yanlei and Xu, Charles and Afrasiyabi, Arman and Viswanath, Siddharth and Fasina, Oluwadamilola and Wolf, Guy and Perlmutter, Michael and Krishnaswamy, Smita}, +pages={236-268}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {WYiiqnArMy}, +abstract = {We present DYMAG, a graph neural network based on a novel form of message aggregation. Standard message-passing neural networks, which often aggregate local neighbors via mean-aggregation, can be regarded as convolving with a simple rectangular waveform which is non-zero only on 1-hop neighbors of every vertex. Here, we go beyond such local averaging. We instead convolve the node features with more sophisticated waveforms generated using dynamics such as the heat equation, wave equation, and the Sprott model (an example of chaotic dynamics). Furthermore, we use snapshots of these dynamics at different time points to create waveforms at many effective scales. Theoretically, we show that these dynamic waveforms can capture salient information about the graph, including connected components, connectivity, and cycle structures.
Empirically, we test DYMAG on both real and synthetic benchmarks to establish that DYMAG outperforms baseline models on recovery of graph persistence, recovery of the generating parameters of random graphs, and property prediction for proteins, molecules, and materials. Our code is available at~\url{https://github.com/KrishnaswamyLab/DYMAG}.} +} + +@InProceedings{dennehy25a, +title = {LINSCAN - A Linearity Based Clustering Algorithm}, +author = {Dennehy, Andrew and Zou, Xiaoyu and Semnani, Shabnam J. and Fialko, Yuri and Cloninger, Alexander}, +pages={269-286}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {tEmWgZqJph}, +abstract = {DBSCAN and OPTICS are powerful algorithms for identifying clusters of points in domains where few assumptions can be made about the structure of the data. In this paper, we leverage these strengths and introduce a new algorithm, LINSCAN, designed to seek lineated clusters that are difficult to find and isolate with existing methods. In particular, by embedding points as normal distributions approximating their local neighborhoods and leveraging a distance function derived from the Kullback-Leibler divergence, LINSCAN can detect and distinguish lineated clusters that are spatially close but have orthogonal covariances. We demonstrate how LINSCAN can be applied to seismic data to identify active faults, including intersecting faults, and determine their orientation. Finally, we discuss the properties a generalization of DBSCAN and OPTICS must have in order to retain the stability benefits of these algorithms.} +} + +@InProceedings{bosca25a, +title = {Topological Signatures of ReLU Neural Network Activation Patterns}, +author = {Bosca, Vicente and Rask, Tatum and Tanweer, Sunia and Tawfeek, Andrew R. and Stone, Branden}, +pages={287-301}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {Q88w76j2Dd}, +abstract = {This paper explores the topological signatures of ReLU neural network activation patterns. We consider feedforward neural networks with ReLU activation functions and analyze the polytope decomposition of the feature space induced by the network. In particular, we investigate the Fiedler partition of the dual graph and show that it appears to correlate with the decision boundary in the case of binary classification. Additionally, in a regression task, we compute the homology of the cellular decomposition to draw out similar patterns in behavior between the training loss and the polyhedral cell count as the model is trained.} +} + +@InProceedings{kvinge25a, +title={Can Neural Networks Learn Small Algebraic Worlds? An Investigation Into the Group-theoretic Structures Learned By Narrow Models Trained To Predict Group Operations}, +author={Kvinge, Henry and Aguilar, Andrew and Farnsworth, Nayda and O'Brien, Grace and Jasper, Robert and Scullen, Sarah and Jenne, Helen}, +year = {2025}, +pages={302-312}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {74Xpma3Lha}, +abstract={While a real-world research program in mathematics may be guided by a motivating question, the process of mathematical discovery is typically open-ended. Ideally, exploration needed to answer the original question will reveal new structures, patterns, and insights that are valuable in their own right.
This contrasts with the exam-style paradigm in which the machine learning community typically applies AI to math. To maximize progress in mathematics using AI, we will need to go beyond simple question answering. With this in mind, we explore the extent to which narrow models trained to solve a fixed mathematical task learn broader mathematical structure that can be extracted by a researcher or other AI system. As a basic test case for this, we use the task of training a neural network to predict a group operation (for example, performing modular arithmetic or composition of permutations). We describe a suite of tests designed to assess whether the model captures significant group-theoretic notions such as the identity element, commutativity, or subgroups. Through extensive experimentation we find evidence that models learn representations capable of capturing abstract algebraic properties. For example, we find hints that models capture the commutativity of modular arithmetic. We are also able to train linear classifiers that reliably distinguish between elements of certain subgroups (even though no labels for these subgroups are included in the data). On the other hand, we are unable to extract notions such as the concept of the identity element. Together, our results suggest that in some cases the representations of even small neural networks can be used to distill interesting abstract structure from new mathematical objects.} +} + +@InProceedings{geisz25a, +title = {A Model of Flocking Using Sheaves}, +author = {Geisz, Joseph}, +pages={313-337}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {VK0d2Ba7df}, +abstract = {Sheaves have been used recently to model information on networks, such as the spread of opinions in a social network. Dynamical systems on networks model the evolving states of nodes on graphs. Using these lenses of sheaf theory and network dynamics, we explore a model of flocking. We describe from this perspective what it means for birds to come to consensus on flight velocities, and a system of ordinary differential equations (ODEs) that describes this consensus process. Then we couple these consensus dynamics with flight dynamics to describe a model of flocking. We include numerous visualizations of examples in two dimensions.} +} + +@InProceedings{rubaiyat25a, +title={Robust Hyperspectral Anomaly Detection via Bootstrap Sampling-based Subspace Modeling in the Signed Cumulative Distribution Transform Domain}, +author={Rubaiyat, Abu Hasnat Mohammad and Vincent, Jordan and Olson, Colin}, +pages={338-348}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {MGOMEaj9Fl}, +abstract={This paper introduces an approach that combines a transport-based model of hyperspectral pixels and a bootstrap sampling strategy to construct an ensemble of background subspaces in the signed cumulative distribution transform (SCDT) domain for robust anomaly detection in hyperspectral images characterized by complex and varied background clutter. Each spectral signal (i.e., pixel) is treated as an observation of an unknown background template pattern that has undergone unknown, but restricted, deformation due to factors such as shadowing, look angle, or atmospheric absorption.
When combined with the SCDT—a transport-based transform with close connections to one-dimensional Wasserstein embedding—the model induces convexity of hyperspectral pixel representations in the SCDT space and facilitates the construction of subspace models that characterize dominant background signals. A bootstrap sampling strategy in the ambient domain yields an ensemble of background subspace models in the SCDT domain, and anomalies are subsequently detected as pixels that do not conform to any of the learned subspace models. Experiments on six benchmark hyperspectral datasets demonstrate that the approach effectively captures spectral variability and reliably detects anomalies with low false alarm rates, outperforming state-of-the-art comparison methods in most cases. These results underscore the potential of transport-based subspace representations for robust and interpretable hyperspectral anomaly detection across diverse imaging scenarios. Finally, the geodesic properties of the SCDT embedding are leveraged to provide a geometric interpretation of the method via visualization of paths between test signals and their subspace projections.} +} + +@InProceedings{zou25a, +title = {Precision Matrix based Feature Learning Mechanism for Subspace Clustering Task}, +author = {Zou, Haohan and Cloninger, Alexander}, +pages={349-361}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {Ict46MdQEl}, +abstract = {In recent studies, the \emph{Average Gradient Outer Product} (AGOP) has emerged as a powerful tool to understand feature learning in deep neural networks, particularly in supervised learning tasks such as image classification. In this work, we extend this perspective to unsupervised learning, particularly the task of subspace clustering. Building on the existing kernel-based subspace clustering approaches, we introduce a feature learning mechanism which iteratively projects the training data onto an averaged precision matrix. Notably, the relevant feature learning matrix we derived is the inverse of the traditional AGOP matrix. +We explain this from the viewpoint of isotropic variance control in the latent domain, and illustrate that the proposed projection mechanism refines the data distribution and orthogonalizes the data in the latent space. Empirically, we visualize the evolution of the projected data distribution, the kernel matrix, and the emergence of pronounced block-diagonal structure in the affinity matrix on a toy example. Furthermore, our approach outperforms the state-of-the-art kernel-based subspace clustering method KTRR [Zhen et al., 2020] on the Extended Yale B dataset [Lee et al., 2005]. The full experiment implementation is available on GitHub at \url{https://github.com/HaohanZou/AGOP_subspace_clustering}.} +} + +@InProceedings{price25a, +title = {Self-Organizing Maps for the Reconstruction of Images in Pixel Permuted Image Stacks}, +author = {Price, Connor and Kott, David and Peterson, Chris and Kirby, Michael}, +year = {2025}, +pages={362-374}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +openreview = {e0oVJQp7M9}, +abstract = {A color digital photograph, of resolution $a\times b$, is typically stored as an $a\times b\times3$ array. The vector of length 3, sitting at a pixel, encodes its color in terms of intensity measurements of the red, green, and blue color bands. We call this length 3 vector a ``pixel pole''.
Suppose we are given the $ab$ individual pixel poles as a jumbled collection of length 3 vectors and we wish to reconstruct the image, with no advance knowledge concerning its content. In other words, we are given $ab$ color squares and we wish to ``solve the pixel puzzle,'' meaning we want to reconstruct the original unknown image. As one can imagine, this is a difficult problem. In this paper, we show how to rebuild the images in a stack of $N$ distinct $a\times b$ color digital images from the $ab$ stacked pixel poles. More precisely, this paper shows how to use ``Self-Organizing Maps'' (SOMs) to algorithmically reconstruct, unsupervised, a stack of distinct $a\times b$ color images from the collection of $1\times1\times3N$ stacked pixel poles using no a priori information about the original images. We evaluate the accuracy of the reconstructions as a function of $N$, we determine the effectiveness of the algorithm when the individual images are corrupted by noise, and we assess the model's performance when pure noise images are included in the stack.} +} + +@InProceedings{amarel25a, +title = {On Predicting Material Fracture from Persistence Homology: Or, Which Topological Features Are Informative Covariates?}, +year = {2025}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +author = {Amarel, James and Hengartner, Nicolas and Miller, Robyn and Migliori, Benjamin and Hope, Daniel and Casleton, Emily and Skurikhin, Alexei and Lawrence, Earl and Kunde, Gerd J.}, +openreview = {67P63h2AUS}, +pages={375-388}, +abstract = {We apply topological data analysis to characterize the simulated evolution of cracks in heterogeneous materials. Using persistence homology, we derive covariates for survival analysis, enabling lifetime prediction within a generalized linear modeling framework. Zeroth-homology features alone reproduce the ensemble survival curves of distinct materials, revealing that coarse topological statistics retain predictive signal even when important geometric details are abstracted away. We further compare the predictive capability of neural networks trained directly on damage fields with those trained on persistence-homology-derived representations, finding that the latter achieve superior accuracy. Finally, we investigate patched persistence homology, which encodes local topological information by computing persistence within spatial subdomains. This localized variant bridges global and geometric perspectives, capturing the collective mechanisms that govern fracture and may eventually yield representations better suited to the design and evaluation of fracture emulators.} +} + +@InProceedings{mccracken25a, +title = {Interpreting deep neural networks trained on elementary $p$ groups reveals algorithmic structure}, +author = {McCracken, Gavin and Ayestas Hilgert, Arthur and Wei, Sihui and Moisescu-Pareja, Gabriela and Wang, Zhaoyue and Love, Jonathan}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +pages={389-402}, +year = {2025}, +openreview = {NTXCFymNWu}, +abstract = {We interpret deep neural networks (DNNs) trained on elementary $p$ group multiplication, examining what our results reveal about major deep learning hypotheses.
Assisted by tools from computational algebra and geometry, we perform analyses at multiple levels of abstraction, finding we can fully characterize and describe: 1) the global algorithm DNNs learn on this task---the multidimensional Chinese remainder theorem; 2) the neural representations, which are 2-tori $\mathbb{T}^2$ embedded in $\mathbb{R}^4$ encoding coset structure; 3) the individual neuron activation patterns, which activate solely on coset structures of the group. Furthermore, we find neurons learn the Lee metric to organize their activation strengths. Overall, our work serves as an exposition toward understanding how DNNs learn group multiplications.} +} + +@InProceedings{czaja25a, +title = {Comparative Analysis in Pre-image Algorithms of Kernel PCA}, +booktitle = {Proceedings of the 1st Conference on Topology, Algebra, and Geometry in Data Science (TAG-DS 2025)}, +author = {Czaja, Wojciech and Ji, Canran}, +pages={403-413}, +year = {2025}, +openreview = {7M2fkdmQDy}, +abstract = {We study the kernel PCA (kPCA) pre-image problem in image denoising by benchmarking classical algorithms and introducing two neural network adversarial pre-imaging models, DCGAN-KPCAnet and WGAN-KPCAnet. Our results show that WGAN-KPCAnet delivers superior reconstruction results and is robust to noise compared to baselines.} +} + diff --git a/amarel25a/amarel25a.pdf b/amarel25a/amarel25a.pdf new file mode 100755 index 0000000..8b25142 Binary files /dev/null and b/amarel25a/amarel25a.pdf differ diff --git a/bernardez25a/bernardez25a.pdf b/bernardez25a/bernardez25a.pdf new file mode 100644 index 0000000..72f8add Binary files /dev/null and b/bernardez25a/bernardez25a.pdf differ diff --git a/bhaskar25a/bhaskar25a.pdf b/bhaskar25a/bhaskar25a.pdf new file mode 100755 index 0000000..88605e2 Binary files /dev/null and b/bhaskar25a/bhaskar25a.pdf differ diff --git a/bosca25a/bosca25a.pdf b/bosca25a/bosca25a.pdf new file mode 100755 index 0000000..de49877 Binary files /dev/null and b/bosca25a/bosca25a.pdf differ diff --git a/boufalis25a/boufalis25a.pdf b/boufalis25a/boufalis25a.pdf new file mode 100755 index 0000000..5f374ff Binary files /dev/null and b/boufalis25a/boufalis25a.pdf differ diff --git a/campos25a/campos25a.pdf b/campos25a/campos25a.pdf new file mode 100755 index 0000000..be1c589 Binary files /dev/null and b/campos25a/campos25a.pdf differ diff --git a/chen25a/chen25a.pdf b/chen25a/chen25a.pdf new file mode 100755 index 0000000..7cd997f Binary files /dev/null and b/chen25a/chen25a.pdf differ diff --git a/czaja25a/czaja25a.pdf b/czaja25a/czaja25a.pdf new file mode 100755 index 0000000..d522558 Binary files /dev/null and b/czaja25a/czaja25a.pdf differ diff --git a/dennehy25a/dennehy25a.pdf b/dennehy25a/dennehy25a.pdf new file mode 100755 index 0000000..5b1e19a Binary files /dev/null and b/dennehy25a/dennehy25a.pdf differ diff --git a/dey25a/dey25a.pdf b/dey25a/dey25a.pdf new file mode 100755 index 0000000..2793f81 Binary files /dev/null and b/dey25a/dey25a.pdf differ diff --git a/doster25a/doster25a.pdf b/doster25a/doster25a.pdf new file mode 100644 index 0000000..2577349 Binary files /dev/null and b/doster25a/doster25a.pdf differ diff --git a/faldet25a/faldet25a.pdf b/faldet25a/faldet25a.pdf new file mode 100755 index 0000000..ef9164f Binary files /dev/null and b/faldet25a/faldet25a.pdf differ diff --git a/geisz25a/geisz25a.pdf b/geisz25a/geisz25a.pdf new file mode 100755 index 0000000..989d58b Binary files /dev/null and b/geisz25a/geisz25a.pdf differ diff --git a/girshfeld25a/girshfeld25a.pdf
b/girshfeld25a/girshfeld25a.pdf new file mode 100755 index 0000000..58d1e3c Binary files /dev/null and b/girshfeld25a/girshfeld25a.pdf differ diff --git a/huang25a/huang25a.pdf b/huang25a/huang25a.pdf new file mode 100755 index 0000000..e266d27 Binary files /dev/null and b/huang25a/huang25a.pdf differ diff --git a/huntsman25a/huntsman25a.pdf b/huntsman25a/huntsman25a.pdf new file mode 100755 index 0000000..5a92c6c Binary files /dev/null and b/huntsman25a/huntsman25a.pdf differ diff --git a/jorgenson25a/jorgenson25a.pdf b/jorgenson25a/jorgenson25a.pdf new file mode 100755 index 0000000..3e88cd3 Binary files /dev/null and b/jorgenson25a/jorgenson25a.pdf differ diff --git a/karris25a/karris25a.pdf b/karris25a/karris25a.pdf new file mode 100755 index 0000000..18b6983 Binary files /dev/null and b/karris25a/karris25a.pdf differ diff --git a/kvinge25a/kvinge25a.pdf b/kvinge25a/kvinge25a.pdf new file mode 100755 index 0000000..dcfb04f Binary files /dev/null and b/kvinge25a/kvinge25a.pdf differ diff --git a/liao25a/liao25a.pdf b/liao25a/liao25a.pdf new file mode 100755 index 0000000..3b861a2 Binary files /dev/null and b/liao25a/liao25a.pdf differ diff --git a/marrinan25a/marrinan25a.pdf b/marrinan25a/marrinan25a.pdf new file mode 100755 index 0000000..4d35cf8 Binary files /dev/null and b/marrinan25a/marrinan25a.pdf differ diff --git a/mccracken25a/mccracken25a.pdf b/mccracken25a/mccracken25a.pdf new file mode 100755 index 0000000..2e7ae63 Binary files /dev/null and b/mccracken25a/mccracken25a.pdf differ diff --git a/myers25a/myers25a.pdf b/myers25a/myers25a.pdf new file mode 100755 index 0000000..bd94394 Binary files /dev/null and b/myers25a/myers25a.pdf differ diff --git a/price25a/price25a.pdf b/price25a/price25a.pdf new file mode 100755 index 0000000..689f808 Binary files /dev/null and b/price25a/price25a.pdf differ diff --git a/rubaiyat25a/rubaiyat25a.pdf b/rubaiyat25a/rubaiyat25a.pdf new file mode 100755 index 0000000..2226a91 Binary files /dev/null and b/rubaiyat25a/rubaiyat25a.pdf differ diff --git a/shen25a/shen25a.pdf b/shen25a/shen25a.pdf new file mode 100755 index 0000000..afe3fe1 Binary files /dev/null and b/shen25a/shen25a.pdf differ diff --git a/v321permissions/amarel25apermission.pdf b/v321permissions/amarel25apermission.pdf new file mode 100755 index 0000000..186cc0e Binary files /dev/null and b/v321permissions/amarel25apermission.pdf differ diff --git a/v321permissions/bernardez25apermission.pdf b/v321permissions/bernardez25apermission.pdf new file mode 100644 index 0000000..c20b2d1 Binary files /dev/null and b/v321permissions/bernardez25apermission.pdf differ diff --git a/v321permissions/bhaskar25apermission.pdf b/v321permissions/bhaskar25apermission.pdf new file mode 100755 index 0000000..c8a33a8 Binary files /dev/null and b/v321permissions/bhaskar25apermission.pdf differ diff --git a/v321permissions/bosca25apermission.pdf b/v321permissions/bosca25apermission.pdf new file mode 100755 index 0000000..a51be6d Binary files /dev/null and b/v321permissions/bosca25apermission.pdf differ diff --git a/v321permissions/boufalis25apermission.pdf b/v321permissions/boufalis25apermission.pdf new file mode 100755 index 0000000..d77eae0 Binary files /dev/null and b/v321permissions/boufalis25apermission.pdf differ diff --git a/v321permissions/campos25apermission.pdf b/v321permissions/campos25apermission.pdf new file mode 100755 index 0000000..d3bda82 Binary files /dev/null and b/v321permissions/campos25apermission.pdf differ diff --git a/v321permissions/chen25apermission.pdf
b/v321permissions/chen25apermission.pdf new file mode 100755 index 0000000..cf0691e Binary files /dev/null and b/v321permissions/chen25apermission.pdf differ diff --git a/v321permissions/czaja25apermission.pdf b/v321permissions/czaja25apermission.pdf new file mode 100755 index 0000000..812a597 Binary files /dev/null and b/v321permissions/czaja25apermission.pdf differ diff --git a/v321permissions/dennehy25apermission.pdf b/v321permissions/dennehy25apermission.pdf new file mode 100644 index 0000000..e31b866 Binary files /dev/null and b/v321permissions/dennehy25apermission.pdf differ diff --git a/v321permissions/dey25apermission.pdf b/v321permissions/dey25apermission.pdf new file mode 100755 index 0000000..9ca5bd6 Binary files /dev/null and b/v321permissions/dey25apermission.pdf differ diff --git a/v321permissions/doster25apermission.pdf b/v321permissions/doster25apermission.pdf new file mode 100755 index 0000000..831bbac Binary files /dev/null and b/v321permissions/doster25apermission.pdf differ diff --git a/v321permissions/faldet25apermission.pdf b/v321permissions/faldet25apermission.pdf new file mode 100755 index 0000000..52277f2 Binary files /dev/null and b/v321permissions/faldet25apermission.pdf differ diff --git a/v321permissions/geisz25apermission.pdf b/v321permissions/geisz25apermission.pdf new file mode 100755 index 0000000..f90b0d4 Binary files /dev/null and b/v321permissions/geisz25apermission.pdf differ diff --git a/v321permissions/girshfeld25apermission.pdf b/v321permissions/girshfeld25apermission.pdf new file mode 100755 index 0000000..af873a2 Binary files /dev/null and b/v321permissions/girshfeld25apermission.pdf differ diff --git a/v321permissions/huang25apermission.pdf b/v321permissions/huang25apermission.pdf new file mode 100755 index 0000000..838f164 Binary files /dev/null and b/v321permissions/huang25apermission.pdf differ diff --git a/v321permissions/huntsman25apermission.pdf b/v321permissions/huntsman25apermission.pdf new file mode 100644 index 0000000..5110482 Binary files /dev/null and b/v321permissions/huntsman25apermission.pdf differ diff --git a/v321permissions/jorgenson25apermission.pdf b/v321permissions/jorgenson25apermission.pdf new file mode 100755 index 0000000..8c157eb Binary files /dev/null and b/v321permissions/jorgenson25apermission.pdf differ diff --git a/v321permissions/karris25apermission.pdf b/v321permissions/karris25apermission.pdf new file mode 100755 index 0000000..bf510d0 Binary files /dev/null and b/v321permissions/karris25apermission.pdf differ diff --git a/v321permissions/kvinge25apermission.pdf b/v321permissions/kvinge25apermission.pdf new file mode 100755 index 0000000..0826ab3 Binary files /dev/null and b/v321permissions/kvinge25apermission.pdf differ diff --git a/v321permissions/liao25apermission.pdf b/v321permissions/liao25apermission.pdf new file mode 100755 index 0000000..9ed8600 Binary files /dev/null and b/v321permissions/liao25apermission.pdf differ diff --git a/v321permissions/marrinan25apermission.pdf b/v321permissions/marrinan25apermission.pdf new file mode 100755 index 0000000..75d45d0 Binary files /dev/null and b/v321permissions/marrinan25apermission.pdf differ diff --git a/v321permissions/mccracken25apermission.pdf b/v321permissions/mccracken25apermission.pdf new file mode 100755 index 0000000..6d2f23b Binary files /dev/null and b/v321permissions/mccracken25apermission.pdf differ diff --git a/v321permissions/myers25apermission.pdf b/v321permissions/myers25apermission.pdf new file mode 100755 index 0000000..604a116 Binary files
/dev/null and b/v321permissions/myers25apermission.pdf differ diff --git a/v321permissions/price25apermission.pdf b/v321permissions/price25apermission.pdf new file mode 100755 index 0000000..0dde114 Binary files /dev/null and b/v321permissions/price25apermission.pdf differ diff --git a/v321permissions/rubaiyat25apermission.pdf b/v321permissions/rubaiyat25apermission.pdf new file mode 100755 index 0000000..47e0a48 Binary files /dev/null and b/v321permissions/rubaiyat25apermission.pdf differ diff --git a/v321permissions/shen25apermission.pdf b/v321permissions/shen25apermission.pdf new file mode 100755 index 0000000..faab371 Binary files /dev/null and b/v321permissions/shen25apermission.pdf differ diff --git a/v321permissions/zhang25apermission.pdf b/v321permissions/zhang25apermission.pdf new file mode 100755 index 0000000..5c5e499 Binary files /dev/null and b/v321permissions/zhang25apermission.pdf differ diff --git a/v321permissions/zou25apermission.pdf b/v321permissions/zou25apermission.pdf new file mode 100755 index 0000000..0cc1fcb Binary files /dev/null and b/v321permissions/zou25apermission.pdf differ diff --git a/zhang25a/zhang25a.pdf b/zhang25a/zhang25a.pdf new file mode 100755 index 0000000..3ad7c00 Binary files /dev/null and b/zhang25a/zhang25a.pdf differ diff --git a/zou25a/zou25a.pdf b/zou25a/zou25a.pdf new file mode 100755 index 0000000..0f0d3de Binary files /dev/null and b/zou25a/zou25a.pdf differ