From f2d658459cb3e19397b0ba6d5fcf3e39bee0c286 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Fri, 31 Oct 2025 16:09:10 -0700 Subject: [PATCH 1/8] Rework sequence querying & updating --- .../api/data/DatabaseMigrationService.java | 10 +-- .../labkey/api/data/dialect/SqlDialect.java | 8 ++- assay/src/org/labkey/assay/AssayModule.java | 4 +- .../core/CoreMigrationSchemaHandler.java | 3 +- .../core/dialect/PostgreSql92Dialect.java | 66 +++++++++++-------- .../DataClassMigrationSchemaHandler.java | 5 +- .../ExperimentMigrationSchemaHandler.java | 3 +- .../issue/IssueMigrationSchemaHandler.java | 3 +- wiki/src/org/labkey/wiki/WikiModule.java | 2 +- 9 files changed, 53 insertions(+), 51 deletions(-) diff --git a/api/src/org/labkey/api/data/DatabaseMigrationService.java b/api/src/org/labkey/api/data/DatabaseMigrationService.java index 4717a69f32a..5fb7ae8e71c 100644 --- a/api/src/org/labkey/api/data/DatabaseMigrationService.java +++ b/api/src/org/labkey/api/data/DatabaseMigrationService.java @@ -4,7 +4,6 @@ import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.labkey.api.data.DatabaseMigrationConfiguration.DefaultDatabaseMigrationConfiguration; -import org.labkey.api.data.DatabaseMigrationService.MigrationSchemaHandler.Sequence; import org.labkey.api.data.SimpleFilter.AndClause; import org.labkey.api.data.SimpleFilter.FilterClause; import org.labkey.api.data.SimpleFilter.InClause; @@ -24,7 +23,6 @@ import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -66,12 +64,10 @@ default void registerMigrationFilter(MigrationFilter filter) {} return null; } - default void copySourceTableToTargetTable(DatabaseMigrationConfiguration configuration, TableInfo sourceTable, TableInfo targetTable, DbSchemaType schemaType, Map schemaSequenceMap, MigrationSchemaHandler schemaHandler) {}; + default void copySourceTableToTargetTable(DatabaseMigrationConfiguration configuration, TableInfo sourceTable, TableInfo targetTable, DbSchemaType schemaType, MigrationSchemaHandler schemaHandler) {}; interface MigrationSchemaHandler { - record Sequence(String schemaName, String tableName, String columnName, long lastValue) {} - // Marker for tables to declare themselves as site-wide (no container filtering) FieldKey SITE_WIDE_TABLE = FieldKey.fromParts("site-wide"); @@ -107,7 +103,7 @@ record Sequence(String schemaName, String tableName, String columnName, long las // container filtering or rows in a provisioned table not copied due to domain data filtering.) 
void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilter notCopiedFilter); - void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map> sequenceMap); + void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema); } class DefaultMigrationSchemaHandler implements MigrationSchemaHandler @@ -310,7 +306,7 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte } @Override - public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map> sequenceMap) + public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema) { } } diff --git a/api/src/org/labkey/api/data/dialect/SqlDialect.java b/api/src/org/labkey/api/data/dialect/SqlDialect.java index 22dd8446ae4..0edb407cf39 100644 --- a/api/src/org/labkey/api/data/dialect/SqlDialect.java +++ b/api/src/org/labkey/api/data/dialect/SqlDialect.java @@ -2093,9 +2093,11 @@ public boolean shouldTest() return null; } - // Returns a SQL query that selects the last auto-increment values where they're non-null. Required columns are: - // SchemaName, TableName, ColumnName, and LastValue. - public String getSelectSequencesSql() + public record Sequence(String schemaName, String tableName, String columnName, Long lastValue) {} + + // Returns information about all auto-increment / serial sequences associated with a table. PostgreSQL tables can + // have more than one. Sequence value will be null if the sequence hasn't been incremented yet. + public @NotNull Collection getAutoIncrementSequences(TableInfo table) { throw new UnsupportedOperationException(getClass().getSimpleName() + " does not implement"); } diff --git a/assay/src/org/labkey/assay/AssayModule.java b/assay/src/org/labkey/assay/AssayModule.java index 0c86fa88455..39b8ddc4d22 100644 --- a/assay/src/org/labkey/assay/AssayModule.java +++ b/assay/src/org/labkey/assay/AssayModule.java @@ -331,8 +331,8 @@ public Set getSchemaNames() public Set getProvisionedSchemaNames() { return Set.of( - AbstractTsvAssayProvider.ASSAY_SCHEMA_NAME, - PlateMetadataDomainKind.PROVISIONED_SCHEMA_NAME + AbstractTsvAssayProvider.ASSAY_SCHEMA_NAME, + PlateMetadataDomainKind.PROVISIONED_SCHEMA_NAME ); } diff --git a/core/src/org/labkey/core/CoreMigrationSchemaHandler.java b/core/src/org/labkey/core/CoreMigrationSchemaHandler.java index 53ac07fd310..e47dceb5142 100644 --- a/core/src/org/labkey/core/CoreMigrationSchemaHandler.java +++ b/core/src/org/labkey/core/CoreMigrationSchemaHandler.java @@ -24,7 +24,6 @@ import org.labkey.api.util.GUID; import java.util.List; -import java.util.Map; import java.util.Set; class CoreMigrationSchemaHandler extends DatabaseMigrationService.DefaultMigrationSchemaHandler implements DatabaseMigrationService.MigrationFilter @@ -175,7 +174,7 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container } @Override - public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map> sequenceMap) + public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema) { new SqlExecutor(getSchema()).execute("ALTER TABLE core.Containers ADD CONSTRAINT FK_Containers_Containers FOREIGN KEY (Parent) REFERENCES core.Containers(EntityId)"); new SqlExecutor(getSchema()).execute("ALTER TABLE core.ViewCategory ADD CONSTRAINT 
FK_ViewCategory_Parent FOREIGN KEY (Parent) REFERENCES core.ViewCategory(RowId)"); diff --git a/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java b/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java index 663010c76af..52dbb60e160 100644 --- a/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java +++ b/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java @@ -19,7 +19,9 @@ import org.jetbrains.annotations.NotNull; import org.labkey.api.data.DbScope; import org.labkey.api.data.ParameterMarkerInClauseGenerator; +import org.labkey.api.data.SQLFragment; import org.labkey.api.data.SqlSelector; +import org.labkey.api.data.TableInfo; import org.labkey.api.data.dialect.BasePostgreSqlDialect; import org.labkey.api.data.dialect.DialectStringHandler; import org.labkey.api.data.dialect.JdbcHelper; @@ -213,35 +215,41 @@ protected Pattern getSQLScriptSplitPattern() } @Override - public String getSelectSequencesSql() + public @NotNull Collection getAutoIncrementSequences(TableInfo table) { - return """ - SELECT - s.relname AS SequenceName, -- Not used - tns.nspname AS SchemaName, - t.relname AS TableName, - a.attname AS ColumnName, - seq.last_value AS LastValue, - sns.nspname AS SequenceSchema -- Not used. In theory, sequence could live in a different schema, but not our practice - FROM - pg_depend d - JOIN - pg_class s ON d.objid = s.oid -- The sequence - JOIN - pg_namespace sns ON s.relnamespace = sns.oid - JOIN - pg_class t ON d.refobjid = t.oid -- The table - JOIN - pg_namespace tns ON t.relnamespace = tns.oid - JOIN - pg_attribute a ON d.refobjid = a.attrelid AND d.refobjsubid = a.attnum - JOIN - pg_sequences seq ON s.relname = seq.SequenceName - WHERE - s.relkind = 'S' -- Sequence - AND t.relkind IN ('r', 'P') -- Table (regular table or partitioned table) - AND d.deptype IN ('a', 'i') -- Automatic dependency for DEFAULT or index-related for PK - AND seq.last_value IS NOT NULL - """; + SQLFragment sql = new SQLFragment(""" + SELECT SchemaName, TableName, ColumnName, LastValue FROM ( + SELECT + s.relname AS SequenceName, -- Not used + tns.nspname AS SchemaName, + t.relname AS TableName, + a.attname AS ColumnName, + seq.last_value AS LastValue, + sns.nspname AS SequenceSchema -- Not used. In theory, sequence could live in a different schema, but not our practice + FROM + pg_depend d + JOIN + pg_class s ON d.objid = s.oid -- The sequence + JOIN + pg_namespace sns ON s.relnamespace = sns.oid + JOIN + pg_class t ON d.refobjid = t.oid -- The table + JOIN + pg_namespace tns ON t.relnamespace = tns.oid + JOIN + pg_attribute a ON d.refobjid = a.attrelid AND d.refobjsubid = a.attnum + JOIN + pg_sequences seq ON s.relname = seq.SequenceName AND tns.nspname = seq.SchemaName -- maybe sns.nspname instead? but that's slower... + WHERE + s.relkind = 'S' -- Sequence + AND t.relkind IN ('r', 'P') -- Table (regular table or partitioned table) + AND d.deptype IN ('a', 'i') -- Automatic dependency for DEFAULT or index-related for PK + ) + WHERE SchemaName ILIKE ? AND TableName ILIKE ? 
+ """, + table.getSchema().getName(), + table.getName() + ); + return new SqlSelector(table.getSchema(), sql).getCollection(Sequence.class); } } diff --git a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java index 24492cf2bb1..9190dc6f7ea 100644 --- a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java @@ -30,7 +30,6 @@ import java.util.Collection; import java.util.Collections; import java.util.HashSet; -import java.util.Map; import java.util.Set; class DataClassMigrationSchemaHandler extends DefaultMigrationSchemaHandler @@ -148,7 +147,7 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte } @Override - public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map> sequenceMap) + public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema) { // Experiment shouldn't mess with Biologics tables, but it gets the job done @@ -162,7 +161,7 @@ public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema s TableInfo sourceTable = biologicsSourceSchema.getTable("SequenceIdentity"); TableInfo targetTable = biologicsTargetSchema.getTable("SequenceIdentity"); - DatabaseMigrationService.get().copySourceTableToTargetTable(configuration, sourceTable, targetTable, DbSchemaType.Module, sequenceMap.get("biologics"), new DefaultMigrationSchemaHandler(biologicsTargetSchema) + DatabaseMigrationService.get().copySourceTableToTargetTable(configuration, sourceTable, targetTable, DbSchemaType.Module, new DefaultMigrationSchemaHandler(biologicsTargetSchema) { @Override public FilterClause getTableFilter(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) diff --git a/experiment/src/org/labkey/experiment/ExperimentMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/ExperimentMigrationSchemaHandler.java index 9515e96a490..2b1450cdc8c 100644 --- a/experiment/src/org/labkey/experiment/ExperimentMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/ExperimentMigrationSchemaHandler.java @@ -22,7 +22,6 @@ import org.labkey.experiment.api.ExperimentServiceImpl; import java.util.List; -import java.util.Map; import java.util.Set; class ExperimentMigrationSchemaHandler extends DefaultMigrationSchemaHandler @@ -123,7 +122,7 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container } @Override - public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map> sequenceMap) + public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema) { new SqlExecutor(getSchema()).execute("ALTER TABLE exp.ExperimentRun ADD CONSTRAINT FK_Run_WorfklowTask FOREIGN KEY (WorkflowTask) REFERENCES exp.ProtocolApplication (RowId) MATCH SIMPLE ON DELETE SET NULL"); new SqlExecutor(getSchema()).execute("ALTER TABLE exp.Object ADD CONSTRAINT FK_Object_Object FOREIGN KEY (OwnerObjectId) REFERENCES exp.Object (ObjectId)"); diff --git a/issues/src/org/labkey/issue/IssueMigrationSchemaHandler.java b/issues/src/org/labkey/issue/IssueMigrationSchemaHandler.java index d744555cafa..d1dc462c5b8 100644 --- a/issues/src/org/labkey/issue/IssueMigrationSchemaHandler.java +++ 
b/issues/src/org/labkey/issue/IssueMigrationSchemaHandler.java @@ -20,7 +20,6 @@ import org.labkey.api.util.logging.LogHelper; import java.util.HashSet; -import java.util.Map; import java.util.Set; public class IssueMigrationSchemaHandler extends DefaultMigrationSchemaHandler @@ -54,7 +53,7 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte } @Override - public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map> sequenceMap) + public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema) { LOG.info(" Deleting related issues, comments, and issues rows associated with {} issues", ISSUE_IDS.size()); diff --git a/wiki/src/org/labkey/wiki/WikiModule.java b/wiki/src/org/labkey/wiki/WikiModule.java index c4e297f05cf..2287744d497 100644 --- a/wiki/src/org/labkey/wiki/WikiModule.java +++ b/wiki/src/org/labkey/wiki/WikiModule.java @@ -142,7 +142,7 @@ public List getTablesToCopy() } @Override - public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema, Map> sequenceMap) + public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema) { new SqlExecutor(getSchema()).execute("ALTER TABLE comm.Pages ADD CONSTRAINT FK_Pages_PageVersions FOREIGN KEY (PageVersionId) REFERENCES comm.PageVersions (RowId)"); new SqlExecutor(getSchema()).execute("ALTER TABLE comm.Pages ADD CONSTRAINT FK_Pages_Parent FOREIGN KEY (Parent) REFERENCES comm.Pages (RowId)"); From 71f05f9193d0236c32ede8fac6fec9b0b4acc0f2 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Sat, 1 Nov 2025 15:23:19 -0700 Subject: [PATCH 2/8] Copy assay results. Fix Sample Type delete of ObjectIds. Fix not-copied queries for no-DataId data classes and no-RowId sample types. Clear deleted samples from assay wells. 
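Patch 1 above replaces the dialect's raw getSelectSequencesSql() string with a per-table SqlDialect.getAutoIncrementSequences(TableInfo) call that returns Sequence records (schemaName, tableName, columnName, lastValue). A minimal sketch of how a migration step might consume that result to reseed sequences in the target database; the resetSequences helper, the default "<table>_<column>_seq" naming, and the ALTER SEQUENCE statement are assumptions for illustration, not code from this patch series:

import java.util.Collection;

import org.labkey.api.data.SQLFragment;
import org.labkey.api.data.SqlExecutor;
import org.labkey.api.data.TableInfo;
import org.labkey.api.data.dialect.SqlDialect;
import org.labkey.api.data.dialect.SqlDialect.Sequence;

class SequenceResetSketch
{
    // Reads the source table's sequences via the new dialect API and reseeds the
    // corresponding (assumed identically named) sequences in the target schema.
    static void resetSequences(TableInfo sourceTable, TableInfo targetTable)
    {
        SqlDialect dialect = sourceTable.getSqlDialect();
        Collection<Sequence> sequences = dialect.getAutoIncrementSequences(sourceTable);

        for (Sequence seq : sequences)
        {
            // lastValue() is null when the source sequence has never been incremented
            if (seq.lastValue() != null)
            {
                // Assumed default PostgreSQL sequence naming; the real migration code is not shown here
                String sequenceName = seq.schemaName() + "." + seq.tableName() + "_" + seq.columnName() + "_seq";
                new SqlExecutor(targetTable.getSchema()).execute(
                    new SQLFragment("ALTER SEQUENCE " + sequenceName + " RESTART WITH " + (seq.lastValue() + 1)));
            }
        }
    }
}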
--- .../api/data/DatabaseMigrationService.java | 38 +++---- assay/src/org/labkey/assay/AssayModule.java | 13 +-- .../AssayResultMigrationSchemaHandler.java | 98 +++++++++++++++++++ .../core/CoreMigrationSchemaHandler.java | 3 +- .../core/dialect/PostgreSql92Dialect.java | 2 +- .../DataClassMigrationSchemaHandler.java | 15 ++- .../SampleTypeMigrationSchemaHandler.java | 52 +++++++--- 7 files changed, 167 insertions(+), 54 deletions(-) create mode 100644 assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java diff --git a/api/src/org/labkey/api/data/DatabaseMigrationService.java b/api/src/org/labkey/api/data/DatabaseMigrationService.java index 5fb7ae8e71c..923b58a02ff 100644 --- a/api/src/org/labkey/api/data/DatabaseMigrationService.java +++ b/api/src/org/labkey/api/data/DatabaseMigrationService.java @@ -83,7 +83,7 @@ interface MigrationSchemaHandler List getTablesToCopy(); // Create a filter clause that selects from all specified containers and (in some overrides) applies table-specific filters - FilterClause getTableFilter(TableInfo sourceTable, FieldKey containerFieldKey, Set containers); + FilterClause getTableFilterClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers); // Create a filter clause that selects from all specified containers FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers); @@ -94,9 +94,9 @@ interface MigrationSchemaHandler @Nullable FieldKey getContainerFieldKey(TableInfo sourceTable); // Create a filter clause that selects all rows from unfiltered containers plus filtered rows from the filtered containers - FilterClause getDomainDataFilter(Set copyContainers, Set filteredContainers, List domainFilters, TableInfo sourceTable, FieldKey containerFieldKey, Set selectColumnNames); + FilterClause getDomainDataFilterClause(Set copyContainers, Set filteredContainers, List domainFilters, TableInfo sourceTable, FieldKey containerFieldKey, Set selectColumnNames); - void addDomainDataFilter(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames); + void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames); // Do any necessary clean up after the target table has been populated. notCopiedFilter selects all rows in the // source table that were NOT copied to the target table. 
(For example, rows in a global table not copied due to @@ -153,7 +153,7 @@ public List getTablesToCopy() } @Override - public FilterClause getTableFilter(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getTableFilterClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) { return getContainerClause(sourceTable, containerFieldKey, containers); } @@ -209,7 +209,7 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container } @Override - public FilterClause getDomainDataFilter(Set copyContainers, Set filteredContainers, List domainFilters, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + public final FilterClause getDomainDataFilterClause(Set copyContainers, Set filteredContainers, List domainFilters, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) { // Filtered case: remove the filtered containers from the unconditional container set Set otherContainers = new HashSet<>(copyContainers); @@ -219,7 +219,7 @@ public FilterClause getDomainDataFilter(Set copyContainers, Set filt OrClause orClause = new OrClause(); // Delegate to the MigrationSchemaHandler to add domain-filtered containers back with their special filter applied - domainFilters.forEach(filter -> addDomainDataFilter(orClause, filter, sourceTable, fKey, selectColumnNames)); + domainFilters.forEach(filter -> addDomainDataFilterClause(orClause, filter, sourceTable, fKey, selectColumnNames)); if (!orClause.getClauses().isEmpty()) { @@ -231,13 +231,13 @@ public FilterClause getDomainDataFilter(Set copyContainers, Set filt } @Override - public void addDomainDataFilter(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) { - addDataFilter(orClause, filter, sourceTable, fKey, selectColumnNames); + addDataFilterClause(orClause, filter, sourceTable, fKey, selectColumnNames); } // Add a filter and return true if the column exists directly on the table - protected boolean addDataFilter(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + protected boolean addDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) { boolean columnExists = selectColumnNames.contains(filter.column()); @@ -255,32 +255,20 @@ protected boolean addDataFilter(OrClause orClause, DataFilter filter, TableInfo return columnExists; } - // Add a filter to select all rows where the object property with equals the filter value - protected void addObjectPropertyFilter(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, int propertyId) + // Add a clause that selects all rows where the object property with equals the filter value. This + // is only for provisioned tables that lack an ObjectId, MaterialId, or DataId column. + protected void addObjectPropertyClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey containerFieldKey, int propertyId) { SQLFragment flagWhere = new SQLFragment("lsid IN (SELECT ObjectURI FROM exp.Object WHERE ObjectId IN (SELECT ObjectId FROM exp.ObjectProperty WHERE StringValue = ? 
AND PropertyId = ?))", filter.condition().getParamVals()[0], propertyId); orClause.addClause( new AndClause( - getContainerClause(sourceTable, fKey, filter.containers()), + getContainerClause(sourceTable, containerFieldKey, filter.containers()), new SQLClause(flagWhere) ) ); } - // Special domain data filter method for provisioned tables that have a built-in Flag field (currently used by data classes) - protected void addDomainDataFlagFilter(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) - { - if (filter.column().equalsIgnoreCase("Flag")) - { - addObjectPropertyFilter(orClause, filter, sourceTable, fKey, getCommentPropertyId(sourceTable.getSchema().getScope())); - } - else - { - addDataFilter(orClause, filter, sourceTable, fKey, selectColumnNames); - } - } - private Integer _commentPropertyId = null; protected synchronized int getCommentPropertyId(DbScope scope) diff --git a/assay/src/org/labkey/assay/AssayModule.java b/assay/src/org/labkey/assay/AssayModule.java index 39b8ddc4d22..3f34529b3be 100644 --- a/assay/src/org/labkey/assay/AssayModule.java +++ b/assay/src/org/labkey/assay/AssayModule.java @@ -307,6 +307,8 @@ public void moduleStartupComplete(ServletContext servletContext) return SITE_WIDE_TABLE; } }); + + DatabaseMigrationService.get().registerSchemaHandler(new AssayResultMigrationSchemaHandler()); } @Override @@ -319,9 +321,8 @@ public ActionURL getTabURL(Container c, User user) @NotNull public Set getSchemaNames() { - HashSet set = new HashSet<>(); + HashSet set = new HashSet<>(getProvisionedSchemaNames()); set.add(AssayDbSchema.getInstance().getSchemaName()); - set.addAll(getProvisionedSchemaNames()); return set; } @@ -358,13 +359,13 @@ public Set getProvisionedSchemaNames() public @NotNull Set> getUnitTests() { return Set.of( - TsvAssayProvider.TestCase.class, - AssaySchemaImpl.TestCase.class, + AssayPlateMetadataServiceImpl.TestCase.class, AssayProviderSchema.TestCase.class, - PositionImpl.TestCase.class, + AssaySchemaImpl.TestCase.class, PlateImpl.TestCase.class, PlateUtils.TestCase.class, - AssayPlateMetadataServiceImpl.TestCase.class + PositionImpl.TestCase.class, + TsvAssayProvider.TestCase.class ); } diff --git a/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java b/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java new file mode 100644 index 00000000000..20f40a36569 --- /dev/null +++ b/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java @@ -0,0 +1,98 @@ +package org.labkey.assay; + +import org.apache.logging.log4j.Logger; +import org.jetbrains.annotations.Nullable; +import org.labkey.api.assay.AbstractTsvAssayProvider; +import org.labkey.api.collections.CsvSet; +import org.labkey.api.data.DatabaseMigrationService; +import org.labkey.api.data.DatabaseMigrationService.DataFilter; +import org.labkey.api.data.DbSchema; +import org.labkey.api.data.DbSchemaType; +import org.labkey.api.data.SQLFragment; +import org.labkey.api.data.Selector; +import org.labkey.api.data.SimpleFilter; +import org.labkey.api.data.SimpleFilter.FilterClause; +import org.labkey.api.data.SimpleFilter.InClause; +import org.labkey.api.data.SimpleFilter.OrClause; +import org.labkey.api.data.SimpleFilter.SQLClause; +import org.labkey.api.data.TableInfo; +import org.labkey.api.data.TableSelector; +import org.labkey.api.exp.api.ExperimentService; +import org.labkey.api.query.FieldKey; +import org.labkey.api.util.Formats; +import org.labkey.api.util.GUID; +import org.labkey.api.util.logging.LogHelper; + +import 
java.util.Collection; +import java.util.Collections; +import java.util.Set; + +class AssayResultMigrationSchemaHandler extends DatabaseMigrationService.DefaultMigrationSchemaHandler +{ + private static final Logger LOG = LogHelper.getLogger(AssayResultMigrationSchemaHandler.class, "Assay result migration status"); + + public AssayResultMigrationSchemaHandler() + { + super(DbSchema.get(AbstractTsvAssayProvider.ASSAY_SCHEMA_NAME, DbSchemaType.Provisioned)); + } + + @Override + public @Nullable FieldKey getContainerFieldKey(TableInfo table) + { + return DUMMY_FIELD_KEY; + } + + // Provisioned assay result tables occasionally have no DataId column; hopefully they have an LSID column. + private boolean hasDataIdColumn(TableInfo sourceTable) + { + return sourceTable.getColumn("DataId") != null; + } + + @Override + public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + { + SQLFragment sqlFragment = new SQLFragment(); + + if (hasDataIdColumn(sourceTable)) + sqlFragment.append("DataId IN (SELECT RowId"); + else + sqlFragment.append("LSID IN (SELECT LSID"); + + return new SQLClause( + sqlFragment.append(" FROM exp.Data WHERE Container") + .appendInClause(containers, sourceTable.getSqlDialect()) + .append(")") + ); + } + + @Override + public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + { + // We want no rows from containers with a domain data filter, so don't add any clauses + } + + @Override + public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilter notCopiedFilter) + { + final Selector selector; + + if (hasDataIdColumn(sourceTable)) + { + selector = new TableSelector(sourceTable, Collections.singleton("DataId"), notCopiedFilter, null); + } + else + { + // Forced to use two separate queries here since notCopiedFilter must be applied to sourceTable. Attempting + // a combined query that joins to exp.Data results in ambiguous LSID references. + Collection lsids = new TableSelector(sourceTable, Collections.singleton("LSID"), notCopiedFilter, null).getCollection(String.class); + SimpleFilter filter = new SimpleFilter(new InClause(FieldKey.fromParts("LSID"), lsids)); + selector = new TableSelector(ExperimentService.get().getTinfoData(), new CsvSet("RowId, LSID"), filter, null); + } + + Collection notCopiedDataIds = selector.getCollection(Integer.class); + + LOG.info(" {} rows not copied -- deleting associated rows from exp.Data, exp.Object, etc.", Formats.commaf0.format(notCopiedDataIds.size())); + + // TODO: Delete exp.Data, etc. 
rows associated with the rows that weren't copied + } +} diff --git a/core/src/org/labkey/core/CoreMigrationSchemaHandler.java b/core/src/org/labkey/core/CoreMigrationSchemaHandler.java index e47dceb5142..08ccc626703 100644 --- a/core/src/org/labkey/core/CoreMigrationSchemaHandler.java +++ b/core/src/org/labkey/core/CoreMigrationSchemaHandler.java @@ -50,7 +50,6 @@ public List getTablesToCopy() } }); - // TODO: Temporary, until "clone" migration type copies schemas with a registered handler only if (ModuleLoader.getInstance().getModule(DbScope.getLabKeyScope(), "vehicle") != null) { DatabaseMigrationService.get().registerSchemaHandler(new DatabaseMigrationService.DefaultMigrationSchemaHandler(DbSchema.get("vehicle", DbSchemaType.Module)) @@ -115,7 +114,7 @@ public List getTablesToCopy() } @Override - public FilterClause getTableFilter(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getTableFilterClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) { FilterClause filterClause = getContainerClause(sourceTable, containerFieldKey, containers); String tableName = sourceTable.getName(); diff --git a/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java b/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java index 52dbb60e160..4ec4bf45c5c 100644 --- a/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java +++ b/core/src/org/labkey/core/dialect/PostgreSql92Dialect.java @@ -239,7 +239,7 @@ SELECT SchemaName, TableName, ColumnName, LastValue FROM ( JOIN pg_attribute a ON d.refobjid = a.attrelid AND d.refobjsubid = a.attnum JOIN - pg_sequences seq ON s.relname = seq.SequenceName AND tns.nspname = seq.SchemaName -- maybe sns.nspname instead? but that's slower... + pg_sequences seq ON s.relname = seq.SequenceName AND tns.nspname = seq.SchemaName -- maybe sns.nspname instead? but that is slower... 
WHERE s.relkind = 'S' -- Sequence AND t.relkind IN ('r', 'P') -- Table (regular table or partitioned table) diff --git a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java index 9190dc6f7ea..760fe47f587 100644 --- a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java @@ -5,8 +5,8 @@ import org.labkey.api.collections.Sets; import org.labkey.api.data.DatabaseMigrationConfiguration; import org.labkey.api.data.DatabaseMigrationService; -import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler; import org.labkey.api.data.DatabaseMigrationService.DataFilter; +import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler; import org.labkey.api.data.DbSchema; import org.labkey.api.data.DbSchemaType; import org.labkey.api.data.DbScope; @@ -72,10 +72,17 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container } @Override - public void addDomainDataFilter(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) { // Data classes have a built-in Flag field - addDomainDataFlagFilter(orClause, filter, sourceTable, fKey, selectColumnNames); + if (filter.column().equalsIgnoreCase("Flag")) + { + addObjectPropertyClause(orClause, filter, sourceTable, fKey, getCommentPropertyId(sourceTable.getSchema().getScope())); + } + else + { + addDataFilterClause(orClause, filter, sourceTable, fKey, selectColumnNames); + } } private static final Set SEQUENCE_TABLES = Sets.newCaseInsensitiveHashSet("protsequence", "nucsequence", "molecule"); @@ -164,7 +171,7 @@ public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema s DatabaseMigrationService.get().copySourceTableToTargetTable(configuration, sourceTable, targetTable, DbSchemaType.Module, new DefaultMigrationSchemaHandler(biologicsTargetSchema) { @Override - public FilterClause getTableFilter(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getTableFilterClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) { // This is a global table, so no container clause. Just query and copy the sequence IDs referenced by data class rows we copied. 
return new InClause(FieldKey.fromParts("SequenceId"), SEQUENCE_IDS); diff --git a/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java index 1209df3e966..2224e260c16 100644 --- a/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java @@ -2,20 +2,22 @@ import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; -import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler; +import org.labkey.api.collections.CsvSet; import org.labkey.api.data.DatabaseMigrationService.DataFilter; +import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler; import org.labkey.api.data.SQLFragment; import org.labkey.api.data.Selector; import org.labkey.api.data.SimpleFilter; import org.labkey.api.data.SimpleFilter.FilterClause; +import org.labkey.api.data.SimpleFilter.InClause; import org.labkey.api.data.SimpleFilter.OrClause; import org.labkey.api.data.SimpleFilter.SQLClause; import org.labkey.api.data.SqlExecutor; -import org.labkey.api.data.SqlSelector; import org.labkey.api.data.TableInfo; import org.labkey.api.data.TableSelector; import org.labkey.api.data.dialect.SqlDialect; import org.labkey.api.exp.OntologyManager; +import org.labkey.api.exp.api.ExperimentService; import org.labkey.api.exp.api.SampleTypeDomainKind; import org.labkey.api.query.FieldKey; import org.labkey.api.util.Formats; @@ -57,7 +59,7 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container } @Override - public void addDomainDataFilter(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) { // Sample-type-specific optimization - joining to exp.Material instead of exp.Object is much faster if (filter.column().equalsIgnoreCase("Flag")) @@ -80,7 +82,7 @@ public void addDomainDataFilter(OrClause orClause, DataFilter filter, TableInfo } else { - addDataFilter(orClause, filter, sourceTable, fKey, selectColumnNames); + addDataFilterClause(orClause, filter, sourceTable, fKey, selectColumnNames); } } @@ -96,44 +98,62 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte SqlDialect dialect = sourceTable.getSqlDialect(); final Selector selector; - if (getJoinColumnName(sourceTable).equals("LSID")) + if (getJoinColumnName(sourceTable).equals("RowId")) { - Collection lsids = new TableSelector(sourceTable, Collections.singleton("LSID"), notCopiedFilter, null).getCollection(String.class); - selector = new SqlSelector(getSchema(), new SQLFragment("SELECT RowId FROM exp.Material m WHERE lsid").appendInClause(lsids, dialect)); + selector = new TableSelector(sourceTable, Collections.singleton("RowId"), notCopiedFilter, null); } else { - selector = new TableSelector(sourceTable, Collections.singleton("RowId"), notCopiedFilter, null); + // Forced to use two separate queries here since notCopiedFilter must be applied to sourceTable. Attempting + // a combined query that joins to exp.Material results in ambiguous LSID references. 
+ Collection lsids = new TableSelector(sourceTable, Collections.singleton("LSID"), notCopiedFilter, null).getCollection(String.class); + SimpleFilter filter = new SimpleFilter(new InClause(FieldKey.fromParts("LSID"), lsids)); + selector = new TableSelector(ExperimentService.get().getTinfoMaterial(), new CsvSet("RowId, LSID"), filter, null); } - Collection notCopiedRows = selector.getCollection(Integer.class); + Collection notCopiedMaterialIds = selector.getCollection(Integer.class); - if (!notCopiedRows.isEmpty()) + if (!notCopiedMaterialIds.isEmpty()) { - LOG.info(" {} rows not copied -- deleting associated rows from exp.Material, exp.Object, etc.", Formats.commaf0.format(notCopiedRows.size())); + LOG.info(" {} rows not copied -- deleting associated rows from exp.Material, exp.Object, etc.", Formats.commaf0.format(notCopiedMaterialIds.size())); SqlExecutor executor = new SqlExecutor(OntologyManager.getExpSchema()); - // An IN clause of exp.Material.RowIds are also the associated ObjectIds + // Create an IN clause of exp.Material.RowIds + SQLFragment materialIdClause = new SQLFragment() + .appendInClause(notCopiedMaterialIds, dialect); + + // Create an IN clause of exp.Object.ObjectIds. Need to do this now, before we delete from exp.Materials. + SimpleFilter filter = new SimpleFilter(new SQLClause( + new SQLFragment("RowId") + .append(materialIdClause) + )); + Collection objectIds = new TableSelector(ExperimentService.get().getTinfoMaterial(), new CsvSet("ObjectId, RowId"), filter, null) + .getCollection(Long.class); SQLFragment objectIdClause = new SQLFragment() - .appendInClause(notCopiedRows, dialect); + .appendInClause(objectIds, dialect); // Delete from exp.Material (and associated tables) LOG.info(" exp.MaterialInput"); executor.execute( new SQLFragment("DELETE FROM exp.MaterialInput WHERE MaterialId") - .append(objectIdClause) + .append(materialIdClause) ); LOG.info(" exp.MaterialAliasMap"); executor.execute( new SQLFragment("DELETE FROM exp.MaterialAliasMap WHERE LSID IN (SELECT LSID FROM exp.Material WHERE RowId") - .append(objectIdClause) + .append(materialIdClause) .append(")") ); + LOG.info(" assay.Well"); + executor.execute( + new SQLFragment("UPDATE assay.Well SET SampleId = null WHERE SampleId") + .append(materialIdClause) + ); LOG.info(" exp.Material"); executor.execute( new SQLFragment("DELETE FROM exp.Material WHERE RowId") - .append(objectIdClause) + .append(materialIdClause) ); ExperimentMigrationSchemaHandler.deleteObjectIds(objectIdClause); From fc618de4ceca6651cf6e7a217d91cde479781c32 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Sun, 2 Nov 2025 14:23:16 -0800 Subject: [PATCH 3/8] Improve not-copied queries - combine multiple into one query, always query source DB for object IDs, etc., use pluralize() when logging counts. 
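Patch 3 collapses each handler's two-step not-copied lookup (select LSIDs or RowIds from the provisioned table, then map them through exp.Data or exp.Material) into a single query against the source database. The common shape, distilled from the hunks below, uses a sub-select rather than a join so that notCopiedFilter's unqualified column references only ever resolve against the provisioned table. This sketch assumes the imports already used by the surrounding handlers and a generic exp-side table name:

    // Distilled pattern: map not-copied provisioned rows to exp ObjectIds in one
    // query. Each handler below specializes expTableSql (exp.Data or exp.Material)
    // and the join column; "LSID" is used here purely for illustration.
    private static Collection<Long> selectNotCopiedObjectIds(TableInfo sourceTable, SimpleFilter notCopiedFilter, String expTableSql)
    {
        SQLFragment sql = new SQLFragment("SELECT ObjectId FROM " + expTableSql + " WHERE LSID IN (SELECT LSID FROM ")
            .appendIdentifier(sourceTable.getSelectName())
            .append(" ")
            .append(notCopiedFilter.getSQLFragment(sourceTable.getSqlDialect()))
            .append(")");

        // Query the source scope: the provisioned rows being inspected live there
        return new SqlSelector(sourceTable.getSchema(), sql).getCollection(Long.class);
    }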
--- .../api/data/DatabaseMigrationService.java | 6 ++ .../AssayResultMigrationSchemaHandler.java | 50 ++++------ .../DataClassMigrationSchemaHandler.java | 91 +++++++++++-------- .../SampleTypeMigrationSchemaHandler.java | 74 ++++++++------- .../issue/IssueMigrationSchemaHandler.java | 6 +- 5 files changed, 126 insertions(+), 101 deletions(-) diff --git a/api/src/org/labkey/api/data/DatabaseMigrationService.java b/api/src/org/labkey/api/data/DatabaseMigrationService.java index 923b58a02ff..bc856f11ac0 100644 --- a/api/src/org/labkey/api/data/DatabaseMigrationService.java +++ b/api/src/org/labkey/api/data/DatabaseMigrationService.java @@ -15,6 +15,7 @@ import org.labkey.api.services.ServiceRegistry; import org.labkey.api.util.ConfigurationException; import org.labkey.api.util.GUID; +import org.labkey.api.util.StringUtilsLabKey; import org.labkey.api.util.logging.LogHelper; import org.labkey.vfs.FileLike; @@ -288,6 +289,11 @@ protected synchronized int getCommentPropertyId(DbScope scope) return _commentPropertyId; } + protected String rowsNotCopied(int count) + { + return " " + StringUtilsLabKey.pluralize(count, "row") + " not copied"; + } + @Override public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilter notCopiedFilter) { diff --git a/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java b/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java index 20f40a36569..429616ec939 100644 --- a/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java +++ b/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java @@ -3,31 +3,25 @@ import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; import org.labkey.api.assay.AbstractTsvAssayProvider; -import org.labkey.api.collections.CsvSet; -import org.labkey.api.data.DatabaseMigrationService; import org.labkey.api.data.DatabaseMigrationService.DataFilter; +import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler; import org.labkey.api.data.DbSchema; import org.labkey.api.data.DbSchemaType; import org.labkey.api.data.SQLFragment; -import org.labkey.api.data.Selector; import org.labkey.api.data.SimpleFilter; import org.labkey.api.data.SimpleFilter.FilterClause; -import org.labkey.api.data.SimpleFilter.InClause; import org.labkey.api.data.SimpleFilter.OrClause; import org.labkey.api.data.SimpleFilter.SQLClause; +import org.labkey.api.data.SqlSelector; import org.labkey.api.data.TableInfo; -import org.labkey.api.data.TableSelector; -import org.labkey.api.exp.api.ExperimentService; import org.labkey.api.query.FieldKey; -import org.labkey.api.util.Formats; import org.labkey.api.util.GUID; import org.labkey.api.util.logging.LogHelper; import java.util.Collection; -import java.util.Collections; import java.util.Set; -class AssayResultMigrationSchemaHandler extends DatabaseMigrationService.DefaultMigrationSchemaHandler +class AssayResultMigrationSchemaHandler extends DefaultMigrationSchemaHandler { private static final Logger LOG = LogHelper.getLogger(AssayResultMigrationSchemaHandler.class, "Assay result migration status"); @@ -51,15 +45,9 @@ private boolean hasDataIdColumn(TableInfo sourceTable) @Override public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) { - SQLFragment sqlFragment = new SQLFragment(); - - if (hasDataIdColumn(sourceTable)) - sqlFragment.append("DataId IN (SELECT RowId"); - else - sqlFragment.append("LSID IN (SELECT LSID"); - return new SQLClause( - sqlFragment.append(" FROM 
exp.Data WHERE Container") + new SQLFragment(hasDataIdColumn(sourceTable) ? "DataId IN (SELECT RowId" : "LSID IN (SELECT LSID") + .append(" FROM exp.Data WHERE Container") .appendInClause(containers, sourceTable.getSqlDialect()) .append(")") ); @@ -74,25 +62,25 @@ public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, Tabl @Override public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilter notCopiedFilter) { - final Selector selector; + SQLFragment objectIdSql = new SQLFragment("SELECT ObjectId FROM exp.Data WHERE ") + .append(hasDataIdColumn(sourceTable) ? "RowId IN (SELECT DataId" : "LSID IN (SELECT LSID") + .append(" FROM ") + .appendIdentifier(sourceTable.getSelectName()) + .append(" ") + .append(notCopiedFilter.getSQLFragment(sourceTable.getSqlDialect())) + .append(")"); + + Collection notCopiedDataIds = new SqlSelector(sourceTable.getSchema(), objectIdSql).getCollection(Long.class); - if (hasDataIdColumn(sourceTable)) + if (notCopiedDataIds.isEmpty()) { - selector = new TableSelector(sourceTable, Collections.singleton("DataId"), notCopiedFilter, null); + LOG.info(rowsNotCopied(0)); } else { - // Forced to use two separate queries here since notCopiedFilter must be applied to sourceTable. Attempting - // a combined query that joins to exp.Data results in ambiguous LSID references. - Collection lsids = new TableSelector(sourceTable, Collections.singleton("LSID"), notCopiedFilter, null).getCollection(String.class); - SimpleFilter filter = new SimpleFilter(new InClause(FieldKey.fromParts("LSID"), lsids)); - selector = new TableSelector(ExperimentService.get().getTinfoData(), new CsvSet("RowId, LSID"), filter, null); - } - - Collection notCopiedDataIds = selector.getCollection(Integer.class); + LOG.info("{} -- deleting associated rows from exp.Data, exp.Object, etc.", rowsNotCopied(notCopiedDataIds.size())); - LOG.info(" {} rows not copied -- deleting associated rows from exp.Data, exp.Object, etc.", Formats.commaf0.format(notCopiedDataIds.size())); - - // TODO: Delete exp.Data, etc. rows associated with the rows that weren't copied + // TODO: Delete exp.Data, etc. rows associated with the rows that weren't copied + } } } diff --git a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java index 760fe47f587..ae5d259999f 100644 --- a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java @@ -17,13 +17,14 @@ import org.labkey.api.data.SimpleFilter.OrClause; import org.labkey.api.data.SimpleFilter.SQLClause; import org.labkey.api.data.SqlExecutor; +import org.labkey.api.data.SqlSelector; import org.labkey.api.data.TableInfo; import org.labkey.api.data.TableSelector; import org.labkey.api.data.dialect.SqlDialect; import org.labkey.api.exp.api.ExperimentService; import org.labkey.api.query.FieldKey; -import org.labkey.api.util.Formats; import org.labkey.api.util.GUID; +import org.labkey.api.util.StringUtilsLabKey; import org.labkey.api.util.logging.LogHelper; import org.labkey.experiment.api.DataClassDomainKind; @@ -92,41 +93,28 @@ public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, Tabl @Override public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilter notCopiedFilter) { - // exp.Data has an index on ObjectId, so use ObjectIds to delete from exp.Data tables as well as exp.Object. 
- // Our notCopiedFilter works on the data class provisioned table, so we need to select LSIDs from that table - // and then select from exp.Data to map those LSIDs to ObjectIds. - Collection notCopiedLsids = new TableSelector(sourceTable, Collections.singleton("LSID"), notCopiedFilter, null).getCollection(String.class); - - if (!notCopiedLsids.isEmpty()) + // Select all ObjectIds associated with the not-copied rows from the source database. Our notCopiedFilter + // works on the data class provisioned table, so we need to use a sub-select (as opposed to a join) to avoid + // ambiguous column references. + SQLFragment objectIdSql = new SQLFragment("SELECT ObjectId FROM exp.Data WHERE LSID IN (SELECT LSID FROM ") + .appendIdentifier(sourceTable.getSelectName()) + .append(" ") + .append(notCopiedFilter.getSQLFragment(sourceTable.getSqlDialect())) + .append(")"); + Collection notCopiedObjectIds = new SqlSelector(sourceTable.getSchema(), objectIdSql).getCollection(Long.class); + + // TODO: temp check - delete this + Collection lsids = new TableSelector(sourceTable, Collections.singleton("LSID"), notCopiedFilter, null).getCollection(String.class); + assert notCopiedObjectIds.size() == lsids.size(); + + if (notCopiedObjectIds.isEmpty()) { - SimpleFilter dataFilter = new SimpleFilter(new InClause(FieldKey.fromParts("LSID"), notCopiedLsids)); - Collection notCopiedObjectIds = new TableSelector(ExperimentService.get().getTinfoData(), Collections.singleton("ObjectId"), dataFilter, null).getCollection(Integer.class); - - LOG.info(" {} rows not copied -- deleting associated rows from exp.Data, exp.Object, etc.", Formats.commaf0.format(notCopiedObjectIds.size())); - SqlExecutor executor = new SqlExecutor(ExperimentService.get().getSchema()); - SqlDialect dialect = ExperimentService.get().getSchema().getSqlDialect(); - SQLFragment objectIdClause = new SQLFragment() - .appendInClause(notCopiedObjectIds, dialect); - - // Delete from exp.Data (and associated tables) - LOG.info(" exp.DataInput"); - executor.execute( - new SQLFragment("DELETE FROM exp.DataInput WHERE DataId IN (SELECT RowId FROM exp.Data WHERE ObjectId") - .append(objectIdClause) - .append(")") - ); - LOG.info(" exp.DataAliasMap"); - executor.execute( - new SQLFragment("DELETE FROM exp.DataAliasMap WHERE LSID") - .appendInClause(notCopiedLsids, dialect) - ); - LOG.info(" exp.Data"); - executor.execute( - new SQLFragment("DELETE FROM exp.Data WHERE ObjectId") - .append(objectIdClause) - ); - - ExperimentMigrationSchemaHandler.deleteObjectIds(objectIdClause); + LOG.info(rowsNotCopied(0)); + } + else + { + LOG.info("{} -- deleting associated rows from exp.Data, exp.Object, etc.", rowsNotCopied(notCopiedObjectIds.size())); + deleteDataRows(notCopiedObjectIds); } String name = sourceTable.getName(); @@ -149,10 +137,41 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte } }) .forEach(SEQUENCE_IDS::add); - LOG.info(" {} unique SequenceIds were added to the SequenceIdentity set", Formats.commaf0.format(SEQUENCE_IDS.size() - startSize)); + LOG.info("{} added to the SequenceIdentity set", + StringUtilsLabKey.pluralize(SEQUENCE_IDS.size() - startSize, "unique SequenceId was", "unique SequenceIds were")); } } + // exp.Data has an index on ObjectId plus we need ObjectIds to delete from exp.Object, etc. 
so pass in ObjectIds here + private void deleteDataRows(Collection objectIds) + { + SqlExecutor executor = new SqlExecutor(ExperimentService.get().getSchema()); + SqlDialect dialect = ExperimentService.get().getSchema().getSqlDialect(); + SQLFragment objectIdClause = new SQLFragment() + .appendInClause(objectIds, dialect); + + // Delete from exp.Data (and associated tables) + LOG.info(" exp.DataInput"); + executor.execute( + new SQLFragment("DELETE FROM exp.DataInput WHERE DataId IN (SELECT RowId FROM exp.Data WHERE ObjectId") + .append(objectIdClause) + .append(")") + ); + LOG.info(" exp.DataAliasMap"); + executor.execute( + new SQLFragment("DELETE FROM exp.DataAliasMap WHERE LSID IN (SELECT LSID FROM exp.Data WHERE ObjectId") + .appendInClause(objectIds, dialect) + .append(")") + ); + LOG.info(" exp.Data"); + executor.execute( + new SQLFragment("DELETE FROM exp.Data WHERE ObjectId") + .append(objectIdClause) + ); + + ExperimentMigrationSchemaHandler.deleteObjectIds(objectIdClause); + } + @Override public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema) { diff --git a/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java index 2224e260c16..15dd8edf2ff 100644 --- a/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java @@ -2,31 +2,29 @@ import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; -import org.labkey.api.collections.CsvSet; import org.labkey.api.data.DatabaseMigrationService.DataFilter; import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler; import org.labkey.api.data.SQLFragment; -import org.labkey.api.data.Selector; import org.labkey.api.data.SimpleFilter; import org.labkey.api.data.SimpleFilter.FilterClause; -import org.labkey.api.data.SimpleFilter.InClause; import org.labkey.api.data.SimpleFilter.OrClause; import org.labkey.api.data.SimpleFilter.SQLClause; import org.labkey.api.data.SqlExecutor; +import org.labkey.api.data.SqlSelector; import org.labkey.api.data.TableInfo; -import org.labkey.api.data.TableSelector; import org.labkey.api.data.dialect.SqlDialect; import org.labkey.api.exp.OntologyManager; -import org.labkey.api.exp.api.ExperimentService; import org.labkey.api.exp.api.SampleTypeDomainKind; import org.labkey.api.query.FieldKey; -import org.labkey.api.util.Formats; import org.labkey.api.util.GUID; import org.labkey.api.util.logging.LogHelper; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.Set; +import java.util.stream.Stream; class SampleTypeMigrationSchemaHandler extends DefaultMigrationSchemaHandler { @@ -96,26 +94,47 @@ private String getJoinColumnName(TableInfo sourceTable) public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilter notCopiedFilter) { SqlDialect dialect = sourceTable.getSqlDialect(); - final Selector selector; - if (getJoinColumnName(sourceTable).equals("RowId")) + // Select all MaterialIds and ObjectIds associated with the not-copied rows from the source database. Our + // notCopiedFilter works on the sample type provisioned table, so we need to use a sub-select (as opposed + // to a join) to avoid ambiguous column references. 
+ String joinColumnName = getJoinColumnName(sourceTable); + + SQLFragment rowIdAndObjectIdSql = new SQLFragment("SELECT RowId, ObjectId FROM exp.Material WHERE ") + .appendIdentifier(joinColumnName) + .append(" IN (SELECT ") + .appendIdentifier(joinColumnName) + .append(" FROM ") + .appendIdentifier(sourceTable.getSelectName()) + .append(" ") + .append(notCopiedFilter.getSQLFragment(dialect)) + .append(")"); + + Collection notCopiedMaterialIds = new ArrayList<>(); + Collection notCopiedObjectIds = new ArrayList<>(); + + try (Stream stream = new SqlSelector(sourceTable.getSchema(), rowIdAndObjectIdSql).uncachedResultSetStream()) { - selector = new TableSelector(sourceTable, Collections.singleton("RowId"), notCopiedFilter, null); + stream.forEach(rs -> { + try + { + notCopiedMaterialIds.add(rs.getInt(1)); + notCopiedObjectIds.add(rs.getLong(2)); + } + catch (SQLException e) + { + throw new RuntimeException(e); + } + }); } - else + + if (notCopiedMaterialIds.isEmpty()) { - // Forced to use two separate queries here since notCopiedFilter must be applied to sourceTable. Attempting - // a combined query that joins to exp.Material results in ambiguous LSID references. - Collection lsids = new TableSelector(sourceTable, Collections.singleton("LSID"), notCopiedFilter, null).getCollection(String.class); - SimpleFilter filter = new SimpleFilter(new InClause(FieldKey.fromParts("LSID"), lsids)); - selector = new TableSelector(ExperimentService.get().getTinfoMaterial(), new CsvSet("RowId, LSID"), filter, null); + LOG.info(rowsNotCopied(0)); } - - Collection notCopiedMaterialIds = selector.getCollection(Integer.class); - - if (!notCopiedMaterialIds.isEmpty()) + else { - LOG.info(" {} rows not copied -- deleting associated rows from exp.Material, exp.Object, etc.", Formats.commaf0.format(notCopiedMaterialIds.size())); + LOG.info("{} -- deleting associated rows from exp.Material, exp.Object, etc.", rowsNotCopied(notCopiedMaterialIds.size())); SqlExecutor executor = new SqlExecutor(OntologyManager.getExpSchema()); @@ -123,16 +142,6 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte SQLFragment materialIdClause = new SQLFragment() .appendInClause(notCopiedMaterialIds, dialect); - // Create an IN clause of exp.Object.ObjectIds. Need to do this now, before we delete from exp.Materials. 
- SimpleFilter filter = new SimpleFilter(new SQLClause( - new SQLFragment("RowId") - .append(materialIdClause) - )); - Collection objectIds = new TableSelector(ExperimentService.get().getTinfoMaterial(), new CsvSet("ObjectId, RowId"), filter, null) - .getCollection(Long.class); - SQLFragment objectIdClause = new SQLFragment() - .appendInClause(objectIds, dialect); - // Delete from exp.Material (and associated tables) LOG.info(" exp.MaterialInput"); executor.execute( @@ -156,6 +165,9 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte .append(materialIdClause) ); + SQLFragment objectIdClause = new SQLFragment() + .appendInClause(notCopiedObjectIds, dialect); + ExperimentMigrationSchemaHandler.deleteObjectIds(objectIdClause); } } diff --git a/issues/src/org/labkey/issue/IssueMigrationSchemaHandler.java b/issues/src/org/labkey/issue/IssueMigrationSchemaHandler.java index d1dc462c5b8..90e3c4e19eb 100644 --- a/issues/src/org/labkey/issue/IssueMigrationSchemaHandler.java +++ b/issues/src/org/labkey/issue/IssueMigrationSchemaHandler.java @@ -16,7 +16,7 @@ import org.labkey.api.data.TableSelector; import org.labkey.api.issues.IssuesSchema; import org.labkey.api.query.FieldKey; -import org.labkey.api.util.Formats; +import org.labkey.api.util.StringUtilsLabKey; import org.labkey.api.util.logging.LogHelper; import java.util.HashSet; @@ -49,13 +49,13 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte new TableSelector(IssuesSchema.getInstance().getTableInfoIssues(), new CsvSet("IssueId, EntityId"), new SimpleFilter(joinOnEntityId), null).stream(Integer.class) .forEach(ISSUE_IDS::add); - LOG.info(" {} IssueIds were added to the IssueId set", Formats.commaf0.format(ISSUE_IDS.size() - startSize)); + LOG.info(" {} added to the IssueId set", StringUtilsLabKey.pluralize(ISSUE_IDS.size() - startSize, "IssueId was", "IssueIds were")); } @Override public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema sourceSchema, DbSchema targetSchema) { - LOG.info(" Deleting related issues, comments, and issues rows associated with {} issues", ISSUE_IDS.size()); + LOG.info(" Deleting related issues, comments, and issues rows associated with {}", StringUtilsLabKey.pluralize(ISSUE_IDS.size(), "issue")); if (!ISSUE_IDS.isEmpty()) { From 5e4c448edda65db0c4c7f841bb582c53b65f2f45 Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Sun, 2 Nov 2025 14:41:55 -0800 Subject: [PATCH 4/8] ExperimentDeleteService that allows assay results handler to delete exp.Data & exp.Object rows. 
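Patch 4 introduces ExperimentDeleteService so the assay-results handler can reuse the data-class handler's exp.Data / exp.Object cleanup. DataClassMigrationSchemaHandler is package-private inside org.labkey.experiment, so the assay module can't call its deleteDataRows() directly; registering it behind a ServiceRegistry-backed interface in the api module exposes just that one operation. The two ends of the new call path, as they appear in the diffs below:

    // experiment module, ExperimentModule startup: register the data-class handler
    // as the ExperimentDeleteService implementation
    DataClassMigrationSchemaHandler dcHandler = new DataClassMigrationSchemaHandler();
    DatabaseMigrationService.get().registerSchemaHandler(dcHandler);
    DatabaseMigrationService.ExperimentDeleteService.setInstance(dcHandler);

    // assay module, AssayResultMigrationSchemaHandler.afterTable(): delete the
    // exp.Data / exp.Object rows backing assay results that were not copied
    ExperimentDeleteService.get().deleteDataRows(notCopiedObjectIds);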
--- .../api/data/DatabaseMigrationService.java | 22 +++++++++++++++++++ .../AssayResultMigrationSchemaHandler.java | 10 +++++---- .../DataClassMigrationSchemaHandler.java | 10 +++++---- .../labkey/experiment/ExperimentModule.java | 4 +++- 4 files changed, 37 insertions(+), 9 deletions(-) diff --git a/api/src/org/labkey/api/data/DatabaseMigrationService.java b/api/src/org/labkey/api/data/DatabaseMigrationService.java index bc856f11ac0..6942f7b1b2a 100644 --- a/api/src/org/labkey/api/data/DatabaseMigrationService.java +++ b/api/src/org/labkey/api/data/DatabaseMigrationService.java @@ -20,6 +20,7 @@ import org.labkey.vfs.FileLike; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; @@ -329,6 +330,27 @@ interface MigrationFilter void saveFilter(@Nullable GUID guid, String value); } + interface ExperimentDeleteService + { + static @NotNull ExperimentDeleteService get() + { + ExperimentDeleteService ret = ServiceRegistry.get().getService(ExperimentDeleteService.class); + if (ret == null) + throw new IllegalStateException("ExperimentDeleteService not found"); + return ret; + } + + static void setInstance(ExperimentDeleteService impl) + { + ServiceRegistry.get().registerService(ExperimentDeleteService.class, impl); + } + + /** + * Deletes all rows from exp.Data, exp.Object, and related tables associated with the provided ObjectIds + */ + void deleteDataRows(Collection objectIds); + } + // Helper method that parses a data filter then adds it and its container to the provided collections, coalescing // cases where multiple containers specify the same filter static void addDataFilter(String filterName, List dataFilters, Set filteredContainers, GUID guid, String filter) diff --git a/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java b/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java index 429616ec939..885baf61df3 100644 --- a/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java +++ b/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java @@ -5,6 +5,7 @@ import org.labkey.api.assay.AbstractTsvAssayProvider; import org.labkey.api.data.DatabaseMigrationService.DataFilter; import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler; +import org.labkey.api.data.DatabaseMigrationService.ExperimentDeleteService; import org.labkey.api.data.DbSchema; import org.labkey.api.data.DbSchemaType; import org.labkey.api.data.SQLFragment; @@ -70,17 +71,18 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte .append(notCopiedFilter.getSQLFragment(sourceTable.getSqlDialect())) .append(")"); - Collection notCopiedDataIds = new SqlSelector(sourceTable.getSchema(), objectIdSql).getCollection(Long.class); + Collection notCopiedObjectIds = new SqlSelector(sourceTable.getSchema(), objectIdSql).getCollection(Long.class); - if (notCopiedDataIds.isEmpty()) + if (notCopiedObjectIds.isEmpty()) { LOG.info(rowsNotCopied(0)); } else { - LOG.info("{} -- deleting associated rows from exp.Data, exp.Object, etc.", rowsNotCopied(notCopiedDataIds.size())); + LOG.info("{} -- deleting associated rows from exp.Data, exp.Object, etc.", rowsNotCopied(notCopiedObjectIds.size())); - // TODO: Delete exp.Data, etc. rows associated with the rows that weren't copied + // Delete exp.Data, exp.Object, etc. 
rows associated with the rows that weren't copied + ExperimentDeleteService.get().deleteDataRows(notCopiedObjectIds); } } } diff --git a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java index ae5d259999f..a8afc65a71a 100644 --- a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java @@ -7,6 +7,7 @@ import org.labkey.api.data.DatabaseMigrationService; import org.labkey.api.data.DatabaseMigrationService.DataFilter; import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler; +import org.labkey.api.data.DatabaseMigrationService.ExperimentDeleteService; import org.labkey.api.data.DbSchema; import org.labkey.api.data.DbSchemaType; import org.labkey.api.data.DbScope; @@ -33,7 +34,7 @@ import java.util.HashSet; import java.util.Set; -class DataClassMigrationSchemaHandler extends DefaultMigrationSchemaHandler +class DataClassMigrationSchemaHandler extends DefaultMigrationSchemaHandler implements ExperimentDeleteService { private static final Logger LOG = LogHelper.getLogger(DataClassMigrationSchemaHandler.class, "Data class migration status"); @@ -60,8 +61,8 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container // container FilterClause explicitly. clause = new SQLClause( new SQLFragment("LSID IN (SELECT LSID FROM exp.Data WHERE Container") - .appendInClause(containers, sourceTable.getSqlDialect()) - .append(")") + .appendInClause(containers, sourceTable.getSqlDialect()) + .append(")") ); } else @@ -143,7 +144,8 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte } // exp.Data has an index on ObjectId plus we need ObjectIds to delete from exp.Object, etc. 
so pass in ObjectIds here - private void deleteDataRows(Collection objectIds) + @Override + public void deleteDataRows(Collection objectIds) { SqlExecutor executor = new SqlExecutor(ExperimentService.get().getSchema()); SqlDialect dialect = ExperimentService.get().getSchema().getSqlDialect(); diff --git a/experiment/src/org/labkey/experiment/ExperimentModule.java b/experiment/src/org/labkey/experiment/ExperimentModule.java index 78b03f46520..c64f5ea669b 100644 --- a/experiment/src/org/labkey/experiment/ExperimentModule.java +++ b/experiment/src/org/labkey/experiment/ExperimentModule.java @@ -875,7 +875,9 @@ SELECT COUNT(DISTINCT DD.DomainURI) FROM DatabaseMigrationService.get().registerSchemaHandler(new ExperimentMigrationSchemaHandler()); DatabaseMigrationService.get().registerSchemaHandler(new SampleTypeMigrationSchemaHandler()); - DatabaseMigrationService.get().registerSchemaHandler(new DataClassMigrationSchemaHandler()); + DataClassMigrationSchemaHandler dcHandler = new DataClassMigrationSchemaHandler(); + DatabaseMigrationService.get().registerSchemaHandler(dcHandler); + DatabaseMigrationService.ExperimentDeleteService.setInstance(dcHandler); } @Override From 86ed8c526a413bb39161f8d86cc65bf8792079ce Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Sun, 2 Nov 2025 16:15:25 -0800 Subject: [PATCH 5/8] Use a small batch size for attachments --- .../org/labkey/api/dataiterator/StatementDataIterator.java | 4 ++-- .../labkey/experiment/DataClassMigrationSchemaHandler.java | 4 ---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/api/src/org/labkey/api/dataiterator/StatementDataIterator.java b/api/src/org/labkey/api/dataiterator/StatementDataIterator.java index 8fb94466ab5..bf55b98b82c 100644 --- a/api/src/org/labkey/api/dataiterator/StatementDataIterator.java +++ b/api/src/org/labkey/api/dataiterator/StatementDataIterator.java @@ -129,8 +129,8 @@ public void setUseAsynchronousExecute(boolean useAsynchronousExecute) * does not 'release' rows until the statement that operates on that row (e.g. inserts it) has been * executed. * - * This is different than the normal flow of control where 'later' data iterators only call 'earlier' data iterators. - * In this case the StatementDataIterator is passing some internal state information forward to to the EmbargoDataIterator + * This is different from the normal flow of control where 'later' data iterators only call 'earlier' data iterators. + * In this case the StatementDataIterator is passing some internal state information forward to the EmbargoDataIterator * This is actually fine, since it's the DataIteratorBuilder's job to set up a correct pipeline. 
*/ public void setEmbargoDataIterator(EmbargoDataIterator cache) diff --git a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java index a8afc65a71a..aabb243022e 100644 --- a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java @@ -104,10 +104,6 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte .append(")"); Collection notCopiedObjectIds = new SqlSelector(sourceTable.getSchema(), objectIdSql).getCollection(Long.class); - // TODO: temp check - delete this - Collection lsids = new TableSelector(sourceTable, Collections.singleton("LSID"), notCopiedFilter, null).getCollection(String.class); - assert notCopiedObjectIds.size() == lsids.size(); - if (notCopiedObjectIds.isEmpty()) { LOG.info(rowsNotCopied(0)); From f909993f1599ecca72ed6cdd31250bc57038c92a Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Mon, 3 Nov 2025 09:30:11 -0800 Subject: [PATCH 6/8] More sensible method hierarchy for filtering: getTableFilterClause() calls getContainerClause() calls getContainerFieldKey() --- .../api/data/DatabaseMigrationService.java | 41 +++++++++---------- .../AssayResultMigrationSchemaHandler.java | 12 +----- .../core/CoreMigrationSchemaHandler.java | 11 ++--- .../DataClassMigrationSchemaHandler.java | 22 ++++------ .../ExperimentMigrationSchemaHandler.java | 5 +-- .../SampleTypeMigrationSchemaHandler.java | 14 ++----- .../org/labkey/query/sql/QuerySelectView.java | 14 +++---- 7 files changed, 47 insertions(+), 72 deletions(-) diff --git a/api/src/org/labkey/api/data/DatabaseMigrationService.java b/api/src/org/labkey/api/data/DatabaseMigrationService.java index 6942f7b1b2a..4a818063bd2 100644 --- a/api/src/org/labkey/api/data/DatabaseMigrationService.java +++ b/api/src/org/labkey/api/data/DatabaseMigrationService.java @@ -73,9 +73,6 @@ interface MigrationSchemaHandler // Marker for tables to declare themselves as site-wide (no container filtering) FieldKey SITE_WIDE_TABLE = FieldKey.fromParts("site-wide"); - // Dummy value returned from getContainerFieldKey() to ensure that custom getContainerClause() method is called - FieldKey DUMMY_FIELD_KEY = FieldKey.fromParts("DUMMY"); - DbSchema getSchema(); void beforeVerification(); @@ -85,10 +82,10 @@ interface MigrationSchemaHandler List getTablesToCopy(); // Create a filter clause that selects from all specified containers and (in some overrides) applies table-specific filters - FilterClause getTableFilterClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers); + FilterClause getTableFilterClause(TableInfo sourceTable, Set containers); // Create a filter clause that selects from all specified containers - FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers); + FilterClause getContainerClause(TableInfo sourceTable, Set containers); // Return the FieldKey that can be used to filter this table by container. Special values SITE_WIDE_TABLE and // DUMMY_FIELD_KEY can be returned for special behaviors. 
DUMMY_FIELD_KEY ensures that the handler's custom @@ -96,9 +93,9 @@ interface MigrationSchemaHandler @Nullable FieldKey getContainerFieldKey(TableInfo sourceTable); // Create a filter clause that selects all rows from unfiltered containers plus filtered rows from the filtered containers - FilterClause getDomainDataFilterClause(Set copyContainers, Set filteredContainers, List domainFilters, TableInfo sourceTable, FieldKey containerFieldKey, Set selectColumnNames); + FilterClause getDomainDataFilterClause(Set copyContainers, Set filteredContainers, List domainFilters, TableInfo sourceTable, Set selectColumnNames); - void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames); + void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, Set selectColumnNames); // Do any necessary clean up after the target table has been populated. notCopiedFilter selects all rows in the // source table that were NOT copied to the target table. (For example, rows in a global table not copied due to @@ -155,16 +152,18 @@ public List getTablesToCopy() } @Override - public FilterClause getTableFilterClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getTableFilterClause(TableInfo sourceTable, Set containers) { - return getContainerClause(sourceTable, containerFieldKey, containers); + return getContainerClause(sourceTable, containers); } @Override - public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getContainerClause(TableInfo sourceTable, Set containers) { - if (containerFieldKey == SITE_WIDE_TABLE || containerFieldKey == DUMMY_FIELD_KEY) - throw new IllegalStateException("Should not be supplying " + containerFieldKey + " to the default getContainerClause() method"); + FieldKey containerFieldKey = getContainerFieldKey(sourceTable); + + if (containerFieldKey == SITE_WIDE_TABLE) + return new SQLClause(new SQLFragment("TRUE")); return new InClause(containerFieldKey, containers); } @@ -211,17 +210,17 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container } @Override - public final FilterClause getDomainDataFilterClause(Set copyContainers, Set filteredContainers, List domainFilters, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + public final FilterClause getDomainDataFilterClause(Set copyContainers, Set filteredContainers, List domainFilters, TableInfo sourceTable, Set selectColumnNames) { // Filtered case: remove the filtered containers from the unconditional container set Set otherContainers = new HashSet<>(copyContainers); otherContainers.removeAll(filteredContainers); - FilterClause ret = getContainerClause(sourceTable, fKey, otherContainers); + FilterClause ret = getContainerClause(sourceTable, otherContainers); OrClause orClause = new OrClause(); // Delegate to the MigrationSchemaHandler to add domain-filtered containers back with their special filter applied - domainFilters.forEach(filter -> addDomainDataFilterClause(orClause, filter, sourceTable, fKey, selectColumnNames)); + domainFilters.forEach(filter -> addDomainDataFilterClause(orClause, filter, sourceTable, selectColumnNames)); if (!orClause.getClauses().isEmpty()) { @@ -233,13 +232,13 @@ public final FilterClause getDomainDataFilterClause(Set copyContainers, Se } @Override - public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, 
Set selectColumnNames) + public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, Set selectColumnNames) { - addDataFilterClause(orClause, filter, sourceTable, fKey, selectColumnNames); + addDataFilterClause(orClause, filter, sourceTable, selectColumnNames); } // Add a filter and return true if the column exists directly on the table - protected boolean addDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + protected boolean addDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, Set selectColumnNames) { boolean columnExists = selectColumnNames.contains(filter.column()); @@ -248,7 +247,7 @@ protected boolean addDataFilterClause(OrClause orClause, DataFilter filter, Tabl // Select all rows in this domain-filtered container that meet its criteria orClause.addClause( new AndClause( - getContainerClause(sourceTable, fKey, filter.containers()), + getContainerClause(sourceTable, filter.containers()), filter.condition() ) ); @@ -259,13 +258,13 @@ protected boolean addDataFilterClause(OrClause orClause, DataFilter filter, Tabl // Add a clause that selects all rows where the object property with equals the filter value. This // is only for provisioned tables that lack an ObjectId, MaterialId, or DataId column. - protected void addObjectPropertyClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey containerFieldKey, int propertyId) + protected void addObjectPropertyClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, int propertyId) { SQLFragment flagWhere = new SQLFragment("lsid IN (SELECT ObjectURI FROM exp.Object WHERE ObjectId IN (SELECT ObjectId FROM exp.ObjectProperty WHERE StringValue = ? AND PropertyId = ?))", filter.condition().getParamVals()[0], propertyId); orClause.addClause( new AndClause( - getContainerClause(sourceTable, containerFieldKey, filter.containers()), + getContainerClause(sourceTable, filter.containers()), new SQLClause(flagWhere) ) ); diff --git a/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java b/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java index 885baf61df3..218fc955ac6 100644 --- a/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java +++ b/assay/src/org/labkey/assay/AssayResultMigrationSchemaHandler.java @@ -1,7 +1,6 @@ package org.labkey.assay; import org.apache.logging.log4j.Logger; -import org.jetbrains.annotations.Nullable; import org.labkey.api.assay.AbstractTsvAssayProvider; import org.labkey.api.data.DatabaseMigrationService.DataFilter; import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler; @@ -15,7 +14,6 @@ import org.labkey.api.data.SimpleFilter.SQLClause; import org.labkey.api.data.SqlSelector; import org.labkey.api.data.TableInfo; -import org.labkey.api.query.FieldKey; import org.labkey.api.util.GUID; import org.labkey.api.util.logging.LogHelper; @@ -31,12 +29,6 @@ public AssayResultMigrationSchemaHandler() super(DbSchema.get(AbstractTsvAssayProvider.ASSAY_SCHEMA_NAME, DbSchemaType.Provisioned)); } - @Override - public @Nullable FieldKey getContainerFieldKey(TableInfo table) - { - return DUMMY_FIELD_KEY; - } - // Provisioned assay result tables occasionally have no DataId column; hopefully they have an LSID column. 
private boolean hasDataIdColumn(TableInfo sourceTable) { @@ -44,7 +36,7 @@ private boolean hasDataIdColumn(TableInfo sourceTable) } @Override - public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getContainerClause(TableInfo sourceTable, Set containers) { return new SQLClause( new SQLFragment(hasDataIdColumn(sourceTable) ? "DataId IN (SELECT RowId" : "LSID IN (SELECT LSID") @@ -55,7 +47,7 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container } @Override - public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, Set selectColumnNames) { // We want no rows from containers with a domain data filter, so don't add any clauses } diff --git a/core/src/org/labkey/core/CoreMigrationSchemaHandler.java b/core/src/org/labkey/core/CoreMigrationSchemaHandler.java index 08ccc626703..bc33e9c6b62 100644 --- a/core/src/org/labkey/core/CoreMigrationSchemaHandler.java +++ b/core/src/org/labkey/core/CoreMigrationSchemaHandler.java @@ -2,6 +2,7 @@ import org.jetbrains.annotations.Nullable; import org.labkey.api.data.CompareType; +import org.labkey.api.data.CompareType.CompareClause; import org.labkey.api.data.CoreSchema; import org.labkey.api.data.DatabaseMigrationConfiguration; import org.labkey.api.data.DatabaseMigrationService; @@ -114,9 +115,9 @@ public List getTablesToCopy() } @Override - public FilterClause getTableFilterClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getTableFilterClause(TableInfo sourceTable, Set containers) { - FilterClause filterClause = getContainerClause(sourceTable, containerFieldKey, containers); + FilterClause filterClause = getContainerClause(sourceTable, containers); String tableName = sourceTable.getName(); if ("Principals".equals(tableName) || "Members".equals(tableName)) @@ -155,9 +156,9 @@ public FilterClause getTableFilterClause(TableInfo sourceTable, FieldKey contain } @Override - public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getContainerClause(TableInfo sourceTable, Set containers) { - FilterClause containerClause = super.getContainerClause(sourceTable, containerFieldKey, containers); + FilterClause containerClause = super.getContainerClause(sourceTable, containers); String tableName = sourceTable.getName(); if ("Principals".equals(tableName) || "Members".equals(tableName)) @@ -165,7 +166,7 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container // Users and root groups have container == null, so add that as an OR clause OrClause orClause = new OrClause(); orClause.addClause(containerClause); - orClause.addClause(new CompareType.CompareClause(containerFieldKey, CompareType.ISBLANK, null)); + orClause.addClause(new CompareClause(getContainerFieldKey(sourceTable), CompareType.ISBLANK, null)); containerClause = orClause; } diff --git a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java index aabb243022e..eb152a86611 100644 --- a/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/DataClassMigrationSchemaHandler.java @@ -1,7 +1,6 @@ package org.labkey.experiment; import 
org.apache.logging.log4j.Logger; -import org.jetbrains.annotations.Nullable; import org.labkey.api.collections.Sets; import org.labkey.api.data.DatabaseMigrationConfiguration; import org.labkey.api.data.DatabaseMigrationService; @@ -44,18 +43,11 @@ public DataClassMigrationSchemaHandler() } @Override - public @Nullable FieldKey getContainerFieldKey(TableInfo table) - { - FieldKey fieldKey = super.getContainerFieldKey(table); - return fieldKey != null ? fieldKey : DUMMY_FIELD_KEY; // "DUMMY" case is a data class that lacks an FK - } - - @Override - public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getContainerClause(TableInfo sourceTable, Set containers) { final FilterClause clause; - if (containerFieldKey == DUMMY_FIELD_KEY) + if (getContainerFieldKey(sourceTable) == null) { // There are a couple bad data class provisioned tables that lack an FK to exp.Data. In that case, craft the // container FilterClause explicitly. @@ -67,23 +59,23 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container } else { - clause = super.getContainerClause(sourceTable, containerFieldKey, containers); + clause = super.getContainerClause(sourceTable, containers); } return clause; } @Override - public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, Set selectColumnNames) { // Data classes have a built-in Flag field if (filter.column().equalsIgnoreCase("Flag")) { - addObjectPropertyClause(orClause, filter, sourceTable, fKey, getCommentPropertyId(sourceTable.getSchema().getScope())); + addObjectPropertyClause(orClause, filter, sourceTable, getCommentPropertyId(sourceTable.getSchema().getScope())); } else { - addDataFilterClause(orClause, filter, sourceTable, fKey, selectColumnNames); + addDataFilterClause(orClause, filter, sourceTable, selectColumnNames); } } @@ -188,7 +180,7 @@ public void afterSchema(DatabaseMigrationConfiguration configuration, DbSchema s DatabaseMigrationService.get().copySourceTableToTargetTable(configuration, sourceTable, targetTable, DbSchemaType.Module, new DefaultMigrationSchemaHandler(biologicsTargetSchema) { @Override - public FilterClause getTableFilterClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getTableFilterClause(TableInfo sourceTable, Set containers) { // This is a global table, so no container clause. Just query and copy the sequence IDs referenced by data class rows we copied. 
return new InClause(FieldKey.fromParts("SequenceId"), SEQUENCE_IDS); diff --git a/experiment/src/org/labkey/experiment/ExperimentMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/ExperimentMigrationSchemaHandler.java index 2b1450cdc8c..69399581f3f 100644 --- a/experiment/src/org/labkey/experiment/ExperimentMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/ExperimentMigrationSchemaHandler.java @@ -50,7 +50,6 @@ public void beforeSchema() { return switch (table.getName()) { - case "Alias", "ObjectLegacyNames" -> DUMMY_FIELD_KEY; // Unused dummy value -- see override below case "DataTypeExclusion" -> FieldKey.fromParts("ExcludedContainer"); case "PropertyDomain" -> FieldKey.fromParts("DomainId", "Container"); case "ProtocolApplication" -> FieldKey.fromParts("RunId", "Container"); @@ -69,7 +68,7 @@ public List getTablesToCopy() } @Override - public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getContainerClause(TableInfo sourceTable, Set containers) { return switch (sourceTable.getName()) { @@ -117,7 +116,7 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container .appendInClause(containers, sourceTable.getSqlDialect()) .append(")") ); - default -> super.getContainerClause(sourceTable, containerFieldKey, containers); + default -> super.getContainerClause(sourceTable, containers); }; } diff --git a/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java index 15dd8edf2ff..a091b15c2b5 100644 --- a/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java @@ -1,7 +1,6 @@ package org.labkey.experiment; import org.apache.logging.log4j.Logger; -import org.jetbrains.annotations.Nullable; import org.labkey.api.data.DatabaseMigrationService.DataFilter; import org.labkey.api.data.DatabaseMigrationService.DefaultMigrationSchemaHandler; import org.labkey.api.data.SQLFragment; @@ -15,7 +14,6 @@ import org.labkey.api.data.dialect.SqlDialect; import org.labkey.api.exp.OntologyManager; import org.labkey.api.exp.api.SampleTypeDomainKind; -import org.labkey.api.query.FieldKey; import org.labkey.api.util.GUID; import org.labkey.api.util.logging.LogHelper; @@ -36,13 +34,7 @@ public SampleTypeMigrationSchemaHandler() } @Override - public @Nullable FieldKey getContainerFieldKey(TableInfo table) - { - return DUMMY_FIELD_KEY; // Unused dummy value -- see override below - } - - @Override - public FilterClause getContainerClause(TableInfo sourceTable, FieldKey containerFieldKey, Set containers) + public FilterClause getContainerClause(TableInfo sourceTable, Set containers) { String joinColumnName = getJoinColumnName(sourceTable); @@ -57,7 +49,7 @@ public FilterClause getContainerClause(TableInfo sourceTable, FieldKey container } @Override - public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, FieldKey fKey, Set selectColumnNames) + public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, Set selectColumnNames) { // Sample-type-specific optimization - joining to exp.Material instead of exp.Object is much faster if (filter.column().equalsIgnoreCase("Flag")) @@ -80,7 +72,7 @@ public void addDomainDataFilterClause(OrClause orClause, DataFilter filter, Tabl } else { - addDataFilterClause(orClause, filter, 
sourceTable, fKey, selectColumnNames); + addDataFilterClause(orClause, filter, sourceTable, selectColumnNames); } } diff --git a/query/src/org/labkey/query/sql/QuerySelectView.java b/query/src/org/labkey/query/sql/QuerySelectView.java index fbf2b59c2f1..76eee5c368d 100644 --- a/query/src/org/labkey/query/sql/QuerySelectView.java +++ b/query/src/org/labkey/query/sql/QuerySelectView.java @@ -331,13 +331,13 @@ private SQLFragment getSelectSQL(TableInfo table, @Nullable Collection fieldKeySet = new TreeSet<>(); allColumns.stream() - .map(col -> (null != col.getWrappedColumnName() && null != table.getColumn(col.getWrappedColumnName())) ? table.getColumn(col.getWrappedColumnName()) : col) - .forEach(col -> { - var fk = col.getFieldKey(); - fieldKeySet.add(fk); - if (null != fk.getParent()) - fieldKeySet.add(fk.getRootFieldKey()); - }); + .map(col -> (null != col.getWrappedColumnName() && null != table.getColumn(col.getWrappedColumnName())) ? table.getColumn(col.getWrappedColumnName()) : col) + .forEach(col -> { + var fk = col.getFieldKey(); + fieldKeySet.add(fk); + if (null != fk.getParent()) + fieldKeySet.add(fk.getRootFieldKey()); + }); SQLFragment getFromSql = table.getFromSQL(tableAlias, fieldKeySet); fromFrag.append(getFromSql); fromFrag.append(" "); From 55809ebdbe1bd0e641b69f238fe8b9873136b0bc Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Mon, 3 Nov 2025 11:11:04 -0800 Subject: [PATCH 7/8] Use joins instead of sub-selects --- api/src/org/labkey/api/data/DatabaseMigrationService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/src/org/labkey/api/data/DatabaseMigrationService.java b/api/src/org/labkey/api/data/DatabaseMigrationService.java index 4a818063bd2..b32dd40f0c9 100644 --- a/api/src/org/labkey/api/data/DatabaseMigrationService.java +++ b/api/src/org/labkey/api/data/DatabaseMigrationService.java @@ -260,7 +260,7 @@ protected boolean addDataFilterClause(OrClause orClause, DataFilter filter, Tabl // is only for provisioned tables that lack an ObjectId, MaterialId, or DataId column. protected void addObjectPropertyClause(OrClause orClause, DataFilter filter, TableInfo sourceTable, int propertyId) { - SQLFragment flagWhere = new SQLFragment("lsid IN (SELECT ObjectURI FROM exp.Object WHERE ObjectId IN (SELECT ObjectId FROM exp.ObjectProperty WHERE StringValue = ? AND PropertyId = ?))", filter.condition().getParamVals()[0], propertyId); + SQLFragment flagWhere = new SQLFragment("lsid IN (SELECT ObjectURI FROM exp.Object o INNER JOIN exp.ObjectProperty op ON o.ObjectId = op.ObjectId WHERE StringValue = ? 
AND PropertyId = ?)", filter.condition().getParamVals()[0], propertyId); orClause.addClause( new AndClause( From 36557ae9a85c2f5b81d12151f108db3ba5a621ef Mon Sep 17 00:00:00 2001 From: Adam Rauch Date: Mon, 3 Nov 2025 14:45:58 -0800 Subject: [PATCH 8/8] Delete not-copied SampleIds from inventory.Item --- .../labkey/experiment/SampleTypeMigrationSchemaHandler.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java b/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java index a091b15c2b5..c963d1d57e0 100644 --- a/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java +++ b/experiment/src/org/labkey/experiment/SampleTypeMigrationSchemaHandler.java @@ -151,6 +151,11 @@ public void afterTable(TableInfo sourceTable, TableInfo targetTable, SimpleFilte new SQLFragment("UPDATE assay.Well SET SampleId = null WHERE SampleId") .append(materialIdClause) ); + LOG.info(" inventory.Item"); + executor.execute( + new SQLFragment("DELETE FROM inventory.Item WHERE MaterialId") + .append(materialIdClause) + ); LOG.info(" exp.Material"); executor.execute( new SQLFragment("DELETE FROM exp.Material WHERE RowId")