diff --git a/assembly/docfiles/javadoc.css b/assembly/docfiles/javadoc.css deleted file mode 100644 index 628bc2ccabad9..0000000000000 --- a/assembly/docfiles/javadoc.css +++ /dev/null @@ -1,648 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* Javadoc style sheet */ -body { - background-color:#ffffff; - color:#353833; - font-family:Arial, Helvetica, sans-serif; - font-size:76%; - margin:0; -} -a:link, a:visited { - text-decoration:none; - color:#4c6b87; -} -a:hover, a:focus { - text-decoration:none; - color:#bb7a2a; -} -a:active { - text-decoration:none; - color:#4c6b87; -} -a[name] { - color:#353833; -} -a[name]:hover { - text-decoration:none; - color:#353833; -} -pre { - font-size:1.3em; -} -h1 { - font-size:1.8em; -} -h2 { - font-size:1.5em; -} -h3 { - font-size:1.4em; -} -h4 { - font-size:1.3em; -} -h5 { - font-size:1.2em; -} -h6 { - font-size:1.1em; -} -ul { - list-style-type:disc; -} -code, tt { - font-size:1.2em; -} -dt code { - font-size:1.2em; -} -table tr td dt code { - font-size:1.2em; - vertical-align:top; -} -sup { - font-size:.6em; -} -/* -Document title and Copyright styles -*/ -.clear { - clear:both; - height:0px; - overflow:hidden; -} -.aboutLanguage { - float:right; - font-weight: bold; - padding:5px 21px; - font-size:13px; - z-index:200; - margin-top:-7px; -} -.aboutLanguage em { - font-style: normal; -} -.legalCopy { - margin-left:.5em; -} -.bar a, .bar a:link, .bar a:visited, .bar a:active { - color:#FFFFFF; - text-decoration:none; -} -.bar a:hover, .bar a:focus { - color:#bb7a2a; -} -.tab { - background-color:#0066FF; - background-image:url(resources/titlebar.gif); - background-position:left top; - background-repeat:no-repeat; - color:#ffffff; - padding:8px; - width:5em; - font-weight:bold; -} -/* -Navigation bar styles -*/ -.bar { - background-image:url(resources/background.gif); - background-repeat:repeat-x; - color:#FFFFFF; - padding:.8em .5em .4em .8em; - height:auto;/*height:1.8em;*/ - font-size:1em; - margin:0; -} -.topNav { - background-image:url(resources/background.gif); - background-repeat:repeat-x; - color:#FFFFFF; - float:left; - padding:0; - width:100%; - clear:right; - height:2.8em; - padding-top:10px; - overflow:hidden; -} -.bottomNav { - margin-top:10px; - 
background-image:url(resources/background.gif); - background-repeat:repeat-x; - color:#FFFFFF; - float:left; - padding:0; - width:100%; - clear:right; - height:2.8em; - padding-top:10px; - overflow:hidden; -} -.subNav { - background-color:#dee3e9; - border-bottom:1px solid #9eadc0; - float:left; - width:100%; - overflow:hidden; -} -.subNav div { - clear:left; - float:left; - padding:0 0 5px 6px; -} -ul.navList, ul.subNavList { - float:left; - margin:0 25px 0 0; - padding:0; -} -ul.navList li{ - list-style:none; - float:left; - padding:3px 6px; -} -ul.subNavList li{ - list-style:none; - float:left; - font-size:90%; -} -.topNav a:link, .topNav a:active, .topNav a:visited, .bottomNav a:link, .bottomNav a:active, .bottomNav a:visited { - color:#FFFFFF; - text-decoration:none; -} -.topNav a:hover, .bottomNav a:hover { - text-decoration:none; - color:#bb7a2a; -} -.navBarCell1Rev { - background-image:url(resources/tab.gif); - background-color:#a88834; - color:#FFFFFF; - margin: auto 5px; - border:1px solid #c9aa44; -} -/* -Page header and footer styles -*/ -.header, .footer { - clear:both; - margin:0 20px; - padding:5px 0 0 0; -} -.indexHeader { - margin:10px; - position:relative; -} -.indexHeader h1 { - font-size:1.3em; -} -.title { - color:#2c4557; - margin:10px 0; -} -.subTitle { - margin:5px 0 0 0; -} -.header ul { - margin:0 0 25px 0; - padding:0; -} -.footer ul { - margin:20px 0 5px 0; -} -.header ul li, .footer ul li { - list-style:none; - font-size:1.2em; -} -/* -Heading styles -*/ -div.details ul.blockList ul.blockList ul.blockList li.blockList h4, div.details ul.blockList ul.blockList ul.blockListLast li.blockList h4 { - background-color:#dee3e9; - border-top:1px solid #9eadc0; - border-bottom:1px solid #9eadc0; - margin:0 0 6px -8px; - padding:2px 5px; -} -ul.blockList ul.blockList ul.blockList li.blockList h3 { - background-color:#dee3e9; - border-top:1px solid #9eadc0; - border-bottom:1px solid #9eadc0; - margin:0 0 6px -8px; - padding:2px 5px; -} 
-ul.blockList ul.blockList li.blockList h3 { - padding:0; - margin:15px 0; -} -ul.blockList li.blockList h2 { - padding:0px 0 20px 0; -} -/* -Page layout container styles -*/ -.contentContainer, .sourceContainer, .classUseContainer, .serializedFormContainer, .constantValuesContainer { - clear:both; - padding:10px 20px; - position:relative; -} -.indexContainer { - margin:10px; - position:relative; - font-size:1.0em; -} -.indexContainer h2 { - font-size:1.1em; - padding:0 0 3px 0; -} -.indexContainer ul { - margin:0; - padding:0; -} -.indexContainer ul li { - list-style:none; -} -.contentContainer .description dl dt, .contentContainer .details dl dt, .serializedFormContainer dl dt { - font-size:1.1em; - font-weight:bold; - margin:10px 0 0 0; - color:#4E4E4E; -} -.contentContainer .description dl dd, .contentContainer .details dl dd, .serializedFormContainer dl dd { - margin:10px 0 10px 20px; -} -.serializedFormContainer dl.nameValue dt { - margin-left:1px; - font-size:1.1em; - display:inline; - font-weight:bold; -} -.serializedFormContainer dl.nameValue dd { - margin:0 0 0 1px; - font-size:1.1em; - display:inline; -} -/* -List styles -*/ -ul.horizontal li { - display:inline; - font-size:0.9em; -} -ul.inheritance { - margin:0; - padding:0; -} -ul.inheritance li { - display:inline; - list-style:none; -} -ul.inheritance li ul.inheritance { - margin-left:15px; - padding-left:15px; - padding-top:1px; -} -ul.blockList, ul.blockListLast { - margin:10px 0 10px 0; - padding:0; -} -ul.blockList li.blockList, ul.blockListLast li.blockList { - list-style:none; - margin-bottom:25px; -} -ul.blockList ul.blockList li.blockList, ul.blockList ul.blockListLast li.blockList { - padding:0px 20px 5px 10px; - border:1px solid #9eadc0; - background-color:#f9f9f9; -} -ul.blockList ul.blockList ul.blockList li.blockList, ul.blockList ul.blockList ul.blockListLast li.blockList { - padding:0 0 5px 8px; - background-color:#ffffff; - border:1px solid #9eadc0; - border-top:none; -} -ul.blockList 
ul.blockList ul.blockList ul.blockList li.blockList { - margin-left:0; - padding-left:0; - padding-bottom:15px; - border:none; - border-bottom:1px solid #9eadc0; -} -ul.blockList ul.blockList ul.blockList ul.blockList li.blockListLast { - list-style:none; - border-bottom:none; - padding-bottom:0; -} -table tr td dl, table tr td dl dt, table tr td dl dd { - margin-top:0; - margin-bottom:1px; -} -/* -Table styles -*/ -.contentContainer table, .classUseContainer table, .constantValuesContainer table { - border-bottom:1px solid #9eadc0; - width:100%; -} -.contentContainer ul li table, .classUseContainer ul li table, .constantValuesContainer ul li table { - width:100%; -} -.contentContainer .description table, .contentContainer .details table { - border-bottom:none; -} -.contentContainer ul li table th.colOne, .contentContainer ul li table th.colFirst, .contentContainer ul li table th.colLast, .classUseContainer ul li table th, .constantValuesContainer ul li table th, .contentContainer ul li table td.colOne, .contentContainer ul li table td.colFirst, .contentContainer ul li table td.colLast, .classUseContainer ul li table td, .constantValuesContainer ul li table td{ - vertical-align:top; - padding-right:20px; -} -.contentContainer ul li table th.colLast, .classUseContainer ul li table th.colLast,.constantValuesContainer ul li table th.colLast, -.contentContainer ul li table td.colLast, .classUseContainer ul li table td.colLast,.constantValuesContainer ul li table td.colLast, -.contentContainer ul li table th.colOne, .classUseContainer ul li table th.colOne, -.contentContainer ul li table td.colOne, .classUseContainer ul li table td.colOne { - padding-right:3px; -} -.overviewSummary caption, .packageSummary caption, .contentContainer ul.blockList li.blockList caption, .summary caption, .classUseContainer caption, .constantValuesContainer caption { - position:relative; - text-align:left; - background-repeat:no-repeat; - color:#FFFFFF; - font-weight:bold; - clear:none; - 
overflow:hidden; - padding:0px; - margin:0px; -} -caption a:link, caption a:hover, caption a:active, caption a:visited { - color:#FFFFFF; -} -.overviewSummary caption span, .packageSummary caption span, .contentContainer ul.blockList li.blockList caption span, .summary caption span, .classUseContainer caption span, .constantValuesContainer caption span { - white-space:nowrap; - padding-top:8px; - padding-left:8px; - display:block; - float:left; - background-image:url(resources/titlebar.gif); - height:18px; -} -.overviewSummary .tabEnd, .packageSummary .tabEnd, .contentContainer ul.blockList li.blockList .tabEnd, .summary .tabEnd, .classUseContainer .tabEnd, .constantValuesContainer .tabEnd { - width:10px; - background-image:url(resources/titlebar_end.gif); - background-repeat:no-repeat; - background-position:top right; - position:relative; - float:left; -} -ul.blockList ul.blockList li.blockList table { - margin:0 0 12px 0px; - width:100%; -} -.tableSubHeadingColor { - background-color: #EEEEFF; -} -.altColor { - background-color:#eeeeef; -} -.rowColor { - background-color:#ffffff; -} -.overviewSummary td, .packageSummary td, .contentContainer ul.blockList li.blockList td, .summary td, .classUseContainer td, .constantValuesContainer td { - text-align:left; - padding:3px 3px 3px 7px; -} -th.colFirst, th.colLast, th.colOne, .constantValuesContainer th { - background:#dee3e9; - border-top:1px solid #9eadc0; - border-bottom:1px solid #9eadc0; - text-align:left; - padding:3px 3px 3px 7px; -} -td.colOne a:link, td.colOne a:active, td.colOne a:visited, td.colOne a:hover, td.colFirst a:link, td.colFirst a:active, td.colFirst a:visited, td.colFirst a:hover, td.colLast a:link, td.colLast a:active, td.colLast a:visited, td.colLast a:hover, .constantValuesContainer td a:link, .constantValuesContainer td a:active, .constantValuesContainer td a:visited, .constantValuesContainer td a:hover { - font-weight:bold; -} -td.colFirst, th.colFirst { - border-left:1px solid #9eadc0; - 
white-space:nowrap; -} -td.colLast, th.colLast { - border-right:1px solid #9eadc0; -} -td.colOne, th.colOne { - border-right:1px solid #9eadc0; - border-left:1px solid #9eadc0; -} -table.overviewSummary { - padding:0px; - margin-left:0px; -} -table.overviewSummary td.colFirst, table.overviewSummary th.colFirst, -table.overviewSummary td.colOne, table.overviewSummary th.colOne { - width:25%; - vertical-align:middle; -} -table.packageSummary td.colFirst, table.overviewSummary th.colFirst { - width:25%; - vertical-align:middle; -} -/* -Content styles -*/ -.description pre { - margin-top:0; -} -.deprecatedContent { - margin:0; - padding:10px 0; -} -.docSummary { - padding:0; -} -/* -Formatting effect styles -*/ -.sourceLineNo { - color:green; - padding:0 30px 0 0; -} -h1.hidden { - visibility:hidden; - overflow:hidden; - font-size:.9em; -} -.block { - display:block; - margin:3px 0 0 0; -} -.strong { - font-weight:bold; -} -h1.header { - font: 16px helvetica, verdana, arial, sans-serif; - color: #111; - font-weight: bold; - margin: 10px 0 0 0; -} -h2.header { - font: 15px helvetica, verdana, arial, sans-serif; - color: #333; - margin: 10px 0 0 0; - font-weight: bold; -} -body, pre, dd, dl, dt, td, table, th, tr, li, ol, ul, p, br, a, input, form, textarea, select { - font: 13px helvetica, verdana, arial, sans-serif; - color: #36393D; -} -ul { - list-style-type: circle; - padding: 0px 0px 10px 30px; -} - -li { - padding-top: 0px; -} - -tt, code { - font: 13px Courier New; -} -hr { - border-style: dotted; - width: 100%; - color: #eee; -} - -.dotted { - border: 1px #ddd dotted; -} -table.doctable { - margin: 10px 0 10px 0; - padding: 0; - border-spacing: 0; - border-collapse: collapse; - border: 1px dotted #ccc; -} - -table.doctable th { - font-weight: bold; - border: 1px dotted #ccc; - background: #D6EBAD; - padding: 5px; -} - -table.doctable tr:hover { - background: #e6ffde; -} - -table.doctable td { - border: 1px dotted #ddd; - padding: 5px; -} -.close-link { - border: 
1px solid #eee; - background-color: #efefef; - padding: 3px; - color: #333; - font-size: 9px; -} -.close-link:hover { - text-decoration: none; - background-color: #efefef; - border-bottom: 1px solid #ddd; - color: #333; - padding: 3px; - font-size: 9px; -} -blockquote.snippet, pre.snippet { - background: #f0f7ff; - border: 1px dotted #aaa; - padding: 10px; - font-family: courier new, serif; - margin: 10px 0 10px 0; -} -/* - * By default links have underline decoration. - */ -a { - text-decoration: none; - color: #369; -} - -/* - * Note, it works only on IE. On NS links don't have any effects. - */ -a:hover { - color: #f00; - border-bottom: 1px dotted; -} - -.header { - font: 16px helvetica, verdana, arial, helvetica, sans-serif; - color: #333; -} - -.javadocimg { - border:0; - padding: 0 0 0 5px; -} - -.pkgbox { -} -/* - * CSS for @Nullable. - */ -.nullable { - color: #d4a61f; - font-variant: small-caps; - background-color: #ffffff; -} - -/* - * CSS for @Deprecated. - */ -.deprecated { - color: #e05249; - font-weight: bold; - font-variant: small-caps; -} - -/* - * CSS for TODOs. - */ -.todo { - color: #e05249; - font-weight: bold; - font-variant: small-caps; - font-size: 10px; -} - -/* - * CSS for '<' and '>'. - */ -.angle_bracket { - color: #aaa; -} - -span.table-header { - font: 20px helvetica, verdana, arial, helvetica, sans-serif; - padding-left: 5px; - color: #333; -} - -span.table-subheader { - font: 13px helvetica, verdana, arial, helvetica, sans-serif; - padding-left: 5px; - color: #333; -} diff --git a/bin/control.bat b/bin/control.bat index 15d5e6fcb619b..4894cbc4322ad 100644 --- a/bin/control.bat +++ b/bin/control.bat @@ -156,6 +156,11 @@ if %ERRORLEVEL% equ 0 ( if "%JVM_OPTS%" == "" set JVM_OPTS=-Xms256m -Xmx1g ) +:: +:: Uncomment to enable experimental commands [--wal] +:: +:: set JVM_OPTS=%JVM_OPTS% -DIGNITE_ENABLE_EXPERIMENTAL_COMMAND=true + :: :: Uncomment the following GC settings if you see spikes in your throughput due to Garbage Collection. 
:: diff --git a/examples/pom.xml b/examples/pom.xml index 8a854cfc1771a..4b34283db3a6a 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -194,11 +194,6 @@ - - org.apache.ignite - ignite-spark_2.10 - ${project.version} - diff --git a/modules/aop/src/test/java/org/apache/ignite/gridify/AbstractAopTest.java b/modules/aop/src/test/java/org/apache/ignite/gridify/AbstractAopTest.java index 33f2cddbfaddf..31d23595fe503 100644 --- a/modules/aop/src/test/java/org/apache/ignite/gridify/AbstractAopTest.java +++ b/modules/aop/src/test/java/org/apache/ignite/gridify/AbstractAopTest.java @@ -25,6 +25,7 @@ import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.events.Event; +import org.apache.ignite.events.EventType; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.spi.deployment.local.LocalDeploymentSpi; @@ -59,6 +60,8 @@ public abstract class AbstractAopTest extends GridCommonAbstractTest { cfg.setMetricsUpdateFrequency(500); cfg.setDeploymentMode(depMode); + cfg.setIncludeEventTypes(EventType.EVTS_ALL); + return cfg; } diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java index ba858775def5b..dcf8dc8e853d8 100644 --- a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java @@ -178,6 +178,7 @@ protected static class TestTree extends BPlusTree { super( "test", cacheId, + null, pageMem, null, new AtomicLong(), diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/FileStoreHeapUtilizationJolBenchmark.java 
b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/FileStoreHeapUtilizationJolBenchmark.java new file mode 100644 index 0000000000000..1dc7d474270ed --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/FileStoreHeapUtilizationJolBenchmark.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.benchmarks.jol; + +import java.io.File; +import java.nio.ByteBuffer; +import java.nio.file.Path; +import java.util.LinkedList; +import java.util.List; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FileVersionCheckingFactory; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.openjdk.jol.info.GraphLayout; + +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.PART_FILE_TEMPLATE; + +/** + * + */ +public class FileStoreHeapUtilizationJolBenchmark { + /** */ + private void benchmark() throws IgniteCheckedException { + FilePageStoreFactory factory = new FileVersionCheckingFactory( + new AsyncFileIOFactory(), + new AsyncFileIOFactory(), + new DataStorageConfiguration() + .setPageSize(4096) + ); + + List stores = new LinkedList<>(); + + File workDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false); + + for (int i = 0; i < 10000; i++) { + final int p = i; + + PageStore ps = factory.createPageStore( + PageMemory.FLAG_DATA, + () -> getPartitionFilePath(workDir, p), + d -> { } + ); + + ps.ensure(); + + ps.write(0, ByteBuffer.allocate(256), 1, false); + + stores.add(ps); + } + + System.gc(); + + GraphLayout layout = GraphLayout.parseInstance(stores); + + System.out.println("heap usage: " + layout.totalSize()); + + U.delete(workDir); + } + + /** */ + private Path getPartitionFilePath(File cacheWorkDir, int partId) { + return new File(cacheWorkDir, String.format(PART_FILE_TEMPLATE, partId)).toPath(); + } + + /** */ + public static 
void main(String[] args) throws Exception { + new FileStoreHeapUtilizationJolBenchmark().benchmark(); + } +} diff --git a/modules/camel/pom.xml b/modules/camel/pom.xml index 0d65ce80aabe5..6cd725f5ba913 100644 --- a/modules/camel/pom.xml +++ b/modules/camel/pom.xml @@ -35,7 +35,7 @@ http://ignite.apache.org - 18.0 + 25.1-jre 2.5.0 diff --git a/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTest.java b/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTest.java index 88b7eb8845757..992fd1afa8ea4 100644 --- a/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTest.java +++ b/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTest.java @@ -48,7 +48,9 @@ import org.apache.ignite.IgniteDataStreamer; import org.apache.ignite.IgniteException; import org.apache.ignite.cache.CachePeekMode; +import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.events.CacheEvent; +import org.apache.ignite.events.EventType; import org.apache.ignite.internal.util.lang.GridMapEntry; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.lang.IgniteBiPredicate; @@ -95,6 +97,11 @@ public IgniteCamelStreamerTest() { super(true); } + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + return super.getConfiguration(igniteInstanceName).setIncludeEventTypes(EventType.EVTS_ALL); + } + @SuppressWarnings("unchecked") @Override public void beforeTest() throws Exception { grid().getOrCreateCache(defaultCacheConfiguration()); diff --git a/modules/cassandra/store/pom.xml b/modules/cassandra/store/pom.xml index 32b10f5225b7b..8922a53472515 100644 --- a/modules/cassandra/store/pom.xml +++ b/modules/cassandra/store/pom.xml @@ -39,7 +39,7 @@ 3.0.0 3.3 4.1.27.Final - 19.0 + 25.1-jre 3.0.2 diff --git 
a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java index 4fb0cb27d7c8f..4d59e54716115 100644 --- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java +++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java @@ -648,17 +648,25 @@ private void createKeyspace(KeyValuePersistenceSettings settings) { while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) { try { - log.info("-----------------------------------------------------------------------"); - log.info("Creating Cassandra keyspace '" + settings.getKeyspace() + "'"); - log.info("-----------------------------------------------------------------------\n\n" + - settings.getKeyspaceDDLStatement() + "\n"); - log.info("-----------------------------------------------------------------------"); + if (log.isInfoEnabled()) { + log.info("-----------------------------------------------------------------------"); + log.info("Creating Cassandra keyspace '" + settings.getKeyspace() + "'"); + log.info("-----------------------------------------------------------------------\n\n" + + settings.getKeyspaceDDLStatement() + "\n"); + log.info("-----------------------------------------------------------------------"); + } + session().execute(settings.getKeyspaceDDLStatement()); - log.info("Cassandra keyspace '" + settings.getKeyspace() + "' was successfully created"); + + if (log.isInfoEnabled()) + log.info("Cassandra keyspace '" + settings.getKeyspace() + "' was successfully created"); + return; } catch (AlreadyExistsException ignored) { - log.info("Cassandra keyspace '" + settings.getKeyspace() + "' already exist"); + if (log.isInfoEnabled()) + log.info("Cassandra keyspace '" + settings.getKeyspace() + "' already exist"); + return; } catch (Throwable 
e) { @@ -689,17 +697,25 @@ private void createTable(String table, KeyValuePersistenceSettings settings) { while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) { try { - log.info("-----------------------------------------------------------------------"); - log.info("Creating Cassandra table '" + tableFullName + "'"); - log.info("-----------------------------------------------------------------------\n\n" + + if (log.isInfoEnabled()) { + log.info("-----------------------------------------------------------------------"); + log.info("Creating Cassandra table '" + tableFullName + "'"); + log.info("-----------------------------------------------------------------------\n\n" + settings.getTableDDLStatement(table) + "\n"); - log.info("-----------------------------------------------------------------------"); + log.info("-----------------------------------------------------------------------"); + } + session().execute(settings.getTableDDLStatement(table)); - log.info("Cassandra table '" + tableFullName + "' was successfully created"); + + if (log.isInfoEnabled()) + log.info("Cassandra table '" + tableFullName + "' was successfully created"); + return; } catch (AlreadyExistsException ignored) { - log.info("Cassandra table '" + tableFullName + "' already exist"); + if (log.isInfoEnabled()) + log.info("Cassandra table '" + tableFullName + "' already exist"); + return; } catch (Throwable e) { @@ -741,14 +757,19 @@ private void createTableIndexes(String table, KeyValuePersistenceSettings settin while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) { try { - log.info("-----------------------------------------------------------------------"); - log.info("Creating indexes for Cassandra table '" + tableFullName + "'"); - log.info("-----------------------------------------------------------------------"); + if (log.isInfoEnabled()) { + log.info("-----------------------------------------------------------------------"); + log.info("Creating indexes for Cassandra table '" + tableFullName + "'"); + 
log.info("-----------------------------------------------------------------------"); + } for (String statement : indexDDLStatements) { try { - log.info(statement); - log.info("-----------------------------------------------------------------------"); + if (log.isInfoEnabled()) { + log.info(statement); + log.info("-----------------------------------------------------------------------"); + } + session().execute(statement); } catch (AlreadyExistsException ignored) { @@ -759,7 +780,8 @@ private void createTableIndexes(String table, KeyValuePersistenceSettings settin } } - log.info("Indexes for Cassandra table '" + tableFullName + "' were successfully created"); + if (log.isInfoEnabled()) + log.info("Indexes for Cassandra table '" + tableFullName + "' were successfully created"); return; } diff --git a/modules/clients/src/test/java/org/apache/ignite/common/ClientSideCacheCreationDestructionWileTopologyChangeTest.java b/modules/clients/src/test/java/org/apache/ignite/common/ClientSideCacheCreationDestructionWileTopologyChangeTest.java new file mode 100644 index 0000000000000..d4c2a5664ca34 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/common/ClientSideCacheCreationDestructionWileTopologyChangeTest.java @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.common; + +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.testframework.GridTestUtils; + +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * {@inheritDoc} With topology events in parallel + */ +public class ClientSideCacheCreationDestructionWileTopologyChangeTest extends ClientSizeCacheCreationDestructionTest { + /** **/ + private static final int MAX_NODES_CNT = 10; + + /** **/ + IgniteInternalFuture topChangeProcFut; + + /** **/ + AtomicBoolean procTopChanges = new AtomicBoolean(true); + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + topChangeProcFut = asyncTopologyChanges(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + procTopChanges.set(false); + + topChangeProcFut.get(); + + super.afterTest(); + } + + /** + * @return {@code IgniteInternalFuture} to wait for topology process to stop in {@code afterTest()}. + */ + private IgniteInternalFuture asyncTopologyChanges() { + return GridTestUtils.runAsync(() -> { + while (procTopChanges.get()) { + try { + if (srv.cluster().nodes().size() < MAX_NODES_CNT) + startGrid(UUID.randomUUID().toString()); + } + catch (Exception e) { + fail("Unable to add or remove node: " + e); + } + } + }); + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/common/ClientSizeCacheCreationDestructionTest.java b/modules/clients/src/test/java/org/apache/ignite/common/ClientSizeCacheCreationDestructionTest.java new file mode 100644 index 0000000000000..cd0aef7d7f6c4 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/common/ClientSizeCacheCreationDestructionTest.java @@ -0,0 +1,1242 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.common; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.Serializable; +import java.net.URL; +import java.net.URLConnection; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import javax.cache.CacheException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.CacheAtomicityMode; +import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.client.ClientCacheConfiguration; +import org.apache.ignite.client.IgniteClient; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.ClientConfiguration; +import org.apache.ignite.configuration.ConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.client.thin.ClientServerError; +import org.apache.ignite.internal.jdbc.thin.JdbcThinConnection; +import 
org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.junit.Test; + +/** + * Tests for cache creation and destruction from servers and clients: thin, thick, jdbc and rest. + * Including simultaneous operations. Mainly within same cache group. + */ +@SuppressWarnings({"ThrowableNotThrown", "unchecked"}) +public class ClientSizeCacheCreationDestructionTest extends GridCommonAbstractTest { + /** **/ + private static final String CACHE_NAME = "CacheName"; + + /** **/ + private static final String ANOTHER_CACHE_NAME = "AnotherCacheName"; + + /** **/ + private static final String CLIENT_CACHE_NAME = "ClientCacheName"; + + /** **/ + private static final String CACHE_GROUP_NAME = "CacheGroupName"; + + /** **/ + protected Ignite srv; + + /** **/ + private Ignite thickClient; + + /** **/ + private IgniteClient thinClient; + + /** **/ + private Connection jdbcConn; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration configuration = super.getConfiguration(igniteInstanceName); + + configuration.setConnectorConfiguration(new ConnectorConfiguration()); + + return configuration; + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + srv = startGrid("server"); + + thickClient = startClientGrid(1); + + thinClient = Ignition.startClient(new ClientConfiguration().setAddresses("127.0.0.1:10800")); + + jdbcConn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1:10800"); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + super.afterTest(); + + if (thickClient != null) + thickClient.close(); + + if (thinClient != null) + thinClient.close(); + + if (jdbcConn != null) + jdbcConn.close(); + + stopAllGrids(); + } + + /** + * Direct scenario: + *
+ * <ol>
+ *   <li>Start server node, create cache in cache group.</li>
+ *   <li>Start client node and create cache in same cache group.</li>
+ *   <li>Assert no exception, cache successfully created, value may be inserted into this cache.</li>
+ * </ol>
+ * + * @throws Exception If failed. + */ + @Test + public void testServerThenClientCacheCreation() throws Exception { + createCache(srv, cacheConfig()); + + createCache(thickClient, cacheConfig().setName(CLIENT_CACHE_NAME)); + + IgniteCache cache = srv.cache(CLIENT_CACHE_NAME); + + cache.put(1L, "abc"); + + assertEquals("abc", cache.get(1L)); + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches in 4 different cache groups: each cache
+ *   in corresponding cache group.</li>
+ *   <li>Start Thick client node, create 1 new cache in each created cache group.</li>
+ *   <li>Assert that 4 cache groups exist with 2 caches each.</li>
+ *   <li>Try to insert and get some data from caches.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithinFourCacheGroupsThickClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME + i).setName(CACHE_NAME + i)); + + for (int i = 0; i < 4; i++) + createCache(thickClient, cacheConfig().setGroupName(CACHE_GROUP_NAME + i).setName(CLIENT_CACHE_NAME + i)); + + // Assertions. + assertEquals(8, srv.cacheNames().size()); + + for (int i = 0; i < 4; i++) { + assertEquals(CACHE_GROUP_NAME + i, + srv.cache(CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + assertEquals(CACHE_GROUP_NAME + i, + srv.cache(CLIENT_CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + srv.cache(CACHE_NAME + i).put(1, "abc_srv" + i); + assertEquals("abc_srv" + i, srv.cache(CACHE_NAME + i).get(1)); + + srv.cache(CLIENT_CACHE_NAME + i).put(1, "abc_cli" + i); + assertEquals("abc_cli" + i, srv.cache(CLIENT_CACHE_NAME + i).get(1)); + } + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches in 4 different cache groups: each cache
+ *   in corresponding cache group.</li>
+ *   <li>Start Thin client node, create 1 new cache in each created cache group.</li>
+ *   <li>Assert that 4 cache groups exist with 2 caches each.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithinFourCacheGroupsThinClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME + i).setName(CACHE_NAME + i)); + + for (int i = 0; i < 4; i++) { + createCache(thinClient, clientCacheConfig().setGroupName(CACHE_GROUP_NAME + i). + setName(CLIENT_CACHE_NAME + i)); + } + + // Assertions. + assertEquals(8, srv.cacheNames().size()); + + for (int i = 0; i < 4; i++) { + assertEquals(CACHE_GROUP_NAME + i, + srv.cache(CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + assertEquals(CACHE_GROUP_NAME + i, + srv.cache(CLIENT_CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + srv.cache(CACHE_NAME + i).put(1, "abc_srv" + i); + assertEquals("abc_srv" + i, srv.cache(CACHE_NAME + i).get(1)); + + srv.cache(CLIENT_CACHE_NAME + i).put(1, "abc_cli" + i); + assertEquals("abc_cli" + i, srv.cache(CLIENT_CACHE_NAME + i).get(1)); + } + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches in 4 different cache groups: each cache
+ *   in corresponding cache group.</li>
+ *   <li>Start Jdbc Thin client node, create 1 new cache in each created cache group.</li>
+ *   <li>Assert that 4 cache groups exist with 2 caches each.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithinFourCacheGroupsJdbcThinClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME + i).setName(CACHE_NAME + i)); + + for (int i = 0; i < 4; i++) + createCache(jdbcConn, cacheConfig().setGroupName(CACHE_GROUP_NAME + i).setName(CLIENT_CACHE_NAME + i)); + + // Assertions. + assertEquals(8, srv.cacheNames().size()); + + for (int i = 0; i < 4; i++) { + assertEquals(CACHE_GROUP_NAME + i, + srv.cache(CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + assertEquals(CACHE_GROUP_NAME + i, + srv.cache("SQL_PUBLIC_" + CLIENT_CACHE_NAME.toUpperCase() + i). + getConfiguration(CacheConfiguration.class).getGroupName()); + + srv.cache(CACHE_NAME + i).put(1, "abc_srv" + i); + assertEquals("abc_srv" + i, srv.cache(CACHE_NAME + i).get(1)); + + srv.cache("SQL_PUBLIC_" + CLIENT_CACHE_NAME.toUpperCase() + i).put(1, "abc_cli" + i); + assertEquals("abc_cli" + i, + srv.cache("SQL_PUBLIC_" + CLIENT_CACHE_NAME.toUpperCase() + i).get(1)); + } + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches in 4 different cache groups: each cache
+ *   in corresponding cache group.</li>
+ *   <li>Start Rest client node, create 1 new cache in each created cache group.</li>
+ *   <li>Assert that 4 cache groups exist with 2 caches each.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithinFourCacheGroupsRestClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME + i).setName(CACHE_NAME + i)); + + for (int i = 0; i < 4; i++) + createCacheWithRestClient(cacheConfig().setGroupName(CACHE_GROUP_NAME + i).setName(CLIENT_CACHE_NAME + i)); + + // Assertions. + assertEquals(8, srv.cacheNames().size()); + + for (int i = 0; i < 4; i++) { + assertEquals(CACHE_GROUP_NAME + i, + srv.cache(CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + assertEquals(CACHE_GROUP_NAME + i, + srv.cache(CLIENT_CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + srv.cache(CACHE_NAME + i).put(1, "abc_srv" + i); + assertEquals("abc_srv" + i, srv.cache(CACHE_NAME + i).get(1)); + + srv.cache(CLIENT_CACHE_NAME + i).put(1, "abc_cli" + i); + assertEquals("abc_cli" + i, srv.cache(CLIENT_CACHE_NAME + i).get(1)); + } + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches in 2 different cache groups in server node (2+2).</li>
+ *   <li>Start Thick client node, create 2 new caches in each created cache group.</li>
+ *   <li>Assert that 2 cache groups exist with 4 caches each.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithinTwoCacheGroupsThickClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME + (i % 2)).setName(CACHE_NAME + i)); + + for (int i = 0; i < 4; i++) { + createCache(thickClient, cacheConfig().setGroupName(CACHE_GROUP_NAME + (i % 2)). + setName(CLIENT_CACHE_NAME + i)); + } + + // Assertions. + assertEquals(8, srv.cacheNames().size()); + + for (int i = 0; i < 4; i++) { + assertEquals(CACHE_GROUP_NAME + (i % 2), + srv.cache(CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + assertEquals(CACHE_GROUP_NAME + (i % 2), + srv.cache(CLIENT_CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + srv.cache(CACHE_NAME + i).put(1, "abc_srv" + i); + assertEquals("abc_srv" + i, srv.cache(CACHE_NAME + i).get(1)); + + srv.cache(CLIENT_CACHE_NAME + i).put(1, "abc_cli" + i); + assertEquals("abc_cli" + i, srv.cache(CLIENT_CACHE_NAME + i).get(1)); + } + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches in 2 different cache groups in server node (2+2).</li>
+ *   <li>Start Thin client node, create 2 new caches in each created cache group.</li>
+ *   <li>Assert that 2 cache groups exist with 4 caches each.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithinTwoCacheGroupsThinClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME + (i % 2)).setName(CACHE_NAME + i)); + + for (int i = 0; i < 4; i++) { + createCache(thinClient, clientCacheConfig().setGroupName(CACHE_GROUP_NAME + (i % 2)). + setName(CLIENT_CACHE_NAME + i)); + } + + // Assertions. + assertEquals(8, srv.cacheNames().size()); + + for (int i = 0; i < 4; i++) { + assertEquals(CACHE_GROUP_NAME + (i % 2), + srv.cache(CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + assertEquals(CACHE_GROUP_NAME + (i % 2), + srv.cache(CLIENT_CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + srv.cache(CACHE_NAME + i).put(1, "abc_srv" + i); + assertEquals("abc_srv" + i, srv.cache(CACHE_NAME + i).get(1)); + + srv.cache(CLIENT_CACHE_NAME + i).put(1, "abc_cli" + i); + assertEquals("abc_cli" + i, srv.cache(CLIENT_CACHE_NAME + i).get(1)); + } + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches in 2 different cache groups in server node (2+2).</li>
+ *   <li>Start Jdbc Thin client node, create 2 new caches in each created cache group.</li>
+ *   <li>Assert that 2 cache groups exist with 4 caches each.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithinTwoCacheGroupsJdbcThinClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME + (i % 2)).setName(CACHE_NAME + i)); + + for (int i = 0; i < 4; i++) { + createCache(jdbcConn, cacheConfig().setGroupName(CACHE_GROUP_NAME + (i % 2)). + setName(CLIENT_CACHE_NAME + i)); + } + // Assertions. + assertEquals(8, srv.cacheNames().size()); + + for (int i = 0; i < 4; i++) { + assertEquals(CACHE_GROUP_NAME + (i % 2), + srv.cache(CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + assertEquals(CACHE_GROUP_NAME + (i % 2), + srv.cache("SQL_PUBLIC_" + CLIENT_CACHE_NAME.toUpperCase() + i). + getConfiguration(CacheConfiguration.class).getGroupName()); + + srv.cache(CACHE_NAME + i).put(1, "abc_srv" + i); + assertEquals("abc_srv" + i, srv.cache(CACHE_NAME + i).get(1)); + + srv.cache("SQL_PUBLIC_" + CLIENT_CACHE_NAME.toUpperCase() + i).put(1, "abc_cli" + i); + assertEquals("abc_cli" + i, + srv.cache("SQL_PUBLIC_" + CLIENT_CACHE_NAME.toUpperCase() + i).get(1)); + } + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches in 2 different cache groups in server node (2+2).</li>
+ *   <li>Start Rest client node, create 2 new caches in each created cache group.</li>
+ *   <li>Assert that 2 cache groups exist with 4 caches each.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithinTwoCacheGroupsRestClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME + (i % 2)).setName(CACHE_NAME + i)); + + for (int i = 0; i < 4; i++) { + createCacheWithRestClient(cacheConfig().setGroupName(CACHE_GROUP_NAME + (i % 2)). + setName(CLIENT_CACHE_NAME + i)); + } + // Assertions. + assertEquals(8, srv.cacheNames().size()); + + for (int i = 0; i < 4; i++) { + assertEquals(CACHE_GROUP_NAME + (i % 2), + srv.cache(CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + assertEquals(CACHE_GROUP_NAME + (i % 2), + srv.cache(CLIENT_CACHE_NAME + i).getConfiguration(CacheConfiguration.class).getGroupName()); + + srv.cache(CACHE_NAME + i).put(1, "abc_srv" + i); + assertEquals("abc_srv" + i, srv.cache(CACHE_NAME + i).get(1)); + + srv.cache(CLIENT_CACHE_NAME + i).put(1, "abc_cli" + i); + assertEquals("abc_cli" + i, srv.cache(CLIENT_CACHE_NAME + i).get(1)); + } + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches without cache groups.</li>
+ *   <li>Start Thick client node, try to create cache with
+ *   cache group with a name == first cache name.</li>
+ *   <li>{@code CacheException} expected with message:
+ *   'Failed to start cache. Cache group name conflict with existing cache (change group name)'.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithCacheGroupNameEqualsFirstCacheNameThickClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfigWithoutCacheGroup().setName(CACHE_NAME + i)); + + GridTestUtils.assertThrows( + null, + () -> { + createCache(thickClient, cacheConfig().setGroupName(CACHE_NAME + 0).setName(CLIENT_CACHE_NAME)); + + return null; + }, + CacheException.class, + "Failed to start cache. Cache group name conflict with existing cache (change group name)"); + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches without cache groups.</li>
+ *   <li>Start Thin client node, try to create cache with cache group with a name == first cache name.</li>
+ *   <li>{@code ClientServerError} expected with message:
+ *   'Failed to start cache. Cache group name conflict with existing cache (change group name)'.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithCacheGroupNameEqualsFirstCacheNameThinClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfigWithoutCacheGroup().setName(CACHE_NAME + i)); + + GridTestUtils.assertThrows( + null, + () -> { + createCache(thinClient, clientCacheConfig().setGroupName(CACHE_NAME + 0).setName(CLIENT_CACHE_NAME)); + + return null; + }, + ClientServerError.class, + "Failed to start cache. Cache group name conflict with existing cache (change group name)"); + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches without cache groups.</li>
+ *   <li>Start Jdbc Thin client node, try to create cache
+ *   with cache group with a name == first cache name.</li>
+ *   <li>{@code SQLException} expected with message:
+ *   'Failed to start cache. Cache group name conflict with existing cache (change group name)'.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithCacheGroupNameEqualsFirstCacheNameJdbcThinClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfigWithoutCacheGroup().setName(CACHE_NAME + i)); + + GridTestUtils.assertThrows( + null, + () -> { + createCache(jdbcConn, cacheConfig().setGroupName(CACHE_NAME + 0).setName(CLIENT_CACHE_NAME)); + + return null; + }, + SQLException.class, + "Failed to start cache. Cache group name conflict with existing cache (change group name)"); + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create 4 different caches without cache groups.</li>
+ *   <li>Start Rest client node, try to create cache with cache group with a name == first cache name.</li>
+ *   <li>An error is expected (the REST success status is non-zero).</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithCacheGroupNameEqualsFirstCacheNameRestClient() throws Exception { + for (int i = 0; i < 4; i++) + createCache(srv, cacheConfigWithoutCacheGroup().setName(CACHE_NAME + i)); + + GridTestUtils.assertThrows( + null, + () -> { + createCacheWithRestClient(cacheConfig().setGroupName(CACHE_NAME + 0).setName(CLIENT_CACHE_NAME)); + return null; + }, + AssertionError.class, + "expected:<0> but was:<1>"); + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create a cache in a cache group (1 backup).</li>
+ *   <li>Start Thick client node, try to create extra cache within same cache group but with different
+ *   config (2 backups).</li>
+ *   <li>{@code CacheException} expected
+ *   with message 'Backups mismatch for caches related to the same group'.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithDifferentConfigThickClient() throws Exception { + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CACHE_NAME).setBackups(1)); + + GridTestUtils.assertThrows( + null, + () -> { + createCache(thickClient, cacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CLIENT_CACHE_NAME). + setBackups(2)); + + return null; + }, + CacheException.class, + "Backups mismatch for caches related to the same group"); + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create a cache in a cache group (1 backup).</li>
+ *   <li>Start Thin client node, try to create extra cache within same cache group but with different
+ *   config (2 backups).</li>
+ *   <li>{@code ClientServerError} expected
+ *   with message 'Backups mismatch for caches related to the same group'.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithDifferentConfigThinClient() throws Exception { + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CACHE_NAME).setBackups(1)); + + GridTestUtils.assertThrows( + null, + () -> { + createCache(thinClient, clientCacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CLIENT_CACHE_NAME). + setBackups(2)); + + return null; + }, + ClientServerError.class, + "Backups mismatch for caches related to the same group"); + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create a cache in a cache group (1 backup).</li>
+ *   <li>Start Jdbc Thin client node, try to create extra cache within same cache group but with different
+ *   config (2 backups).</li>
+ *   <li>{@code SQLException} expected with message 'Backups mismatch for caches related to the same group'.</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithDifferentConfigJdbcThinClient() throws Exception { + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CACHE_NAME).setBackups(1)); + + GridTestUtils.assertThrows( + null, + () -> { + createCache(jdbcConn, cacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CLIENT_CACHE_NAME). + setBackups(2)); + + return null; + }, + SQLException.class, + "Backups mismatch for caches related to the same group"); + } + + /** + * Few caches created in chain: + *
+ * <ol>
+ *   <li>Start server node, create a cache in a cache group (1 backup).</li>
+ *   <li>Start Rest client node, try to create extra cache within same cache group but with different
+ *   config (2 backups).</li>
+ *   <li>An error is expected (the REST success status is non-zero).</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testFewCachesCreatedInChainWithDifferentConfigRestClient() throws Exception { + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CACHE_NAME).setBackups(1)); + + GridTestUtils.assertThrows( + null, + () -> { + createCacheWithRestClient(cacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CLIENT_CACHE_NAME). + setBackups(2)); + + return null; + }, + AssertionError.class, + "expected:<0> but was:<1>"); + } + + /** + * Destroy caches: + *
+ * <ol>
+ *   <li>Start server node, create 2 caches in a single cache group.</li>
+ *   <li>Start Thick client and try to destroy 2 caches at the same time from client and from server.</li>
+ *   <li>Assert that operation completed successfully, both caches are destroyed and the cache group no longer
+ *   exists (for example create cache with same name as deleted cache group).</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testDestroyCachesThickClient() throws Exception { + for (int i = 0; i < 2; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CACHE_NAME + i)); + + CountDownLatch latch = new CountDownLatch(1); + + IgniteInternalFuture srv = GridTestUtils.runAsync(() -> { + try { + latch.await(); + } + catch (InterruptedException e) { + fail(e.toString()); + } + this.srv.destroyCache(CACHE_NAME + 0); + }); + + IgniteInternalFuture client = GridTestUtils.runAsync(() -> { + try { + latch.await(); + } + catch (InterruptedException e) { + fail(e.toString()); + } + thickClient.destroyCache(CACHE_NAME + 1); + }); + + latch.countDown(); + + srv.get(); + + client.get(); + + assertEquals(0, this.srv.cacheNames().size()); + + this.srv.createCache(CACHE_GROUP_NAME); + } + + /** + * Destroy caches: + *
+ * <ol>
+ *   <li>Start server node, create 2 caches in a single cache group.</li>
+ *   <li>Start Thin client and try to destroy 2 caches at the same time from client and from server.</li>
+ *   <li>Assert that operation completed successfully, both caches are destroyed and the cache group no longer
+ *   exists (for example create cache with same name as deleted cache group).</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testDestroyCachesThinClient() throws Exception { + for (int i = 0; i < 2; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CACHE_NAME + i)); + + CountDownLatch latch = new CountDownLatch(1); + + IgniteInternalFuture srv = GridTestUtils.runAsync(() -> { + try { + latch.await(); + } + catch (InterruptedException e) { + fail(e.toString()); + } + this.srv.destroyCache(CACHE_NAME + 0); + }); + + IgniteInternalFuture client = GridTestUtils.runAsync(() -> { + try { + latch.await(); + } + catch (InterruptedException e) { + fail(e.toString()); + } + thinClient.destroyCache(CACHE_NAME + 1); + }); + + latch.countDown(); + + srv.get(); + + client.get(); + + assertEquals(0, this.srv.cacheNames().size()); + + this.srv.createCache(CACHE_GROUP_NAME); + } + + /** + * Destroy caches: + *
+ * <ol>
+ *   <li>Start server node, create 2 caches in a single cache group.</li>
+ *   <li>Start Rest client and try to destroy 2 caches at the same time from client and from server.</li>
+ *   <li>Assert that operation completed successfully, both caches are destroyed and the cache group no longer
+ *   exists (for example create cache with same name as deleted cache group).</li>
+ * </ol>
+ * @throws Exception If failed. + */ + @Test + public void testDestroyCachesRestClient() throws Exception { + for (int i = 0; i < 2; i++) + createCache(srv, cacheConfig().setGroupName(CACHE_GROUP_NAME).setName(CACHE_NAME + i)); + + CountDownLatch latch = new CountDownLatch(1); + + IgniteInternalFuture srv = GridTestUtils.runAsync(() -> { + try { + latch.await(); + } + catch (InterruptedException e) { + fail(e.toString()); + } + this.srv.destroyCache(CACHE_NAME + 0); + }); + + IgniteInternalFuture client = GridTestUtils.runAsync(() -> { + try { + latch.await(); + } + catch (InterruptedException e) { + fail(e.toString()); + } + + URLConnection conn = null; + try { + conn = new URL("http://localhost:8080/ignite?cmd=destcache&cacheName=" + CACHE_NAME + "1"). + openConnection(); + } + catch (IOException e) { + fail(e.toString()); + } + + try { + conn.connect(); + + try (InputStreamReader streamReader = new InputStreamReader(conn.getInputStream())) { + ObjectMapper objMapper = new ObjectMapper(); + Map myMap = objMapper.readValue(streamReader, + new TypeReference>() { + }); + + log.info("Version command response is: " + myMap); + + assertTrue(myMap.containsKey("response")); + assertEquals(0, myMap.get("successStatus")); + } + } + catch (IOException e) { + fail(e.toString()); + } + + }); + + latch.countDown(); + + srv.get(); + + client.get(); + + assertEquals(0, this.srv.cacheNames().size()); + + this.srv.createCache(CACHE_GROUP_NAME); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Thick client.</li>
+ *   <li>Create new cache with an existing cache group on server side.</li>
+ *   <li>Destroy newly created cache through client.</li>
+ * </ol>
+ *

+ * Expected: + * Only one cache, initially created within server node is expected. + */ + @Test + public void testCreateOnSrvDestroyOnThickClient() { + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + srv.createCache(cacheConfig()); + + thickClient.destroyCache(CACHE_NAME); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Thin client.</li>
+ *   <li>Create new cache with an existing cache group on server side.</li>
+ *   <li>Destroy newly created cache through client.</li>
+ * </ol>
+ *

+ * Expected: + * Only one cache, initially created within server node is expected. + */ + @Test + public void testCreateOnSrvDestroyOnThinClient() { + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + srv.createCache(cacheConfig()); + + thinClient.destroyCache(CACHE_NAME); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Rest client.</li>
+ *   <li>Create new cache with an existing cache group on server side.</li>
+ *   <li>Destroy newly created cache through client.</li>
+ * </ol>
+ *

+ * Expected: + * Only one cache, initially created within server node is expected. + */ + @Test + public void testCreateOnSrvDestroyOnRestClient() throws Exception { + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + srv.createCache(cacheConfig()); + + destroyCacheWithRestClient(CACHE_NAME); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Thick client.</li>
+ *   <li>Create new cache with an existing cache group on client side.</li>
+ *   <li>Destroy newly created cache through server node.</li>
+ * </ol>
+ *

+ * Expected: + * Only one cache, initially created within server node is expected. + */ + @Test + public void testCreateOnThickClientDestroyOnSrv() { + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + thickClient.createCache(cacheConfig()); + + srv.destroyCache(CACHE_NAME); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Thin client.</li>
+ *   <li>Create new cache with an existing cache group on client side.</li>
+ *   <li>Destroy newly created cache through server node.</li>
+ * </ol>
+ *

+ * Expected: + * Only one cache, initially created within server node is expected. + */ + @Test + public void testCreateOnThinClientSrvDestroyOnSrv() { + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + thinClient.createCache(clientCacheConfig()); + + srv.destroyCache(CACHE_NAME); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Jdbc Thin client.</li>
+ *   <li>Create new cache with an existing cache group on client side.</li>
+ *   <li>Destroy newly created cache through server node.</li>
+ * </ol>
+ *

+ * Expected: Only one cache, initially created within server node is expected. + */ + @Test + public void testCreateOnJdbcClientDestroyOnSrv() throws Exception { + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + createCache(jdbcConn, cacheConfig()); + + srv.destroyCache("SQL_PUBLIC_" + CACHE_NAME.toUpperCase()); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Rest client.</li>
+ *   <li>Create new cache with an existing cache group on client side.</li>
+ *   <li>Destroy newly created cache through server node.</li>
+ * </ol>
+ *

+ * Expected: Only one cache, initially created within server node is expected. + */ + @Test + public void testCreateOnRestClientDestroyOnSrv() throws Exception { + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + createCacheWithRestClient(cacheConfig()); + + srv.destroyCache(CACHE_NAME); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Thick client.</li>
+ *   <li>Create new cache with an existing cache group on client side.</li>
+ *   <li>Destroy newly created cache through some other, previously created, Thin client node.</li>
+ * </ol>
+ *

+ * Expected: Only one cache, initially created within server node is expected. + */ + @Test + public void testCreateOnThickClientDestroyThinClient() { + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + thickClient.createCache(cacheConfig()); + + thinClient.destroyCache(CACHE_NAME); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Thin client.</li>
+ *   <li>Create new cache with an existing cache group on client side.</li>
+ *   <li>Destroy newly created cache through some other, previously created, Rest client node.</li>
+ * </ol>
+ *

+ * Expected: Only one cache, initially created within server node is expected. + */ + @Test + public void testCreateOnThinClientSrvDestroyOnRestClient() throws Exception{ + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + thinClient.createCache(clientCacheConfig()); + + destroyCacheWithRestClient(CACHE_NAME); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Jdbc Thin client.</li>
+ *   <li>Create new cache with an existing cache group on client side.</li>
+ *   <li>Destroy newly created cache through some other, previously created, Thin client node.</li>
+ * </ol>
+ *

+ * Expected: Only one cache, initially created within server node is expected. + */ + @Test + public void testCreateOnJdbcClientDestroyOnThinClient() throws Exception { + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + createCache(jdbcConn, cacheConfig()); + + thinClient.destroyCache("SQL_PUBLIC_" + CACHE_NAME.toUpperCase()); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create and destroy caches: + *

+ * Prerequisites: + * Start server node, create 1 cache in a single cache group. + *

+ * Steps: + *

+ * <ol>
+ *   <li>Start Rest client.</li>
+ *   <li>Create new cache with an existing cache group on client side.</li>
+ *   <li>Destroy newly created cache through some other, previously created, Thick client node.</li>
+ * </ol>
+ *

+ * Expected: Only one cache, initially created within server node is expected. + * @throws Exception If failed. + */ + @Test + public void testCreateOnRestClientDestroyOnThickClient() throws Exception { + srv.createCache(cacheConfig().setName(ANOTHER_CACHE_NAME)); + + createCacheWithRestClient(cacheConfig()); + + thickClient.destroyCache(CACHE_NAME); + + assertEquals(1, srv.cacheNames().size()); + + assertEquals(ANOTHER_CACHE_NAME, srv.cacheNames().iterator().next()); + } + + /** + * Create cache with specified configuration through thin/thick client or jdbc thin. + * + * @param node Cluster node or jdbc connection. + * @param cacheCfg Cache or ClientCache configuration + * @throws SQLException If failed to create cache through Jdbc Thin connection. + */ + private void createCache(AutoCloseable node, Serializable cacheCfg) throws SQLException { + if (node instanceof IgniteClient) + ((IgniteClient)node).createCache((ClientCacheConfiguration)cacheCfg); + else if (node instanceof Ignite) + ((Ignite)node).createCache((CacheConfiguration)cacheCfg); + else if (node instanceof JdbcThinConnection) { + CacheConfiguration jdbcCacheCfg = (CacheConfiguration)cacheCfg; + + srv.addCacheConfiguration(jdbcCacheCfg); + + try (Statement stmt = jdbcConn.createStatement()) { + stmt.execute("CREATE TABLE " + jdbcCacheCfg.getName() + + " (id int, name varchar, primary key (id)) WITH \"template=" + jdbcCacheCfg.getName() + "\""); + } + } + else + fail(" Unexpected node/client type"); + } + + /** + * Create cache with specified configuration through rest client. + * @param cacheCfg Cache configuration. + * @throws Exception If failed. 
+ */ + private void createCacheWithRestClient(CacheConfiguration cacheCfg) throws Exception { + srv.addCacheConfiguration(cacheCfg); + + URLConnection conn = new URL("http://localhost:8080/ignite?cmd=getorcreate&cacheName=" + + cacheCfg.getName() + "&templateName=" + cacheCfg.getName()).openConnection(); + + conn.connect(); + + try (InputStreamReader streamReader = new InputStreamReader(conn.getInputStream())) { + ObjectMapper objMapper = new ObjectMapper(); + Map myMap = objMapper.readValue(streamReader, + new TypeReference>() { + }); + + log.info("Version command response is: " + myMap); + + assertTrue(myMap.containsKey("response")); + assertEquals(0, myMap.get("successStatus")); + } + } + + /** + * Destroy cache from within rest client. + * @param cacheName Cache name. + * @throws Exception If failed. + */ + private void destroyCacheWithRestClient(String cacheName) throws Exception { + URLConnection conn = new URL("http://localhost:8080/ignite?cmd=destcache&cacheName=" + cacheName). + openConnection(); + + conn.connect(); + + try (InputStreamReader streamReader = new InputStreamReader(conn.getInputStream())) { + ObjectMapper objMapper = new ObjectMapper(); + Map myMap = objMapper.readValue(streamReader, + new TypeReference>() { + }); + + log.info("Version command response is: " + myMap); + + assertTrue(myMap.containsKey("response")); + assertEquals(0, myMap.get("successStatus")); + } + } + + /** + * @return Default client cache configuration. + */ + private ClientCacheConfiguration clientCacheConfig() { + return new ClientCacheConfiguration(). + setGroupName(CACHE_GROUP_NAME). + setName(CACHE_NAME). + setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL). + setCacheMode(CacheMode.PARTITIONED); + } + + /** + * @return Default cache configuration. + */ + private CacheConfiguration cacheConfig() { + return new CacheConfiguration(). + setGroupName(CACHE_GROUP_NAME). + setName(CACHE_NAME). + setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL). 
+ setCacheMode(CacheMode.PARTITIONED); + } + + /** + * @return Default cache configuration without cache group. + */ + private CacheConfiguration cacheConfigWithoutCacheGroup() { + return new CacheConfiguration(). + setName(CACHE_NAME). + setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL). + setCacheMode(CacheMode.PARTITIONED); + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java index 0c272b9866e14..26cd5f538d24f 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java @@ -36,6 +36,7 @@ import org.apache.ignite.configuration.ConnectorConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.events.Event; +import org.apache.ignite.events.EventType; import org.apache.ignite.events.TaskEvent; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.client.GridClientConfiguration; @@ -76,6 +77,8 @@ public class TaskEventSubjectIdSelfTest extends GridCommonAbstractTest { cfg.setConnectorConfiguration(new ConnectorConfiguration()); + cfg.setIncludeEventTypes(EventType.EVTS_ALL); + return cfg; } diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java index d6ef9f9e208fa..a35c49ec22738 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java @@ -18,6 +18,8 @@ package org.apache.ignite.internal.client.suite; import junit.framework.TestSuite; +import 
org.apache.ignite.common.ClientSideCacheCreationDestructionWileTopologyChangeTest; +import org.apache.ignite.common.ClientSizeCacheCreationDestructionTest; import org.apache.ignite.internal.IgniteClientFailuresTest; import org.apache.ignite.internal.TaskEventSubjectIdSelfTest; import org.apache.ignite.internal.client.ClientDefaultCacheSelfTest; @@ -169,6 +171,9 @@ public static TestSuite suite() { suite.addTestSuite(IgniteClientFailuresTest.class); + suite.addTestSuite(ClientSizeCacheCreationDestructionTest.class); + suite.addTestSuite(ClientSideCacheCreationDestructionWileTopologyChangeTest.class); + return suite; } } diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java index 9485d0d54212c..652d635cbe4a1 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java @@ -31,6 +31,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.testframework.GridTestUtils; /** * Test that checks indexes handling with JDBC. @@ -168,9 +169,9 @@ public void testCreateIndex() throws SQLException { public void testCreateIndexWithDuplicateName() throws SQLException { jdbcRun(CREATE_INDEX); - assertSqlException(new RunnableX() { + assertSqlException(new GridTestUtils.RunnableX() { /** {@inheritDoc} */ - @Override public void run() throws Exception { + @Override public void runx() throws Exception { jdbcRun(CREATE_INDEX); } }); @@ -219,9 +220,9 @@ public void testDropIndex() throws SQLException { * Test that dropping a non-existent index yields an error. 
*/ public void testDropMissingIndex() { - assertSqlException(new RunnableX() { + assertSqlException(new GridTestUtils.RunnableX() { /** {@inheritDoc} */ - @Override public void run() throws Exception { + @Override public void runx() throws Exception { jdbcRun(DROP_INDEX); } }); @@ -310,11 +311,11 @@ private IgniteCache cache() { * * @param r Runnable. */ - private static void assertSqlException(RunnableX r) { + private static void assertSqlException(GridTestUtils.RunnableX r) { // We expect IgniteSQLException with given code inside CacheException inside JDBC SQLException. try { - r.run(); + r.runx(); } catch (SQLException e) { return; @@ -325,16 +326,4 @@ private static void assertSqlException(RunnableX r) { fail(SQLException.class.getSimpleName() + " is not thrown."); } - - /** - * Runnable which can throw checked exceptions. - */ - private interface RunnableX { - /** - * Do run. - * - * @throws Exception If failed. - */ - public void run() throws Exception; - } } diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java index e302529404d70..eaf81ec205e81 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java @@ -20,19 +20,21 @@ import java.sql.Connection; import java.sql.DriverManager; import java.sql.PreparedStatement; +import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.util.Collections; import java.util.Properties; import org.apache.ignite.IgniteCache; import org.apache.ignite.IgniteJdbcDriver; -import org.apache.ignite.IgniteLogger; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.binary.BinaryObjectBuilder; import org.apache.ignite.configuration.CacheConfiguration; import 
org.apache.ignite.configuration.ConnectorConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteClusterReadOnlyException; import org.apache.ignite.internal.processors.query.QueryUtils; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteCallable; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; @@ -166,6 +168,41 @@ protected Connection createStreamedConnection(boolean allowOverwrite, long flush super.afterTest(); } + /** + * @throws Exception if failed. + */ + public void testStreamedInsertFailsOnReadOnlyMode() throws Exception { + try (Connection conn = createStreamedConnection(true)) { + populateData(conn, 0, 1); + + grid(0).cluster().readOnly(true); + + try { + assertTrue(grid(0).cluster().readOnly()); + + try (Connection ordinalCon = createOrdinaryConnection()) { + assertEquals(1, countPersons(ordinalCon)); + + try { + populateData(conn, 1, 100); + + fail("Insert should be failed!"); + } + catch (Exception e) { + log.error("Insert failed", e); + + assertTrue("Wrong exception", X.hasCause(e, IgniteClusterReadOnlyException.class)); + } + + assertEquals("Insert should be failed", 1, countPersons(ordinalCon)); + } + } + finally { + grid(0).cluster().readOnly(false); + } + } + } + /** * @throws Exception if failed. 
*/ @@ -174,15 +211,7 @@ public void testStreamedInsert() throws Exception { put(i, nameForId(i * 100)); try (Connection conn = createStreamedConnection(false)) { - try (PreparedStatement stmt = conn.prepareStatement("insert into PUBLIC.Person(\"id\", \"name\") " + - "values (?, ?)")) { - for (int i = 1; i <= 100; i++) { - stmt.setInt(1, i); - stmt.setString(2, nameForId(i)); - - stmt.executeUpdate(); - } - } + populateData(conn, 1, 100); } U.sleep(500); @@ -204,15 +233,7 @@ public void testStreamedInsertWithoutColumnsList() throws Exception { put(i, nameForId(i * 100)); try (Connection conn = createStreamedConnection(false)) { - try (PreparedStatement stmt = conn.prepareStatement("insert into PUBLIC.Person(\"id\", \"name\") " + - "values (?, ?)")) { - for (int i = 1; i <= 100; i++) { - stmt.setInt(1, i); - stmt.setString(2, nameForId(i)); - - stmt.executeUpdate(); - } - } + populateData(conn, 1, 100); } U.sleep(500); @@ -234,15 +255,7 @@ public void testStreamedInsertWithOverwritesAllowed() throws Exception { put(i, nameForId(i * 100)); try (Connection conn = createStreamedConnection(true)) { - try (PreparedStatement stmt = conn.prepareStatement("insert into PUBLIC.Person(\"id\", \"name\") " + - "values (?, ?)")) { - for (int i = 1; i <= 100; i++) { - stmt.setInt(1, i); - stmt.setString(2, nameForId(i)); - - stmt.executeUpdate(); - } - } + populateData(conn, 1, 100); } U.sleep(500); @@ -327,4 +340,38 @@ protected String nameForIdInCache(int id) { return ((BinaryObject)o).field("name"); } + + /** + * Populates data to the table. + * + * @param conn Connection. + * @param from First person id. + * @param count Number of persons. + * @throws SQLException If something goes wrong. 
+ */ + private void populateData(Connection conn, int from, int count) throws SQLException { + try (PreparedStatement stmt = conn.prepareStatement("insert into PUBLIC.Person(\"id\", \"name\") values (?, ?)")) { + for (int i = from; i < from + count; i++) { + stmt.setInt(1, i); + stmt.setString(2, nameForId(i)); + + stmt.executeUpdate(); + } + } + } + + /** + * @param conn Connection. + * @return Size of PUBLIC.Person table. + * @throws SQLException If something goes wrong. + */ + private long countPersons(Connection conn) throws SQLException { + try (Statement selectStmt = conn.createStatement()) { + try (ResultSet rs = selectStmt.executeQuery("select count(*) from PUBLIC.Person")) { + assertTrue("Result set is empty!", rs.next()); + + return rs.getLong(1); + } + } + } } diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java index bcd66f0a4de8a..9f3c71b9951de 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java @@ -137,6 +137,8 @@ import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.testframework.GridTestUtils; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import static org.apache.ignite.cache.CacheMode.PARTITIONED; import static org.apache.ignite.cache.CacheMode.REPLICATED; @@ -280,6 +282,31 @@ protected JsonNode jsonTaskResult(String content) throws IOException { return res.get("result"); } + /** + * Check task result with expected failure. + * + * @param content Content to check. + * @return Node with failure result. 
+ */ + protected JsonNode jsonTaskErrorResult(String content) throws IOException { + assertNotNull(content); + assertFalse(content.isEmpty()); + + JsonNode node = JSON_MAPPER.readTree(content); + + assertEquals(STATUS_FAILED, node.get("successStatus").asInt()); + assertFalse(node.get("error").isNull()); + assertTrue(node.get("response").isNull()); + + assertEquals(securityEnabled(), !node.get("sessionToken").isNull()); + + JsonNode error = node.get("error"); + + assertTrue(error.isTextual()); + + return error; + } + /** * @throws Exception If failed. */ @@ -1567,161 +1594,161 @@ public void testVisorGateway() throws Exception { final IgniteUuid cid = grid(1).context().cache().internalCache("person").context().dynamicDeploymentId(); String ret = content(new VisorGatewayArgument(VisorCacheConfigurationCollectorTask.class) - .forNode(locNode) - .argument(VisorCacheConfigurationCollectorTaskArg.class) - .collection(IgniteUuid.class, cid)); + .setNode(locNode) + .setTaskArgument(VisorCacheConfigurationCollectorTaskArg.class) + .addCollectionArgument(IgniteUuid.class, cid)); info("VisorCacheConfigurationCollectorTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorCacheNodesTask.class) - .forNode(locNode) - .argument(VisorCacheNodesTaskArg.class, "person")); + .setNode(locNode) + .setTaskArgument(VisorCacheNodesTaskArg.class, "person")); info("VisorCacheNodesTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorCachePartitionsTask.class) - .forNode(locNode) - .argument(VisorCachePartitionsTaskArg.class, "person")); + .setNode(locNode) + .setTaskArgument(VisorCachePartitionsTaskArg.class, "person")); info("VisorCachePartitionsTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorCacheLoadTask.class) - .forNode(locNode) - .argument(VisorCacheLoadTaskArg.class) - .set(String.class, "person") - .arguments(0, "null")); + .setNode(locNode) + 
.setTaskArgument(VisorCacheLoadTaskArg.class) + .addSetArgument(String.class, "person") + .addArguments(0, "null")); info("VisorCacheLoadTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorCacheRebalanceTask.class) - .forNode(locNode) - .argument(VisorCacheRebalanceTaskArg.class) - .set(String.class, "person")); + .setNode(locNode) + .setTaskArgument(VisorCacheRebalanceTaskArg.class) + .addSetArgument(String.class, "person")); info("VisorCacheRebalanceTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorCacheMetadataTask.class) - .forNode(locNode) - .argument(VisorCacheMetadataTaskArg.class, "person")); + .setNode(locNode) + .setTaskArgument(VisorCacheMetadataTaskArg.class, "person")); info("VisorCacheMetadataTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorCacheResetMetricsTask.class) - .forNode(locNode) - .argument(VisorCacheResetMetricsTaskArg.class, "person")); + .setNode(locNode) + .setTaskArgument(VisorCacheResetMetricsTaskArg.class, "person")); info("VisorCacheResetMetricsTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorIgfsSamplingStateTask.class) - .forNode(locNode) - .argument(VisorIgfsSamplingStateTaskArg.class, "igfs", false)); + .setNode(locNode) + .setTaskArgument(VisorIgfsSamplingStateTaskArg.class, "igfs", false)); info("VisorIgfsSamplingStateTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorIgfsProfilerClearTask.class) - .forNode(locNode) - .argument(VisorIgfsProfilerClearTaskArg.class, "igfs")); + .setNode(locNode) + .setTaskArgument(VisorIgfsProfilerClearTaskArg.class, "igfs")); info("VisorIgfsProfilerClearTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorIgfsProfilerTask.class) - .forNode(locNode) - .argument(VisorIgfsProfilerTaskArg.class, "igfs")); + .setNode(locNode) + .setTaskArgument(VisorIgfsProfilerTaskArg.class, 
"igfs")); info("VisorIgfsProfilerTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorIgfsFormatTask.class) - .forNode(locNode) - .argument(VisorIgfsFormatTaskArg.class, "igfs")); + .setNode(locNode) + .setTaskArgument(VisorIgfsFormatTaskArg.class, "igfs")); info("VisorIgfsFormatTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorIgfsResetMetricsTask.class) - .forNode(locNode) - .argument(VisorIgfsResetMetricsTaskArg.class) - .set(String.class, "igfs")); + .setNode(locNode) + .setTaskArgument(VisorIgfsResetMetricsTaskArg.class) + .addSetArgument(String.class, "igfs")); info("VisorIgfsResetMetricsTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorThreadDumpTask.class) - .forNode(locNode)); + .setNode(locNode)); info("VisorThreadDumpTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorLatestTextFilesTask.class) - .forNode(locNode) - .argument(VisorLatestTextFilesTaskArg.class, "", "")); + .setNode(locNode) + .setTaskArgument(VisorLatestTextFilesTaskArg.class, "", "")); info("VisorLatestTextFilesTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorLatestVersionTask.class) - .forNode(locNode)); + .setNode(locNode)); info("VisorLatestVersionTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorFileBlockTask.class) - .forNode(locNode) - .argument(VisorFileBlockTaskArg.class, "", 0L, 1, 0L)); + .setNode(locNode) + .setTaskArgument(VisorFileBlockTaskArg.class, "", 0L, 1, 0L)); info("VisorFileBlockTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorNodePingTask.class) - .forNode(locNode) - .argument(VisorNodePingTaskArg.class, locNode.id())); + .setNode(locNode) + .setTaskArgument(VisorNodePingTaskArg.class, locNode.id())); info("VisorNodePingTask result: " + ret); jsonTaskResult(ret); ret = content(new 
VisorGatewayArgument(VisorNodeConfigurationCollectorTask.class) - .forNode(locNode)); + .setNode(locNode)); info("VisorNodeConfigurationCollectorTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorComputeResetMetricsTask.class) - .forNode(locNode)); + .setNode(locNode)); info("VisorComputeResetMetricsTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorQueryTask.class) - .forNode(locNode) - .argument(VisorQueryTaskArg.class, "person", URLEncoder.encode("select * from Person", CHARSET), + .setNode(locNode) + .setTaskArgument(VisorQueryTaskArg.class, "person", URLEncoder.encode("select * from Person", CHARSET), false, false, false, false, 1)); info("VisorQueryTask result: " + ret); @@ -1731,51 +1758,51 @@ public void testVisorGateway() throws Exception { final String qryId = res.get("result").get("queryId").asText(); ret = content(new VisorGatewayArgument(VisorQueryNextPageTask.class) - .forNode(locNode) - .argument(VisorQueryNextPageTaskArg.class, qryId, 1)); + .setNode(locNode) + .setTaskArgument(VisorQueryNextPageTaskArg.class, qryId, 1)); info("VisorQueryNextPageTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorQueryCleanupTask.class) - .argument(VisorQueryCleanupTaskArg.class) - .map(UUID.class, Set.class, F.asMap(locNode.id(), qryId))); + .setTaskArgument(VisorQueryCleanupTaskArg.class) + .addMapArgument(UUID.class, Set.class, F.asMap(locNode.id(), qryId))); info("VisorQueryCleanupTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorResolveHostNameTask.class) - .forNode(locNode)); + .setNode(locNode)); info("VisorResolveHostNameTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorQueryCancelTask.class) - .argument(VisorQueryCancelTaskArg.class, 0L)); + .setTaskArgument(VisorQueryCancelTaskArg.class, 0L)); info("VisorResolveHostNameTask result: " + ret); jsonTaskResult(ret); ret = 
content(new VisorGatewayArgument(VisorQueryResetMetricsTask.class) - .argument(VisorQueryResetMetricsTaskArg.class, "person")); + .setTaskArgument(VisorQueryResetMetricsTaskArg.class, "person")); info("VisorResolveHostNameTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorQueryCancelTask.class) - .argument(VisorQueryCancelTaskArg.class, 0L)); + .setTaskArgument(VisorQueryCancelTaskArg.class, 0L)); info("VisorResolveHostNameTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorQueryResetMetricsTask.class) - .argument(VisorQueryResetMetricsTaskArg.class, "person")); + .setTaskArgument(VisorQueryResetMetricsTaskArg.class, "person")); info("VisorResolveHostNameTask result: " + ret); @@ -1784,30 +1811,30 @@ public void testVisorGateway() throws Exception { // Multinode tasks ret = content(new VisorGatewayArgument(VisorComputeCancelSessionsTask.class) - .argument(VisorComputeCancelSessionsTaskArg.class) - .set(IgniteUuid.class, IgniteUuid.randomUuid())); + .setTaskArgument(VisorComputeCancelSessionsTaskArg.class) + .addSetArgument(IgniteUuid.class, IgniteUuid.randomUuid())); info("VisorComputeCancelSessionsTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorCacheMetricsCollectorTask.class) - .argument(VisorCacheMetricsCollectorTaskArg.class, false) - .collection(String.class, "person")); + .setTaskArgument(VisorCacheMetricsCollectorTaskArg.class, false) + .addCollectionArgument(String.class, "person")); info("VisorCacheMetricsCollectorTask result: " + ret); ret = content(new VisorGatewayArgument(VisorCacheMetricsCollectorTask.class) - .forNodes(grid(1).cluster().nodes()) - .argument(VisorCacheMetricsCollectorTaskArg.class, false) - .collection(String.class, "person")); + .setNodes(grid(1).cluster().nodes()) + .setTaskArgument(VisorCacheMetricsCollectorTaskArg.class, false) + .addCollectionArgument(String.class, "person")); info("VisorCacheMetricsCollectorTask (with 
nodes) result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorLogSearchTask.class) - .argument(VisorLogSearchTaskArg.class, ".", ".", "abrakodabra.txt", 1)); + .setTaskArgument(VisorLogSearchTaskArg.class, ".", ".", "abrakodabra.txt", 1)); info("VisorLogSearchTask result: " + ret); @@ -1820,14 +1847,14 @@ public void testVisorGateway() throws Exception { jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorAckTask.class) - .argument(VisorAckTaskArg.class, "MSG")); + .setTaskArgument(VisorAckTaskArg.class, "MSG")); info("VisorAckTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorNodeEventsCollectorTask.class) - .argument(VisorNodeEventsCollectorTaskArg.class, + .setTaskArgument(VisorNodeEventsCollectorTaskArg.class, "null", "null", "null", "taskName", "null")); info("VisorNodeEventsCollectorTask result: " + ret); @@ -1835,7 +1862,7 @@ public void testVisorGateway() throws Exception { jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorNodeDataCollectorTask.class) - .argument(VisorNodeDataCollectorTaskArg.class, false, + .setTaskArgument(VisorNodeDataCollectorTaskArg.class, false, "CONSOLE_" + UUID.randomUUID(), UUID.randomUUID(), false)); info("VisorNodeDataCollectorTask result: " + ret); @@ -1843,23 +1870,23 @@ public void testVisorGateway() throws Exception { jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorComputeToggleMonitoringTask.class) - .argument(VisorComputeToggleMonitoringTaskArg.class, UUID.randomUUID(), false)); + .setTaskArgument(VisorComputeToggleMonitoringTaskArg.class, UUID.randomUUID(), false)); info("VisorComputeToggleMonitoringTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorNodeSuppressedErrorsTask.class) - .argument(VisorNodeSuppressedErrorsTaskArg.class) - .map(UUID.class, Long.class, new HashMap())); + .setTaskArgument(VisorNodeSuppressedErrorsTaskArg.class) + .addMapArgument(UUID.class, Long.class, 
new HashMap())); info("VisorNodeSuppressedErrorsTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorCacheClearTask.class) - .forNode(locNode) - .argument(VisorCacheClearTaskArg.class, "person")); + .setNode(locNode) + .setTaskArgument(VisorCacheClearTaskArg.class, "person")); info("VisorCacheClearTask result: " + ret); @@ -1878,7 +1905,7 @@ public void testVisorGateway() throws Exception { ""; ret = content(new VisorGatewayArgument(VisorCacheStartTask.class) - .argument(VisorCacheStartTaskArg.class, false, "person2", + .setTaskArgument(VisorCacheStartTaskArg.class, false, "person2", URLEncoder.encode(START_CACHE, CHARSET))); info("VisorCacheStartTask result: " + ret); @@ -1886,29 +1913,29 @@ public void testVisorGateway() throws Exception { jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorCacheStopTask.class) - .forNode(locNode) - .argument(VisorCacheStopTaskArg.class, "c")); + .setNode(locNode) + .setTaskArgument(VisorCacheStopTaskArg.class, "c")); info("VisorCacheStopTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorQueryDetailMetricsCollectorTask.class) - .argument(VisorQueryDetailMetricsCollectorTaskArg.class, 0)); + .setTaskArgument(VisorQueryDetailMetricsCollectorTaskArg.class, 0)); info("VisorQueryDetailMetricsCollectorTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorRunningQueriesCollectorTask.class) - .argument(VisorRunningQueriesCollectorTaskArg.class, 0L)); + .setTaskArgument(VisorRunningQueriesCollectorTaskArg.class, 0L)); info("VisorQueryDetailMetricsCollectorTask result: " + ret); jsonTaskResult(ret); ret = content(new VisorGatewayArgument(VisorChangeGridActiveStateTask.class) - .argument(VisorChangeGridActiveStateTaskArg.class, true)); + .setTaskArgument(VisorChangeGridActiveStateTaskArg.class, true)); info("VisorQueryDetailMetricsCollectorTask result: " + ret); @@ -2830,24 +2857,24 @@ public VisorGatewayArgument(Class cls) 
{ } /** - * Execute task on node. + * Execute task on specified node. * * @param node Node. * @return This helper for chaining method calls. */ - public VisorGatewayArgument forNode(ClusterNode node) { - put("p1", node != null ? node.id().toString() : null); + public VisorGatewayArgument setNode(ClusterNode node) { + put("p1", node != null ? node.id().toString() : null); return this; } /** - * Prepare list of node IDs. + * Execute task on specified nodes. * * @param nodes Collection of nodes. * @return This helper for chaining method calls. */ - public VisorGatewayArgument forNodes(Collection nodes) { + public VisorGatewayArgument setNodes(Collection nodes) { put("p1", concat(F.transform(nodes, new C1() { /** {@inheritDoc} */ @Override public UUID apply(ClusterNode node) { @@ -2859,43 +2886,48 @@ public VisorGatewayArgument forNodes(Collection nodes) { } /** - * Add custom argument. + * Add custom arguments. * - * @param vals Values. + * @param vals Array of values or {@code null}. * @return This helper for chaining method calls. */ - public VisorGatewayArgument arguments(Object... vals) { - for (Object val : vals) - put("p" + idx++, String.valueOf(val)); + public VisorGatewayArgument addArguments(@Nullable Object... vals) { + if (idx == 3) + throw new IllegalStateException("Task argument class should be declared before adding of additional arguments. " + + "Use VisorGatewayArgument.setTaskArgument first"); - return this; - } + if (vals != null && F.isEmpty(vals)) + throw new IllegalArgumentException("Additional arguments should be configured as null or not empty array of arguments"); - /** - * Add string argument. - * - * @param val Value. - * @return This helper for chaining method calls. 
- */ - public VisorGatewayArgument argument(String val) { - put("p" + idx++, String.class.getName()); - put("p" + idx++, val); + if (vals != null) { + for (Object val : vals) + put("p" + idx++, String.valueOf(val)); + } + else + put("p" + idx++, null); return this; } /** - * Add custom class argument. + * Add task argument class with custom arguments. * * @param cls Class. * @param vals Values. * @return This helper for chaining method calls. */ - public VisorGatewayArgument argument(Class cls, Object... vals) { + public VisorGatewayArgument setTaskArgument(Class cls, @Nullable Object... vals) { + if (idx != 3) + throw new IllegalStateException("Task argument class should be declared before adding of additional arguments"); + put("p" + idx++, cls.getName()); - for (Object val : vals) - put("p" + idx++, val != null ? val.toString() : null); + if (vals != null) { + for (Object val : vals) + put("p" + idx++, val != null ? val.toString() : null); + } + else + put("p" + idx++, null); return this; } @@ -2907,7 +2939,11 @@ public VisorGatewayArgument argument(Class cls, Object... vals) { * @param vals Values. * @return This helper for chaining method calls. */ - public VisorGatewayArgument collection(Class cls, Object... vals) { + public VisorGatewayArgument addCollectionArgument(Class cls, @Nullable Object... vals) { + if (idx == 3) + throw new IllegalStateException("Task argument class should be declared before adding of additional arguments. " + + "Use VisorGatewayArgument.setTaskArgument first"); + put("p" + idx++, Collection.class.getName()); put("p" + idx++, cls.getName()); put("p" + idx++, concat(vals, ";")); @@ -2922,7 +2958,11 @@ public VisorGatewayArgument collection(Class cls, Object... vals) { * @param vals Values. * @return This helper for chaining method calls. */ - public VisorGatewayArgument set(Class cls, Object... vals) { + public VisorGatewayArgument addSetArgument(Class cls, @Nullable Object... 
vals) { + if (idx == 3) + throw new IllegalStateException("Task argument class should be declared before adding of additional argument. " + + "Use VisorGatewayArgument.setTaskArgument first"); + put("p" + idx++, Set.class.getName()); put("p" + idx++, cls.getName()); put("p" + idx++, concat(vals, ";")); @@ -2937,7 +2977,14 @@ public VisorGatewayArgument set(Class cls, Object... vals) { * @param valCls Value class. * @param map Map. */ - public VisorGatewayArgument map(Class keyCls, Class valCls, Map map) throws UnsupportedEncodingException { + public VisorGatewayArgument addMapArgument(Class keyCls, Class valCls, @NotNull Map map) throws UnsupportedEncodingException { + if (idx == 3) + throw new IllegalStateException("Task argument class should be declared before adding of additional arguments. " + + "Use VisorGatewayArgument.setTaskArgument first"); + + if (map == null) + throw new IllegalArgumentException("Map argument should be specified"); + put("p" + idx++, Map.class.getName()); put("p" + idx++, keyCls.getName()); put("p" + idx++, valCls.getName()); @@ -2974,14 +3021,18 @@ private static String concat(Object[] vals, String delim) { boolean first = true; - for (Object val : vals) { - if (!first) - sb.a(delim); + if (vals != null) { + for (Object val : vals) { + if (!first) + sb.a(delim); - sb.a(val); + sb.a(val); - first = false; + first = false; + } } + else + sb.a(vals); return sb.toString(); } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java index 5e0350e61f734..d5ec9d7a0af53 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java @@ -712,6 +712,59 @@ public void testDdlWrongSyntax() throws SQLException { "Failed to parse query. 
Syntax error in SQL statement \"ALTER TABLE TEST DROP COLUMN [*]"); } + /** + * Checks execution DML request on read-only cluster error code and message. + * + * @throws Exception If failed. + */ + public void testUpdatesRejectedInReadOnlyMode() throws Exception { + try (Connection conn = getConnection()) { + try (Statement statement = conn.createStatement()) { + statement.executeUpdate("CREATE TABLE PUBLIC.TEST_READ_ONLY (ID LONG PRIMARY KEY, VAL LONG)"); + } + } + + grid(0).cluster().readOnly(true); + + try { + checkErrorState((conn) -> { + try (Statement statement = conn.createStatement()) { + statement.executeUpdate("INSERT INTO PUBLIC.TEST_READ_ONLY VALUES (1, 2)"); + } + }, "90097", "Failed to execute DML statement. Cluster in read-only mode"); + } + finally { + grid(0).cluster().readOnly(false); + } + } + + /** + * Checks execution batch DML request on read-only cluster error code and message. + * + * @throws Exception If failed. + */ + public void testBatchUpdatesRejectedInReadOnlyMode() throws Exception { + try (Connection conn = getConnection()) { + try (Statement statement = conn.createStatement()) { + statement.executeUpdate("CREATE TABLE PUBLIC.TEST_READ_ONLY_BATCH (ID LONG PRIMARY KEY, VAL LONG)"); + } + } + + grid(0).cluster().readOnly(true); + + try { + checkErrorState((conn) -> { + try (Statement statement = conn.createStatement()) { + statement.addBatch("INSERT INTO PUBLIC.TEST_READ_ONLY_BATCH VALUES (1, 2)"); + statement.executeBatch(); + } + }, "90097", null); + } + finally { + grid(0).cluster().readOnly(false); + } + } + /** * @return Connection to execute statements on. * @throws SQLException if failed. 
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java index 2ba36c369c227..7ac96990a01eb 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java @@ -28,12 +28,12 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.concurrent.Callable; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor; import org.apache.ignite.internal.processors.port.GridPortRecord; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.GridTestUtils.RunnableX; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; /** @@ -45,27 +45,18 @@ public class JdbcThinAbstractSelfTest extends GridCommonAbstractTest { * @param r Runnable to check support. */ protected void checkNotSupported(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); - - return null; - } - }, SQLFeatureNotSupportedException.class, null); + GridTestUtils.assertThrowsWithCause(r, SQLFeatureNotSupportedException.class); } /** * @param r Runnable to check on closed connection. */ protected void checkConnectionClosed(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); + GridTestUtils.assertThrowsAnyCause(log, + () -> { + r.run(); - return null; - } + return null; }, SQLException.class, "Connection is closed"); } @@ -73,13 +64,11 @@ protected void checkConnectionClosed(final RunnableX r) { * @param r Runnable to check on closed statement. 
*/ protected void checkStatementClosed(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); + GridTestUtils.assertThrowsAnyCause(log, + () -> { + r.run(); - return null; - } + return null; }, SQLException.class, "Statement is closed"); } @@ -87,26 +76,14 @@ protected void checkStatementClosed(final RunnableX r) { * @param r Runnable to check on closed result set. */ protected void checkResultSetClosed(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); + GridTestUtils.assertThrowsAnyCause(log, + () -> { + r.run(); - return null; - } + return null; }, SQLException.class, "Result set is closed"); } - /** - * Runnable that can throw an exception. - */ - interface RunnableX { - /** - * @throws Exception On error. - */ - void run() throws Exception; - } - /** * @param node Node to connect to. * @param params Connection parameters. 
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java index ad1e3126c88aa..22d7d71ea8f1f 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java @@ -21,6 +21,7 @@ import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.affinity.AffinityKey; @@ -31,6 +32,7 @@ import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; import static org.apache.ignite.cache.CacheMode.PARTITIONED; @@ -266,6 +268,37 @@ public void testCalculatedValue() throws Exception { assert cnt == 3; } + /** + * @throws Exception If failed. + */ + public void testWrongArgumentType() throws Exception { + try (ResultSet rs = stmt.executeQuery("select * from \"org\".Organization where name = '2'")) { + assertFalse(rs.next()); + } + + // Check non-indexed field. + GridTestUtils.assertThrowsWithCause(() -> { + try (ResultSet rs = stmt.executeQuery("select * from \"org\".Organization where name = 2")) { + assertFalse(rs.next()); + } + + return null; + }, SQLException.class); + + // Check indexed field. 
+ try (ResultSet rs = stmt.executeQuery("select * from \"pers\".Person where name = '2'")) { + assertFalse(rs.next()); + } + + GridTestUtils.assertThrowsWithCause(() -> { + try (ResultSet rs = stmt.executeQuery("select * from \"pers\".Person where name = 2")) { + assertFalse(rs.next()); + } + + return null; + }, SQLException.class); + } + /** * Person. */ @@ -276,7 +309,7 @@ private static class Person implements Serializable { private final int id; /** Name. */ - @QuerySqlField(index = false) + @QuerySqlField(index = true) private final String name; /** Age. */ diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java index 6403cac5037f1..bd816e6ef5d2c 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java @@ -58,6 +58,7 @@ import static java.sql.ResultSet.TYPE_FORWARD_ONLY; import static java.sql.Statement.NO_GENERATED_KEYS; import static java.sql.Statement.RETURN_GENERATED_KEYS; +import static org.apache.ignite.testframework.GridTestUtils.RunnableX; /** * Connection test. 
@@ -570,7 +571,7 @@ public void testCreateStatement() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStatement(); } }); @@ -623,7 +624,7 @@ public void testCreateStatement2() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStatement(TYPE_FORWARD_ONLY, CONCUR_READ_ONLY); } @@ -682,7 +683,7 @@ public void testCreateStatement3() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStatement(TYPE_FORWARD_ONLY, CONCUR_READ_ONLY, HOLD_CURSORS_OVER_COMMIT); } @@ -716,7 +717,7 @@ public void testPrepareStatement() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.prepareStatement(sqlText); } }); @@ -774,7 +775,7 @@ public void testPrepareStatement3() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.prepareStatement(sqlText, TYPE_FORWARD_ONLY, CONCUR_READ_ONLY); } }); @@ -839,7 +840,7 @@ public void testPrepareStatement4() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.prepareStatement(sqlText, TYPE_FORWARD_ONLY, CONCUR_READ_ONLY, HOLD_CURSORS_OVER_COMMIT); } }); @@ -961,7 +962,7 @@ public void testNativeSql() 
throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.nativeSQL(sqlText); } }); @@ -987,7 +988,7 @@ public void testGetSetAutoCommit() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setAutoCommit(true); } }); @@ -1022,7 +1023,7 @@ public void testCommit() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.commit(); } }); @@ -1057,7 +1058,7 @@ public void testRollback() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.rollback(); } }); @@ -1077,7 +1078,7 @@ public void testGetMetaData() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getMetaData(); } }); @@ -1093,14 +1094,14 @@ public void testGetSetReadOnly() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setReadOnly(true); } }); // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.isReadOnly(); } }); @@ -1124,14 +1125,14 @@ public void testGetSetCatalog() throws Exception { // Exception when called on closed connection 
checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setCatalog(""); } }); // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getCatalog(); } }); @@ -1176,14 +1177,14 @@ public void testGetSetTransactionIsolation() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getTransactionIsolation(); } }); // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setTransactionIsolation(TRANSACTION_SERIALIZABLE); } }); @@ -1209,14 +1210,14 @@ public void testClearGetWarnings() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getWarnings(); } }); // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.clearWarnings(); } }); @@ -1355,7 +1356,7 @@ public void testSetSavepoint() throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(); } }); @@ -1363,7 +1364,7 @@ public void testSetSavepoint() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(); } }); @@ -1409,7 +1410,7 @@ public void testSetSavepointName() 
throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(name); } }); @@ -1417,7 +1418,7 @@ public void testSetSavepointName() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(name); } }); @@ -1463,7 +1464,7 @@ public void testRollbackSavePoint() throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.rollback(savepoint); } }); @@ -1471,7 +1472,7 @@ public void testRollbackSavePoint() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.rollback(savepoint); } }); @@ -1501,7 +1502,7 @@ public void testReleaseSavepoint() throws Exception { final Savepoint savepoint = getFakeSavepoint(); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.releaseSavepoint(savepoint); } }); @@ -1509,7 +1510,7 @@ public void testReleaseSavepoint() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.releaseSavepoint(savepoint); } }); @@ -1655,7 +1656,7 @@ public void testGetSetClientInfoPair() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getClientInfo(name); } }); @@ -1693,7 +1694,7 @@ public void testGetSetClientInfoProperties() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws 
Exception { + @Override public void runx() throws Exception { conn.getClientInfo(); } }); @@ -1734,7 +1735,7 @@ public void testCreateArrayOf() throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createArrayOf(typeName, elements); } }); @@ -1742,7 +1743,7 @@ public void testCreateArrayOf() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createArrayOf(typeName, elements); } }); @@ -1770,7 +1771,7 @@ public void testCreateStruct() throws Exception { final Object[] attrs = new Object[] {100, "Tom"}; checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStruct(typeName, attrs); } }); @@ -1778,7 +1779,7 @@ public void testCreateStruct() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStruct(typeName, attrs); } }); @@ -1805,13 +1806,13 @@ public void testGetSetSchema() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSchema(schema); } }); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getSchema(); } }); @@ -1889,13 +1890,13 @@ public void testGetSetNetworkTimeout() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getNetworkTimeout(); } }); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void 
runx() throws Exception { conn.setNetworkTimeout(executor, timeout); } }); @@ -1980,4 +1981,4 @@ private Savepoint getFakeSavepoint() { } }; } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java index c5778537096f1..4635702a9dd78 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java @@ -797,146 +797,146 @@ public void testClearParameter() throws Exception { public void testNotSupportedTypes() throws Exception { stmt = conn.prepareStatement(""); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setArray(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setAsciiStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setAsciiStream(1, null, 0); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setAsciiStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBinaryStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() 
throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBinaryStream(1, null, 0); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBinaryStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBlob(1, (Blob)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBlob(1, (InputStream)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBlob(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCharacterStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCharacterStream(1, null, 0); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCharacterStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setClob(1, (Clob)null); } }); - 
checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setClob(1, (Reader)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setClob(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNCharacterStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNCharacterStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNClob(1, (NClob)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNClob(1, (Reader)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNClob(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setRowId(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { 
stmt.setRef(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setSQLXML(1, null); } }); @@ -1055,4 +1055,4 @@ private TestObject(int id) { this.id = id; } } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java index 4f9480261c66b..94713afe873fa 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java @@ -49,6 +49,9 @@ import static org.apache.ignite.cache.CacheMode.PARTITIONED; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; +import static org.apache.ignite.testframework.GridTestUtils.RunnableX; +import static org.apache.ignite.testframework.GridTestUtils.assertThrows; +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsAnyCause; /** * Result set test. 
@@ -772,133 +775,133 @@ public void testNotSupportedTypes() throws Exception { assert rs.next(); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getArray(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getArray("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getAsciiStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getAsciiStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBinaryStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBinaryStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBlob(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBlob("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getClob(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getClob("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getCharacterStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws 
Exception { rs.getCharacterStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNCharacterStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNCharacterStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNClob(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNClob("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRef(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRef("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRowId(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRowId("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getSQLXML(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getSQLXML("id"); } }); @@ -913,499 +916,499 @@ public void testUpdateNotSupported() throws Exception { assert rs.next(); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBoolean(1, true); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws 
Exception { + @Override public void runx() throws Exception { rs.updateBoolean("id", true); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateByte(1, (byte)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateByte("id", (byte)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateShort(1, (short)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateShort("id", (short)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateInt(1, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateInt("id", 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateLong(1, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateLong("id", 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateFloat(1, (float)0.0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateFloat("id", (float)0.0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDouble(1, 0.0); } }); checkNotSupported(new RunnableX() { - @Override public 
void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDouble("id", 0.0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateString(1, ""); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateString("id", ""); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTime(1, new Time(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTime("id", new Time(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDate(1, new Date(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDate("id", new Date(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTimestamp(1, new Timestamp(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTimestamp("id", new Timestamp(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBytes(1, new byte[]{}); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBytes("id", new byte[]{}); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { 
rs.updateArray(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateArray("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob(1, (Blob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob(1, (InputStream)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob("id", (Blob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob("id", (InputStream)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob(1, (Clob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob(1, (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob("id", (Clob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() 
throws Exception { + @Override public void runx() throws Exception { rs.updateClob("id", (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob(1, (NClob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob(1, (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob("id", (NClob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob("id", (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { 
rs.updateAsciiStream("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream(1, 
null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRef(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRef("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRowId(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRowId("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNString(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNString("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateSQLXML(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateSQLXML("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + 
@Override public void runx() throws Exception { rs.updateObject(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateObject(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateObject("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateObject("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBigDecimal(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBigDecimal("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNull(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNull("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.cancelRowUpdates(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRow(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.deleteRow(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.insertRow(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public 
void runx() throws Exception { rs.moveToInsertRow(); } }); @@ -1423,235 +1426,235 @@ public void testExceptionOnClosedResultSet() throws Exception { rs.close(); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBoolean(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBoolean("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getByte(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getByte("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getShort(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getShort("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getInt(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getInt("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getLong(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getLong("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getFloat(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void 
run() throws Exception { + @Override public void runx() throws Exception { rs.getFloat("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDouble(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDouble("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getString(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getString("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBytes(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBytes("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate(1, new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate("id", new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTime(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws 
Exception { + @Override public void runx() throws Exception { rs.getTime(1, new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTime("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTime("id", new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp(1, new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp("id", new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.wasNull(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getMetaData(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.next(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.last(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.afterLast(); } }); checkResultSetClosed(new RunnableX() { - 
@Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.beforeFirst(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.first(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.findColumn("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRow(); } }); @@ -1847,4 +1850,4 @@ private TestObjectField(int a, String b) { return S.toString(TestObjectField.class, this); } } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java index 82c0512c7ab70..10dad914960cd 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java @@ -132,7 +132,7 @@ public class JdbcThinStatementSelfTest extends JdbcThinAbstractSelfTest { public void testExecuteQuery0() throws Exception { ResultSet rs = stmt.executeQuery(SQL); - assert rs != null; + assertNotNull(rs); int cnt = 0; @@ -140,22 +140,22 @@ public void testExecuteQuery0() throws Exception { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals("Mike", 
rs.getString("firstName")); + assertEquals("Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 2; + assertEquals(2, cnt); } /** @@ -177,8 +177,8 @@ public void testExecuteQuery1() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeQuery(sqlText); } }); @@ -188,15 +188,13 @@ public void testExecuteQuery1() throws Exception { * @throws Exception If failed. */ public void testExecute() throws Exception { - assert stmt.execute(SQL); + assertTrue(stmt.execute(SQL)); - assert stmt.getUpdateCount() == -1 : "Update count must be -1 for SELECT query"; + assertEquals("Update count must be -1 for SELECT query", -1, stmt.getUpdateCount()); ResultSet rs = stmt.getResultSet(); - assert rs != null; - - assert stmt.getResultSet() == null; + assertNotNull(rs); int cnt = 0; @@ -204,22 +202,24 @@ public void testExecute() throws Exception { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals( "Mike", rs.getString("firstName")); + assertEquals( "Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 2; + assertEquals(2, cnt); + + assertFalse("Statement has more results.", stmt.getMoreResults()); } /** @@ -228,11 
+228,11 @@ else if (id == 3) { public void testMaxRows() throws Exception { stmt.setMaxRows(1); - assert stmt.getMaxRows() == 1; + assertEquals(1, stmt.getMaxRows()); ResultSet rs = stmt.executeQuery(SQL); - assert rs != null; + assertNotNull(rs); int cnt = 0; @@ -240,28 +240,28 @@ public void testMaxRows() throws Exception { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals( "Mike", rs.getString("firstName")); + assertEquals( "Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 1; + assertEquals(1, cnt); stmt.setMaxRows(0); rs = stmt.executeQuery(SQL); - assert rs != null; + assertNotNull(rs); cnt = 0; @@ -269,22 +269,22 @@ else if (id == 3) { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals( "Mike", rs.getString("firstName")); + assertEquals( "Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 2; + assertEquals(2, cnt); } /** @@ -295,14 +295,14 @@ public void 
testCloseResultSet0() throws Exception { ResultSet rs1 = stmt.executeQuery(SQL); ResultSet rs2 = stmt.executeQuery(SQL); - assert rs0.isClosed() : "ResultSet must be implicitly closed after re-execute statement"; - assert rs1.isClosed() : "ResultSet must be implicitly closed after re-execute statement"; + assertTrue("ResultSet must be implicitly closed after re-execute statement", rs0.isClosed()); + assertTrue("ResultSet must be implicitly closed after re-execute statement", rs1.isClosed()); - assert !rs2.isClosed() : "Last result set must be available"; + assertFalse("Last result set must be available", rs2.isClosed()); stmt.close(); - assert rs2.isClosed() : "ResultSet must be explicitly closed after close statement"; + assertTrue("ResultSet must be explicitly closed after close statement", rs2.isClosed()); } /** @@ -315,7 +315,7 @@ public void testCloseResultSet1() throws Exception { stmt.close(); - assert rs.isClosed() : "ResultSet must be explicitly closed after close statement"; + assertTrue("ResultSet must be explicitly closed after close statement", rs.isClosed()); } /** @@ -326,66 +326,66 @@ public void testCloseResultSetByConnectionClose() throws Exception { conn.close(); - assert stmt.isClosed() : "Statement must be implicitly closed after close connection"; - assert rs.isClosed() : "ResultSet must be implicitly closed after close connection"; + assertTrue("Statement must be implicitly closed after close connection", stmt.isClosed()); + assertTrue("ResultSet must be implicitly closed after close connection", rs.isClosed()); } /** * @throws Exception If failed. 
*/ public void testCloseOnCompletionAfterQuery() throws Exception { - assert !stmt.isCloseOnCompletion() : "Invalid default closeOnCompletion"; + assertFalse("Invalid default closeOnCompletion", stmt.isCloseOnCompletion()); ResultSet rs0 = stmt.executeQuery(SQL); ResultSet rs1 = stmt.executeQuery(SQL); - assert rs0.isClosed() : "Result set must be closed implicitly"; + assertTrue("Result set must be closed implicitly", rs0.isClosed()); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); rs1.close(); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); ResultSet rs2 = stmt.executeQuery(SQL); stmt.closeOnCompletion(); - assert stmt.isCloseOnCompletion() : "Invalid closeOnCompletion"; + assertTrue("Invalid closeOnCompletion", stmt.isCloseOnCompletion()); rs2.close(); - assert stmt.isClosed() : "Statement must be closed"; + assertTrue("Statement must be closed", stmt.isClosed()); } /** * @throws Exception If failed. 
*/ public void testCloseOnCompletionBeforeQuery() throws Exception { - assert !stmt.isCloseOnCompletion() : "Invalid default closeOnCompletion"; + assertFalse("Invalid default closeOnCompletion", stmt.isCloseOnCompletion()); ResultSet rs0 = stmt.executeQuery(SQL); ResultSet rs1 = stmt.executeQuery(SQL); - assert rs0.isClosed() : "Result set must be closed implicitly"; + assertTrue("Result set must be closed implicitly", rs0.isClosed()); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); rs1.close(); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); stmt.closeOnCompletion(); ResultSet rs2 = stmt.executeQuery(SQL); - assert stmt.isCloseOnCompletion() : "Invalid closeOnCompletion"; + assertTrue("Invalid closeOnCompletion", stmt.isCloseOnCompletion()); rs2.close(); - assert stmt.isClosed() : "Statement must be closed"; + assertTrue("Statement must be closed", stmt.isClosed()); } /** @@ -414,7 +414,7 @@ public void testExecuteQueryTimeout() throws Exception { * @throws Exception If failed. 
*/ public void testExecuteQueryMultipleOnlyResultSets() throws Exception { - assert conn.getMetaData().supportsMultipleResultSets(); + assertTrue(conn.getMetaData().supportsMultipleResultSets()); int stmtCnt = 10; @@ -543,8 +543,8 @@ public void testExecuteUpdate() throws Exception { stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeUpdate(sqlText); } }); @@ -634,15 +634,15 @@ public void testGetSetMaxFieldSizeUnsupported() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMaxFieldSize(); } }); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setMaxFieldSize(100); } }); @@ -684,15 +684,15 @@ public void testGetSetMaxRows() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMaxRows(); } }); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setMaxRows(maxRows); } }); @@ -728,8 +728,8 @@ public void testSetEscapeProcessing() throws Exception { stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void 
runx() throws Exception { stmt.setEscapeProcessing(true); } }); @@ -765,15 +765,15 @@ public void testGetSetQueryTimeout() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getQueryTimeout(); } }); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setQueryTimeout(timeout); } }); @@ -783,7 +783,7 @@ public void testGetSetQueryTimeout() throws Exception { * @throws Exception If failed. */ public void testMaxFieldSize() throws Exception { - assert stmt.getMaxFieldSize() >= 0; + assertTrue(stmt.getMaxFieldSize() >= 0); GridTestUtils.assertThrows(log, new Callable() { @@ -797,8 +797,8 @@ public void testMaxFieldSize() throws Exception { "Invalid field limit" ); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setMaxFieldSize(100); } }); @@ -808,22 +808,22 @@ public void testMaxFieldSize() throws Exception { * @throws Exception If failed. 
*/ public void testQueryTimeout() throws Exception { - assert stmt.getQueryTimeout() == 0 : "Default timeout invalid: " + stmt.getQueryTimeout(); + assertEquals("Default timeout invalid: " + stmt.getQueryTimeout(), 0, stmt.getQueryTimeout()); stmt.setQueryTimeout(10); - assert stmt.getQueryTimeout() == 10; + assertEquals(10, stmt.getQueryTimeout()); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getQueryTimeout(); } }); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setQueryTimeout(10); } }); @@ -835,18 +835,18 @@ public void testQueryTimeout() throws Exception { public void testWarningsOnClosedStatement() throws Exception { stmt.clearWarnings(); - assert stmt.getWarnings() == null; + assertNull(null, stmt.getWarnings()); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getWarnings(); } }); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.clearWarnings(); } }); @@ -856,16 +856,16 @@ public void testWarningsOnClosedStatement() throws Exception { * @throws Exception If failed. 
*/ public void testCursorName() throws Exception { - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCursorName("test"); } }); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCursorName("test"); } }); @@ -875,22 +875,22 @@ public void testCursorName() throws Exception { * @throws Exception If failed. */ public void testGetMoreResults() throws Exception { - assert !stmt.getMoreResults(); + assertFalse(stmt.getMoreResults()); stmt.execute("select 1; "); ResultSet rs = stmt.getResultSet(); - assert !stmt.getMoreResults(); + assertFalse(stmt.getMoreResults()); - assert stmt.getResultSet() == null; + assertNull(stmt.getResultSet()); - assert rs.isClosed(); + assertTrue(rs.isClosed()); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMoreResults(); } }); @@ -899,37 +899,59 @@ public void testGetMoreResults() throws Exception { /** * @throws Exception If failed. 
*/ - public void testGetMoreResults1() throws Exception { - assert !stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT); - assert !stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); - assert !stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS); + public void testGetMoreResultsKeepCurrent() throws Exception { + assertFalse(stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS)); stmt.execute("select 1; "); ResultSet rs = stmt.getResultSet(); - assert !stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); + assertFalse(stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT)); - assert !rs.isClosed(); + assertFalse(rs.isClosed()); - assert !stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS); + stmt.close(); - assert rs.isClosed(); + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { + stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); + } + }); + } + + /** + * @throws Exception If failed. + */ + @org.junit.Test + public void testGetMoreResultsCloseAll() throws Exception { + assertFalse(stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS)); + + stmt.execute("select 1; "); + + ResultSet rs = stmt.getResultSet(); + + assertFalse(stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS)); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); } }); } /** + * Verifies that empty batch can be performed. + * * @throws Exception If failed. 
*/ public void testBatchEmpty() throws Exception { - assert conn.getMetaData().supportsBatchUpdates(); + assertTrue(conn.getMetaData().supportsBatchUpdates()); stmt.addBatch(""); stmt.clearBatch(); @@ -951,7 +973,7 @@ public void testBatchEmpty() throws Exception { * @throws Exception If failed. */ public void testFetchDirection() throws Exception { - assert stmt.getFetchDirection() == ResultSet.FETCH_FORWARD; + assertEquals(ResultSet.FETCH_FORWARD, stmt.getFetchDirection()); GridTestUtils.assertThrows(log, new Callable() { @@ -967,14 +989,14 @@ public void testFetchDirection() throws Exception { stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setFetchDirection(-1); } }); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getFetchDirection(); } }); @@ -1006,46 +1028,46 @@ public void testAutogenerated() throws Exception { SQLException.class, "Invalid autoGeneratedKeys value"); - assert !conn.getMetaData().supportsGetGeneratedKeys(); + assertFalse(conn.getMetaData().supportsGetGeneratedKeys()); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getGeneratedKeys(); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeUpdate("select 1", Statement.RETURN_GENERATED_KEYS); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { 
stmt.executeUpdate("select 1", new int[] {1, 2}); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeUpdate("select 1", new String[] {"a", "b"}); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.execute("select 1", Statement.RETURN_GENERATED_KEYS); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.execute("select 1", new int[] {1, 2}); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.execute("select 1", new String[] {"a", "b"}); } }); @@ -1115,7 +1137,7 @@ public void testStatementTypeMismatchSelectForCachedQuery() throws Exception { SQLException.class, "Given statement type does not match that declared by JDBC driver"); - assert stmt.getResultSet() == null : "Not results expected. Last statement is executed with exception"; + assertNull("Not results expected. Last statement is executed with exception", stmt.getResultSet()); } /** @@ -1137,18 +1159,20 @@ public void testStatementTypeMismatchUpdate() throws Exception { boolean next = rs.next(); - assert next; + assertTrue(next); - assert rs.getInt(1) == 1 : "The data must not be updated. " + + assertEquals("The data must not be updated. " + "Because update statement is executed via 'executeQuery' method." 
+ - " Data [val=" + rs.getInt(1) + ']'; + " Data [val=" + rs.getInt(1) + ']', + 1, + rs.getInt(1)); } /** */ private void fillCache() { IgniteCache cachePerson = grid(0).cache(DEFAULT_CACHE_NAME); - assert cachePerson != null; + assertNotNull(cachePerson); cachePerson.put("p1", new Person(1, "John", "White", 25)); cachePerson.put("p2", new Person(2, "Joe", "Black", 35)); @@ -1229,4 +1253,4 @@ private Person(int id, String firstName, String lastName, int age) { this.age = age; } } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java index 70046356b210b..7222074b3b848 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java @@ -198,6 +198,13 @@ public void testSimultaneousStreaming() throws Exception { assertEquals(i, grid(0).cache("T").get(i)); } + /** {@inheritDoc} */ + @Override public void testStreamedInsertFailsOnReadOnlyMode() throws Exception { + fail("https://ggsystems.atlassian.net/browse/GG-17406"); + + super.testStreamedInsertFailsOnReadOnlyMode(); + } + /** * */ @@ -502,4 +509,4 @@ static final class IndexingWithContext extends IgniteH2Indexing { return super.querySqlFields(schemaName, qry, cliCtx, keepBinary, failOnMultipleStmts, cancel); } } -} \ No newline at end of file +} diff --git a/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java b/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java index 9ecc46a72713b..54ad3c8f0e399 100644 --- a/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java +++ b/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java @@ -348,13 +348,13 @@ else if (line.startsWith(TAB 
+ "}")) { } if (!writeFound) - System.out.println(" writeTo method doesn't exist."); + System.out.println(" writeTo method doesn't exist for " + cls.getSimpleName()); if (!readFound) - System.out.println(" readFrom method doesn't exist."); + System.out.println(" readFrom method doesn't exist for " + cls.getSimpleName()); if (!fieldCntFound) - System.out.println(" fieldCount method doesn't exist."); + System.out.println(" fieldCount method doesn't exist for " + cls.getSimpleName()); } finally { if (rdr != null) diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/PdsWithTtlCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/PdsWithTtlCompatibilityTest.java deleted file mode 100644 index 946caddb5f203..0000000000000 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/PdsWithTtlCompatibilityTest.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.compatibility; - -import java.util.Collection; -import java.util.concurrent.TimeUnit; -import javax.cache.Cache; -import javax.cache.expiry.AccessedExpiryPolicy; -import javax.cache.expiry.Duration; -import org.apache.ignite.Ignite; -import org.apache.ignite.IgniteCache; -import org.apache.ignite.cache.CacheAtomicityMode; -import org.apache.ignite.cache.CacheWriteSynchronizationMode; -import org.apache.ignite.compatibility.persistence.IgnitePersistenceCompatibilityAbstractTest; -import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.configuration.DataRegionConfiguration; -import org.apache.ignite.configuration.DataStorageConfiguration; -import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; -import org.apache.ignite.configuration.WALMode; -import org.apache.ignite.internal.IgniteEx; -import org.apache.ignite.internal.IgniteInterruptedCheckedException; -import org.apache.ignite.internal.processors.cache.GridCacheAbstractFullApiSelfTest; -import org.apache.ignite.internal.processors.cache.persistence.migration.UpgradePendingTreeToPerPartitionTask; -import org.apache.ignite.internal.util.typedef.PA; -import org.apache.ignite.lang.IgniteFuture; -import org.apache.ignite.lang.IgniteInClosure; -import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; -import org.apache.ignite.testframework.GridTestUtils; - -/** - * Test PendingTree upgrading to per-partition basis. Test fill cache with persistence enabled and with ExpirePolicy - * configured on ignite-2.1 version and check if entries will be correctly expired when a new version node started. - * - * Note: Test for ignite-2.3 version will always fails due to entry ttl update fails with assertion on checkpoint lock - * check. 
- */ -public class PdsWithTtlCompatibilityTest extends IgnitePersistenceCompatibilityAbstractTest { - /** */ - static final String TEST_CACHE_NAME = PdsWithTtlCompatibilityTest.class.getSimpleName(); - - /** */ - static final int DURATION_SEC = 10; - - /** */ - private static final int ENTRIES_CNT = 100; - - /** {@inheritDoc} */ - @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { - IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); - - cfg.setPeerClassLoadingEnabled(false); - - cfg.setDataStorageConfiguration( - new DataStorageConfiguration() - .setDefaultDataRegionConfiguration( - new DataRegionConfiguration() - .setMaxSize(32L * 1024 * 1024) - .setPersistenceEnabled(true) - .setCheckpointPageBufferSize(16L * 1024 * 1024) - ).setWalMode(WALMode.LOG_ONLY)); - - return cfg; - } - - /** - * Tests opportunity to read data from previous Ignite DB version. - * - * @throws Exception If failed. - */ - public void testNodeStartByOldVersionPersistenceData_2_1() throws Exception { - doTestStartupWithOldVersion("2.1.0"); - } - - /** - * Tests opportunity to read data from previous Ignite DB version. - * - * @param igniteVer 3-digits version of ignite - * @throws Exception If failed. - */ - protected void doTestStartupWithOldVersion(String igniteVer) throws Exception { - try { - startGrid(1, igniteVer, new ConfigurationClosure(), new PostStartupClosure()); - - stopAllGrids(); - - IgniteEx ignite = startGrid(0); - - assertEquals(1, ignite.context().discovery().topologyVersion()); - - ignite.active(true); - - validateResultingCacheData(ignite, ignite.cache(TEST_CACHE_NAME)); - } - finally { - stopAllGrids(); - } - } - - /** - * @param cache to be filled by different keys and values. Results may be validated in {@link - * #validateResultingCacheData(Ignite, IgniteCache)}. 
- */ - public static void saveCacheData(Cache cache) { - for (int i = 0; i < ENTRIES_CNT; i++) - cache.put(i, "data-" + i); - - //Touch - for (int i = 0; i < ENTRIES_CNT; i++) - assertNotNull(cache.get(i)); - } - - /** - * Asserts cache contained all expected values as it was saved before. - * - * @param cache cache should be filled using {@link #saveCacheData(Cache)}. - */ - public static void validateResultingCacheData(Ignite ignite, - IgniteCache cache) throws IgniteInterruptedCheckedException { - - final long expireTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(DURATION_SEC + 1); - - final IgniteFuture> future = ignite.compute().broadcastAsync(new UpgradePendingTreeToPerPartitionTask()); - - GridTestUtils.waitForCondition(new PA() { - @Override public boolean apply() { - return future.isDone() && expireTime < System.currentTimeMillis(); - } - }, TimeUnit.SECONDS.toMillis(DURATION_SEC + 2)); - - for (Boolean res : future.get()) - assertTrue(res); - - for (int i = 0; i < ENTRIES_CNT; i++) - assertNull(cache.get(i)); - } - - /** */ - public static class ConfigurationClosure implements IgniteInClosure { - /** {@inheritDoc} */ - @Override public void apply(IgniteConfiguration cfg) { - cfg.setLocalHost("127.0.0.1"); - - TcpDiscoverySpi disco = new TcpDiscoverySpi(); - disco.setIpFinder(GridCacheAbstractFullApiSelfTest.LOCAL_IP_FINDER); - - cfg.setDiscoverySpi(disco); - - cfg.setPeerClassLoadingEnabled(false); - - cfg.setMemoryConfiguration(new MemoryConfiguration().setDefaultMemoryPolicySize(256L * 1024 * 1024)); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration().setWalMode(WALMode.LOG_ONLY) - .setCheckpointingPageBufferSize(16L * 1024 * 1024)); - } - } - - /** */ - public static class PostStartupClosure implements IgniteInClosure { - /** {@inheritDoc} */ - @Override public void apply(Ignite ignite) { - ignite.active(true); - - CacheConfiguration cacheCfg = new CacheConfiguration<>(); - cacheCfg.setName(TEST_CACHE_NAME); - 
cacheCfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL); - cacheCfg.setBackups(1); - cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC); - cacheCfg.setExpiryPolicyFactory(AccessedExpiryPolicy.factoryOf(new Duration(TimeUnit.SECONDS, DURATION_SEC))); - cacheCfg.setEagerTtl(true); - cacheCfg.setGroupName("myGroup"); - - IgniteCache cache = ignite.createCache(cacheCfg); - - saveCacheData(cache); - - ignite.active(false); - } - } -} diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java index fcfd5a7939ca4..eaa38afdd6d61 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java @@ -19,7 +19,6 @@ import junit.framework.TestSuite; import org.apache.ignite.compatibility.persistence.DummyPersistenceCompatibilityTest; -import org.apache.ignite.compatibility.PdsWithTtlCompatibilityTest; import org.apache.ignite.compatibility.persistence.FoldersReuseCompatibilityTest; import org.apache.ignite.compatibility.persistence.IgniteUuidCompatibilityTest; import org.apache.ignite.compatibility.persistence.MigratingToWalV2SerializerWithCompactionTest; @@ -37,8 +36,6 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(DummyPersistenceCompatibilityTest.class); - suite.addTestSuite(PdsWithTtlCompatibilityTest.class); - suite.addTestSuite(FoldersReuseCompatibilityTest.class); suite.addTestSuite(MigratingToWalV2SerializerWithCompactionTest.class); diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteCluster.java b/modules/core/src/main/java/org/apache/ignite/IgniteCluster.java index fc0e81bcae2ba..f0492f4321195 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/IgniteCluster.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteCluster.java @@ -457,6 +457,21 @@ public IgniteFuture> startNodesAsync(Collecti */ public void active(boolean active); + /** + * Checks Ignite grid is in read-only mode or not. + * + * @return {@code True} if grid is in read-only mode and {@code False} otherwise. + */ + public boolean readOnly(); + + /** + * Enable or disable Ignite grid read-only mode. + * + * @param readOnly If {@code True} enable read-only mode. If {@code False} disable read-only mode. + * @throws IgniteException If Ignite grid isn't active. + */ + public void readOnly(boolean readOnly) throws IgniteException; + /** * Gets current baseline topology. If baseline topology was not set, will return {@code null}. * diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java index 4f8d06238099f..6d9c4cb23a128 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java @@ -17,13 +17,13 @@ package org.apache.ignite; +import javax.net.ssl.HostnameVerifier; import java.io.Serializable; import java.lang.management.RuntimeMXBean; import java.util.Arrays; import java.util.Iterator; import java.util.Map; import java.util.Properties; -import javax.net.ssl.HostnameVerifier; import org.apache.ignite.cache.CacheEntryProcessor; import org.apache.ignite.cluster.ClusterGroup; import org.apache.ignite.configuration.CacheConfiguration; @@ -33,6 +33,7 @@ import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.marshaller.optimized.OptimizedMarshaller; import org.apache.ignite.internal.processors.rest.GridRestCommand; +import org.apache.ignite.spi.communication.tcp.TcpCommunicationMetricsListener; import org.apache.ignite.stream.StreamTransformer; import 
org.jetbrains.annotations.Nullable; @@ -89,8 +90,12 @@ public final class IgniteSystemProperties { /** * If this system property is set to {@code false} - no checks for new versions will * be performed by Ignite. By default, Ignite periodically checks for the new - * version and prints out the message into the log if new version of Ignite is + * version and prints out the message into the log if a new version of Ignite is * available for download. + * + * Update notifier enabled flag is a cluster-wide value and determined according to the local setting + * during the start of the first node in the cluster. The chosen value will survive the first node shutdown + * and will override the property value on all newly joining nodes. */ public static final String IGNITE_UPDATE_NOTIFIER = "IGNITE_UPDATE_NOTIFIER"; @@ -785,8 +790,10 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_WAL_LOG_TX_RECORDS = "IGNITE_WAL_LOG_TX_RECORDS"; - /** If this property is set, {@link DataStorageConfiguration#writeThrottlingEnabled} will be overridden to true - * independent of initial value in configuration. */ + /** + * If this property is set, {@link DataStorageConfiguration#isWriteThrottlingEnabled()} + * will be overridden to {@code true} regardless the initial value in the configuration. + */ public static final String IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED = "IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED"; /** @@ -882,6 +889,24 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_LOADED_PAGES_BACKWARD_SHIFT_MAP = "IGNITE_LOADED_PAGES_BACKWARD_SHIFT_MAP"; + /** + * Property for setup percentage of archive size for checkpoint trigger. 
Default value is 0.25 + */ + public static final String IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE = "IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE"; + + /** + * Property for setup percentage of WAL archive size to calculate threshold since which removing of old archive should be started. + * Default value is 0.5 + */ + public static final String IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE = "IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE"; + + /** + * Threshold time (in millis) to print warning to log if waiting for next wal segment took longer than the threshold. + * + * Default value is 1000 ms. + */ + public static final String IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT = "IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT"; + /** * Count of WAL compressor worker threads. Default value is 4. */ @@ -941,7 +966,7 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_DUMP_THREADS_ON_FAILURE = "IGNITE_DUMP_THREADS_ON_FAILURE"; - /** + /** * Throttling timeout in millis which avoid excessive PendingTree access on unwind if there is nothing to clean yet. * * Default is 500 ms. @@ -1004,6 +1029,13 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_ALLOW_START_CACHES_IN_PARALLEL = "IGNITE_ALLOW_START_CACHES_IN_PARALLEL"; + /** + * Allows to log additional information about all restored partitions after binary and logical recovery phases. + * + * Default is {@code true}. + */ + public static final String IGNITE_RECOVERY_VERBOSE_LOGGING = "IGNITE_RECOVERY_VERBOSE_LOGGING"; + /** * Disables cache interceptor triggering in case of conflicts. * @@ -1121,6 +1153,68 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP = "IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP"; + /** + * Index rebuilding parallelism level. 
If specified, sets the count of threads that are used for index rebuilding + * and can only be greater than 0, otherwise default value will be used. Maximum count of threads + * can't be greater than total available processors count. + * Default value is minimum of 4 and processors count / 4, but always greater than 0. + */ + public static final String INDEX_REBUILDING_PARALLELISM = "INDEX_REBUILDING_PARALLELISM"; + + /** Enable write rebalance statistics into log. Default: false */ + public static final String IGNITE_WRITE_REBALANCE_STATISTICS = "IGNITE_WRITE_REBALANCE_STATISTICS"; + + /** Enable write rebalance statistics by partitions into log. Default: false */ + public static final String IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS = + "IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS"; + + /** + * Threshold timeout for long transactions, if transaction exceeds it, it will be dumped in log with + * information about how much time it spent in system time (time while acquiring locks, preparing, + * committing, etc) and user time (time when client node runs some code while holding transaction and not + * waiting it). Equals 0 if not set. No long transactions are dumped in log if neither this parameter + * nor {@link #IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT} is set. + */ + public static final String IGNITE_LONG_TRANSACTION_TIME_DUMP_THRESHOLD = "IGNITE_LONG_TRANSACTION_TIME_DUMP_THRESHOLD"; + + /** + * The coefficient for samples of completed transactions that will be dumped in log. Must be float value + * between 0.0 and 1.0 inclusive. Default value is 0.0. + */ + public static final String IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT = + "IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT"; + + /** + * The limit of samples of completed transactions that will be dumped in log per second, if + * {@link #IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT} is above 0.0. Must be integer value + * greater than 0. Default value is 5. 
+ */ + public static final String IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_PER_SECOND_LIMIT = + "IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_PER_SECOND_LIMIT"; + + /** + * Disables smart DR throttling. Default value is false. + */ + public static final String IGNITE_DISABLE_SMART_DR_THROTTLING = + "IGNITE_DISABLE_SMART_DR_THROTTLING"; + + /** */ + public static final String IGNITE_USE_POOL_FOR_LAZY_QUERIES = "IGNITE_USE_POOL_FOR_LAZY_QUERIES"; + + /** + * Enables logging time between request and response messages.
+ * Default: {@code false}
 + * See {@link TcpCommunicationMetricsListener}.
 + */ + public static final String IGNITE_ENABLE_MESSAGES_TIME_LOGGING = "IGNITE_ENABLE_MESSAGES_TIME_LOGGING"; + + /** + * Bounds for histogram metrics in milliseconds.
+ * Default: {10, 20, 40, 80, 160, 320, 500, 1000, 2000, 4000} + * {see {@link TcpCommunicationMetricsListener}} + */ + public static final String IGNITE_COMM_SPI_TIME_HIST_BOUNDS = "IGNITE_COMM_SPI_TIME_HIST_BOUNDS"; + /** * Enforces singleton. */ diff --git a/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java b/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java index 951b48bd1f678..714303f58129e 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java @@ -238,6 +238,7 @@ public interface CacheMetrics { /** * Gets number of non-{@code null} values in the cache. + * Note this method will always return {@code 0} * * @return Number of non-{@code null} values in the cache. * @deprecated Can overflow. Use {@link CacheMetrics#getCacheSize()} instead. @@ -254,6 +255,7 @@ public interface CacheMetrics { /** * Gets number of keys in the cache, possibly with {@code null} values. + * Note this method will always return {@code 0} * * @return Number of keys in the cache. * @deprecated Can overflow. Use {@link CacheMetrics#getCacheSize()} instead. diff --git a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java index 81fd50b8ee5fa..e20224746991b 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java @@ -686,13 +686,10 @@ private static QueryEntity convert(QueryEntityTypeDescriptor desc) { * @return Type descriptor. 
*/ private static QueryEntityTypeDescriptor processKeyAndValueClasses( - Class keyCls, - Class valCls + @NotNull Class keyCls, + @NotNull Class valCls ) { - QueryEntityTypeDescriptor d = new QueryEntityTypeDescriptor(); - - d.keyClass(keyCls); - d.valueClass(valCls); + QueryEntityTypeDescriptor d = new QueryEntityTypeDescriptor(keyCls, valCls); processAnnotationsInClass(true, d.keyClass(), d, null); processAnnotationsInClass(false, d.valueClass(), d, null); diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java b/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java index 549be542fdedf..b0ec17016a60d 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java @@ -30,22 +30,22 @@ /** * API for configuring continuous cache queries. *

- * Continuous queries allow to register a remote filter and a local listener + * Continuous queries allow registering a remote filter and a local listener * for cache updates. If an update event passes the filter, it will be sent to - * the node that executed the query and local listener will be notified. + * the node that executed the query, and local listener will be notified. *

- * Additionally, you can execute initial query to get currently existing data. + * Additionally, you can execute an initial query to get currently existing data. * Query can be of any type (SQL, TEXT or SCAN) and can be set via {@link #setInitialQuery(Query)} * method. *

* Query can be executed either on all nodes in topology using {@link IgniteCache#query(Query)} * method, or only on the local node, if {@link Query#setLocal(boolean)} parameter is set to {@code true}. - * Note that in case query is distributed and a new node joins, it will get the remote - * filter for the query during discovery process before it actually joins topology, + * Note that if the query is distributed and a new node joins, it will get the remote + * filter for the query during discovery process before it actually joins a topology, * so no updates will be missed. *

Example

- * As an example, suppose we have cache with {@code 'Person'} objects and we need - * to query all persons with salary above 1000. + * As an example, suppose we have a cache with {@code 'Person'} objects and we need + * to query for all people with salary above 1000. *

* Here is the {@code Person} class: *

@@ -60,17 +60,17 @@
  * }
  * 
*

- * You can create and execute continuous query like so: + * You can create and execute a continuous query like so: *

- * // Create new continuous query.
+ * // Create a new continuous query.
  * ContinuousQuery<Long, Person> qry = new ContinuousQuery<>();
  *
- * // Initial iteration query will return all persons with salary above 1000.
+ * // Initial iteration query will return all people with salary above 1000.
  * qry.setInitialQuery(new ScanQuery<>((id, p) -> p.getSalary() > 1000));
  *
  *
  * // Callback that is called locally when update notifications are received.
- * // It simply prints out information about all created persons.
+ * // It simply prints out information about all created or modified records.
  * qry.setLocalListener((evts) -> {
  *     for (CacheEntryEvent<? extends Long, ? extends Person> e : evts) {
  *         Person p = e.getValue();
@@ -79,29 +79,29 @@
  *     }
  * });
  *
- * // Continuous listener will be notified for persons with salary above 1000.
+ * // The continuous listener will be notified for people with salary above 1000.
  * qry.setRemoteFilter(evt -> evt.getValue().getSalary() > 1000);
  *
- * // Execute query and get cursor that iterates through initial data.
+ * // Execute the query and get a cursor that iterates through the initial data.
  * QueryCursor<Cache.Entry<Long, Person>> cur = cache.query(qry);
  * 
- * This will execute query on all nodes that have cache you are working with and - * listener will start to receive notifications for cache updates. + * This will execute the query on all nodes that have the cache you are working with and + * the listener will start receiving notifications for cache updates. *

* To stop receiving updates call {@link QueryCursor#close()} method: *

  * cur.close();
  * 
- * Note that this works even if you didn't provide initial query. Cursor will + * Note that this works even if you didn't provide the initial query. Cursor will * be empty in this case, but it will still unregister listeners when {@link QueryCursor#close()} * is called. *

* {@link IgniteAsyncCallback} annotation is supported for {@link CacheEntryEventFilter} * (see {@link #setRemoteFilterFactory(Factory)}) and {@link CacheEntryUpdatedListener} * (see {@link #setLocalListener(CacheEntryUpdatedListener)}). - * If filter and/or listener are annotated with {@link IgniteAsyncCallback} then annotated callback - * is executed in async callback pool (see {@link IgniteConfiguration#getAsyncCallbackPoolSize()}) - * and notification order is kept the same as update order for given cache key. + * If a filter and/or listener are annotated with {@link IgniteAsyncCallback} then the annotated callback + * is executed in an async callback pool (see {@link IgniteConfiguration#getAsyncCallbackPoolSize()}) + * and the notification order is kept the same as the update order for a given cache key. * * @see ContinuousQueryWithTransformer * @see IgniteAsyncCallback @@ -130,10 +130,10 @@ public ContinuousQuery setInitialQuery(Query> initQry) { } /** - * Sets local callback. This callback is called only in local node when new updates are received. + * Sets a local callback. This callback is called only on the local node when new updates are received. *

- * The callback predicate accepts ID of the node from where updates are received and collection - * of received entries. Note that for removed entries value will be {@code null}. + * The callback predicate accepts the ID of the node from where updates are received and a collection + * of the received entries. Note that for removed entries values will be {@code null}. *

* If the predicate returns {@code false}, query execution will be cancelled. *

@@ -141,7 +141,7 @@ public ContinuousQuery setInitialQuery(Query> initQry) { * synchronization or transactional cache operations), should be executed asynchronously without * blocking the thread that called the callback. Otherwise, you can get deadlocks. *

- * If local listener are annotated with {@link IgniteAsyncCallback} then it is executed in async callback pool + * If a local listener is annotated with {@link IgniteAsyncCallback} then it is executed in an async callback pool * (see {@link IgniteConfiguration#getAsyncCallbackPoolSize()}) that allow to perform a cache operations. * * @param locLsnr Local callback. @@ -157,8 +157,6 @@ public ContinuousQuery setLocalListener(CacheEntryUpdatedListener lo } /** - * Gets local listener. - * * @return Local listener. */ public CacheEntryUpdatedListener getLocalListener() { @@ -214,7 +212,7 @@ public ContinuousQuery setAutoUnsubscribe(boolean autoUnsubscribe) { } /** - * Sets whether this query should be executed on local node only. + * Sets whether this query should be executed on a local node only. * * Note: backup event queues are not kept for local continuous queries. It may lead to loss of notifications in case * of node failures. Use {@link ContinuousQuery#setRemoteFilterFactory(Factory)} to register cache event listeners
*/ -package org.apache.ignite.internal.processors.query.h2; +package org.apache.ignite.cache.query; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.lang.IgniteBiTuple; - -import java.sql.ResultSet; +import org.apache.ignite.IgniteException; /** - * Special key/value iterator based on database result set. + * The exception is thrown if a query must be retried because database schema or topology are changed. */ -public class H2KeyValueIterator extends H2ResultSetIterator> { +public class QueryRetryException extends IgniteException { /** */ private static final long serialVersionUID = 0L; /** - * @param data Data array. - * @throws IgniteCheckedException If failed. + * @param tableName Table name. */ - protected H2KeyValueIterator(ResultSet data) throws IgniteCheckedException { - super(data, null, null, null); - } - - /** {@inheritDoc} */ - @SuppressWarnings("unchecked") - @Override protected IgniteBiTuple createRow() { - K key = (K)row[0]; - V val = (V)row[1]; - - return new IgniteBiTuple<>(key, val); + public QueryRetryException(String tableName) { + super("Table was modified concurrently (please retry the query): " + tableName); } -} +} \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java b/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java index 7db42d3b239cf..46c79cb41cb25 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java @@ -49,11 +49,12 @@ public class DefaultCommunicationFailureResolver implements CommunicationFailure if (largestCluster == null) return; - log.info("Communication problem resolver found fully connected independent cluster [" - + "serverNodesCnt=" + largestCluster.srvNodesCnt + ", " - + "clientNodesCnt=" + 
largestCluster.connectedClients.size() + ", " - + "totalAliveNodes=" + ctx.topologySnapshot().size() + ", " - + "serverNodesIds=" + clusterNodeIds(largestCluster.srvNodesSet, ctx.topologySnapshot(), 1000) + "]"); + if (log.isInfoEnabled()) + log.info("Communication problem resolver found fully connected independent cluster [" + + "serverNodesCnt=" + largestCluster.srvNodesCnt + ", " + + "clientNodesCnt=" + largestCluster.connectedClients.size() + ", " + + "totalAliveNodes=" + ctx.topologySnapshot().size() + ", " + + "serverNodesIds=" + clusterNodeIds(largestCluster.srvNodesSet, ctx.topologySnapshot(), 1000) + "]"); keepCluster(ctx, largestCluster); } diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java index db0a118eb97be..ef5930cb16d97 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java @@ -223,6 +223,9 @@ public class IgniteConfiguration { @SuppressWarnings("UnnecessaryBoxing") public static final Long DFLT_FAILURE_DETECTION_TIMEOUT = new Long(10_000); + /** Default system worker blocked timeout in millis. */ + public static final Long DFLT_SYS_WORKER_BLOCKED_TIMEOUT = 2 * 60 * 1000L; + /** Default failure detection timeout for client nodes in millis. */ @SuppressWarnings("UnnecessaryBoxing") public static final Long DFLT_CLIENT_FAILURE_DETECTION_TIMEOUT = new Long(30_000); @@ -433,7 +436,7 @@ public class IgniteConfiguration { private Long failureDetectionTimeout = DFLT_FAILURE_DETECTION_TIMEOUT; /** Timeout for blocked system workers detection. */ - private Long sysWorkerBlockedTimeout; + private Long sysWorkerBlockedTimeout = DFLT_SYS_WORKER_BLOCKED_TIMEOUT; /** Failure detection timeout for client nodes. 
*/ private Long clientFailureDetectionTimeout = DFLT_CLIENT_FAILURE_DETECTION_TIMEOUT; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java index 2cb3dfad5e487..6ce9001138b1b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java @@ -234,7 +234,7 @@ public ComputeTaskSession getTaskSession() { /** {@inheritDoc} */ @Override public boolean cancel() throws IgniteCheckedException { - ctx.security().authorize(ses.getTaskName(), SecurityPermission.TASK_CANCEL, null); + ctx.security().authorize(ses.getTaskName(), SecurityPermission.TASK_CANCEL); if (onCancelled()) { ctx.task().onCancelled(ses.getId()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java index ac568f065bcc7..7d9f74e5e6273 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java @@ -44,6 +44,8 @@ import org.apache.ignite.internal.processors.continuous.GridContinuousBatchAdapter; import org.apache.ignite.internal.processors.continuous.GridContinuousHandler; import org.apache.ignite.internal.processors.platform.PlatformEventFilterListener; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.P2; import org.apache.ignite.internal.util.typedef.T2; @@ -92,6 +94,9 @@ class GridEventConsumeHandler implements GridContinuousHandler { /** Listener. */ private GridLocalEventListener lsnr; + /** P2P unmarshalling future. 
*/ + private IgniteInternalFuture p2pUnmarshalFut = new GridFinishedFuture<>(); + /** * Required by {@link Externalizable}. */ @@ -142,6 +147,21 @@ public GridEventConsumeHandler() { // No-op. } + /** + * Performs remote filter initialization. + * + * @param filter Remote filter. + * @param ctx Kernal context. + * @throws IgniteCheckedException In case if initialization failed. + */ + private void initFilter(IgnitePredicate filter, GridKernalContext ctx) throws IgniteCheckedException { + if (filter != null) + ctx.resource().injectGeneric(filter); + + if (filter instanceof PlatformEventFilterListener) + ((PlatformEventFilterListener)filter).initialize(ctx); + } + /** {@inheritDoc} */ @Override public RegisterStatus register(final UUID nodeId, final UUID routineId, final GridKernalContext ctx) throws IgniteCheckedException { @@ -152,12 +172,6 @@ public GridEventConsumeHandler() { if (cb != null) ctx.resource().injectGeneric(cb); - if (filter != null) - ctx.resource().injectGeneric(filter); - - if (filter instanceof PlatformEventFilterListener) - ((PlatformEventFilterListener)filter).initialize(ctx); - final boolean loc = nodeId.equals(ctx.localNodeId()); lsnr = new GridLocalEventListener() { @@ -257,7 +271,18 @@ public GridEventConsumeHandler() { if (F.isEmpty(types)) types = EVTS_ALL; - ctx.event().addLocalEventListener(lsnr, types); + p2pUnmarshalFut.listen((fut) -> { + if (fut.error() == null) { + try { + initFilter(filter, ctx); + } + catch (IgniteCheckedException e) { + throw F.wrap(e); + } + + ctx.event().addLocalEventListener(lsnr, types); + } + }); return RegisterStatus.REGISTERED; } @@ -382,13 +407,22 @@ public GridEventConsumeHandler() { assert ctx.config().isPeerClassLoadingEnabled(); if (filterBytes != null) { - GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, - depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); + try { + GridDeployment dep = 
ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, + depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); - if (dep == null) - throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + if (dep == null) + throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + + filter = U.unmarshal(ctx, filterBytes, U.resolveClassLoader(dep.classLoader(), ctx.config())); - filter = U.unmarshal(ctx, filterBytes, U.resolveClassLoader(dep.classLoader(), ctx.config())); + ((GridFutureAdapter)p2pUnmarshalFut).onDone(); + } + catch (IgniteCheckedException e) { + ((GridFutureAdapter)p2pUnmarshalFut).onDone(e); + + throw e; + } } } @@ -449,6 +483,7 @@ public GridEventConsumeHandler() { boolean b = in.readBoolean(); if (b) { + p2pUnmarshalFut = new GridFutureAdapter<>(); filterBytes = U.readByteArray(in); clsName = U.readString(in); depInfo = (GridDeploymentInfo)in.readObject(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java index c38486a27a72d..9061e9927e635 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java @@ -61,7 +61,7 @@ import org.apache.ignite.internal.processors.resource.GridResourceProcessor; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import org.apache.ignite.internal.processors.schedule.IgniteScheduleProcessorAdapter; -import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import 
org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; @@ -405,11 +405,11 @@ public interface GridKernalContext extends Iterable { public GridCollisionManager collision(); /** - * Gets authentication processor. + * Gets instance of {@link IgniteSecurity}. * - * @return Authentication processor. + * @return Ignite security. */ - public GridSecurityProcessor security(); + public IgniteSecurity security(); /** * Gets load balancing manager. @@ -620,6 +620,13 @@ public interface GridKernalContext extends Iterable { */ public ExecutorService getSchemaExecutorService(); + /** + * Executor service that is in charge of processing rebalance messages. + * + * @return Executor service that is in charge of processing rebalance messages. + */ + public ExecutorService getRebalanceExecutorService(); + /** * Gets exception registry. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java index b05d10913cb9d..8bdbf8a5dff3b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java @@ -80,7 +80,7 @@ import org.apache.ignite.internal.processors.resource.GridResourceProcessor; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import org.apache.ignite.internal.processors.schedule.IgniteScheduleProcessorAdapter; -import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; @@ -157,7 +157,7 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable /** */ 
@GridToStringExclude - private GridSecurityProcessor securityProc; + private IgniteSecurity security; /** */ @GridToStringExclude @@ -362,7 +362,11 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable /** */ @GridToStringExclude - Map customExecSvcs; + protected ExecutorService rebalanceExecSvc; + + /** */ + @GridToStringExclude + private Map customExecSvcs; /** */ @GridToStringExclude @@ -449,6 +453,7 @@ public GridKernalContextImpl() { * @param callbackExecSvc Callback executor service. * @param qryExecSvc Query executor service. * @param schemaExecSvc Schema executor service. + * @param rebalanceExecSvc Rebalance executor service. * @param customExecSvcs Custom named executors. * @param plugins Plugin providers. * @param workerRegistry Worker registry. @@ -476,6 +481,7 @@ protected GridKernalContextImpl( IgniteStripedThreadPoolExecutor callbackExecSvc, ExecutorService qryExecSvc, ExecutorService schemaExecSvc, + ExecutorService rebalanceExecSvc, @Nullable Map customExecSvcs, List plugins, IgnitePredicate clsFilter, @@ -505,6 +511,7 @@ protected GridKernalContextImpl( this.callbackExecSvc = callbackExecSvc; this.qryExecSvc = qryExecSvc; this.schemaExecSvc = schemaExecSvc; + this.rebalanceExecSvc = rebalanceExecSvc; this.customExecSvcs = customExecSvcs; this.workersRegistry = workerRegistry; this.hnd = hnd; @@ -567,8 +574,6 @@ else if (comp instanceof GridFailoverManager) failoverMgr = (GridFailoverManager)comp; else if (comp instanceof GridCollisionManager) colMgr = (GridCollisionManager)comp; - else if (comp instanceof GridSecurityProcessor) - securityProc = (GridSecurityProcessor)comp; else if (comp instanceof GridLoadBalancerManager) loadMgr = (GridLoadBalancerManager)comp; else if (comp instanceof GridIndexingManager) @@ -643,6 +648,8 @@ else if (comp instanceof GridInternalSubscriptionProcessor) internalSubscriptionProc = (GridInternalSubscriptionProcessor)comp; else if (comp instanceof IgniteAuthenticationProcessor) authProc = 
(IgniteAuthenticationProcessor)comp; + else if (comp instanceof IgniteSecurity) + security = (IgniteSecurity)comp; else if (comp instanceof DiagnosticProcessor) diagnosticProcessor = (DiagnosticProcessor)comp; else if (!(comp instanceof DiscoveryNodeValidationProcessor @@ -803,8 +810,8 @@ else if (helper instanceof HadoopHelper) } /** {@inheritDoc} */ - @Override public GridSecurityProcessor security() { - return securityProc; + @Override public IgniteSecurity security() { + return security; } /** {@inheritDoc} */ @@ -1087,6 +1094,11 @@ protected Object readResolve() throws ObjectStreamException { return schemaExecSvc; } + /** {@inheritDoc} */ + @Override public ExecutorService getRebalanceExecutorService() { + return rebalanceExecSvc; + } + /** {@inheritDoc} */ @Override public Map customExecutors() { return customExecSvcs; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java index c146eca255aba..688ca17fad53b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java @@ -25,12 +25,15 @@ import java.util.Map; import java.util.UUID; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.internal.managers.deployment.GridDeployment; import org.apache.ignite.internal.managers.deployment.GridDeploymentInfoBean; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.continuous.GridContinuousBatch; import org.apache.ignite.internal.processors.continuous.GridContinuousBatchAdapter; import org.apache.ignite.internal.processors.continuous.GridContinuousHandler; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.future.GridFutureAdapter; 
import org.apache.ignite.internal.util.lang.GridPeerDeployAware; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.S; @@ -66,6 +69,9 @@ public class GridMessageListenHandler implements GridContinuousHandler { /** */ private boolean depEnabled; + /** P2P unmarshalling future. */ + private IgniteInternalFuture p2pUnmarshalFut = new GridFinishedFuture<>(); + /** * Required by {@link Externalizable}. */ @@ -84,22 +90,6 @@ public GridMessageListenHandler(@Nullable Object topic, IgniteBiPredicate { + if (fut.error() == null) + ctx.io().addUserMessageListener(topic, pred, nodeId); + }); return RegisterStatus.REGISTERED; } @@ -180,18 +172,27 @@ public GridMessageListenHandler(GridMessageListenHandler orig) { assert ctx != null; assert ctx.config().isPeerClassLoadingEnabled(); - GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, - depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); + try { + GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, + depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); - if (dep == null) - throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + if (dep == null) + throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + + ClassLoader ldr = dep.classLoader(); - ClassLoader ldr = dep.classLoader(); + if (topicBytes != null) + topic = U.unmarshal(ctx, topicBytes, U.resolveClassLoader(ldr, ctx.config())); - if (topicBytes != null) - topic = U.unmarshal(ctx, topicBytes, U.resolveClassLoader(ldr, ctx.config())); + pred = U.unmarshal(ctx, predBytes, U.resolveClassLoader(ldr, ctx.config())); + } + catch (IgniteCheckedException | IgniteException e) { + ((GridFutureAdapter)p2pUnmarshalFut).onDone(e); + + throw e; + } - pred = U.unmarshal(ctx, predBytes, 
U.resolveClassLoader(ldr, ctx.config())); + ((GridFutureAdapter)p2pUnmarshalFut).onDone(); } /** {@inheritDoc} */ @@ -250,6 +251,7 @@ public GridMessageListenHandler(GridMessageListenHandler orig) { depEnabled = in.readBoolean(); if (depEnabled) { + p2pUnmarshalFut = new GridFutureAdapter<>(); topicBytes = U.readByteArray(in); predBytes = U.readByteArray(in); clsName = U.readString(in); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java index 030e2dbe05956..f19fd4e56142f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java @@ -172,6 +172,9 @@ public IgniteEventsImpl(GridKernalContext ctx, ClusterGroupAdapter prj, boolean autoUnsubscribe, prj.predicate())); } + catch (IgniteCheckedException e) { + throw U.convertException(e); + } finally { unguard(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java index 188a538a998cd..9ec670dfad761 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java @@ -22,6 +22,7 @@ import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; import org.apache.ignite.spi.communication.tcp.messages.HandshakeWaitMessage; +import static org.apache.ignite.IgniteSystemProperties.getBoolean; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_FEATURES; /** @@ -55,8 +56,34 @@ public enum IgniteFeatures { /** Command which allow to detect and cleanup garbage which could left after destroying caches in shared groups */ FIND_AND_DELETE_GARBAGE_COMMAND(8), + /** Support of cluster read-only mode. 
*/ + CLUSTER_READ_ONLY_MODE(9), + /** Supports tracking update counter for transactions. */ - TX_TRACKING_UPDATE_COUNTER(12); + TX_TRACKING_UPDATE_COUNTER(12), + + /** Ignite security processor. */ + IGNITE_SECURITY_PROCESSOR(13), + + /** Replacing TcpDiscoveryNode field with nodeId field in discovery messages. */ + TCP_DISCOVERY_MESSAGE_NODE_COMPACT_REPRESENTATION(14), + + /** LRT system and user time dump settings. */ + LRT_SYSTEM_USER_TIME_DUMP_SETTINGS(18), + + /** + * A mode when data nodes throttle update rate according to DR sender load + */ + DR_DATA_NODE_SMART_THROTTLING(19), + + /** Support of DR events from Web Console. */ + WC_DR_EVENTS(20), + + /** Support of chain parameter in snapshot delete task for Web Console. */ + WC_SNAPSHOT_CHAIN_MODE(22), + + /** Support of DR-specific visor tasks used by control utility. */ + DR_CONTROL_UTILITY(25); /** * Unique feature identifier. @@ -101,6 +128,9 @@ public static boolean nodeSupports(ClusterNode clusterNode, IgniteFeatures featu * @return {@code True} if feature is declared to be supported by remote node. */ public static boolean nodeSupports(byte[] featuresAttrBytes, IgniteFeatures feature) { + if (featuresAttrBytes == null) + return false; + int featureId = feature.getFeatureId(); // Same as "BitSet.valueOf(features).get(featureId)" @@ -139,6 +169,10 @@ public static byte[] allFeatures() { final BitSet set = new BitSet(); for (IgniteFeatures value : IgniteFeatures.values()) { + // After rolling upgrade, our security has more strict validation. This may come as a surprise to customers. 
+ if (IGNITE_SECURITY_PROCESSOR == value && !getBoolean(IGNITE_SECURITY_PROCESSOR.name(), false)) + continue; + final int featureId = value.getFeatureId(); assert !set.get(featureId) : "Duplicate feature ID found for [" + value + "] having same ID [" diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index 867d6039ae0f7..0e30ae3b5a721 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -161,6 +161,9 @@ import org.apache.ignite.internal.processors.resource.GridSpringResourceContext; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; +import org.apache.ignite.internal.processors.security.NoOpIgniteSecurityProcessor; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; @@ -171,6 +174,7 @@ import org.apache.ignite.internal.suggestions.JvmConfigurationSuggestions; import org.apache.ignite.internal.suggestions.OsConfigurationSuggestions; import org.apache.ignite.internal.util.StripedExecutor; +import org.apache.ignite.internal.util.TimeBag; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -218,6 +222,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_SKIP_CONFIGURATION_CONSISTENCY_CHECK; import static 
org.apache.ignite.IgniteSystemProperties.IGNITE_STARVATION_CHECK_INTERVAL; import static org.apache.ignite.IgniteSystemProperties.IGNITE_SUCCESS_FILE; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_USE_POOL_FOR_LAZY_QUERIES; import static org.apache.ignite.IgniteSystemProperties.getBoolean; import static org.apache.ignite.IgniteSystemProperties.snapshot; import static org.apache.ignite.internal.GridKernalState.DISCONNECTED; @@ -264,6 +269,7 @@ import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_SPI_CLASS; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_TX_CONFIG; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_USER_NAME; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_USE_POOL_FOR_LAZY_QUERIES; import static org.apache.ignite.internal.IgniteVersionUtils.ACK_VER_STR; import static org.apache.ignite.internal.IgniteVersionUtils.BUILD_TSTAMP_STR; import static org.apache.ignite.internal.IgniteVersionUtils.COPYRIGHT; @@ -863,6 +869,7 @@ private void ackClassPathContent() { * @param callbackExecSvc Callback executor service. * @param qryExecSvc Query executor service. * @param schemaExecSvc Schema executor service. + * @param rebalanceExecSvc Rebalance executor service. * @param customExecSvcs Custom named executors. * @param errHnd Error handler to use for notification about startup problems. * @param workerRegistry Worker registry. 
@@ -887,10 +894,12 @@ public void start( IgniteStripedThreadPoolExecutor callbackExecSvc, ExecutorService qryExecSvc, ExecutorService schemaExecSvc, + ExecutorService rebalanceExecSvc, @Nullable final Map customExecSvcs, GridAbsClosure errHnd, WorkersRegistry workerRegistry, - Thread.UncaughtExceptionHandler hnd + Thread.UncaughtExceptionHandler hnd, + TimeBag startTimer ) throws IgniteCheckedException { gw.compareAndSet(null, new GridKernalGatewayImpl(cfg.getIgniteInstanceName())); @@ -1008,6 +1017,7 @@ public void start( callbackExecSvc, qryExecSvc, schemaExecSvc, + rebalanceExecSvc, customExecSvcs, plugins, MarshallerUtils.classNameFilter(this.getClass().getClassLoader()), @@ -1079,7 +1089,7 @@ public void start( startProcessor(new GridTimeoutProcessor(ctx)); // Start security processors. - startProcessor(createComponent(GridSecurityProcessor.class, ctx)); + startProcessor(securityProcessor()); // Start SPI managers. // NOTE: that order matters as there are dependencies between managers. @@ -1108,7 +1118,13 @@ public void start( startProcessor(createComponent(DiscoveryNodeValidationProcessor.class, ctx)); startProcessor(new GridAffinityProcessor(ctx)); startProcessor(createComponent(GridSegmentationProcessor.class, ctx)); + + startTimer.finishGlobalStage("Start managers"); + startProcessor(createComponent(IgniteCacheObjectProcessor.class, ctx)); + + startTimer.finishGlobalStage("Configure binary metadata"); + startProcessor(createComponent(IGridClusterStateProcessor.class, ctx)); startProcessor(new IgniteAuthenticationProcessor(ctx)); startProcessor(new GridCacheProcessor(ctx)); @@ -1128,11 +1144,15 @@ public void start( startProcessor(createComponent(PlatformProcessor.class, ctx)); startProcessor(new GridMarshallerMappingProcessor(ctx)); + startTimer.finishGlobalStage("Start processors"); + // Start plugins. 
for (PluginProvider provider : ctx.plugins().allProviders()) { ctx.add(new GridPluginComponent(provider)); provider.start(ctx.plugins().pluginContextForProvider(provider)); + + startTimer.finishGlobalStage("Start '"+ provider.name() + "' plugin"); } // Start platform plugins. @@ -1143,9 +1163,11 @@ public void start( fillNodeAttributes(clusterProc.updateNotifierEnabled()); - ctx.cache().context().database().startMemoryRestore(ctx); + ctx.cache().context().database().startMemoryRestore(ctx, startTimer); ctx.recoveryMode(false); + + startTimer.finishGlobalStage("Finish recovery"); } catch (Throwable e) { U.error( @@ -1169,6 +1191,8 @@ public void start( gw.writeUnlock(); } + startTimer.finishGlobalStage("Join topology"); + // Check whether physical RAM is not exceeded. checkPhysicalRam(); @@ -1206,6 +1230,8 @@ public void start( else active = joinData.active(); + startTimer.finishGlobalStage("Await transition"); + boolean recon = false; // Callbacks. @@ -1252,7 +1278,7 @@ public void start( // Register MBeans. mBeansMgr.registerAllMBeans(utilityCachePool, execSvc, svcExecSvc, sysExecSvc, stripedExecSvc, p2pExecSvc, mgmtExecSvc, igfsExecSvc, dataStreamExecSvc, restExecSvc, affExecSvc, idxExecSvc, callbackExecSvc, - qryExecSvc, schemaExecSvc, customExecSvcs, ctx.workersRegistry()); + qryExecSvc, schemaExecSvc, rebalanceExecSvc, customExecSvcs, ctx.workersRegistry()); // Lifecycle bean notifications. 
notifyLifecycleBeans(AFTER_NODE_START); @@ -1416,7 +1442,8 @@ private long checkPoolStarvation( dblFmt.format(freeNonHeapPct) + "%, comm=" + dblFmt.format(nonHeapCommInMBytes) + "MB]" + NL + " ^-- Outbound messages queue [size=" + m.getOutboundMessagesQueueSize() + "]" + NL + " ^-- " + createExecutorDescription("Public thread pool", execSvc) + NL + - " ^-- " + createExecutorDescription("System thread pool", sysExecSvc); + " ^-- " + createExecutorDescription("System thread pool", sysExecSvc) + NL + + " ^-- " + createExecutorDescription("Striped thread pool", stripedExecSvc); if (customExecSvcs != null) { StringBuilder customSvcsMsg = new StringBuilder(); @@ -1472,6 +1499,19 @@ private long checkPoolStarvation( if (!isDaemon()) ctx.discovery().ackTopology(ctx.discovery().localJoin().joinTopologyVersion().topologyVersion(), EventType.EVT_NODE_JOINED, localNode()); + + startTimer.finishGlobalStage("Await exchange"); + } + + /** + * @return GridProcessor that implements {@link IgniteSecurity} + */ + private GridProcessor securityProcessor() throws IgniteCheckedException { + GridSecurityProcessor prc = createComponent(GridSecurityProcessor.class, ctx); + + return prc != null && prc.enabled() + ? 
new IgniteSecurityProcessor(ctx, prc) + : new NoOpIgniteSecurityProcessor(ctx, prc); } /** @@ -1481,19 +1521,26 @@ private long checkPoolStarvation( * @param execSvc service to create a description for */ private String createExecutorDescription(String execSvcName, ExecutorService execSvc) { + int poolSize = 0; int poolActiveThreads = 0; - int poolIdleThreads = 0; int poolQSize = 0; if (execSvc instanceof ThreadPoolExecutor) { ThreadPoolExecutor exec = (ThreadPoolExecutor)execSvc; - int poolSize = exec.getPoolSize(); - + poolSize = exec.getPoolSize(); poolActiveThreads = Math.min(poolSize, exec.getActiveCount()); - poolIdleThreads = poolSize - poolActiveThreads; poolQSize = exec.getQueue().size(); } + else if (execSvc instanceof StripedExecutor) { + StripedExecutor exec = (StripedExecutor) execSvc; + + poolSize = exec.stripes(); + poolActiveThreads = exec.activeStripesCount(); + poolQSize = exec.queueSize(); + } + + int poolIdleThreads = poolSize - poolActiveThreads; return execSvcName + " [active=" + poolActiveThreads + ", idle=" + poolIdleThreads + ", qSize=" + poolQSize + "]"; } @@ -1652,6 +1699,7 @@ private void suggestOptimizations(IgniteConfiguration cfg) { private void fillNodeAttributes(boolean notifyEnabled) throws IgniteCheckedException { ctx.addNodeAttribute(ATTR_REBALANCE_POOL_SIZE, configuration().getRebalanceThreadPoolSize()); ctx.addNodeAttribute(ATTR_DATA_STREAMER_POOL_SIZE, configuration().getDataStreamerThreadPoolSize()); + ctx.addNodeAttribute(ATTR_USE_POOL_FOR_LAZY_QUERIES, IgniteSystemProperties.getBoolean(IGNITE_USE_POOL_FOR_LAZY_QUERIES)); final String[] incProps = cfg.getIncludeProperties(); @@ -2565,10 +2613,6 @@ private void ackRebalanceConfiguration() throws IgniteCheckedException { U.warn(log, "Setting the rebalance pool size has no effect on the client mode"); } else { - if (cfg.getSystemThreadPoolSize() <= cfg.getRebalanceThreadPoolSize()) - throw new IgniteCheckedException("Rebalance thread pool size exceed or equals System thread 
pool size. " + - "Change IgniteConfiguration.rebalanceThreadPoolSize property before next start."); - if (cfg.getRebalanceThreadPoolSize() < 1) throw new IgniteCheckedException("Rebalance thread pool size minimal allowed value is 1. " + "Change IgniteConfiguration.rebalanceThreadPoolSize property before next start."); @@ -3566,6 +3610,7 @@ public IgniteInternalFuture getOrCreateCacheAsync(String cacheName, String te Ignition.stop(igniteInstanceName, true); } + /** {@inheritDoc} */ @Override public Affinity affinity(String cacheName) { CU.validateCacheName(cacheName); checkClusterState(); @@ -4086,6 +4131,9 @@ private static T createComponent(Class cls, GridKer if (cls.equals(IGridClusterStateProcessor.class)) return (T)new GridClusterStateProcessor(ctx); + if(cls.equals(GridSecurityProcessor.class)) + return null; + Class implCls = null; try { @@ -4279,6 +4327,24 @@ void waitPreviousReconnect() { ctx.cluster().get().clearNodeMap(); } + /** {@inheritDoc} */ + @Override public boolean readOnlyMode() { + return ctx.state().publicApiReadOnlyMode(); + } + + /** {@inheritDoc} */ + @Override public void readOnlyMode(boolean readOnly) { + ctx.state().changeGlobalState(readOnly); + } + + /** {@inheritDoc} */ + @Override public long getReadOnlyModeDuration() { + if (ctx.state().publicApiReadOnlyMode()) + return U.currentTimeMillis() - ctx.state().readOnlyModeStateChangeTime(); + else + return 0; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(IgniteKernal.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java index 4c23dd5a24397..8d992a870ade1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java @@ -241,6 +241,9 @@ private void send0(@Nullable Object topic, Collection msgs, boolean async) 
th false, prj.predicate())); } + catch (IgniteCheckedException e) { + throw U.convertException(e); + } finally { unguard(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java index da791fd84813f..802e6262cf0e4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java @@ -211,6 +211,8 @@ public final class IgniteNodeAttributes { /** Supported features. */ public static final String ATTR_IGNITE_FEATURES = ATTR_PREFIX + ".features"; + /** */ + public static final String ATTR_USE_POOL_FOR_LAZY_QUERIES = ATTR_PREFIX + ".query.lazy.usepool"; /** * Enforces singleton. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java index 641dde3cf34d9..ca76464009cb2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java @@ -17,6 +17,9 @@ package org.apache.ignite.internal; +import javax.management.JMException; +import javax.management.MBeanServer; +import javax.management.ObjectName; import java.io.File; import java.io.IOException; import java.io.InputStream; @@ -46,9 +49,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Handler; -import javax.management.JMException; -import javax.management.MBeanServer; -import javax.management.ObjectName; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; @@ -84,6 +84,7 @@ import org.apache.ignite.internal.util.GridConcurrentHashSet; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.StripedExecutor; +import 
org.apache.ignite.internal.util.TimeBag; import org.apache.ignite.internal.util.spring.IgniteSpringHelper; import org.apache.ignite.internal.util.typedef.CA; import org.apache.ignite.internal.util.typedef.F; @@ -125,6 +126,7 @@ import org.apache.ignite.thread.IgniteThreadPoolExecutor; import org.jetbrains.annotations.Nullable; +import static java.util.stream.Collectors.joining; import static org.apache.ignite.IgniteState.STARTED; import static org.apache.ignite.IgniteState.STOPPED; import static org.apache.ignite.IgniteState.STOPPED_ON_FAILURE; @@ -138,6 +140,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_RESTART_CODE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_SUCCESS_FILE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT; +import static org.apache.ignite.IgniteSystemProperties.getLong; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; import static org.apache.ignite.cache.CacheMode.REPLICATED; import static org.apache.ignite.cache.CacheRebalanceMode.SYNC; @@ -1614,6 +1617,9 @@ private static final class IgniteNamedInstance { /** Query executor service. */ private ThreadPoolExecutor schemaExecSvc; + /** Rebalance executor service. */ + private ThreadPoolExecutor rebalanceExecSvc; + /** Executor service. */ private Map customExecSvcs; @@ -1726,7 +1732,17 @@ synchronized void start(GridStartContext startCtx) throws IgniteCheckedException try { starterThread = Thread.currentThread(); - start0(startCtx); + IgniteConfiguration myCfg = initializeConfiguration( + startCtx.config() != null ? 
startCtx.config() : new IgniteConfiguration() + ); + + TimeBag startNodeTimer = new TimeBag(TimeUnit.MILLISECONDS); + + start0(startCtx, myCfg, startNodeTimer); + + if (log.isInfoEnabled()) + log.info("Node started : " + + startNodeTimer.stagesTimings().stream().collect(joining(",", "[", "]"))); } catch (Exception e) { if (log != null) @@ -1747,27 +1763,24 @@ synchronized void start(GridStartContext startCtx) throws IgniteCheckedException * @throws IgniteCheckedException If start failed. */ @SuppressWarnings({"unchecked", "TooBroadScope"}) - private void start0(GridStartContext startCtx) throws IgniteCheckedException { + private void start0(GridStartContext startCtx, IgniteConfiguration cfg, TimeBag startTimer) + throws IgniteCheckedException { assert grid == null : "Grid is already started: " + name; - IgniteConfiguration cfg = startCtx.config() != null ? startCtx.config() : new IgniteConfiguration(); - - IgniteConfiguration myCfg = initializeConfiguration(cfg); - // Set configuration URL, if any, into system property. if (startCtx.configUrl() != null) System.setProperty(IGNITE_CONFIG_URL, startCtx.configUrl().toString()); // Ensure that SPIs support multiple grid instances, if required. 
if (!startCtx.single()) { - ensureMultiInstanceSupport(myCfg.getDeploymentSpi()); - ensureMultiInstanceSupport(myCfg.getCommunicationSpi()); - ensureMultiInstanceSupport(myCfg.getDiscoverySpi()); - ensureMultiInstanceSupport(myCfg.getCheckpointSpi()); - ensureMultiInstanceSupport(myCfg.getEventStorageSpi()); - ensureMultiInstanceSupport(myCfg.getCollisionSpi()); - ensureMultiInstanceSupport(myCfg.getFailoverSpi()); - ensureMultiInstanceSupport(myCfg.getLoadBalancingSpi()); + ensureMultiInstanceSupport(cfg.getDeploymentSpi()); + ensureMultiInstanceSupport(cfg.getCommunicationSpi()); + ensureMultiInstanceSupport(cfg.getDiscoverySpi()); + ensureMultiInstanceSupport(cfg.getCheckpointSpi()); + ensureMultiInstanceSupport(cfg.getEventStorageSpi()); + ensureMultiInstanceSupport(cfg.getCollisionSpi()); + ensureMultiInstanceSupport(cfg.getFailoverSpi()); + ensureMultiInstanceSupport(cfg.getLoadBalancingSpi()); } validateThreadPoolSize(cfg.getPublicThreadPoolSize(), "public"); @@ -1830,11 +1843,9 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { new IgniteException(S.toString(GridWorker.class, deadWorker)))); } }, - IgniteSystemProperties.getLong(IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT, - cfg.getSystemWorkerBlockedTimeout() != null - ? 
cfg.getSystemWorkerBlockedTimeout() - : cfg.getFailureDetectionTimeout()), - log); + getLong(IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT, cfg.getSystemWorkerBlockedTimeout()), + log + ); stripedExecSvc = new StripedExecutor( cfg.getStripedPoolSize(), @@ -1917,16 +1928,18 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { cfg.getAsyncCallbackPoolSize(), cfg.getIgniteInstanceName(), "callback", - oomeHnd); + oomeHnd, + false, + 0); - if (myCfg.getConnectorConfiguration() != null) { - validateThreadPoolSize(myCfg.getConnectorConfiguration().getThreadPoolSize(), "connector"); + if (cfg.getConnectorConfiguration() != null) { + validateThreadPoolSize(cfg.getConnectorConfiguration().getThreadPoolSize(), "connector"); restExecSvc = new IgniteThreadPoolExecutor( "rest", - myCfg.getIgniteInstanceName(), - myCfg.getConnectorConfiguration().getThreadPoolSize(), - myCfg.getConnectorConfiguration().getThreadPoolSize(), + cfg.getIgniteInstanceName(), + cfg.getConnectorConfiguration().getThreadPoolSize(), + cfg.getConnectorConfiguration().getThreadPoolSize(), DFLT_THREAD_KEEP_ALIVE_TIME, new LinkedBlockingQueue<>(), GridIoPolicy.UNDEFINED, @@ -1936,14 +1949,14 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { restExecSvc.allowCoreThreadTimeOut(true); } - validateThreadPoolSize(myCfg.getUtilityCacheThreadPoolSize(), "utility cache"); + validateThreadPoolSize(cfg.getUtilityCacheThreadPoolSize(), "utility cache"); utilityCacheExecSvc = new IgniteThreadPoolExecutor( "utility", cfg.getIgniteInstanceName(), - myCfg.getUtilityCacheThreadPoolSize(), - myCfg.getUtilityCacheThreadPoolSize(), - myCfg.getUtilityCacheKeepAliveTime(), + cfg.getUtilityCacheThreadPoolSize(), + cfg.getUtilityCacheThreadPoolSize(), + cfg.getUtilityCacheKeepAliveTime(), new LinkedBlockingQueue<>(), GridIoPolicy.UTILITY_CACHE_POOL, oomeHnd); @@ -2003,6 +2016,18 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { 
schemaExecSvc.allowCoreThreadTimeOut(true); + validateThreadPoolSize(cfg.getRebalanceThreadPoolSize(), "rebalance"); + + rebalanceExecSvc = new IgniteThreadPoolExecutor( + "rebalance", + cfg.getIgniteInstanceName(), + cfg.getRebalanceThreadPoolSize(), + cfg.getRebalanceThreadPoolSize(), + DFLT_THREAD_KEEP_ALIVE_TIME, + new LinkedBlockingQueue<>(), + GridIoPolicy.UNDEFINED, + oomeHnd); + if (!F.isEmpty(cfg.getExecutorConfiguration())) { validateCustomExecutorsConfiguration(cfg.getExecutorConfiguration()); @@ -2024,7 +2049,7 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { } // Register Ignite MBean for current grid instance. - registerFactoryMbean(myCfg.getMBeanServer()); + registerFactoryMbean(cfg.getMBeanServer()); boolean started = false; @@ -2034,8 +2059,10 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { // Init here to make grid available to lifecycle listeners. grid = grid0; + startTimer.finishGlobalStage("Configure system pool"); + grid0.start( - myCfg, + cfg, utilityCacheExecSvc, execSvc, svcExecSvc, @@ -2051,6 +2078,7 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { callbackExecSvc, qryExecSvc, schemaExecSvc, + rebalanceExecSvc, customExecSvcs, new CA() { @Override public void apply() { @@ -2058,7 +2086,8 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { } }, workerRegistry, - oomeHnd + oomeHnd, + startTimer ); state = STARTED; @@ -2171,8 +2200,10 @@ private IgniteConfiguration initializeConfiguration(IgniteConfiguration cfg) // If user provided IGNITE_HOME - set it as a system property. U.setIgniteHome(ggHome); + String userProvidedWorkDir = cfg.getWorkDirectory(); + // Correctly resolve work directory and set it back to configuration. 
- String workDir = U.workDirectory(cfg.getWorkDirectory(), ggHome); + String workDir = U.workDirectory(userProvidedWorkDir, ggHome); myCfg.setWorkDirectory(workDir); @@ -2196,6 +2227,9 @@ private IgniteConfiguration initializeConfiguration(IgniteConfiguration cfg) myCfg.setGridLogger(cfgLog); + if(F.isEmpty(userProvidedWorkDir) && F.isEmpty(U.IGNITE_WORK_DIR)) + log.warning("Ignite work directory is not provided, automatically resolved to: " + workDir); + // Check Ignite home folder (after log is available). if (ggHome != null) { File ggHomeFile = new File(ggHome); @@ -2678,6 +2712,10 @@ private void stopExecutors0(IgniteLogger log) { schemaExecSvc = null; + U.shutdownNow(getClass(), rebalanceExecSvc, log); + + rebalanceExecSvc = null; + U.shutdownNow(getClass(), stripedExecSvc, log); stripedExecSvc = null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java index 58b58672faa9d..916f5c10d990c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java @@ -99,6 +99,26 @@ public TransactionMetricsMxBeanImpl(TransactionMetrics transactionMetrics) { @Override public long getOwnerTransactionsNumber() { return transactionMetrics.getOwnerTransactionsNumber(); } + + /** {@inheritDoc} */ + @Override public long getTotalNodeSystemTime() { + return transactionMetrics.getTotalNodeSystemTime(); + } + + /** {@inheritDoc} */ + @Override public long getTotalNodeUserTime() { + return transactionMetrics.getTotalNodeUserTime(); + } + + /** {@inheritDoc} */ + @Override public String getNodeSystemTimeHistogram() { + return transactionMetrics.getNodeSystemTimeHistogram(); + } + + /** {@inheritDoc} */ + @Override public String getNodeUserTimeHistogram() { + return transactionMetrics.getNodeUserTimeHistogram(); + } } 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java index 1969d292cd70b..f4304396eb9b9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java @@ -141,6 +141,36 @@ else if ("servers".equals(prj)) ctx.cache().setTxOwnerDumpRequestsAllowed(allowed); } + /** {@inheritDoc} */ + @Override public long getLongTransactionTimeDumpThreshold() { + return ctx.cache().context().tm().longTransactionTimeDumpThreshold(); + } + + /** {@inheritDoc} */ + @Override public void setLongTransactionTimeDumpThreshold(long threshold) { + ctx.cache().longTransactionTimeDumpThreshold(threshold); + } + + /** {@inheritDoc} */ + @Override public double getTransactionTimeDumpSamplesCoefficient() { + return ctx.cache().context().tm().transactionTimeDumpSamplesCoefficient(); + } + + /** {@inheritDoc} */ + @Override public void setTransactionTimeDumpSamplesCoefficient(double coefficient) { + ctx.cache().transactionTimeDumpSamplesCoefficient(coefficient); + } + + /** {@inheritDoc} */ + @Override public int getTransactionTimeDumpSamplesPerSecondLimit() { + return ctx.cache().context().tm().transactionTimeDumpSamplesPerSecondLimit(); + } + + /** {@inheritDoc} */ + @Override public void setTransactionTimeDumpSamplesPerSecondLimit(int limit) { + ctx.cache().longTransactionTimeDumpSamplesPerSecondLimit(limit); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(TransactionsMXBeanImpl.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java index 7f86391d52748..e23b4f42f50ac 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java @@ -404,6 +404,20 @@ boolean isEnum() { return mode == BinaryWriteMode.ENUM; } + /** + * @return {@code True} if the type is registered as an OBJECT. + */ + boolean isObject() { + return mode == BinaryWriteMode.OBJECT; + } + + /** + * @return {@code True} if the type is registered as a BINARY object. + */ + boolean isBinary() { + return mode == BinaryWriteMode.BINARY; + } + /** * @return Described class. */ @@ -915,6 +929,56 @@ Object read(BinaryReaderExImpl reader) throws BinaryObjectException { } } + /** + * @return A copy of this {@code BinaryClassDescriptor} marked as registered. + */ + BinaryClassDescriptor makeRegistered() { + if (registered) + return this; + else + return new BinaryClassDescriptor(ctx, + cls, + userType, + typeId, + typeName, + affKeyFieldName, + mapper, + initialSerializer, + stableFieldsMeta != null, + true); + } + + /** + * @return Instance of {@link BinaryMetadata} for this type. + */ + BinaryMetadata metadata() { + return new BinaryMetadata( + typeId, + typeName, + stableFieldsMeta, + affKeyFieldName, + null, + isEnum(), + cls.isEnum() ? enumMap(cls) : null); + } + + /** + * @param cls Enum class. + * @return Enum name to ordinal mapping. + */ + private static Map enumMap(Class cls) { + assert cls.isEnum(); + + Object[] enumVals = cls.getEnumConstants(); + + Map enumMap = new LinkedHashMap<>(enumVals.length); + + for (Object enumVal : enumVals) + enumMap.put(((Enum)enumVal).name(), ((Enum)enumVal).ordinal()); + + return enumMap; + } + /** * Pre-write phase. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java index c263def8aae75..ca792f2c1c317 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java @@ -48,10 +48,6 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.internal.UnregisteredBinaryTypeException; -import org.apache.ignite.internal.UnregisteredClassException; -import org.apache.ignite.internal.processors.marshaller.MappingExchangeResult; -import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.binary.BinaryBasicIdMapper; import org.apache.ignite.binary.BinaryBasicNameMapper; import org.apache.ignite.binary.BinaryIdMapper; @@ -69,6 +65,8 @@ import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.igfs.IgfsPath; import org.apache.ignite.internal.DuplicateTypeIdException; +import org.apache.ignite.internal.UnregisteredBinaryTypeException; +import org.apache.ignite.internal.UnregisteredClassException; import org.apache.ignite.internal.marshaller.optimized.OptimizedMarshaller; import org.apache.ignite.internal.processors.cache.binary.BinaryMetadataKey; import org.apache.ignite.internal.processors.closure.GridClosureProcessor; @@ -109,10 +107,12 @@ import org.apache.ignite.internal.processors.igfs.meta.IgfsMetaFileUnlockProcessor; import org.apache.ignite.internal.processors.igfs.meta.IgfsMetaUpdatePropertiesProcessor; import org.apache.ignite.internal.processors.igfs.meta.IgfsMetaUpdateTimesProcessor; +import org.apache.ignite.internal.processors.marshaller.MappingExchangeResult; import org.apache.ignite.internal.processors.platform.PlatformJavaObjectFactoryProxy; import 
org.apache.ignite.internal.processors.platform.websession.PlatformDotNetSessionData; import org.apache.ignite.internal.processors.platform.websession.PlatformDotNetSessionLockResult; import org.apache.ignite.internal.processors.query.QueryUtils; +import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.lang.GridMapEntry; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; @@ -121,6 +121,7 @@ import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.marshaller.MarshallerContext; import org.apache.ignite.marshaller.MarshallerUtils; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.MarshallerPlatformIds.JAVA_ID; @@ -612,76 +613,112 @@ else if (cpElement.isFile()) { } /** - * @param cls Class. + * Attempts registration of the provided class. If the type is already registered, then an existing descriptor is + * returned. + * + * @param cls Class to register. + * @param registerMeta If {@code true}, then metadata will be registered along with the class descriptor. * @param failIfUnregistered Throw exception if class isn't registered. * @return Class descriptor. * @throws BinaryObjectException In case of error. 
*/ - public BinaryClassDescriptor descriptorForClass(Class cls, boolean deserialize, boolean failIfUnregistered) - throws BinaryObjectException { + @NotNull public BinaryClassDescriptor registerClass( + Class cls, + boolean registerMeta, + boolean failIfUnregistered + ) throws BinaryObjectException { assert cls != null; - BinaryClassDescriptor desc = descByCls.get(cls); + BinaryClassDescriptor desc = descriptorForClass(cls); - if (desc == null) { + if (!desc.registered()) { if (failIfUnregistered) throw new UnregisteredClassException(cls); - - desc = registerClassDescriptor(cls, deserialize); + else + desc = registerDescriptor(desc, registerMeta); } - else if (!desc.registered()) { - if (!desc.userType()) { - BinaryClassDescriptor desc0 = new BinaryClassDescriptor( - this, - desc.describedClass(), - false, - desc.typeId(), - desc.typeName(), - desc.affFieldKeyName(), - desc.mapper(), - desc.initialSerializer(), - false, - true - ); - - if (descByCls.replace(cls, desc, desc0)) { - Collection schemas = - desc0.schema() != null ? Collections.singleton(desc.schema()) : null; - - BinaryMetadata meta = new BinaryMetadata(desc0.typeId(), - desc0.typeName(), - desc0.fieldsMeta(), - desc0.affFieldKeyName(), - schemas, desc0.isEnum(), - cls.isEnum() ? enumMap(cls) : null); - - metaHnd.addMeta(desc0.typeId(), meta.wrap(this), false); - - return desc0; - } - } - else { - if (failIfUnregistered) - throw new UnregisteredClassException(cls); - desc = registerUserClassDescriptor(desc); - } + return desc; + } + + /** + * @param cls Class. + * @return A descriptor for the given class. If the class hasn't been registered yet, then a new descriptor will be + * created, but its {@link BinaryClassDescriptor#registered()} will be {@code false}. 
+ */ + @NotNull BinaryClassDescriptor descriptorForClass(Class cls) { + assert cls != null; + + BinaryClassDescriptor desc = descByCls.get(cls); + + if (desc != null) + return desc; + else + return createDescriptorForClass(cls); + } + + /** + * @param cls Class to create a descriptor for. + * @return A descriptor for the given class. The descriptor needs to be registered in order to be used. + */ + @NotNull private BinaryClassDescriptor createDescriptorForClass(Class cls) { + String clsName = cls.getName(); + + if (marshCtx.isSystemType(clsName)) { + BinarySerializer serializer = null; + + if (BINARYLIZABLE_SYS_CLSS.contains(clsName)) + serializer = new BinaryReflectiveSerializer(); + + return new BinaryClassDescriptor(this, + cls, + false, + clsName.hashCode(), + clsName, + null, + SIMPLE_NAME_LOWER_CASE_MAPPER, + serializer, + false, + false + ); } + else { + BinaryInternalMapper mapper = userTypeMapper(clsName); - return desc; + final String typeName = mapper.typeName(clsName); + + final int typeId = mapper.typeId(clsName); + + BinarySerializer serializer = serializerForClass(cls); + + String affFieldName = affinityFieldName(cls); + + return new BinaryClassDescriptor(this, + cls, + true, + typeId, + typeName, + affFieldName, + mapper, + serializer, + true, + false + ); + } } /** * @param userType User type or not. * @param typeId Type ID. * @param ldr Class loader. + * @param registerMeta If {@code true}, then metadata will be registered along with the type descriptor. * @return Class descriptor. */ public BinaryClassDescriptor descriptorForTypeId( boolean userType, int typeId, ClassLoader ldr, - boolean deserialize + boolean registerMeta ) { assert typeId != GridBinaryMarshaller.UNREGISTERED_TYPE_ID; @@ -703,21 +740,21 @@ public BinaryClassDescriptor descriptorForTypeId( } catch (ClassNotFoundException e) { // Class might have been loaded by default class loader. 
- if (userType && !ldr.equals(sysLdr) && (desc = descriptorForTypeId(true, typeId, sysLdr, deserialize)) != null) + if (userType && !ldr.equals(sysLdr) && (desc = descriptorForTypeId(true, typeId, sysLdr, registerMeta)) != null) return desc; throw new BinaryInvalidTypeException(e); } catch (IgniteCheckedException e) { // Class might have been loaded by default class loader. - if (userType && !ldr.equals(sysLdr) && (desc = descriptorForTypeId(true, typeId, sysLdr, deserialize)) != null) + if (userType && !ldr.equals(sysLdr) && (desc = descriptorForTypeId(true, typeId, sysLdr, registerMeta)) != null) return desc; throw new BinaryObjectException("Failed resolve class for ID: " + typeId, e); } if (desc == null) { - desc = registerClassDescriptor(cls, deserialize); + desc = registerClass(cls, registerMeta, false); assert desc.typeId() == typeId : "Duplicate typeId [typeId=" + typeId + ", cls=" + cls + ", desc=" + desc + "]"; @@ -727,125 +764,63 @@ public BinaryClassDescriptor descriptorForTypeId( } /** - * Creates and registers {@link BinaryClassDescriptor} for the given {@code class}. + * Attempts registration of the provided {@link BinaryClassDescriptor} in the cluster. * - * @param cls Class. - * @return Class descriptor. + * @param desc Class descriptor to register. + * @param registerMeta If {@code true}, then metadata will be registered along with the class descriptor. + * @return Registered class descriptor. 
*/ - private BinaryClassDescriptor registerClassDescriptor(Class cls, boolean deserialize) { - BinaryClassDescriptor desc; - - String clsName = cls.getName(); - - if (marshCtx.isSystemType(clsName)) { - BinarySerializer serializer = null; - - if (BINARYLIZABLE_SYS_CLSS.contains(clsName)) - serializer = new BinaryReflectiveSerializer(); - - desc = new BinaryClassDescriptor(this, - cls, - false, - clsName.hashCode(), - clsName, - null, - SIMPLE_NAME_LOWER_CASE_MAPPER, - serializer, - false, - true /* registered */ - ); + @NotNull public BinaryClassDescriptor registerDescriptor( + BinaryClassDescriptor desc, + boolean registerMeta + ) { + if (desc.userType()) + return registerUserClassDescriptor(desc, registerMeta); + else { + BinaryClassDescriptor regDesc = desc.makeRegistered(); - BinaryClassDescriptor old = descByCls.putIfAbsent(cls, desc); + BinaryClassDescriptor old = descByCls.putIfAbsent(desc.describedClass(), regDesc); - if (old != null) - desc = old; + return old != null + ? old + : regDesc; } - else - desc = registerUserClassDescriptor(cls, deserialize); - - return desc; } /** - * Creates and registers {@link BinaryClassDescriptor} for the given user {@code class}. + * Attempts registration of the provided {@link BinaryClassDescriptor} in the cluster. The provided descriptor should correspond + * to a user class. * - * @param cls Class. + * @param desc Class descriptor to register. + * @param registerMeta If {@code true}, then metadata will be registered along with the class descriptor. * @return Class descriptor. 
*/ - private BinaryClassDescriptor registerUserClassDescriptor(Class cls, boolean deserialize) { - boolean registered; - - final String clsName = cls.getName(); - - BinaryInternalMapper mapper = userTypeMapper(clsName); - - final String typeName = mapper.typeName(clsName); - - final int typeId = mapper.typeId(clsName); - - registered = registerUserClassName(typeId, cls.getName(), false); - - BinarySerializer serializer = serializerForClass(cls); - - String affFieldName = affinityFieldName(cls); - - BinaryClassDescriptor desc = new BinaryClassDescriptor(this, - cls, - true, - typeId, - typeName, - affFieldName, - mapper, - serializer, - true, - registered - ); + @NotNull private BinaryClassDescriptor registerUserClassDescriptor( + BinaryClassDescriptor desc, + boolean registerMeta + ) { + assert desc.userType() : "The descriptor doesn't correspond to a user class."; - if (!deserialize) - metaHnd.addMeta(typeId, new BinaryMetadata(typeId, typeName, desc.fieldsMeta(), affFieldName, null, - desc.isEnum(), cls.isEnum() ? enumMap(cls) : null).wrap(this), false); + Class cls = desc.describedClass(); - descByCls.put(cls, desc); + int typeId = desc.typeId(); - typeId2Mapper.putIfAbsent(typeId, mapper); - - return desc; - } - - /** - * Creates and registers {@link BinaryClassDescriptor} for the given user {@code class}. - * - * @param desc Old descriptor that should be re-registered. - * @return Class descriptor. 
- */ - private BinaryClassDescriptor registerUserClassDescriptor(BinaryClassDescriptor desc) { - boolean registered; - - registered = registerUserClassName(desc.typeId(), desc.describedClass().getName(), false); + boolean registered = registerUserClassName(typeId, cls.getName(), false); if (registered) { - BinarySerializer serializer = desc.initialSerializer(); + BinaryClassDescriptor regDesc = desc.makeRegistered(); - if (serializer == null) - serializer = serializerForClass(desc.describedClass()); + if (registerMeta) + metaHnd.addMeta(typeId, regDesc.metadata().wrap(this), false); - desc = new BinaryClassDescriptor( - this, - desc.describedClass(), - true, - desc.typeId(), - desc.typeName(), - desc.affFieldKeyName(), - desc.mapper(), - serializer, - true, - true - ); + descByCls.put(cls, regDesc); - descByCls.put(desc.describedClass(), desc); - } + typeId2Mapper.putIfAbsent(typeId, regDesc.mapper()); - return desc; + return regDesc; + } + else + return desc; } /** @@ -1188,7 +1163,7 @@ public void registerUserTypesSchema() { /** * Register "type ID to class name" mapping on all nodes to allow for mapping requests resolution form client. * Other {@link BinaryContext}'s "register" methods and method - * {@link BinaryContext#descriptorForClass(Class, boolean, boolean)} already call this functionality + * {@link BinaryContext#registerClass(Class, boolean, boolean)} already call this functionality * so use this method only when registering class names whose {@link Class} is unknown. * * @param typeId Type ID. @@ -1442,24 +1417,6 @@ public void onUndeploy(ClassLoader ldr) { U.clearClassCache(ldr); } - /** - * - * @param cls Class - * @return Enum name to ordinal mapping. 
- */ - private static Map enumMap(Class cls) { - assert cls.isEnum(); - - Object[] enumVals = cls.getEnumConstants(); - - Map enumMap = new LinkedHashMap<>(enumVals.length); - - for (Object enumVal : enumVals) - enumMap.put(((Enum)enumVal).name(), ((Enum)enumVal).ordinal()); - - return enumMap; - } - /** * Type descriptors. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java index 275169561fd56..96d0551fa96da 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java @@ -176,7 +176,7 @@ public BinaryEnumObjectImpl(BinaryContext ctx, byte[] arr) { /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public T deserialize() throws BinaryObjectException { - Class cls = BinaryUtils.resolveClass(ctx, typeId, clsName, ctx.configuration().getClassLoader(), true); + Class cls = BinaryUtils.resolveClass(ctx, typeId, clsName, ctx.configuration().getClassLoader(), false); return (T)BinaryEnumCache.get(cls, ord); } @@ -430,13 +430,4 @@ public BinaryEnumObjectImpl(BinaryContext ctx, byte[] arr) { return reader.afterMessageRead(BinaryEnumObjectImpl.class); } - - /** - * @param cls type to examine. - * @return true if typeId equals for passed type and current - * binary enum. 
- */ - public boolean isTypeEquals(final Class cls) { - return ctx.descriptorForClass(cls, false, false).typeId() == typeId(); - } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java index de0b2d0d3d3cc..acd3678d50d88 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java @@ -26,6 +26,7 @@ import java.util.Date; import java.util.UUID; import org.apache.ignite.binary.BinaryObjectException; +import org.apache.ignite.binary.BinaryType; import org.apache.ignite.internal.binary.streams.BinaryByteBufferInputStream; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.internal.S; @@ -33,6 +34,7 @@ import org.apache.ignite.binary.BinaryField; import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Objects.nonNull; /** * Implementation of binary field descriptor. 
@@ -282,9 +284,28 @@ public int fieldId() { */ public int fieldOrder(BinaryObjectExImpl obj) { if (typeId != obj.typeId()) { - throw new BinaryObjectException("Failed to get field because type ID of passed object differs" + - " from type ID this " + BinaryField.class.getSimpleName() + " belongs to [expected=" + typeId + - ", actual=" + obj.typeId() + ']'); + BinaryType expType = ctx.metadata(typeId); + BinaryType actualType = obj.type(); + + String actualTypeName = null; + + Exception actualTypeNameEx = null; + + try { + actualTypeName = actualType.typeName(); + } + catch (BinaryObjectException e) { + actualTypeNameEx = new BinaryObjectException("Failed to get actual binary type name.", e); + } + + throw new BinaryObjectException( + "Failed to get field because type ID of passed object differs from type ID this " + + BinaryField.class.getSimpleName() + " belongs to [expected=[typeId=" + typeId + ", typeName=" + + (nonNull(expType) ? expType.typeName() : null) + "], actual=[typeId=" + actualType.typeId() + + ", typeName=" + actualTypeName + "], fieldId=" + fieldId + ", fieldName=" + fieldName + + ", fieldType=" + (nonNull(expType) ? 
expType.fieldTypeName(fieldName) : null) + ']', + actualTypeNameEx + ); } int schemaId = obj.schemaId(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java index 65fb349e84d9e..243c21f6a278b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectImpl.java @@ -189,7 +189,11 @@ public BinaryObjectImpl(BinaryContext ctx, byte[] arr, int start) { /** {@inheritDoc} */ @Override public void finishUnmarshal(CacheObjectValueContext ctx, ClassLoader ldr) throws IgniteCheckedException { - this.ctx = ((CacheObjectBinaryProcessorImpl)ctx.kernalContext().cacheObjects()).binaryContext(); + CacheObjectBinaryProcessorImpl binaryProc = (CacheObjectBinaryProcessorImpl)ctx.kernalContext().cacheObjects(); + + this.ctx = binaryProc.binaryContext(); + + binaryProc.waitMetadataWriteIfNeeded(typeId()); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryReaderExImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryReaderExImpl.java index 601141c306416..10d7569fcb804 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryReaderExImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryReaderExImpl.java @@ -265,7 +265,7 @@ public BinaryReaderExImpl(BinaryContext ctx, if (forUnmarshal) { // Registers class by type ID, at least locally if the cache is not ready yet. 
- desc = ctx.descriptorForClass(BinaryUtils.doReadClass(in, ctx, ldr, typeId0), false, false); + desc = ctx.registerClass(BinaryUtils.doReadClass(in, ctx, ldr, typeId0), true, false); typeId = desc.typeId(); } @@ -314,7 +314,7 @@ public BinaryInputStream in() { */ BinaryClassDescriptor descriptor() { if (desc == null) - desc = ctx.descriptorForTypeId(userType, typeId, ldr, true); + desc = ctx.descriptorForTypeId(userType, typeId, ldr, false); return desc; } @@ -1754,7 +1754,7 @@ private String fieldFlagName(byte flag) { case OBJ: if (desc == null) - desc = ctx.descriptorForTypeId(userType, typeId, ldr, true); + desc = ctx.descriptorForTypeId(userType, typeId, ldr, false); streamPosition(dataStart); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryUtils.java index 77dce5602ec4e..0086b73fdabaa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryUtils.java @@ -1637,7 +1637,7 @@ public static Class doReadClass(BinaryInputStream in, BinaryContext ctx, ClassLo Class cls; if (typeId != GridBinaryMarshaller.UNREGISTERED_TYPE_ID) - cls = ctx.descriptorForTypeId(true, typeId, ldr, true).describedClass(); + cls = ctx.descriptorForTypeId(true, typeId, ldr, false).describedClass(); else { String clsName = doReadClassName(in); @@ -1648,8 +1648,7 @@ public static Class doReadClass(BinaryInputStream in, BinaryContext ctx, ClassLo throw new BinaryInvalidTypeException("Failed to load the class: " + clsName, e); } - // forces registering of class by type id, at least locally - ctx.descriptorForClass(cls, true, false); + ctx.registerClass(cls, false, false); } return cls; @@ -1665,11 +1664,11 @@ public static Class doReadClass(BinaryInputStream in, BinaryContext ctx, ClassLo * @return Resovled class. 
*/ public static Class resolveClass(BinaryContext ctx, int typeId, @Nullable String clsName, - @Nullable ClassLoader ldr, boolean deserialize) { + @Nullable ClassLoader ldr, boolean registerMeta) { Class cls; if (typeId != GridBinaryMarshaller.UNREGISTERED_TYPE_ID) - cls = ctx.descriptorForTypeId(true, typeId, ldr, deserialize).describedClass(); + cls = ctx.descriptorForTypeId(true, typeId, ldr, registerMeta).describedClass(); else { try { cls = U.forName(clsName, ldr); @@ -1678,8 +1677,7 @@ public static Class resolveClass(BinaryContext ctx, int typeId, @Nullable String throw new BinaryInvalidTypeException("Failed to load the class: " + clsName, e); } - // forces registering of class by type id, at least locally - ctx.descriptorForClass(cls, true, false); + ctx.registerClass(cls, false, false); } return cls; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java index e6efb0c509a52..be66fc48c8878 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java @@ -33,6 +33,7 @@ import org.apache.ignite.binary.BinaryObjectException; import org.apache.ignite.binary.BinaryRawWriter; import org.apache.ignite.binary.BinaryWriter; +import org.apache.ignite.internal.UnregisteredClassException; import org.apache.ignite.internal.binary.streams.BinaryHeapOutputStream; import org.apache.ignite.internal.binary.streams.BinaryOutputStream; import org.apache.ignite.internal.util.IgniteUtils; @@ -178,10 +179,18 @@ private void marshal0(Object obj, boolean enableReplace) throws BinaryObjectExce Class cls = obj.getClass(); - BinaryClassDescriptor desc = ctx.descriptorForClass(cls, false, failIfUnregistered); + BinaryClassDescriptor desc = ctx.descriptorForClass(cls); - if (desc == null) - throw new BinaryObjectException("Object is 
not binary: [class=" + cls + ']'); + if (!desc.registered()) { + if (failIfUnregistered) + throw new UnregisteredClassException(cls); + else { + // Metadata is registered for OBJECT and BINARY during actual writing. + boolean registerMeta = !(desc.isObject() || desc.isBinary()); + + desc = ctx.registerDescriptor(desc, registerMeta); + } + } if (desc.excluded()) { out.writeByte(GridBinaryMarshaller.NULL); @@ -743,9 +752,9 @@ void doWriteObjectArray(@Nullable Object[] val) throws BinaryObjectException { if (tryWriteAsHandle(val)) return; - BinaryClassDescriptor desc = ctx.descriptorForClass( + BinaryClassDescriptor desc = ctx.registerClass( val.getClass().getComponentType(), - false, + true, failIfUnregistered); out.unsafeEnsure(1 + 4); @@ -817,7 +826,7 @@ void doWriteEnum(@Nullable Enum val) { if (val == null) out.writeByte(GridBinaryMarshaller.NULL); else { - BinaryClassDescriptor desc = ctx.descriptorForClass(val.getDeclaringClass(), false, failIfUnregistered); + BinaryClassDescriptor desc = ctx.registerClass(val.getDeclaringClass(), true, failIfUnregistered); out.unsafeEnsure(1 + 4); @@ -870,9 +879,9 @@ void doWriteEnumArray(@Nullable Object[] val) { if (val == null) out.writeByte(GridBinaryMarshaller.NULL); else { - BinaryClassDescriptor desc = ctx.descriptorForClass( + BinaryClassDescriptor desc = ctx.registerClass( val.getClass().getComponentType(), - false, + true, failIfUnregistered); out.unsafeEnsure(1 + 4); @@ -902,7 +911,7 @@ void doWriteClass(@Nullable Class val) { if (val == null) out.writeByte(GridBinaryMarshaller.NULL); else { - BinaryClassDescriptor desc = ctx.descriptorForClass(val, false, failIfUnregistered); + BinaryClassDescriptor desc = ctx.registerClass(val, true, failIfUnregistered); out.unsafeEnsure(1 + 4); @@ -931,7 +940,7 @@ public void doWriteProxy(Proxy proxy, Class[] intfs) { out.unsafeWriteInt(intfs.length); for (Class intf : intfs) { - BinaryClassDescriptor desc = ctx.descriptorForClass(intf, false, failIfUnregistered); + 
BinaryClassDescriptor desc = ctx.registerClass(intf, true, failIfUnregistered); if (desc.registered()) out.writeInt(desc.typeId()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderEnum.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderEnum.java index 3930c463528e2..25f17d573c9da 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderEnum.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderEnum.java @@ -63,7 +63,7 @@ public BinaryBuilderEnum(BinaryBuilderReader reader) { throw new BinaryInvalidTypeException("Failed to load the class: " + clsName, e); } - this.typeId = reader.binaryContext().descriptorForClass(cls, false, false).typeId(); + this.typeId = reader.binaryContext().registerClass(cls, true, false).typeId(); } else { this.typeId = typeId; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java index edc80b6feccef..9e6411fe6824a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java @@ -129,7 +129,7 @@ public void writeValue(BinaryWriterExImpl writer, Object val, boolean forceCol, writer.context().updateMetadata(typeId, meta, writer.failIfUnregistered()); // Need register class for marshaller to be able to deserialize enum value. 
- writer.context().descriptorForClass(((Enum)val).getDeclaringClass(), false, false); + writer.context().registerClass(((Enum)val).getDeclaringClass(), true, false); writer.writeByte(GridBinaryMarshaller.ENUM); writer.writeInt(typeId); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryEnumArrayLazyValue.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryEnumArrayLazyValue.java index c0e79ec760594..eaacbd561b7fd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryEnumArrayLazyValue.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryEnumArrayLazyValue.java @@ -56,7 +56,7 @@ protected BinaryEnumArrayLazyValue(BinaryBuilderReader reader) { throw new BinaryInvalidTypeException("Failed to load the class: " + clsName, e); } - compTypeId = reader.binaryContext().descriptorForClass(cls, true, false).typeId(); + compTypeId = reader.binaryContext().registerClass(cls, false, false).typeId(); } else { compTypeId = typeId; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectArrayLazyValue.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectArrayLazyValue.java index d4882dc6fb462..bd90569ffd961 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectArrayLazyValue.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectArrayLazyValue.java @@ -55,7 +55,7 @@ protected BinaryObjectArrayLazyValue(BinaryBuilderReader reader) { throw new BinaryInvalidTypeException("Failed to load the class: " + clsName, e); } - compTypeId = reader.binaryContext().descriptorForClass(cls, true, false).typeId(); + compTypeId = reader.binaryContext().registerClass(cls, false, false).typeId(); } else { compTypeId = typeId; diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java index d3b0973ca3533..ba36d858436c4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java @@ -158,7 +158,7 @@ public BinaryObjectBuilderImpl(BinaryObjectImpl obj) { throw new BinaryInvalidTypeException("Failed to load the class: " + clsNameToWrite, e); } - this.typeId = ctx.descriptorForClass(cls, false, false).typeId(); + this.typeId = ctx.registerClass(cls, true, false).typeId(); registeredType = false; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientClusterState.java b/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientClusterState.java index 4fa25cec6b8f7..8a029da7e5524 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientClusterState.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/GridClientClusterState.java @@ -18,7 +18,7 @@ package org.apache.ignite.internal.client; /** - * Interface for manage state of grid cluster. + * Interface for manage state of grid cluster and obtain information about it: ID and tag. */ public interface GridClientClusterState { /** @@ -30,4 +30,26 @@ public interface GridClientClusterState { * @return {@code Boolean} - Current cluster state. {@code True} active, {@code False} inactive. */ public boolean active() throws GridClientException; + + /** + * @return {@code True} if the cluster is in read-only mode and {@code False} otherwise. + * @throws GridClientException If request current cluster read-only mode failed. + */ + public boolean readOnly() throws GridClientException; + + /** + * Enable or disable Ignite grid read-only mode. 
+ * + * @param readOnly If {@code True} enable read-only mode. If {@code False} disable read-only mode. + * @throws GridClientException If change of read-only mode is failed. + */ + public void readOnly(boolean readOnly) throws GridClientException; + + /** + * Get the cluster name. + * + * @return The name of the cluster. + * @throws GridClientException If the request to get the cluster name failed. + * */ + String clusterName() throws GridClientException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientClusterStateImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientClusterStateImpl.java index 2dcf06d619822..4c1331cfbcedb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientClusterStateImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientClusterStateImpl.java @@ -55,7 +55,8 @@ public GridClientClusterStateImpl( @Override public void active(final boolean active) throws GridClientException { withReconnectHandling(new ClientProjectionClosure() { @Override public GridClientFuture apply( - GridClientConnection conn, UUID nodeId + GridClientConnection conn, + UUID nodeId ) throws GridClientConnectionResetException, GridClientClosedException { return conn.changeState(active, nodeId); } @@ -64,12 +65,28 @@ public GridClientClusterStateImpl( /** {@inheritDoc} */ @Override public boolean active() throws GridClientException { - return withReconnectHandling(new ClientProjectionClosure() { - @Override public GridClientFuture apply( - GridClientConnection conn, UUID nodeId + return withReconnectHandling(GridClientConnection::currentState).get(); + } + + /** {@inheritDoc} */ + @Override public boolean readOnly() throws GridClientException { + return withReconnectHandling(GridClientConnection::readOnlyState).get(); + } + + /** {@inheritDoc} */ + @Override public void readOnly(boolean readOnly) throws GridClientException { + 
withReconnectHandling(new ClientProjectionClosure() { + @Override public GridClientFuture apply( + GridClientConnection conn, + UUID nodeId ) throws GridClientConnectionResetException, GridClientClosedException { - return conn.currentState(nodeId); + return conn.changeReadOnlyState(readOnly, nodeId); } }).get(); } + + /** {@inheritDoc} */ + @Override public String clusterName() throws GridClientException { + return withReconnectHandling(GridClientConnection::clusterName).get(); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientConnection.java index c75bd24185bf0..306cc1fbf7b30 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientConnection.java @@ -324,6 +324,39 @@ public abstract GridClientFuture changeState(boolean active, UUID destNodeId) public abstract GridClientFuture currentState(UUID destNodeId) throws GridClientClosedException, GridClientConnectionResetException; + /** + * Get current read-only mode status. If future contains {@code True} - read-only mode enabled, and {@code False} + * otherwise. + * + * @param destNodeId Destination node id. + * @throws GridClientConnectionResetException In case of error. + * @throws GridClientClosedException If client was manually closed before request was sent over network. + */ + public abstract GridClientFuture readOnlyState(UUID destNodeId) + throws GridClientClosedException, GridClientConnectionResetException; + + /** + * Change read-only mode. Cluster must be activated. + * + * @param readOnly Read-only mode enabled flag. + * @param destNodeId Destination node id. + * @throws GridClientConnectionResetException In case of error. 
+ * @throws GridClientClosedException If client was manually closed before request was sent over network. + */ + public abstract GridClientFuture changeReadOnlyState(boolean readOnly, UUID destNodeId) + throws GridClientClosedException, GridClientConnectionResetException; + + /** + * Get a cluster name. + * + * @param destNodeId Destination node id. + * @return Future to get the cluster name. + * @throws GridClientConnectionResetException In case of error. + * @throws GridClientClosedException If client was manually closed before request was sent over network. + */ + public abstract GridClientFuture clusterName(UUID destNodeId) + throws GridClientClosedException, GridClientConnectionResetException; + /** * Gets node by node ID. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java index ecc1cdf32154a..a788dddc1f9ce 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java @@ -59,6 +59,8 @@ import org.apache.ignite.internal.client.marshaller.optimized.GridClientZipOptimizedMarshaller; import org.apache.ignite.internal.processors.rest.client.message.GridClientAuthenticationRequest; import org.apache.ignite.internal.processors.rest.client.message.GridClientCacheRequest; +import org.apache.ignite.internal.processors.rest.client.message.GridClientClusterNameRequest; +import org.apache.ignite.internal.processors.rest.client.message.GridClientReadOnlyModeRequest; import org.apache.ignite.internal.processors.rest.client.message.GridClientStateRequest; import org.apache.ignite.internal.processors.rest.client.message.GridClientHandshakeRequest; import 
org.apache.ignite.internal.processors.rest.client.message.GridClientMessage; @@ -816,6 +818,23 @@ private GridClientAuthenticationRequest buildAuthRequest() { return makeRequest(msg, destNodeId); } + /** {@inheritDoc} */ + @Override public GridClientFuture changeReadOnlyState( + boolean readOnly, + UUID destNodeId + ) throws GridClientClosedException, GridClientConnectionResetException { + return readOnly ? + makeRequest(GridClientReadOnlyModeRequest.enableReadOnly(), destNodeId) : + makeRequest(GridClientReadOnlyModeRequest.disableReadOnly(), destNodeId); + } + + /** {@inheritDoc} */ + @Override public GridClientFuture readOnlyState( + UUID destNodeId + ) throws GridClientClosedException, GridClientConnectionResetException { + return makeRequest(GridClientReadOnlyModeRequest.currentReadOnlyMode(), destNodeId); + } + /** {@inheritDoc} */ @Override public GridClientFuture currentState(UUID destNodeId) throws GridClientClosedException, GridClientConnectionResetException { @@ -931,6 +950,12 @@ private GridClientAuthenticationRequest buildAuthRequest() { return res; } + /** {@inheritDoc} */ + @Override public GridClientFuture clusterName(UUID destNodeId) + throws GridClientClosedException, GridClientConnectionResetException { + return makeRequest(new GridClientClusterNameRequest(), destNodeId); + } + /** * Creates client node instance from message. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java index 2c72bb02ed2f5..a3a69e19d9659 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java @@ -30,7 +30,9 @@ import org.jetbrains.annotations.Nullable; /** - * Representation of cluster node that isn't currently present in cluster. 
+ * Representation of cluster node that either isn't currently present in cluster, or semantically detached. + * For example nodes returned from {@code BaselineTopology.currentBaseline()} are always considered as + * semantically detached, even if they are currently present in cluster. */ public class DetachedClusterNode implements ClusterNode { /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterAsyncImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterAsyncImpl.java index d79710db8759c..df49800404dcd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterAsyncImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterAsyncImpl.java @@ -369,4 +369,14 @@ public IgniteClusterAsyncImpl(IgniteClusterImpl cluster) { @Override public void writeExternal(ObjectOutput out) throws IOException { out.writeObject(cluster); } -} \ No newline at end of file + + /** {@inheritDoc} */ + @Override public boolean readOnly() { + return cluster.readOnly(); + } + + /** {@inheritDoc} */ + @Override public void readOnly(boolean readOnly) throws IgniteException { + cluster.readOnly(readOnly); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java index 82779dab6089d..7440cef20efcf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java @@ -29,7 +29,10 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentLinkedQueue; @@ -68,6 +71,8 @@ import 
org.apache.ignite.lang.IgniteProductVersion; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.internal.IgniteFeatures.CLUSTER_READ_ONLY_MODE; +import static org.apache.ignite.internal.IgniteFeatures.allNodesSupports; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IPS; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_MACS; import static org.apache.ignite.internal.util.nodestart.IgniteNodeStartUtils.parseFile; @@ -319,6 +324,41 @@ public IgniteClusterImpl(GridKernalContext ctx) { } } + /** {@inheritDoc} */ + @Override public boolean readOnly() { + guard(); + + try { + return ctx.state().publicApiReadOnlyMode(); + } + finally { + unguard(); + } + } + + /** {@inheritDoc} */ + @Override public void readOnly(boolean readOnly) throws IgniteException { + guard(); + + try { + verifyReadOnlyModeSupport(); + + ctx.state().changeGlobalState(readOnly).get(); + } + catch (IgniteCheckedException e) { + throw U.convertException(e); + } + finally { + unguard(); + } + } + + /** */ + private void verifyReadOnlyModeSupport() { + if (!allNodesSupports(ctx.discovery().discoCache().serverNodes(), CLUSTER_READ_ONLY_MODE)) + throw new IgniteException("Not all nodes in cluster supports cluster read-only mode"); + } + /** */ private Collection baselineNodes() { Collection srvNodes = ctx.cluster().get().forServers().nodes(); @@ -409,6 +449,22 @@ private void validateBeforeBaselineChange(Collection bas if (baselineTop.isEmpty()) throw new IgniteException("BaselineTopology must contain at least one node."); + List currBlT = Optional.ofNullable(ctx.state().clusterState().baselineTopology()). 
+ map(BaselineTopology::currentBaseline).orElse(Collections.emptyList()); + + Collection srvrs = ctx.cluster().get().forServers().nodes(); + + for (BaselineNode node : baselineTop) { + Object consistentId = node.consistentId(); + + if (currBlT.stream().noneMatch( + currBlTNode -> Objects.equals(currBlTNode.consistentId(), consistentId)) && + srvrs.stream().noneMatch( + currServersNode -> Objects.equals(currServersNode.consistentId(), consistentId))) + throw new IgniteException("Check arguments. Node with consistent ID [" + consistentId + + "] not found in server nodes."); + } + Collection onlineNodes = onlineBaselineNodesRequestedForRemoval(baselineTop); if (onlineNodes != null) { @@ -470,7 +526,7 @@ private Collection getConsistentIds(Collection n Collection target = new ArrayList<>(top.size()); for (ClusterNode node : top) { - if (!node.isClient()) + if (!node.isClient() && !node.isDaemon()) target.add(node); } @@ -709,7 +765,7 @@ IgniteInternalFuture> startNodesAsync0( Collections.emptyList()); // Exceeding max line width for readability. 
- GridCompoundFuture> fut = + GridCompoundFuture> fut = new GridCompoundFuture<>(CU.objectsReducer()); AtomicInteger cnt = new AtomicInteger(nodeCallCnt); @@ -828,4 +884,4 @@ public void clientReconnectFuture(IgniteFuture reconnecFut) { public String toString() { return "IgniteCluster [igniteInstanceName=" + ctx.igniteInstanceName() + ']'; } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java index 4eb3cd359c685..af63a4f31bfb2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java @@ -69,7 +69,7 @@ public class BaselineCommand implements Command { /** {@inheritDoc} */ @Override public String confirmationPrompt() { - if (BaselineSubcommands.COLLECT != baselineArgs.getCmd()) + if (baselineArgs != null && BaselineSubcommands.COLLECT != baselineArgs.getCmd()) return "Warning: the command will perform changes in baseline."; return null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/ClusterReadOnlyModeDisableCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ClusterReadOnlyModeDisableCommand.java new file mode 100644 index 0000000000000..fca9832c818bc --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ClusterReadOnlyModeDisableCommand.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; + +import static org.apache.ignite.internal.commandline.CommandList.READ_ONLY_DISABLE; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommonArgParser.CMD_AUTO_CONFIRMATION; + +/** + * Command to disable cluster read-only mode. + */ +public class ClusterReadOnlyModeDisableCommand implements Command { + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + client.state().readOnly(false); + + log.info("Cluster read-only mode disabled"); + } + catch (Throwable e) { + log.info("Failed to disable read-only mode"); + + throw e; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + return "Warning: the command will disable read-only mode on a cluster."; + } + + /** {@inheritDoc} */ + @Override public Void arg() { + return null; + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage( + log, + "Disable read-only mode on active cluster:", + READ_ONLY_DISABLE, + optional(CMD_AUTO_CONFIRMATION) + ); + } + + /** {@inheritDoc} */ + @Override public String name() { + return READ_ONLY_DISABLE.toCommandName(); + } +} diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/commandline/ClusterReadOnlyModeEnableCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ClusterReadOnlyModeEnableCommand.java new file mode 100644 index 0000000000000..ca3fe6b5b2ad3 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ClusterReadOnlyModeEnableCommand.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; + +import static org.apache.ignite.internal.commandline.CommandList.READ_ONLY_ENABLE; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommonArgParser.CMD_AUTO_CONFIRMATION; + +/** + * Command to enable cluster read-only mode. 
+ */ +public class ClusterReadOnlyModeEnableCommand implements Command { + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + client.state().readOnly(true); + + log.info("Cluster read-only mode enabled"); + } + catch (Throwable e) { + log.info("Failed to enable read-only mode"); + + throw e; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + return "Warning: the command will enable read-only mode on a cluster."; + } + + /** {@inheritDoc} */ + @Override public Void arg() { + return null; + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + Command.usage( + log, + "Enable read-only mode on active cluster:", + READ_ONLY_ENABLE, + optional(CMD_AUTO_CONFIRMATION) + ); + } + + /** {@inheritDoc} */ + @Override public String name() { + return READ_ONLY_ENABLE.toCommandName(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/Command.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/Command.java index c1f382e98554e..6f033a224ca56 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/Command.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/Command.java @@ -73,6 +73,16 @@ public static void usage(Logger logger, String desc, CommandList cmd, String... */ public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception; + /** + * Prepares confirmation for the command. + * + * @param clientCfg Thin client configuration. + * @throws Exception If error occur. + */ + default void prepareConfirmation(GridClientConfiguration clientCfg) throws Exception{ + //no-op + } + /** * @return Message text to show user for. If null it means that confirmantion is not needed. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandArgIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandArgIterator.java index ae36596a59df7..cb409708912d1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandArgIterator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandArgIterator.java @@ -117,6 +117,25 @@ public long nextLongArg(String argName) { } } + /** + * @return Numeric value. + */ + public byte nextByteArg(String argName) { + String str = nextArg("Expecting " + argName); + + try { + byte val = Byte.parseByte(str); + + if (val < 0) + throw new IllegalArgumentException("Invalid value for " + argName + ": " + val); + + return val; + } + catch (NumberFormatException ignored) { + throw new IllegalArgumentException("Invalid value for " + argName + ": " + str); + } + } + /** * @param argName Name of argument. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java index 152fee0b16478..6f82259fcec56 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.commandline; import java.io.File; +import java.time.Duration; import java.time.LocalDateTime; import java.util.Arrays; import java.util.Collections; @@ -43,6 +44,7 @@ import org.apache.ignite.internal.client.ssl.GridSslBasicContextFactory; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.logger.java.JavaLoggerFileHandler; import 
org.apache.ignite.logger.java.JavaLoggerFormatter; @@ -53,6 +55,7 @@ import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import static java.lang.System.lineSeparator; import static org.apache.ignite.internal.IgniteVersionUtils.ACK_VER_STR; import static org.apache.ignite.internal.IgniteVersionUtils.COPYRIGHT; import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; @@ -75,7 +78,7 @@ public class CommandHandler { public static final String CONFIRM_MSG = "y"; /** */ - static final String DELIM = "--------------------------------------------------------------------------------"; + public static final String DELIM = "--------------------------------------------------------------------------------"; /** */ public static final int EXIT_CODE_OK = 0; @@ -98,15 +101,15 @@ public class CommandHandler { /** */ private static final long DFLT_PING_TIMEOUT = 30_000L; - /** */ - private static final Scanner IN = new Scanner(System.in); - /** Utility name. */ public static final String UTILITY_NAME = "control.(sh|bat)"; /** */ public static final String NULL = "null"; + /** */ + private final Scanner in = new Scanner(System.in); + /** JULs logger. */ private final Logger logger; @@ -205,12 +208,14 @@ public CommandHandler(Logger logger) { * @return Exit code. */ public int execute(List rawArgs) { + LocalDateTime startTime = LocalDateTime.now(); + Thread.currentThread().setName("session=" + ses); logger.info("Control utility [ver. 
" + ACK_VER_STR + "]"); logger.info(COPYRIGHT); logger.info("User: " + System.getProperty("user.name")); - logger.info("Time: " + LocalDateTime.now()); + logger.info("Time: " + startTime); String commandName = ""; @@ -226,11 +231,7 @@ public int execute(List rawArgs) { Command command = args.command(); commandName = command.name(); - if (!args.autoConfirmation() && !confirm(command.confirmationPrompt())) { - logger.info("Operation cancelled."); - - return EXIT_CODE_OK; - } + GridClientConfiguration clientCfg = getClientConfiguration(args); boolean tryConnectAgain = true; @@ -238,15 +239,25 @@ public int execute(List rawArgs) { boolean suppliedAuth = !F.isEmpty(args.userName()) && !F.isEmpty(args.password()); - GridClientConfiguration clientCfg = getClientConfiguration(args); - while (tryConnectAgain) { tryConnectAgain = false; try { + if (!args.autoConfirmation()) { + command.prepareConfirmation(clientCfg); + + if (!confirm(command.confirmationPrompt())) { + logger.info("Operation cancelled."); + + return EXIT_CODE_OK; + } + } + logger.info("Command [" + commandName + "] started"); - logger.info("Arguments: " + String.join(" ", rawArgs)); + logger.info("Arguments: " + argumentsToString(rawArgs)); + logger.info(DELIM); + lastOperationRes = command.execute(clientCfg, logger); } catch (Throwable e) { @@ -297,10 +308,17 @@ public int execute(List rawArgs) { if (isConnectionError(e)) { IgniteCheckedException cause = X.cause(e, IgniteCheckedException.class); - if (cause != null && cause.getMessage() != null && cause.getMessage().contains("SSL")) - e = cause; + if (isConnectionClosedSilentlyException(e)) + logger.severe("Connection to cluster failed. Please check firewall settings and " + + "client and server are using the same SSL configuration."); + else { + if (isSSLMisconfigurationError(cause)) + e = cause; + + logger.severe("Connection to cluster failed. " + CommandLogger.errorMessage(e)); + + } - logger.severe("Connection to cluster failed. 
" + CommandLogger.errorMessage(e)); logger.info("Command [" + commandName + "] finished with code: " + EXIT_CODE_CONNECTION_FAILED); return EXIT_CODE_CONNECTION_FAILED; @@ -312,12 +330,118 @@ public int execute(List rawArgs) { return EXIT_CODE_UNEXPECTED_ERROR; } finally { + LocalDateTime endTime = LocalDateTime.now(); + + Duration diff = Duration.between(startTime, endTime); + + logger.info("Control utility has completed execution at: " + endTime); + logger.info("Execution time: " + diff.toMillis() + " ms"); + Arrays.stream(logger.getHandlers()) .filter(handler -> handler instanceof FileHandler) .forEach(Handler::close); } } + /** + * Analyses passed exception to find out whether it is related to SSL misconfiguration issues. + * + * (!) Implementation depends heavily on structure of exception stack trace + * thus is very fragile to any changes in that structure. + * + * @param e Exception to analyze. + * + * @return {@code True} if exception may be related to SSL misconfiguration issues. + */ + private boolean isSSLMisconfigurationError(Throwable e) { + return e != null && e.getMessage() != null && e.getMessage().contains("SSL"); + } + + /** + * Analyses passed exception to find out whether it is caused by server closing connection silently. + * This happens when client tries to establish unprotected connection + * to the cluster supporting only secured communications (e.g. when server is configured to use SSL certificates + * and client is not). + * + * (!) Implementation depends heavily on structure of exception stack trace + * thus is very fragile to any changes in that structure. + * + * @param e Exception to analyse. + * @return {@code True} if exception may be related to the attempt to establish unprotected connection + * to secured cluster. 
+ */ + private boolean isConnectionClosedSilentlyException(Throwable e) { + if (!(e instanceof GridClientDisconnectedException)) + return false; + + Throwable cause = e.getCause(); + + if (cause == null) + return false; + + cause = cause.getCause(); + + if (cause instanceof GridClientConnectionResetException && + cause.getMessage() != null && + cause.getMessage().contains("Failed to perform handshake") + ) + return true; + + return false; + } + + /** + * @param rawArgs Arguments which user has provided. + * @return String which could be shown in console and pritned to log. + */ + private String argumentsToString(List rawArgs) { + boolean hide = false; + + SB sb = new SB(); + + for (int i = 0; i < rawArgs.size(); i++) { + if (hide) { + sb.a("***** "); + + hide = false; + + continue; + } + + String arg = rawArgs.get(i); + + sb.a(arg).a(' '); + + hide = CommonArgParser.isSensitiveArgument(arg); + } + + return sb.toString(); + } + + /** + * Does one of three things: + *
    + *
  • returns user name from connection parameters if it is there;
  • + *
  • returns user name from client configuration if it is there;
  • + *
  • requests user input and returns entered name.
  • + *
+ * + * @param args Connection parameters. + * @param clientCfg Client configuration. + * @throws IgniteCheckedException If security credetials cannot be provided from client configuration. + */ + private String retrieveUserName( + ConnectionAndSslParameters args, + GridClientConfiguration clientCfg + ) throws IgniteCheckedException { + if (!F.isEmpty(args.userName())) + return args.userName(); + else if (clientCfg.getSecurityCredentialsProvider() == null) + return requestDataFromConsole("user: "); + else + return (String)clientCfg.getSecurityCredentialsProvider().credentials().getLogin(); + } + /** * @param args Common arguments. * @return Thin client configuration to connect to cluster. @@ -444,7 +568,7 @@ public T getLastOperationResult() { private String readLine(String prompt) { System.out.print(prompt); - return IN.nextLine(); + return in.nextLine(); } @@ -453,11 +577,11 @@ private String readLine(String prompt) { * * @return {@code true} if operation confirmed (or not needed), {@code false} otherwise. */ - private boolean confirm(String str) { + private boolean confirm(String str) { if (str == null) return true; - String prompt = str + "\nPress '" + CONFIRM_MSG + "' to continue . . . "; + String prompt = str + lineSeparator() + "Press '" + CONFIRM_MSG + "' to continue . . . 
"; return CONFIRM_MSG.equalsIgnoreCase(readLine(prompt)); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandList.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandList.java index 5e5d4f0cb5a6c..151227a1ee764 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandList.java @@ -22,6 +22,7 @@ import org.apache.ignite.internal.commandline.cache.CacheCommands; import org.apache.ignite.internal.commandline.diagnostic.DiagnosticCommand; +import org.apache.ignite.internal.commandline.dr.DrCommand; /** * High-level commands. @@ -49,7 +50,16 @@ public enum CommandList { WAL("--wal", new WalCommands()), /** */ - DIAGNOSTIC("--diagnostic", new DiagnosticCommand()); + DIAGNOSTIC("--diagnostic", new DiagnosticCommand()), + + /** */ + DATA_CENTER_REPLICATION("--dr", new DrCommand()), + + /** */ + READ_ONLY_ENABLE("--read-only-on", new ClusterReadOnlyModeEnableCommand()), + + /** */ + READ_ONLY_DISABLE("--read-only-off", new ClusterReadOnlyModeDisableCommand()); /** Private values copy so there's no need in cloning it every time. */ private static final CommandList[] VALUES = CommandList.values(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandLogger.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandLogger.java index 64ae204f7b406..17ff0f4d0684a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandLogger.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandLogger.java @@ -44,7 +44,7 @@ public class CommandLogger { * @param params Other input parameter. * @return Joined paramaters with specified {@code delimeter}. */ - public static String join(String delimeter, Object... params) { + public static String join(String delimeter, T... 
params) { return join(new SB(), "", delimeter, params).toString(); } @@ -57,7 +57,7 @@ public static String join(String delimeter, Object... params) { * @param params Other input parameter. * @return SB with appended to the end joined paramaters with specified {@code delimeter}. */ - public static SB join(SB sb, String sbDelimeter, String delimeter, Object... params) { + public static SB join(SB sb, String sbDelimeter, String delimeter, T... params) { if (!F.isEmpty(params)) { sb.a(sbDelimeter); @@ -77,7 +77,7 @@ public static SB join(SB sb, String sbDelimeter, String delimeter, Object... par * @param params Other input parameter. * @return Joined parameters wrapped optional braces. */ - public static String optional(Object... params) { + public static String optional(Object... params) { return join(new SB(), "[", " ", params).a("]").toString(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java index f8d3372e71fdc..f4ddf8d174225 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java @@ -56,7 +56,7 @@ public class CommonArgParser { static final String CMD_USER = "--user"; /** Option is used for auto confirmation. */ - static final String CMD_AUTO_CONFIRMATION = "--yes"; + public static final String CMD_AUTO_CONFIRMATION = "--yes"; /** */ static final String CMD_PING_INTERVAL = "--ping-interval"; @@ -96,6 +96,9 @@ public class CommonArgParser { /** List of optional auxiliary commands. 
*/ private static final Set AUX_COMMANDS = new HashSet<>(); + /** Set of sensitive arguments */ + private static final Set SENSITIVE_ARGUMENTS = new HashSet<>(); + static { AUX_COMMANDS.add(CMD_HOST); AUX_COMMANDS.add(CMD_PORT); @@ -119,8 +122,21 @@ public class CommonArgParser { AUX_COMMANDS.add(CMD_TRUSTSTORE); AUX_COMMANDS.add(CMD_TRUSTSTORE_PASSWORD); AUX_COMMANDS.add(CMD_TRUSTSTORE_TYPE); + + SENSITIVE_ARGUMENTS.add(CMD_PASSWORD); + SENSITIVE_ARGUMENTS.add(CMD_KEYSTORE_PASSWORD); + SENSITIVE_ARGUMENTS.add(CMD_TRUSTSTORE_PASSWORD); } + /** + * @param arg To check. + * @return True if provided argument is among sensitive one and not should be displayed. + */ + public static boolean isSensitiveArgument(String arg) { + return SENSITIVE_ARGUMENTS.contains(arg); + } + + /** * @param logger Logger. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java index befe4510d6e42..b73c0fd739087 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java @@ -18,6 +18,8 @@ package org.apache.ignite.internal.commandline; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; +import org.apache.ignite.internal.util.typedef.internal.S; /** * Container with common parsed and validated arguments. @@ -33,6 +35,7 @@ public class ConnectionAndSslParameters { private String user; /** Password. */ + @GridToStringExclude private String pwd; /** Force option is used for auto confirmation. */ @@ -60,6 +63,7 @@ public class ConnectionAndSslParameters { private String sslKeyStoreType; /** Keystore Password. */ + @GridToStringExclude private char[] sslKeyStorePassword; /** Truststore. 
*/ @@ -69,6 +73,7 @@ public class ConnectionAndSslParameters { private String sslTrustStoreType; /** Truststore Password. */ + @GridToStringExclude private char[] sslTrustStorePassword; /** High-level command. */ @@ -259,4 +264,13 @@ public String sslTrustStoreType() { public char[] sslTrustStorePassword() { return sslTrustStorePassword; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(ConnectionAndSslParameters.class, this, + "password", pwd == null ? null : "*****", + "sslKeyStorePassword", sslKeyStorePassword == null ? null: "*****", + "sslTrustStorePassword", sslTrustStorePassword == null? null: "*****" + ); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java index 0d90c35264982..32185849ef713 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java @@ -33,14 +33,24 @@ * Command to deactivate cluster. */ public class DeactivateCommand implements Command { + /** Cluster name. 
*/ + private String clusterName; + /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { Command.usage(logger, "Deactivate cluster:", DEACTIVATE, optional(CMD_AUTO_CONFIRMATION)); } + /** {@inheritDoc} */ + @Override public void prepareConfirmation(GridClientConfiguration clientCfg) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + clusterName = client.state().clusterName(); + } + } + /** {@inheritDoc} */ @Override public String confirmationPrompt() { - return "Warning: the command will deactivate a cluster."; + return "Warning: the command will deactivate a cluster \"" + clusterName + "\"."; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java index d3c730638b47b..c6523e01b8fd9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java @@ -42,14 +42,21 @@ public class StateCommand implements Command { * @param clientCfg Client configuration. * @throws Exception If failed to print state. */ - @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { try (GridClient client = Command.startClient(clientCfg)){ GridClientClusterState state = client.state(); - logger.info("Cluster is " + (state.active() ? 
"active" : "inactive")); + if (state.active()) { + if (state.readOnly()) + log.info("Cluster is active (read-only)"); + else + log.info("Cluster is active"); + } + else + log.info("Cluster is inactive"); } catch (Throwable e) { - logger.severe("Failed to get cluster state."); + log.severe("Failed to get cluster state."); throw e; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java index c88e928b55e61..c66037e3bf342 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java @@ -194,7 +194,7 @@ private void transactions(GridClient client, GridClientConfiguration conf) throw /** {@inheritDoc} */ @Override public String confirmationPrompt() { - if (args.getOperation() == VisorTxOperation.KILL) + if (args != null && args.getOperation() == VisorTxOperation.KILL) return "Warning: the command will kill some transactions."; return null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java index e2489cf0c99a1..2ac9c8794d896 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java @@ -39,6 +39,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_ENABLE_EXPERIMENTAL_COMMAND; import static org.apache.ignite.internal.commandline.CommandArgIterator.isCommandOrOption; +import static org.apache.ignite.internal.commandline.CommandHandler.UTILITY_NAME; import static org.apache.ignite.internal.commandline.CommandList.WAL; import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; import static 
org.apache.ignite.internal.commandline.CommandLogger.INDENT; @@ -69,13 +70,15 @@ public class WalCommands implements Command> { */ private String walArgs; + /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { - if (IgniteSystemProperties.getBoolean(IGNITE_ENABLE_EXPERIMENTAL_COMMAND, false)) { - Command.usage(logger, "Print absolute paths of unused archived wal segments on each node:", WAL, - WAL_PRINT, "[consistentId1,consistentId2,....,consistentIdN]"); - Command.usage(logger, "Delete unused archived wal segments on each node:", WAL, WAL_DELETE, - "[consistentId1,consistentId2,....,consistentIdN]", optional(CMD_AUTO_CONFIRMATION)); - } + if (!enableExperimental()) + return; + + Command.usage(logger, "Print absolute paths of unused archived wal segments on each node:", WAL, + WAL_PRINT, "[consistentId1,consistentId2,....,consistentIdN]"); + Command.usage(logger, "Delete unused archived wal segments on each node:", WAL, WAL_DELETE, + "[consistentId1,consistentId2,....,consistentIdN]", optional(CMD_AUTO_CONFIRMATION)); } /** @@ -85,21 +88,26 @@ public class WalCommands implements Command> { * @throws Exception If failed to execute wal action. 
*/ @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { - this.logger = logger; + if (enableExperimental()) { + this.logger = logger; - try (GridClient client = Command.startClient(clientCfg)) { - switch (walAct) { - case WAL_DELETE: - deleteUnusedWalSegments(client, walArgs, clientCfg); + try (GridClient client = Command.startClient(clientCfg)) { + switch (walAct) { + case WAL_DELETE: + deleteUnusedWalSegments(client, walArgs, clientCfg); - break; + break; - case WAL_PRINT: - default: - printUnusedWalSegments(client, walArgs, clientCfg); + case WAL_PRINT: + default: + printUnusedWalSegments(client, walArgs, clientCfg); - break; + break; + } } + } else { + logger.warning(String.format("For use experimental command add %s=true to JVM_OPTS in %s", + IGNITE_ENABLE_EXPERIMENTAL_COMMAND, UTILITY_NAME)); } return null; @@ -124,8 +132,10 @@ public class WalCommands implements Command> { ? argIter.nextArg("Unexpected argument for " + WAL.text() + ": " + walAct) : ""; - this.walAct = walAct; - this.walArgs = walArgs; + if (enableExperimental()) { + this.walAct = walAct; + this.walArgs = walArgs; + } } else throw new IllegalArgumentException("Unexpected action " + walAct + " for " + WAL.text()); @@ -268,4 +278,11 @@ private void printDeleteWalSegments0(VisorWalTaskResult taskRes) { @Override public String name() { return WAL.toCommandName(); } + + /** + * @return Value of {@link IgniteSystemProperties#IGNITE_ENABLE_EXPERIMENTAL_COMMAND} + */ + private boolean enableExperimental() { + return IgniteSystemProperties.getBoolean(IGNITE_ENABLE_EXPERIMENTAL_COMMAND, false); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java index 395a4ef04ede0..22938856cc061 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java @@ -21,6 +21,8 @@ package org.apache.ignite.internal.commandline.baseline; import java.util.List; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; /** * This class contains all possible arguments after parsing baseline command input. @@ -38,6 +40,7 @@ public class BaselineArguments { /** Requested topology version. */ private long topVer = -1; /** List of consistent ids for operation. */ + @GridToStringInclude List consistentIds; /** @@ -92,6 +95,11 @@ public List getConsistentIds() { return consistentIds; } + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(BaselineArguments.class, this); + } + /** * Builder of {@link BaselineArguments}. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java index a7b6b7d8f059f..0c00fbbbac948 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java @@ -42,7 +42,6 @@ import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.HELP; import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.LIST; import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.VALIDATE_INDEXES; -import static org.apache.ignite.spi.discovery.tcp.ipfinder.sharedfs.TcpDiscoverySharedFsIpFinder.DELIM; /** * High-level "cache" command implementation. @@ -160,7 +159,7 @@ protected static void usageCache( Map paramsDesc, String... 
args ) { - logger.info(INDENT + DELIM); + logger.info(""); logger.info(INDENT + CommandLogger.join(" ", CACHE, cmd, CommandLogger.join(" ", args))); logger.info(DOUBLE_INDENT + description); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java index 458c9a19245ae..10c615d9cfa6f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java @@ -29,6 +29,7 @@ import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; import org.apache.ignite.internal.processors.cache.verify.ContentionInfo; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.visor.verify.VisorContentionTask; import org.apache.ignite.internal.visor.verify.VisorContentionTaskArg; import org.apache.ignite.internal.visor.verify.VisorContentionTaskResult; @@ -93,6 +94,11 @@ public int minQueueSize() { public int maxPrint() { return maxPrint; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java index 320f19b6f530f..64552d1b3f584 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTask; import org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTaskArg; import 
org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTaskResult; +import org.apache.ignite.internal.util.typedef.internal.S; import static org.apache.ignite.internal.commandline.CommandHandler.NULL; import static org.apache.ignite.internal.commandline.CommandLogger.optional; @@ -102,6 +103,11 @@ public UUID nodeId() { public Set getUserAttributes() { return userAttributes; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** Command parsed arguments */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java index fdbf74283d3e8..eb8595da25282 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg; import org.apache.ignite.internal.processors.cache.verify.PartitionKey; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.verify.IndexIntegrityCheckIssue; import org.apache.ignite.internal.visor.verify.IndexValidationIssue; @@ -137,6 +138,11 @@ public int checkThrough() { public UUID nodeId() { return nodeId; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** Command parsed arguments. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java index eb83cf66ce808..55ed3740e3555 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.commandline.argument.CommandArgUtils; import org.apache.ignite.internal.commandline.cache.argument.ListCommandArg; import org.apache.ignite.internal.processors.cache.verify.CacheInfo; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.cache.VisorCacheAffinityConfiguration; @@ -149,6 +150,11 @@ public VisorViewCacheCmd cacheCommand() { * @return Full config flag. 
*/ public boolean fullConfig(){ return fullConfig; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** Command parsed arguments */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java index 0814dcd5f69cb..5006f075cd009 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java @@ -32,6 +32,7 @@ import org.apache.ignite.internal.commandline.CommandLogger; import org.apache.ignite.internal.commandline.argument.CommandArgUtils; import org.apache.ignite.internal.commandline.cache.argument.FindAndDeleteGarbageArg; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceJobResult; import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceTask; import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceTaskArg; @@ -100,6 +101,11 @@ public Set groups() { public boolean delete() { return delete; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** Command parsed arguments. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java index f0a2058c3d1f9..63afa6d894f7b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java @@ -27,8 +27,8 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.logging.Logger; import java.util.function.Consumer; +import java.util.logging.Logger; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; import org.apache.ignite.IgniteException; @@ -46,6 +46,7 @@ import org.apache.ignite.internal.processors.cache.verify.PartitionKey; import org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsTaskV2; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.visor.verify.CacheFilterEnum; import org.apache.ignite.internal.visor.verify.VisorIdleVerifyDumpTask; @@ -179,6 +180,11 @@ public boolean idleCheckCrc() { public boolean isSkipZeros() { return skipZeros; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** Command parsed arguments. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java index 65d60a03f7390..cd6dc77677d91 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java @@ -32,6 +32,7 @@ import org.apache.ignite.internal.commandline.TaskExecutor; import org.apache.ignite.internal.commandline.argument.CommandArg; import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.visor.diagnostic.Operation; import org.apache.ignite.internal.visor.diagnostic.VisorPageLocksResult; import org.apache.ignite.internal.visor.diagnostic.VisorPageLocksTask; @@ -43,10 +44,10 @@ import static org.apache.ignite.internal.commandline.CommandLogger.optional; import static org.apache.ignite.internal.commandline.diagnostic.DiagnosticSubCommand.PAGE_LOCKS; import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.ALL; -import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.NODES; -import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.PATH; import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.DUMP; import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.DUMP_LOG; +import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.NODES; +import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.PATH; import static 
org.apache.ignite.internal.processors.diagnostic.DiagnosticProcessor.DEFAULT_TARGET_FOLDER; /** @@ -76,7 +77,7 @@ public class PageLocksCommand implements Command { }); } - VisorPageLocksTrackerArgs taskArg = new VisorPageLocksTrackerArgs(arguments.op, arguments.filePath, nodeIds); + VisorPageLocksTrackerArgs taskArg = new VisorPageLocksTrackerArgs(arguments.operation, arguments.filePath, nodeIds); res = TaskExecutor.executeTask( client, @@ -180,7 +181,7 @@ private void printResult(Map res) { /** */ public static class Arguments { /** */ - private final Operation op; + private final Operation operation; /** */ private final String filePath; /** */ @@ -189,22 +190,27 @@ public static class Arguments { private final Set nodeIds; /** - * @param op Operation. + * @param operation Operation. * @param filePath File path. * @param allNodes If {@code True} include all available nodes for command. If {@code False} include only subset. * @param nodeIds Node ids. */ public Arguments( - Operation op, + Operation operation, String filePath, boolean allNodes, Set nodeIds ) { - this.op = op; + this.operation = operation; this.filePath = filePath; this.allNodes = allNodes; this.nodeIds = nodeIds; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } enum PageLocksCommandArg implements CommandArg { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/DrCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/DrCommand.java new file mode 100644 index 0000000000000..9cddab745432f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/DrCommand.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.dr; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.dr.subcommands.DrCacheCommand; +import org.apache.ignite.internal.commandline.dr.subcommands.DrNodeCommand; +import org.apache.ignite.internal.commandline.dr.subcommands.DrStateCommand; +import org.apache.ignite.internal.commandline.dr.subcommands.DrTopologyCommand; + +import static org.apache.ignite.internal.commandline.Command.usage; +import static org.apache.ignite.internal.commandline.CommandList.DATA_CENTER_REPLICATION; +import static org.apache.ignite.internal.commandline.CommandLogger.join; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommonArgParser.CMD_AUTO_CONFIRMATION; +import static org.apache.ignite.internal.commandline.dr.DrSubCommandsList.CACHE; +import static org.apache.ignite.internal.commandline.dr.DrSubCommandsList.FULL_STATE_TRANSFER; +import static org.apache.ignite.internal.commandline.dr.DrSubCommandsList.HELP; +import static org.apache.ignite.internal.commandline.dr.DrSubCommandsList.NODE; +import static 
org.apache.ignite.internal.commandline.dr.DrSubCommandsList.PAUSE; +import static org.apache.ignite.internal.commandline.dr.DrSubCommandsList.RESUME; +import static org.apache.ignite.internal.commandline.dr.DrSubCommandsList.STATE; +import static org.apache.ignite.internal.commandline.dr.DrSubCommandsList.TOPOLOGY; + +/** */ +public class DrCommand implements Command { + /** */ + private Command delegate; + + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + usage(log, "Print data center replication command help:", + DATA_CENTER_REPLICATION, + HELP.toString() + ); + + usage(log, "Print state of data center replication:", + DATA_CENTER_REPLICATION, + STATE.toString(), + optional(DrStateCommand.VERBOSE_PARAM) + ); + + usage(log, "Print topology of the cluster with the data center replication related details:", + DATA_CENTER_REPLICATION, + TOPOLOGY.toString(), + optional(DrTopologyCommand.SENDER_HUBS_PARAM), + optional(DrTopologyCommand.RECEIVER_HUBS_PARAM), + optional(DrTopologyCommand.DATA_NODES_PARAM), + optional(DrTopologyCommand.OTHER_NODES_PARAM) + ); + + usage(log, "Print node specific data center replication related details and clear node's DR store:", + DATA_CENTER_REPLICATION, + NODE.toString(), + "", + optional(DrNodeCommand.CONFIG_PARAM), + optional(DrNodeCommand.METRICS_PARAM), + optional(DrNodeCommand.CLEAR_STORE_PARAM), + optional(CMD_AUTO_CONFIRMATION) + ); + + usage(log, "Print cache specific data center replication related details about caches and maybe change replication state on them:", + DATA_CENTER_REPLICATION, + CACHE.toString(), + "", + optional(DrCacheCommand.CONFIG_PARAM), + optional(DrCacheCommand.METRICS_PARAM), + optional(DrCacheCommand.CACHE_FILTER_PARAM, join("|", DrCacheCommand.CacheFilter.values())), + optional(DrCacheCommand.SENDER_GROUP_PARAM, "|" + join("|", DrCacheCommand.SenderGroup.values())), + optional(DrCacheCommand.ACTION_PARAM, join("|", DrCacheCommand.Action.values())), + 
optional(CMD_AUTO_CONFIRMATION) + ); + + usage(log, "Execute full state transfer on all caches in cluster if data center replication is configured:", + DATA_CENTER_REPLICATION, + FULL_STATE_TRANSFER.toString(), + optional(CMD_AUTO_CONFIRMATION) + ); + + usage(log, "Stop data center replication on all caches in cluster:", + DATA_CENTER_REPLICATION, + PAUSE.toString(), + "", + optional(CMD_AUTO_CONFIRMATION) + ); + + usage(log, "Start data center replication on all caches in cluster:", + DATA_CENTER_REPLICATION, + RESUME.toString(), + "", + optional(CMD_AUTO_CONFIRMATION) + ); + } + + /** {@inheritDoc} */ + @Override public String name() { + return DATA_CENTER_REPLICATION.toCommandName(); + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + DrSubCommandsList subcommand = DrSubCommandsList.parse(argIter.nextArg("Expected dr action.")); + + if (subcommand == null) + throw new IllegalArgumentException("Expected correct dr action."); + + delegate = subcommand.command(); + + delegate.parseArguments(argIter); + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + return delegate != null ? delegate.confirmationPrompt() : null; + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + return delegate.execute(clientCfg, log); + } + + /** {@inheritDoc} */ + @Override public Object arg() { + return delegate.arg(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/DrSubCommandsList.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/DrSubCommandsList.java new file mode 100644 index 0000000000000..b70f8ecc22ba7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/DrSubCommandsList.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.dr; + +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.dr.subcommands.DrCacheCommand; +import org.apache.ignite.internal.commandline.dr.subcommands.DrFullStateTransferCommand; +import org.apache.ignite.internal.commandline.dr.subcommands.DrHelpCommand; +import org.apache.ignite.internal.commandline.dr.subcommands.DrNodeCommand; +import org.apache.ignite.internal.commandline.dr.subcommands.DrPauseCommand; +import org.apache.ignite.internal.commandline.dr.subcommands.DrResumeCommand; +import org.apache.ignite.internal.commandline.dr.subcommands.DrStateCommand; +import org.apache.ignite.internal.commandline.dr.subcommands.DrTopologyCommand; +import org.jetbrains.annotations.NotNull; + +/** */ +public enum DrSubCommandsList { + /** */ + HELP("help", new DrHelpCommand()), + /** */ + STATE("state", new DrStateCommand()), + /** */ + TOPOLOGY("topology", new DrTopologyCommand()), + /** */ + NODE("node", new DrNodeCommand()), + /** */ + CACHE("cache", new DrCacheCommand()), + /** */ + FULL_STATE_TRANSFER("full-state-transfer", new DrFullStateTransferCommand()), + /** */ + PAUSE("pause", new DrPauseCommand()), + /** */ + RESUME("resume", new DrResumeCommand()); + + /** */ + private 
final String name; + + /** */ + private final Command cmd; + + /** */ + DrSubCommandsList(String name, Command cmd) { + this.name = name; + this.cmd = cmd; + } + + /** */ + public String text() { + return name; + } + + /** */ + @NotNull + public Command command() { + return cmd; + } + + /** */ + public static DrSubCommandsList parse(String name) { + for (DrSubCommandsList cmd : values()) { + if (cmd.name.equalsIgnoreCase(name)) + return cmd; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrAbstractRemoteSubCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrAbstractRemoteSubCommand.java new file mode 100644 index 0000000000000..e618adc2f4720 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrAbstractRemoteSubCommand.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.dr.subcommands; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.IgniteNodeAttributes; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientCompute; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientDisconnectedException; +import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.visor.VisorTaskArgument; + +import static java.util.stream.Collectors.toCollection; +import static java.util.stream.Collectors.toList; +import static org.apache.ignite.internal.IgniteFeatures.DR_CONTROL_UTILITY; +import static org.apache.ignite.internal.IgniteFeatures.nodeSupports; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_FEATURES; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; + +/** */ +public abstract class DrAbstractRemoteSubCommand< + VisorArgsDto extends IgniteDataTransferObject, + VisorResultDto extends IgniteDataTransferObject, + DrArgs extends DrAbstractRemoteSubCommand.Arguments +> implements Command { + /** */ + protected static boolean drControlUtilitySupported(GridClientNode node) { + return nodeSupports((byte[])node.attribute(ATTR_IGNITE_FEATURES), DR_CONTROL_UTILITY); + } + + /** */ + private DrArgs args; + + /** */ + private final List nodesWithoutDrTasks = new ArrayList<>(); + + /** {@inheritDoc} */ + @Override public final void printUsage(Logger log) { + throw new 
UnsupportedOperationException("printUsage"); + } + + /** {@inheritDoc} */ + @Override public final void parseArguments(CommandArgIterator argIter) { + args = parseArguments0(argIter); + } + + /** {@inheritDoc} */ + @Override public final Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + VisorResultDto res = execute0(clientCfg, client); + + printResult(res, log); + } + catch (Throwable e) { + log.severe("Failed to execute dr command='" + name() + "'"); + log.severe(CommandLogger.errorMessage(e)); + + throw e; + } + + return null; + } + + /** */ + protected VisorResultDto execute0( + GridClientConfiguration clientCfg, + GridClient client + ) throws Exception { + GridClientCompute compute = client.compute(); + + Collection nodes = compute.nodes(); + + nodes.stream() + .filter(node -> !drControlUtilitySupported(node)) + .collect(toCollection(() -> nodesWithoutDrTasks)); + + List nodeIds = nodes.stream() + .filter(DrAbstractRemoteSubCommand::drControlUtilitySupported) + .map(GridClientNode::nodeId) + .collect(toList()); + + if (F.isEmpty(nodeIds)) + throw new GridClientDisconnectedException("Connectable nodes not found", null); + + return compute.projection(DrAbstractRemoteSubCommand::drControlUtilitySupported) + .execute(visorTaskName(), new VisorTaskArgument<>(nodeIds, args.toVisorArgs(), false)); + } + + /** */ + protected void printUnrecognizedNodesMessage(Logger log, boolean verbose) { + if (!nodesWithoutDrTasks.isEmpty()) { + log.warning("Unrecognized nodes found that have no DR API for control utility: " + nodesWithoutDrTasks.size()); + + if (verbose) { + for (GridClientNode node : nodesWithoutDrTasks) { + boolean clientNode = node.attribute(IgniteNodeAttributes.ATTR_CLIENT_MODE); + + log.warning(String.format(INDENT + "nodeId=%s, Mode=%s", node.nodeId(), clientNode ? 
"Client" : "Server")); + } + } + else + log.warning("Please use \"--dr topology\" command to see full list."); + } + } + + /** {@inheritDoc} */ + @Override public final DrArgs arg() { + return args; + } + + /** */ + protected abstract String visorTaskName(); + + /** */ + protected abstract DrArgs parseArguments0(CommandArgIterator argIter); + + /** */ + protected abstract void printResult(VisorResultDto res, Logger log); + + /** */ + @SuppressWarnings("PublicInnerClass") + public interface Arguments { + /** */ + ArgsDto toVisorArgs(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrCacheCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrCacheCommand.java new file mode 100644 index 0000000000000..e0184c54fe77d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrCacheCommand.java @@ -0,0 +1,422 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.dr.subcommands; + +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; +import java.util.stream.Collectors; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientCompute; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientDisconnectedException; +import org.apache.ignite.internal.client.GridClientException; +import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.dr.DrSubCommandsList; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.visor.VisorTaskArgument; +import org.apache.ignite.internal.visor.dr.VisorDrCacheTaskArgs; +import org.apache.ignite.internal.visor.dr.VisorDrCacheTaskResult; + +import static org.apache.ignite.internal.commandline.CommandHandler.DELIM; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; + +/** */ +public class DrCacheCommand extends + DrAbstractRemoteSubCommand +{ + /** Config parameter. */ + public static final String CONFIG_PARAM = "--config"; + /** Metrics parameter. */ + public static final String METRICS_PARAM = "--metrics"; + /** Cache filter parameter. */ + public static final String CACHE_FILTER_PARAM = "--cache-filter"; + /** Sender group parameter. */ + public static final String SENDER_GROUP_PARAM = "--sender-group"; + /** Action parameter. 
*/ + public static final String ACTION_PARAM = "--action"; + + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + throw new UnsupportedOperationException("visorTaskName"); + } + + /** {@inheritDoc} */ + @Override public DrCacheArguments parseArguments0(CommandArgIterator argIter) { + String regex = argIter.nextArg("Cache name regex expected."); + + if (CommandArgIterator.isCommandOrOption(regex)) + throw new IllegalArgumentException("Cache name regex expected."); + + Pattern pattern; + + try { + pattern = Pattern.compile(regex); + } + catch (PatternSyntaxException e) { + throw new IllegalArgumentException("Cache name regex is not valid.", e); + } + + boolean cfg = false; + boolean metrics = false; + CacheFilter cacheFilter = CacheFilter.ALL; + SenderGroup sndGrp = SenderGroup.ALL; + String sndGrpName = null; + Action act = null; + + String nextArg; + + //noinspection LabeledStatement + args_loop: while ((nextArg = argIter.peekNextArg()) != null) { + switch (nextArg.toLowerCase(Locale.ENGLISH)) { + case CONFIG_PARAM: + argIter.nextArg(null); + cfg = true; + + break; + + case METRICS_PARAM: + argIter.nextArg(null); + metrics = true; + + break; + + case CACHE_FILTER_PARAM: { + argIter.nextArg(null); + + String errorMsg = "--cache-filter parameter value required."; + + String cacheFilterStr = argIter.nextArg(errorMsg); + cacheFilter = CacheFilter.valueOf(cacheFilterStr.toUpperCase(Locale.ENGLISH)); + + if (cacheFilter == null) + throw new IllegalArgumentException(errorMsg); + + break; + } + + case SENDER_GROUP_PARAM: { + argIter.nextArg(null); + + String arg = argIter.nextArg("--sender-group parameter value required."); + + sndGrp = SenderGroup.parse(arg); + + if (sndGrp == null) + sndGrpName = arg; + + break; + } + + case ACTION_PARAM: { + argIter.nextArg(null); + + String errorMsg = "--action parameter value required."; + + act = Action.parse(argIter.nextArg(errorMsg)); + + if (act == null) + throw new IllegalArgumentException(errorMsg); + + break; 
+ } + + default: + //noinspection BreakStatementWithLabel + break args_loop; + } + } + + return new DrCacheArguments(regex, pattern, cfg, metrics, cacheFilter, sndGrp, sndGrpName, act, (byte)0); + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + if (arg().action != null) + return "Warning: this command will change data center replication state for selected caches."; + + return null; + } + + /** {@inheritDoc} */ + @Override + protected VisorDrCacheTaskResult execute0(GridClientConfiguration clientCfg, GridClient client) throws Exception { + return execute0(client, arg()); + } + + /** */ + public static VisorDrCacheTaskResult execute0( + GridClient client, + DrCacheArguments arg + ) throws GridClientException { + GridClientCompute compute = client.compute(); + + Collection nodes = compute.nodes(); + + Pattern cacheNamePattern = arg.pattern; + + List nodeIds = nodes.stream() + .filter(DrAbstractRemoteSubCommand::drControlUtilitySupported) + .map(GridClientNode::nodeId) + .collect(Collectors.toList()); + + if (F.isEmpty(nodeIds)) + throw new GridClientDisconnectedException("Connectable nodes not found", null); + + if (arg.remoteDataCenterId == 0 && arg.action != null) { + Map cacheNameToNodeMap = new HashMap<>(); + + for (GridClientNode node : nodes) { + for (String cacheName : node.caches().keySet()) { + if (cacheNamePattern.matcher(cacheName).matches()) + cacheNameToNodeMap.putIfAbsent(cacheName, node.nodeId()); + } + } + + arg.cacheNamesMap = cacheNameToNodeMap; + } + else if (arg.remoteDataCenterId != 0) { + for (GridClientNode node : nodes) { + if (node.attribute("plugins.gg.replication.snd.hub") != null) { + arg.actionCoordinator = node.nodeId(); + + break; + } + } + } + + return compute.projection(DrAbstractRemoteSubCommand::drControlUtilitySupported).execute( + "org.gridgain.grid.internal.visor.dr.console.VisorDrCacheTask", + new VisorTaskArgument<>(nodeIds, arg.toVisorArgs(), false) + ); + } + + /** {@inheritDoc} */ + @Override 
protected void printResult(VisorDrCacheTaskResult res, Logger log) { + printUnrecognizedNodesMessage(log, false); + + log.info("Data Center ID: " + res.getDataCenterId()); + + log.info(DELIM); + + if (res.getDataCenterId() == 0) { + log.info("Data Replication state: is not configured."); + + return; + } + + List cacheNames = res.getCacheNames(); + if (cacheNames.isEmpty()) { + log.info("No matching caches found"); + + return; + } + + log.info(String.format("%d matching cache(s): %s", cacheNames.size(), cacheNames)); + + for (String cacheName : cacheNames) { + List> cacheSndCfg = res.getSenderConfig().get(cacheName); + + printList(log, cacheSndCfg, String.format( + "Sender configuration for cache \"%s\":", + cacheName + )); + + List> cacheRcvCfg = res.getReceiverConfig().get(cacheName); + + printList(log, cacheRcvCfg, String.format( + "Receiver configuration for cache \"%s\":", + cacheName + )); + } + + for (String cacheName : cacheNames) { + List> cacheSndMetrics = res.getSenderMetrics().get(cacheName); + + printList(log, cacheSndMetrics, String.format( + "Sender metrics for cache \"%s\":", + cacheName + )); + + List> cacheRcvMetrics = res.getReceiverMetrics().get(cacheName); + + printList(log, cacheRcvMetrics, String.format( + "Receiver metrics for cache \"%s\":", + cacheName + )); + } + + for (String msg : res.getResultMessages()) + log.info(msg); + } + + /** */ + private static void printList(Logger log, List> cfg, String s) { + if (cfg != null && !cfg.isEmpty()) { + log.info(s); + + for (T2 t2 : cfg) + log.info(String.format(INDENT + "%s=%s", t2.toArray())); + } + } + + /** {@inheritDoc} */ + @Override public String name() { + return DrSubCommandsList.CACHE.text(); + } + + /** */ + @SuppressWarnings("PublicInnerClass") public enum CacheFilter { + /** All. */ ALL, + /** Sending. */ SENDING, + /** Receiving. */ RECEIVING, + /** Paused. */ PAUSED, + /** Error. */ ERROR + } + + /** */ + @SuppressWarnings("PublicInnerClass") public enum SenderGroup { + /** All. 
*/ ALL, + /** Default. */ DEFAULT, + /** None. */ NONE; + + /** */ + public static SenderGroup parse(String text) { + try { + return valueOf(text.toUpperCase(Locale.ENGLISH)); + } + catch (IllegalArgumentException e) { + return null; + } + } + } + + /** */ + @SuppressWarnings("PublicInnerClass") public enum Action { + /** Stop. */ STOP("stop"), + /** Start. */ START("start"), + /** Full state transfer. */ FULL_STATE_TRANSFER("full-state-transfer"); + + /** String representation. */ + private final String text; + + /** */ + Action(String text) { + this.text = text; + } + + /** */ + public String text() { + return text; + } + + /** */ + public static Action parse(String text) { + for (Action action : values()) { + if (action.text.equalsIgnoreCase(text)) + return action; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return text; + } + } + + /** */ + @SuppressWarnings("PublicInnerClass") + public static class DrCacheArguments implements DrAbstractRemoteSubCommand.Arguments { + /** Regex. */ + private final String regex; + /** Pattern. */ + private final Pattern pattern; + /** Config. */ + private final boolean config; + /** Metrics. */ + private final boolean metrics; + /** Filter. */ + private final CacheFilter filter; + /** Sender group. */ + private final SenderGroup senderGroup; + /** Sender group name. */ + private final String senderGroupName; + /** Action. */ + private final Action action; + /** Remote data center id. */ + private final byte remoteDataCenterId; + /** Cache names map. */ + private Map cacheNamesMap; + /** Action coordinator. 
*/ + private UUID actionCoordinator; + + /** */ + public DrCacheArguments( + String regex, + Pattern pattern, + boolean config, + boolean metrics, + CacheFilter filter, + SenderGroup senderGroup, + String senderGroupName, + Action action, + byte remoteDataCenterId + ) { + this.regex = regex; + this.pattern = pattern; + this.config = config; + this.metrics = metrics; + this.filter = filter; + this.senderGroup = senderGroup; + this.senderGroupName = senderGroupName; + this.action = action; + this.remoteDataCenterId = remoteDataCenterId; + } + + /** */ + public UUID getActionCoordinator() { + return actionCoordinator; + } + + /** {@inheritDoc} */ + @Override public VisorDrCacheTaskArgs toVisorArgs() { + return new VisorDrCacheTaskArgs( + regex, + config, + metrics, + filter.ordinal(), + senderGroup == null ? VisorDrCacheTaskArgs.SENDER_GROUP_NAMED : senderGroup.ordinal(), + senderGroupName, + action == null ? VisorDrCacheTaskArgs.ACTION_NONE : action.ordinal(), + remoteDataCenterId, + cacheNamesMap, + actionCoordinator + ); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrFullStateTransferCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrFullStateTransferCommand.java new file mode 100644 index 0000000000000..d038ba894cde5 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrFullStateTransferCommand.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.commandline.dr.subcommands;
+
+import java.util.logging.Logger;
+import java.util.regex.Pattern;
+import org.apache.ignite.internal.client.GridClient;
+import org.apache.ignite.internal.client.GridClientConfiguration;
+import org.apache.ignite.internal.commandline.CommandArgIterator;
+import org.apache.ignite.internal.commandline.dr.DrSubCommandsList;
+import org.apache.ignite.internal.visor.dr.VisorDrCacheTaskArgs;
+import org.apache.ignite.internal.visor.dr.VisorDrCacheTaskResult;
+
+import static org.apache.ignite.internal.commandline.CommandHandler.DELIM;
+
+/** */
+public class DrFullStateTransferCommand extends
+    DrAbstractRemoteSubCommand<VisorDrCacheTaskArgs, VisorDrCacheTaskResult, DrCacheCommand.DrCacheArguments>
+{
+    /** {@inheritDoc} */
+    @Override protected String visorTaskName() {
+        // Not used: execute0() delegates to DrCacheCommand instead of the generic visor-task path.
+        throw new UnsupportedOperationException("visorTaskName");
+    }
+
+    /** {@inheritDoc} */
+    @Override public DrCacheCommand.DrCacheArguments parseArguments0(CommandArgIterator argIter) {
+        // Full state transfer for every sending cache: match-all regex, SENDING filter, no extra args.
+        return new DrCacheCommand.DrCacheArguments(
+            ".*",
+            Pattern.compile(".*"),
+            false,
+            false,
+            DrCacheCommand.CacheFilter.SENDING,
+            DrCacheCommand.SenderGroup.ALL,
+            null,
+            DrCacheCommand.Action.FULL_STATE_TRANSFER,
+            (byte)0
+        );
+    }
+
+    /** {@inheritDoc} */
+    @Override public String confirmationPrompt() {
+        return "Warning: this command will execute full state transfer for all caches. This might take a long time.";
+    }
+
+    /** {@inheritDoc} */
+    @Override
+    protected VisorDrCacheTaskResult execute0(GridClientConfiguration clientCfg, GridClient client) throws Exception {
+        return DrCacheCommand.execute0(client, arg());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void printResult(VisorDrCacheTaskResult res, Logger log) {
+        printUnrecognizedNodesMessage(log, false);
+
+        log.info("Data Center ID: " + res.getDataCenterId());
+
+        log.info(DELIM);
+
+        if (res.getDataCenterId() == 0) {
+            log.info("Data Replication state: is not configured.");
+
+            return;
+        }
+
+        if (res.getCacheNames().isEmpty())
+            log.info("No suitable caches found for transfer.");
+        else if (res.getResultMessages().isEmpty())
+            log.info("Full state transfer command completed successfully for caches " + res.getCacheNames());
+        else {
+            for (String msg : res.getResultMessages())
+                log.info(msg);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public String name() {
+        return DrSubCommandsList.FULL_STATE_TRANSFER.text();
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrHelpCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrHelpCommand.java
new file mode 100644
index 0000000000000..94a66fe7a5331
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrHelpCommand.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.dr.subcommands; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.dr.DrCommand; +import org.apache.ignite.internal.commandline.dr.DrSubCommandsList; + +/** */ +public class DrHelpCommand implements Command { + /** {@inheritDoc} */ + @Override public void printUsage(Logger log) { + throw new UnsupportedOperationException("printUsage"); + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger log) throws Exception { + new DrCommand().printUsage(log); + + return null; + } + + /** {@inheritDoc} */ + @Override public Void arg() { + return null; + } + + /** {@inheritDoc} */ + @Override public String name() { + return DrSubCommandsList.HELP.text(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrNodeCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrNodeCommand.java new file mode 100644 index 0000000000000..edf8992f3167b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrNodeCommand.java @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.dr.subcommands; + +import java.util.Collection; +import java.util.List; +import java.util.Locale; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientCompute; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientDisconnectedException; +import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.dr.DrSubCommandsList; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.visor.VisorTaskArgument; +import org.apache.ignite.internal.visor.dr.VisorDrNodeTaskArgs; +import org.apache.ignite.internal.visor.dr.VisorDrNodeTaskResult; + +import static org.apache.ignite.internal.commandline.CommandHandler.DELIM; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; + +/** */ +public class DrNodeCommand + extends DrAbstractRemoteSubCommand +{ + /** Config parameter. */ + public static final String CONFIG_PARAM = "--config"; + /** Metrics parameter. */ + public static final String METRICS_PARAM = "--metrics"; + /** Clear store parameter. 
*/ + public static final String CLEAR_STORE_PARAM = "--clear-store"; + /** Node Id. */ + private UUID nodeId; + + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return "org.gridgain.grid.internal.visor.dr.console.VisorDrNodeTask"; + } + + /** {@inheritDoc} */ + @Override public DrNodeArguments parseArguments0(CommandArgIterator argIter) { + String nodeIdStr = argIter.nextArg("nodeId value expected."); + + try { + nodeId = UUID.fromString(nodeIdStr); + } + catch (IllegalArgumentException e) { + throw new IllegalArgumentException("nodeId must be UUID.", e); + } + + boolean config = false; + boolean metrics = false; + boolean clearStore = false; + + String nextArg; + + //noinspection LabeledStatement + args_loop: while ((nextArg = argIter.peekNextArg()) != null) { + switch (nextArg.toLowerCase(Locale.ENGLISH)) { + case CONFIG_PARAM: + config = true; + + break; + + case METRICS_PARAM: + metrics = true; + + break; + + case CLEAR_STORE_PARAM: + clearStore = true; + + break; + + default: + //noinspection BreakStatementWithLabel + break args_loop; + } + + // Skip peeked argument. 
+ argIter.nextArg(null); + } + + return new DrNodeArguments(config, metrics, clearStore); + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + if (arg().clearStore) + return "Warning: this command will clear DR store."; + + return null; + } + + /** {@inheritDoc} */ + @Override protected VisorDrNodeTaskResult execute0( + GridClientConfiguration clientCfg, + GridClient client + ) throws Exception { + GridClientCompute compute = client.compute(); + + Collection connectableNodes = compute.nodes(GridClientNode::connectable); + + if (F.isEmpty(connectableNodes)) + throw new GridClientDisconnectedException("Connectable nodes not found", null); + + GridClientNode node = connectableNodes.stream() + .filter(n -> nodeId.equals(n.nodeId())) + .findAny().orElse(null); + + if (node == null) + node = compute.balancer().balancedNode(connectableNodes); + + return compute.projection(node).execute( + visorTaskName(), + new VisorTaskArgument<>(nodeId, arg().toVisorArgs(), false) + ); + } + + /** {@inheritDoc} */ + @Override protected void printResult(VisorDrNodeTaskResult res, Logger log) { + log.info("Data Center ID: " + res.getDataCenterId()); + + log.info("Node addresses: " + res.getAddresses()); + + log.info("Mode=" + res.getMode() + (res.getDataNode() ? 
", Baseline node" : "")); + + log.info(DELIM); + + if (res.getDataCenterId() == 0) { + log.info("Data Replication state: is not configured."); + + return; + } + + List>> sndDataCenters = res.getSenderDataCenters(); + if (sndDataCenters != null && !sndDataCenters.isEmpty()) { + log.info("Node is configured to send data to:"); + + for (T2> dataCenter : sndDataCenters) + log.info(String.format(INDENT + "DataCenterId=%d, Addresses=%s", dataCenter.toArray())); + } + + String receiverAddr = res.getReceiverAddress(); + if (receiverAddr != null) { + log.info("Node is configured to receive data:"); + + log.info(INDENT + "Address=" + receiverAddr); + } + + if (!res.getResponseMsgs().isEmpty()) { + log.info(DELIM); + + for (String responseMsg : res.getResponseMsgs()) + log.info(responseMsg); + } + + printList(log, res.getCommonConfig(), "Common configuration:"); + printList(log, res.getSenderConfig(), "Sender configuration:"); + printList(log, res.getReceiverConfig(), "Receiver configuration:"); + + printList(log, res.getSenderMetrics(), "Sender metrics:"); + printList(log, res.getReceiverMetrics(), "Receiver metrics:"); + } + + /** */ + private static void printList(Logger log, List> cfg, String s) { + if (cfg != null && !cfg.isEmpty()) { + log.info(s); + + for (T2 t2 : cfg) + log.info(String.format(INDENT + "%s=%s", t2.toArray())); + } + } + + /** {@inheritDoc} */ + @Override public String name() { + return DrSubCommandsList.NODE.text(); + } + + /** */ + @SuppressWarnings("PublicInnerClass") + public static class DrNodeArguments implements DrAbstractRemoteSubCommand.Arguments { + /** Config. */ + private final boolean config; + /** Metrics. */ + private final boolean metrics; + /** Clear store. */ + private final boolean clearStore; + + /** + * @param config Config. + * @param metrics Metrics. + * @param clearStore Clear store. 
+ */ + public DrNodeArguments(boolean config, boolean metrics, boolean clearStore) { + this.config = config; + this.metrics = metrics; + this.clearStore = clearStore; + } + + /** {@inheritDoc} */ + @Override public VisorDrNodeTaskArgs toVisorArgs() { + return new VisorDrNodeTaskArgs(config, metrics, clearStore); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrPauseCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrPauseCommand.java new file mode 100644 index 0000000000000..4ac1098dd829b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrPauseCommand.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.dr.subcommands; + +import java.util.logging.Logger; +import java.util.regex.Pattern; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.dr.DrSubCommandsList; +import org.apache.ignite.internal.visor.dr.VisorDrCacheTaskArgs; +import org.apache.ignite.internal.visor.dr.VisorDrCacheTaskResult; + +import static org.apache.ignite.internal.commandline.CommandHandler.DELIM; + +/** */ +public class DrPauseCommand extends + DrAbstractRemoteSubCommand +{ + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + throw new UnsupportedOperationException("visorTaskName"); + } + + /** {@inheritDoc} */ + @Override public DrCacheCommand.DrCacheArguments parseArguments0(CommandArgIterator argIter) { + return new DrCacheCommand.DrCacheArguments( + ".*", + Pattern.compile(".*"), + false, + false, + DrCacheCommand.CacheFilter.ALL, + DrCacheCommand.SenderGroup.ALL, + null, + DrCacheCommand.Action.STOP, + argIter.nextByteArg("remoteDataCenterId") + ); + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + return "Warning: this command will pause data center replication for all caches."; + } + + /** {@inheritDoc} */ + @Override + protected VisorDrCacheTaskResult execute0(GridClientConfiguration clientCfg, GridClient client) throws Exception { + return DrCacheCommand.execute0(client, arg()); + } + + /** {@inheritDoc} */ + @Override protected void printResult(VisorDrCacheTaskResult res, Logger log) { + printUnrecognizedNodesMessage(log, false); + + log.info("Data Center ID: " + res.getDataCenterId()); + + log.info(DELIM); + + if (res.getDataCenterId() == 0) { + log.info("Data Replication state: is not configured."); + + return; + } + + if (arg().getActionCoordinator() == null) + log.info("Cannot find sender hub node 
to execute action."); + + for (String msg : res.getResultMessages()) + log.info(msg); + } + + /** {@inheritDoc} */ + @Override public String name() { + return DrSubCommandsList.PAUSE.text(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrResumeCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrResumeCommand.java new file mode 100644 index 0000000000000..878022344bf82 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrResumeCommand.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.dr.subcommands; + +import java.util.logging.Logger; +import java.util.regex.Pattern; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.dr.DrSubCommandsList; +import org.apache.ignite.internal.visor.dr.VisorDrCacheTaskArgs; +import org.apache.ignite.internal.visor.dr.VisorDrCacheTaskResult; + +import static org.apache.ignite.internal.commandline.CommandHandler.DELIM; + +/** */ +public class DrResumeCommand extends + DrAbstractRemoteSubCommand +{ + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + throw new UnsupportedOperationException("visorTaskName"); + } + + /** {@inheritDoc} */ + @Override public DrCacheCommand.DrCacheArguments parseArguments0(CommandArgIterator argIter) { + return new DrCacheCommand.DrCacheArguments( + ".*", + Pattern.compile(".*"), + false, + false, + DrCacheCommand.CacheFilter.ALL, + DrCacheCommand.SenderGroup.ALL, + null, + DrCacheCommand.Action.START, + argIter.nextByteArg("remoteDataCenterId") + ); + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + return "Warning: this command will resume data center replication for all caches."; + } + + /** {@inheritDoc} */ + @Override + protected VisorDrCacheTaskResult execute0(GridClientConfiguration clientCfg, GridClient client) throws Exception { + return DrCacheCommand.execute0(client, arg()); + } + + /** {@inheritDoc} */ + @Override protected void printResult(VisorDrCacheTaskResult res, Logger log) { + printUnrecognizedNodesMessage(log, false); + + log.info("Data Center ID: " + res.getDataCenterId()); + + log.info(DELIM); + + if (res.getDataCenterId() == 0) { + log.info("Data Replication state: is not configured."); + + return; + } + + if (arg().getActionCoordinator() == null) + log.info("Cannot find sender hub 
node to execute action."); + + for (String msg : res.getResultMessages()) + log.info(msg); + } + + /** {@inheritDoc} */ + @Override public String name() { + return DrSubCommandsList.RESUME.text(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrStateCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrStateCommand.java new file mode 100644 index 0000000000000..cda589a6a1f3f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrStateCommand.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.dr.subcommands; + +import java.util.logging.Logger; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.dr.DrSubCommandsList; +import org.apache.ignite.internal.visor.dr.VisorDrStateTaskArgs; +import org.apache.ignite.internal.visor.dr.VisorDrStateTaskResult; + +import static org.apache.ignite.internal.commandline.CommandHandler.DELIM; + +/** */ +public class DrStateCommand extends + DrAbstractRemoteSubCommand +{ + /** Verbose parameter. 
*/ + public static final String VERBOSE_PARAM = "--verbose"; + + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return "org.gridgain.grid.internal.visor.dr.console.VisorDrStateTask"; + } + + /** {@inheritDoc} */ + @Override protected DrStateArguments parseArguments0(CommandArgIterator argIter) { + boolean verbose = false; + + if (VERBOSE_PARAM.equalsIgnoreCase(argIter.peekNextArg())) { + argIter.nextArg("--verbose is expected"); + + verbose = true; + } + + return new DrStateArguments(verbose); + } + + /** {@inheritDoc} */ + @Override protected void printResult(VisorDrStateTaskResult res, Logger log) { + printUnrecognizedNodesMessage(log, false); + + log.info("Data Center ID: " + res.getDataCenterId()); + + log.info(DELIM); + + if (res.getDataCenterId() == 0) { + log.info("Data Replication state: is not configured."); + + return; + } + + for (String msg : res.getResultMessages()) + log.info(msg); + } + + /** {@inheritDoc} */ + @Override public String name() { + return DrSubCommandsList.STATE.text(); + } + + /** */ + @SuppressWarnings("PublicInnerClass") + public static class DrStateArguments implements DrAbstractRemoteSubCommand.Arguments { + /** */ + public final boolean verbose; + + /** */ + public DrStateArguments(boolean verbose) { + this.verbose = verbose; + } + + /** {@inheritDoc} */ + @Override public VisorDrStateTaskArgs toVisorArgs() { + return new VisorDrStateTaskArgs(verbose); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrTopologyCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrTopologyCommand.java new file mode 100644 index 0000000000000..cb4b36b67e37a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/dr/subcommands/DrTopologyCommand.java @@ -0,0 +1,217 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.dr.subcommands; + +import java.util.List; +import java.util.Locale; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.dr.DrSubCommandsList; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.T3; +import org.apache.ignite.internal.visor.dr.VisorDrTopologyTaskArgs; +import org.apache.ignite.internal.visor.dr.VisorDrTopologyTaskResult; + +import static org.apache.ignite.internal.commandline.CommandHandler.DELIM; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.visor.dr.VisorDrTopologyTaskArgs.DATA_NODES_FLAG; +import static org.apache.ignite.internal.visor.dr.VisorDrTopologyTaskArgs.OTHER_NODES_FLAG; +import static org.apache.ignite.internal.visor.dr.VisorDrTopologyTaskArgs.RECEIVER_HUBS_FLAG; +import static org.apache.ignite.internal.visor.dr.VisorDrTopologyTaskArgs.SENDER_HUBS_FLAG; + +/** */ +public class DrTopologyCommand extends + DrAbstractRemoteSubCommand +{ + /** Sender hubs parameter. */ + public static final String SENDER_HUBS_PARAM = "--sender-hubs"; + /** Receiver hubs parameter. 
*/ + public static final String RECEIVER_HUBS_PARAM = "--receiver-hubs"; + /** Data nodes parameter. */ + public static final String DATA_NODES_PARAM = "--data-nodes"; + /** Other nodes parameter. */ + public static final String OTHER_NODES_PARAM = "--other-nodes"; + + /** {@inheritDoc} */ + @Override protected String visorTaskName() { + return "org.gridgain.grid.internal.visor.dr.console.VisorDrTopologyTask"; + } + + /** {@inheritDoc} */ + @Override protected DrTopologyArguments parseArguments0(CommandArgIterator argIter) { + boolean senderHubs = false; + boolean receiverHubs = false; + boolean dataNodes = false; + boolean otherNodes = false; + + String nextArg; + + //noinspection LabeledStatement + args_loop: while ((nextArg = argIter.peekNextArg()) != null) { + switch (nextArg.toLowerCase(Locale.ENGLISH)) { + case SENDER_HUBS_PARAM: + senderHubs = true; + break; + + case RECEIVER_HUBS_PARAM: + receiverHubs = true; + break; + + case DATA_NODES_PARAM: + dataNodes = true; + break; + + case OTHER_NODES_PARAM: + otherNodes = true; + break; + + default: + //noinspection BreakStatementWithLabel + break args_loop; + } + + argIter.nextArg(null); // Skip peeked argument. 
+ } + + if (!senderHubs && !receiverHubs && !dataNodes && !otherNodes) + senderHubs = receiverHubs = dataNodes = otherNodes = true; + + return new DrTopologyArguments(senderHubs, receiverHubs, dataNodes, otherNodes); + } + + /** {@inheritDoc} */ + @Override protected void printResult(VisorDrTopologyTaskResult res, Logger log) { + log.info("Data Center ID: " + res.getDataCenterId()); + + log.info(String.format( + "Topology: %d server(s), %d client(s)", + res.getServerNodesCount(), + res.getClientNodesCount() + )); + + if (res.getDataCenterId() == 0) { + log.info("Data Replication state: is not configured."); + + return; + } + + if (arg().dataNodes) { + List> dataNodes = res.getDataNodes(); + + if (dataNodes.isEmpty()) + log.info("Data nodes: not found"); + else + log.info("Data nodes: " + dataNodes.size()); + + for (T2 dataNode : dataNodes) + log.info(String.format(INDENT + "nodeId=%s, Address=%s", dataNode.toArray())); + + log.info(DELIM); + } + + if (arg().senderHubs) { + List> senderHubs = res.getSenderHubs(); + + if (senderHubs.isEmpty()) + log.info("Sender hubs: not found"); + else + log.info("Sender hubs: " + senderHubs.size()); + + for (T3 senderHub : senderHubs) + log.info(String.format(INDENT + "nodeId=%s, Address=%s, Mode=%s", senderHub.toArray())); + + log.info(DELIM); + } + + if (arg().receiverHubs) { + List> receiverHubs = res.getReceiverHubs(); + + if (receiverHubs.isEmpty()) + log.info("Receiver hubs: not found"); + else + log.info("Receiver hubs: " + receiverHubs.size()); + + for (T3 receiverHub : receiverHubs) + log.info(String.format(INDENT + "nodeId=%s, Address=%s, Mode=%s", receiverHub.toArray())); + + log.info(DELIM); + } + + if (arg().otherNodes) { + List> otherNodes = res.getOtherNodes(); + + if (otherNodes.isEmpty()) + log.info("Other nodes: not found"); + else + log.info("Other nodes: " + otherNodes.size()); + + for (T3 otherNode : otherNodes) + log.info(String.format(INDENT + "nodeId=%s, Address=%s, Mode=%s", otherNode.toArray())); + + 
log.info(DELIM); + } + + printUnrecognizedNodesMessage(log, true); + } + + /** {@inheritDoc} */ + @Override public String name() { + return DrSubCommandsList.TOPOLOGY.text(); + } + + /** */ + @SuppressWarnings("PublicInnerClass") + public static class DrTopologyArguments implements DrAbstractRemoteSubCommand.Arguments { + /** */ + private final boolean senderHubs; + /** */ + private final boolean receiverHubs; + /** */ + private final boolean dataNodes; + /** */ + private final boolean otherNodes; + + /** */ + public DrTopologyArguments(boolean senderHubs, boolean receiverHubs, boolean dataNodes, boolean otherNodes) { + this.senderHubs = senderHubs; + this.receiverHubs = receiverHubs; + this.dataNodes = dataNodes; + this.otherNodes = otherNodes; + } + + /** {@inheritDoc} */ + @Override public VisorDrTopologyTaskArgs toVisorArgs() { + int flags = 0; + + if (senderHubs) + flags |= SENDER_HUBS_FLAG; + + if (receiverHubs) + flags |= RECEIVER_HUBS_FLAG; + + if (dataNodes) + flags |= DATA_NODES_FLAG; + + if (otherNodes) + flags |= OTHER_NODES_FLAG; + + return new VisorDrTopologyTaskArgs(flags); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java index 3441742dfabd6..3e0f6fb2ece94 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java @@ -117,7 +117,7 @@ public byte getProtocolVersion() { @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { int hdr = in.readInt(); - if ((hdr & MAGIC) != MAGIC) + if ((hdr & ~0xFF) != MAGIC) throw new IOException("Unexpected IgniteDataTransferObject header " + "[actual=" + Integer.toHexString(hdr) + ", expected=" + Integer.toHexString(MAGIC) + "]"); diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java index f4a57bf304ad5..d35bfb86c84f1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java @@ -1,11 +1,12 @@ /* - * Copyright 2019 GridGain Systems, Inc. and Contributors. + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * Licensed under the GridGain Community Edition License (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.gridgain.com/products/software/community-edition/gridgain-community-edition-license + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcBatchUpdateTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcBatchUpdateTask.java index 774f9229babfc..6ea1bd2e494bf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcBatchUpdateTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcBatchUpdateTask.java @@ -30,7 +30,9 @@ import org.apache.ignite.internal.processors.cache.QueryCursorImpl; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx; +import org.apache.ignite.internal.processors.query.IgniteSQLException; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.lang.IgniteCallable; import org.apache.ignite.resources.IgniteInstanceResource; @@ -147,7 +149,12 @@ public JdbcBatchUpdateTask(Ignite ignite, String cacheName, String schemaName, S } } catch (Exception ex) { - throw new BatchUpdateException(Arrays.copyOf(updCntrs, idx), ex); + IgniteSQLException sqlEx = X.cause(ex, IgniteSQLException.class); + + if (sqlEx != null) + throw new BatchUpdateException(sqlEx.getMessage(), sqlEx.sqlState(), Arrays.copyOf(updCntrs, idx), ex); + else + throw new BatchUpdateException(Arrays.copyOf(updCntrs, idx), ex); } return updCntrs; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java index 757e17d102739..0f4c780223e5e 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java @@ -94,6 +94,7 @@ public IgniteMBeansManager(IgniteKernal kernal) { * @param idxExecSvc Indexing executor service * @param callbackExecSvc Callback executor service * @param qryExecSvc Query executor service + * @param rebalanceExecSvc Rebalance executor service. * @param schemaExecSvc Schema executor service * @param customExecSvcs Custom named executors * @throws IgniteCheckedException if fails to register any of the MBeans @@ -114,6 +115,7 @@ public void registerAllMBeans( IgniteStripedThreadPoolExecutor callbackExecSvc, ExecutorService qryExecSvc, ExecutorService schemaExecSvc, + ExecutorService rebalanceExecSvc, @Nullable final Map customExecSvcs, WorkersRegistry workersRegistry ) throws IgniteCheckedException { @@ -158,6 +160,7 @@ public void registerAllMBeans( registerExecutorMBean("GridCallbackExecutor", callbackExecSvc); registerExecutorMBean("GridQueryExecutor", qryExecSvc); registerExecutorMBean("GridSchemaExecutor", schemaExecSvc); + registerExecutorMBean("GridRebalanceExecutor", rebalanceExecSvc); if (idxExecSvc != null) registerExecutorMBean("GridIndexingExecutor", idxExecSvc); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java index 267addb887169..2e9a52bb87963 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java @@ -59,6 +59,7 @@ import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException; import org.apache.ignite.internal.IgniteComponentType; import org.apache.ignite.internal.IgniteDeploymentCheckedException; +import 
org.apache.ignite.internal.IgniteFeatures; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.direct.DirectMessageReader; @@ -69,6 +70,8 @@ import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.platform.message.PlatformMessageFilter; import org.apache.ignite.internal.processors.pool.PoolProcessor; +import org.apache.ignite.internal.processors.security.OperationSecurityContext; +import org.apache.ignite.internal.processors.security.SecurityContext; import org.apache.ignite.internal.processors.timeout.GridTimeoutObject; import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashSet; import org.apache.ignite.internal.util.StripedCompositeReadWriteLock; @@ -78,6 +81,7 @@ import org.apache.ignite.internal.util.lang.IgnitePair; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.S; @@ -96,6 +100,7 @@ import org.apache.ignite.spi.communication.CommunicationListener; import org.apache.ignite.spi.communication.CommunicationSpi; import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; @@ -103,6 +108,7 @@ import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.internal.GridTopic.TOPIC_COMM_USER; import static org.apache.ignite.internal.GridTopic.TOPIC_IO_TEST; +import static org.apache.ignite.internal.IgniteFeatures.IGNITE_SECURITY_PROCESSOR; import static 
org.apache.ignite.internal.managers.communication.GridIoPolicy.AFFINITY_POOL; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.DATA_STREAMER_POOL; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.IDX_POOL; @@ -111,6 +117,7 @@ import static org.apache.ignite.internal.managers.communication.GridIoPolicy.P2P_POOL; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.PUBLIC_POOL; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.QUERY_POOL; +import static org.apache.ignite.internal.managers.communication.GridIoPolicy.REBALANCE_POOL; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SCHEMA_POOL; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SERVICE_POOL; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SYSTEM_POOL; @@ -203,11 +210,7 @@ public class GridIoManager extends GridManagerAdapter {}; /** * @param ctx Grid kernal context. @@ -914,7 +917,7 @@ private void format(StringBuilder b, Collection> pairs, SimpleD } /** - * @param nodeId Node ID. + * @param nodeId Sender node ID. * @param msg Message bytes. * @param msgC Closure to call when message processing finished. */ @@ -974,7 +977,11 @@ private void onMessage0(UUID nodeId, GridIoMessage msg, IgniteRunnable msgC) { // If message is P2P, then process in P2P service. // This is done to avoid extra waiting and potential deadlocks // as thread pool may not have any available threads to give. - byte plc = msg.policy(); + byte plc = msg.message().policy(); + + // If override policy is not defined use sender defined policy. 
+ if (plc == GridIoPolicy.UNDEFINED) + plc = msg.policy(); switch (plc) { case P2P_POOL: { @@ -994,6 +1001,7 @@ private void onMessage0(UUID nodeId, GridIoMessage msg, IgniteRunnable msgC) { case QUERY_POOL: case SCHEMA_POOL: case SERVICE_POOL: + case REBALANCE_POOL: { if (msg.isOrdered()) processOrderedMessage(nodeId, msg, plc, msgC); @@ -1048,7 +1056,7 @@ private void processP2PMessage( assert obj != null; - invokeListener(msg.policy(), lsnr, nodeId, obj); + invokeListener(msg.policy(), lsnr, nodeId, obj, secSubj(msg)); } finally { threadProcessingMessage(false, null); @@ -1090,7 +1098,11 @@ private void processRegularMessage( processRegularMessage0(msg, nodeId); } - finally { + catch (Throwable e) { + log.error("An error occurred processing the message [msg=" + msg + ", nodeId=" + nodeId + "].", e); + + throw e; + } finally { threadProcessingMessage(false, null); msgC.run(); @@ -1181,7 +1193,7 @@ private void processRegularMessage0(GridIoMessage msg, UUID nodeId) { assert obj != null; - invokeListener(msg.policy(), lsnr, nodeId, obj); + invokeListener(msg.policy(), lsnr, nodeId, obj, secSubj(msg)); } /** @@ -1543,8 +1555,9 @@ private void unwindMessageSet(GridCommunicationMessageSet msgSet, GridMessageLis * @param lsnr Listener. * @param nodeId Node ID. * @param msg Message. + * @param secCtxMsg Security subject that will be used to open a security session. */ - private void invokeListener(Byte plc, GridMessageListener lsnr, UUID nodeId, Object msg) { + private void invokeListener(Byte plc, GridMessageListener lsnr, UUID nodeId, Object msg, @Nullable T2 secCtxMsg) { Byte oldPlc = CUR_PLC.get(); boolean change = !F.eq(oldPlc, plc); @@ -1552,7 +1565,10 @@ private void invokeListener(Byte plc, GridMessageListener lsnr, UUID nodeId, Obj if (change) CUR_PLC.set(plc); - try { + SecurityContext secCtx = secCtxMsg != null ? secCtxMsg.get2() : null; + UUID newSecSubjId = secCtxMsg != null && secCtxMsg.get1() != null ? 
secCtxMsg.get1() : nodeId; + + try (OperationSecurityContext s = secCtx != null ? ctx.security().withContext(secCtx) : ctx.security().withContext(newSecSubjId)) { lsnr.onMessage(nodeId, msg, plc); } finally { @@ -1614,7 +1630,7 @@ private void send( assert !async || msg instanceof GridIoUserMessage : msg; // Async execution was added only for IgniteMessaging. assert topicOrd >= 0 || !(topic instanceof GridTopic) : msg; - GridIoMessage ioMsg = new GridIoMessage(plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + GridIoMessage ioMsg = createGridIoMessage(topic, topicOrd, msg, plc, ordered, timeout, skipOnTimeout); if (locNodeId.equals(node.id())) { assert plc != P2P_POOL; @@ -1656,6 +1672,37 @@ else if (async) } } + /** + * @return One of two message wrappers. The first is {@link GridIoMessage}, the second is secured version {@link + * GridIoSecurityAwareMessage}. + */ + private @NotNull GridIoMessage createGridIoMessage( + Object topic, + int topicOrd, + Message msg, + byte plc, + boolean ordered, + long timeout, + boolean skipOnTimeout) throws IgniteCheckedException { + if (ctx.security().enabled() && + IgniteFeatures.allNodesSupports(ctx.discovery().allNodes(), IGNITE_SECURITY_PROCESSOR)) { + UUID secSubjId = null; + + SecurityContext secCtx = ctx.security().securityContext(); + UUID curSecSubjId = secCtx.subject().id(); + + if (!locNodeId.equals(curSecSubjId)) + secSubjId = curSecSubjId; + + //Network optimization + byte[] secSubject = secSubjId != null && ctx.discovery().node(secSubjId) == null ? U.marshal(marsh, secCtx) : null; + + return new GridIoSecurityAwareMessage(secSubjId, secSubject, plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + } + + return new GridIoMessage(plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + } + /** * @param nodeId Id of destination node. * @param topic Topic to send the message to. @@ -1964,11 +2011,24 @@ else if (loc) { } /** + * Subscribe at messages from a topic. 
+ * * @param topic Topic to subscribe to. * @param p Message predicate. */ - @SuppressWarnings("unchecked") - public void addUserMessageListener(@Nullable final Object topic, @Nullable final IgniteBiPredicate p) { + public void addUserMessageListener(final @Nullable Object topic, final @Nullable IgniteBiPredicate p) { + addUserMessageListener(topic, p, ctx.localNodeId()); + } + + /** + * @param topic Topic to subscribe to. + * @param p Message predicate. + */ + public void addUserMessageListener( + final @Nullable Object topic, + final @Nullable IgniteBiPredicate p, + final UUID nodeId + ) { if (p != null) { try { if (p instanceof PlatformMessageFilter) @@ -1977,7 +2037,7 @@ public void addUserMessageListener(@Nullable final Object topic, @Nullable final ctx.resource().injectGeneric(p); addMessageListener(TOPIC_COMM_USER, - new GridUserMessageListener(topic, (IgniteBiPredicate)p)); + new GridUserMessageListener(topic, (IgniteBiPredicate)p, nodeId)); } catch (IgniteCheckedException e) { throw new IgniteException(e); @@ -1991,13 +2051,8 @@ public void addUserMessageListener(@Nullable final Object topic, @Nullable final */ @SuppressWarnings("unchecked") public void removeUserMessageListener(@Nullable Object topic, IgniteBiPredicate p) { - try { - removeMessageListener(TOPIC_COMM_USER, - new GridUserMessageListener(topic, (IgniteBiPredicate)p)); - } - catch (IgniteCheckedException e) { - throw new IgniteException(e); - } + removeMessageListener(TOPIC_COMM_USER, + new GridUserMessageListener(topic, (IgniteBiPredicate)p)); } /** @@ -2416,15 +2471,27 @@ private class GridUserMessageListener implements GridMessageListener { /** User message topic. */ private final Object topic; + /** Initial node id. */ + private final UUID initNodeId; + /** * @param topic User topic. * @param predLsnr Predicate listener. - * @throws IgniteCheckedException If failed to inject resources to predicates. + * @param initNodeId Node id that registered given listener. 
*/ - GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate predLsnr) - throws IgniteCheckedException { + GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate predLsnr, + @Nullable UUID initNodeId) { this.topic = topic; this.predLsnr = predLsnr; + this.initNodeId = initNodeId; + } + + /** + * @param topic User topic. + * @param predLsnr Predicate listener. + */ + GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate predLsnr) { + this(topic, predLsnr, null); } /** {@inheritDoc} */ @@ -2521,8 +2588,10 @@ private class GridUserMessageListener implements GridMessageListener { if (msgBody != null) { if (predLsnr != null) { - if (!predLsnr.apply(nodeId, msgBody)) - removeMessageListener(TOPIC_COMM_USER, this); + try(OperationSecurityContext s = ctx.security().withContext(initNodeId)) { + if (!predLsnr.apply(nodeId, msgBody)) + removeMessageListener(TOPIC_COMM_USER, this); + } } } } @@ -2749,7 +2818,7 @@ void unwind(GridMessageListener lsnr) { for (GridTuple3 t = msgs.poll(); t != null; t = msgs.poll()) { try { - invokeListener(plc, lsnr, nodeId, t.get1().message()); + invokeListener(plc, lsnr, nodeId, t.get1().message(), secSubj(t.get1())); } finally { if (t.get3() != null) @@ -3145,4 +3214,31 @@ public long binLatencyMcs() { return latencyLimit / (1000 * (resLatency.length - 1)); } } + + /** + * @param msg Communication message. + * @return A pair that represents a security subject id and security context. The returned value can be {@code null} + * in case of security context is not enabled. + */ + private T2 secSubj(GridIoMessage msg) { + if (ctx.security().enabled() && msg instanceof GridIoSecurityAwareMessage) { + GridIoSecurityAwareMessage secMsg = (GridIoSecurityAwareMessage)msg; + + SecurityContext secCtx = null; + + try { + secCtx = secMsg.getSecCtx() != null ? 
U.unmarshal(marsh, secMsg.getSecCtx(), U.resolveClassLoader(ctx.config())) : null; + } + catch (IgniteCheckedException e) { + log.error("Security context unmarshaled with error.", e); + } + + return new T2<>( + secMsg.secSubjId(), + secCtx + ); + } + + return null; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessage.java index fe61aec834672..fc7cd13d33ea7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessage.java @@ -109,7 +109,7 @@ public GridIoMessage( /** * @return Policy. */ - byte policy() { + @Override public byte policy() { return plc; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java index 0b296acf194f2..3c3f2a0f59c8d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java @@ -958,6 +958,11 @@ public GridIoMessageFactory(MessageFactory[] ext) { break; + case GridIoSecurityAwareMessage.TYPE_CODE: + msg = new GridIoSecurityAwareMessage(); + + break; + // [-3..119] [124..129] [-23..-28] [-36..-55] - this // [120..123] - DR // [-4..-22, -30..-35] - SQL diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoPolicy.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoPolicy.java index 3f31f92088315..990dfabec667c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoPolicy.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoPolicy.java @@ -61,6 +61,9 @@ public class GridIoPolicy { /** Schema pool. */ public static final byte SCHEMA_POOL = 12; + /** Rebalance pool. */ + public static final byte REBALANCE_POOL = 13; + /** * Defines the range of reserved pools that are not available for plugins. * @param key The key. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoSecurityAwareMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoSecurityAwareMessage.java new file mode 100644 index 0000000000000..825644ddb0bf7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoSecurityAwareMessage.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.managers.communication; + +import java.io.Externalizable; +import java.nio.ByteBuffer; +import java.util.UUID; +import org.apache.ignite.plugin.extensions.communication.Message; +import org.apache.ignite.plugin.extensions.communication.MessageReader; +import org.apache.ignite.plugin.extensions.communication.MessageWriter; + +/** + * Represents a security communication message. + */ +public class GridIoSecurityAwareMessage extends GridIoMessage { + /** */ + private static final long serialVersionUID = 0L; + /** */ + public static final short TYPE_CODE = 174; + + /** Security subject id that will be used during message processing on an remote node. */ + private UUID secSubjId; + + /** Security context transmitting from node initiator of action. */ + private byte[] secCtx; + + /** + * No-op constructor to support {@link Externalizable} interface. + * This constructor is not meant to be used for other purposes. + */ + public GridIoSecurityAwareMessage() { + // No-op. + } + + /** + * @param secSubjId Security subject id. + * @param plc Policy. + * @param topic Communication topic. + * @param topicOrd Topic ordinal value. + * @param msg Message. + * @param ordered Message ordered flag. + * @param timeout Timeout. + * @param skipOnTimeout Whether message can be skipped on timeout. + */ + public GridIoSecurityAwareMessage( + UUID secSubjId, + byte[] secSubject, + byte plc, + Object topic, + int topicOrd, + Message msg, + boolean ordered, + long timeout, + boolean skipOnTimeout) { + super(plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + + this.secSubjId = secSubjId; + this.secCtx = secSubject; + } + + /** + * @return Security subject id. 
+ */ + UUID secSubjId() { + return secSubjId; + } + + /** + * @return Security context + */ + public byte[] getSecCtx() { + return secCtx; + } + + /** {@inheritDoc} */ + @Override public short directType() { + return TYPE_CODE; + } + + /** {@inheritDoc} */ + @Override public byte fieldsCount() { + return 9; + } + + /** {@inheritDoc} */ + @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { + writer.setBuffer(buf); + + if (!super.writeTo(buf, writer)) + return false; + + if (!writer.isHeaderWritten()) { + if (!writer.writeHeader(directType(), fieldsCount())) + return false; + + writer.onHeaderWritten(); + } + + switch (writer.state()) { + case 7: + if (!writer.writeByteArray("secCtx", secCtx)) + return false; + + writer.incrementState(); + + case 8: + if (!writer.writeUuid("secSubjId", secSubjId)) + return false; + + writer.incrementState(); + + } + + return true; + } + + /** {@inheritDoc} */ + @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) { + reader.setBuffer(buf); + + if (!reader.beforeMessageRead()) + return false; + + if (!super.readFrom(buf, reader)) + return false; + + switch (reader.state()) { + case 7: + secCtx = reader.readByteArray("secCtx"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 8: + secSubjId = reader.readUuid("secSubjId"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + } + + return reader.afterMessageRead(GridIoSecurityAwareMessage.class); + } +} \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java index 3450aa5195c4a..053cc6cad5566 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java @@ -30,8 +30,10 @@ 
import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicStampedReference; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.compute.ComputeTask; import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.internal.processors.task.GridInternal; @@ -42,6 +44,7 @@ import org.apache.ignite.internal.util.lang.GridTuple; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; @@ -457,7 +460,7 @@ public Class existingDeployedClass(String clsName) { if (cls == null) { try { - cls = Class.forName(clsName, true, clsLdr); + cls = U.forName(clsName, clsLdr); Class cur = clss.putIfAbsent(clsName, cls); @@ -478,7 +481,7 @@ public Class existingDeployedClass(String clsName) { return cls; else if (!a.equals(clsName)) { try { - cls = Class.forName(a, true, clsLdr); + cls = U.forName(a, clsLdr); } catch (ClassNotFoundException ignored0) { continue; @@ -501,6 +504,10 @@ else if (!a.equals(clsName)) { } } } + catch (IgniteException e) { + if (!X.hasCause(e, TimeoutException.class)) + throw e; + } } return cls; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java index ca9ce328b6b5c..531d6c254f0d3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java @@ 
-28,7 +28,9 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeoutException; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.DeploymentMode; @@ -37,6 +39,7 @@ import org.apache.ignite.internal.util.GridByteArrayList; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteUuid; @@ -445,6 +448,9 @@ private boolean isLocallyExcluded(String name) { // Catch Throwable to secure against any errors resulted from // corrupted class definitions or other user errors. catch (Exception e) { + if (X.hasCause(e, TimeoutException.class)) + throw e; + throw new ClassNotFoundException("Failed to load class due to unexpected error: " + name, e); } @@ -581,6 +587,8 @@ private GridByteArrayList sendClassRequest(String name, String path) throws Clas IgniteCheckedException err = null; + TimeoutException te = null; + for (UUID nodeId : nodeListCp) { if (nodeId.equals(ctx.discovery().localNode().id())) // Skip local node as it is already used as parent class loader. @@ -598,7 +606,14 @@ private GridByteArrayList sendClassRequest(String name, String path) throws Clas } try { - GridDeploymentResponse res = comm.sendResourceRequest(path, ldrId, node, endTime); + GridDeploymentResponse res = null; + + try { + res = comm.sendResourceRequest(path, ldrId, node, endTime); + } + catch (TimeoutException e) { + te = e; + } if (res == null) { String msg = "Failed to send class-loading request to node (is node alive?) 
[node=" + @@ -657,12 +672,28 @@ else if (log.isDebugEnabled()) } } + if (te != null) { + err.addSuppressed(te); + + throw new IgniteException(err); + } + throw new ClassNotFoundException("Failed to peer load class [class=" + name + ", nodeClsLdrs=" + nodeLdrMapCp + ", parentClsLoader=" + getParent() + ']', err); } /** {@inheritDoc} */ @Nullable @Override public InputStream getResourceAsStream(String name) { + try { + return getResourceAsStreamEx(name); + } + catch (TimeoutException ignore) { + return null; + } + } + + /** */ + @Nullable public InputStream getResourceAsStreamEx(String name) throws TimeoutException { assert !Thread.holdsLock(mux); if (byteMap != null && name.endsWith(".class")) { @@ -702,7 +733,7 @@ else if (log.isDebugEnabled()) * @param name Resource name. * @return InputStream for resource or {@code null} if resource could not be found. */ - @Nullable private InputStream sendResourceRequest(String name) { + @Nullable private InputStream sendResourceRequest(String name) throws TimeoutException { assert !Thread.holdsLock(mux); long endTime = computeEndTime(p2pTimeout); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java index e14c8dfafcb10..973c51ecfe02f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java @@ -22,6 +22,7 @@ import java.util.Collection; import java.util.HashSet; import java.util.UUID; +import java.util.concurrent.TimeoutException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cluster.ClusterNode; @@ -355,7 +356,7 @@ void sendUndeployRequest(String rsrcName, Collection rmtNodes) thro */ 
@SuppressWarnings({"SynchronizationOnLocalVariableOrMethodParameter"}) GridDeploymentResponse sendResourceRequest(final String rsrcName, IgniteUuid clsLdrId, - final ClusterNode dstNode, long threshold) throws IgniteCheckedException { + final ClusterNode dstNode, long threshold) throws IgniteCheckedException, TimeoutException { assert rsrcName != null; assert dstNode != null; assert clsLdrId != null; @@ -472,13 +473,21 @@ GridDeploymentResponse sendResourceRequest(final String rsrcName, IgniteUuid cls timeout = threshold - U.currentTimeMillis(); } + + if (timeout <= 0) + throw new TimeoutException(); } catch (InterruptedException e) { // Interrupt again to get it in the users code. Thread.currentThread().interrupt(); - throw new IgniteCheckedException("Got interrupted while waiting for response from node: " + - dstNode.id(), e); + TimeoutException te = new TimeoutException( + "Got interrupted while waiting for response from node: " + dstNode.id() + ); + + te.initCause(e); + + throw te; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java index b27cc4bd0275f..1d36571e7b1bd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java @@ -188,7 +188,7 @@ class GridDeploymentLocalStore extends GridDeploymentStoreAdapter { // Check that class can be loaded. String clsName = meta.className(); - Class cls = Class.forName(clsName != null ? clsName : alias, true, ldr); + Class cls = U.forName(clsName != null ? 
clsName : alias, ldr); spi.register(ldr, cls); @@ -227,6 +227,11 @@ class GridDeploymentLocalStore extends GridDeploymentStoreAdapter { return dep; } + /** {@inheritDoc} */ + @Override public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta) { + return deployment(meta.alias()); + } + /** * @param alias Class alias. * @return Deployment. @@ -446,7 +451,7 @@ private void recordDeployFailed(Class cls, ClassLoader clsLdr, boolean record evt.message(msg); evt.node(ctx.discovery().localNode()); - evt.type(isTask(cls) ? EVT_CLASS_DEPLOY_FAILED : EVT_TASK_DEPLOY_FAILED); + evt.type(isTask ? EVT_CLASS_DEPLOY_FAILED : EVT_TASK_DEPLOY_FAILED); evt.alias(taskName); ctx.event().record(evt); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java index 01d8604ceaffd..04cfd60610b42 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java @@ -458,6 +458,11 @@ else if (locDep != null) { } } + GridDeployment dep = verStore.searchDeploymentCache(meta); + + if (dep != null) + return dep; + if (reuse) { GridDeployment locDep = locStore.getDeployment(meta); @@ -496,7 +501,12 @@ else if (locDep != null) { // Private or Isolated mode. 
meta.record(false); - GridDeployment dep = locStore.getDeployment(meta); + GridDeployment dep = ldrStore.searchDeploymentCache(meta); + + if (dep != null) + return dep; + + dep = locStore.getDeployment(meta); if (sndNodeId.equals(ctx.localNodeId())) { if (dep == null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java index 4ba308c9ef6ef..0477523949fce 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java @@ -219,7 +219,7 @@ public class GridDeploymentPerLoaderStore extends GridDeploymentStoreAdapter { IsolatedDeployment dep; synchronized (mux) { - dep = cache.get(meta.classLoaderId()); + dep = (IsolatedDeployment)searchDeploymentCache(meta); if (dep == null) { long undeployTimeout = 0; @@ -331,6 +331,11 @@ else if (d.sequenceNumber() > meta.sequenceNumber()) { return dep; } + /** {@inheritDoc} */ + @Override public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta) { + return cache.get(meta.classLoaderId()); + } + /** {@inheritDoc} */ @Override public void addParticipants(Map allParticipants, Map addedParticipants) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java index 56a3f3e026fb0..0c5964d8fdbeb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java @@ -29,6 +29,7 @@ import java.util.UUID; import 
java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeoutException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.events.DeploymentEvent; @@ -277,6 +278,24 @@ else if (log.isDebugEnabled()) } } + /** {@inheritDoc} */ + @Override public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta) { + synchronized (mux) { + List deps = cache.get(meta.userVersion()); + + if (deps != null) { + assert !deps.isEmpty(); + + for (SharedDeployment d : deps) { + if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())) + return d; + } + } + } + + return null; + } + /** {@inheritDoc} */ @Override @Nullable public GridDeployment getDeployment(GridDeploymentMetadata meta) { assert meta != null; @@ -356,22 +375,14 @@ else if (ctx.discovery().node(meta.senderNodeId()) == null) { return null; } - List deps = cache.get(meta.userVersion()); + dep = (SharedDeployment)searchDeploymentCache(meta); - if (deps != null) { - assert !deps.isEmpty(); + if (dep == null) { + List deps = cache.get(meta.userVersion()); - for (SharedDeployment d : deps) { - if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId()) || - meta.senderNodeId().equals(ctx.localNodeId())) { - // Done. - dep = d; + if (deps != null) { + assert !deps.isEmpty(); - break; - } - } - - if (dep == null) { checkRedeploy(meta); // Find existing deployments that need to be checked @@ -413,12 +424,12 @@ else if (ctx.discovery().node(meta.senderNodeId()) == null) { deps.add(dep); } } - } - else { - checkRedeploy(meta); + else { + checkRedeploy(meta); - // Create peer class loader. - dep = createNewDeployment(meta, true); + // Create peer class loader. + dep = createNewDeployment(meta, true); + } } } @@ -689,7 +700,7 @@ private boolean checkLoadRemoteClass(String clsName, GridDeploymentMetadata meta return false; // Temporary class loader. 
- ClassLoader temp = new GridDeploymentClassLoader( + GridDeploymentClassLoader temp = new GridDeploymentClassLoader( IgniteUuid.fromUuid(ctx.localNodeId()), meta.userVersion(), meta.deploymentMode(), @@ -712,7 +723,14 @@ private boolean checkLoadRemoteClass(String clsName, GridDeploymentMetadata meta InputStream rsrcIn = null; try { - rsrcIn = temp.getResourceAsStream(path); + boolean timeout = false; + + try { + rsrcIn = temp.getResourceAsStreamEx(path); + } + catch (TimeoutException e) { + timeout = true; + } boolean found = rsrcIn != null; @@ -732,7 +750,7 @@ private boolean checkLoadRemoteClass(String clsName, GridDeploymentMetadata meta return false; } - else + else if (!timeout) // Cache result if classloader is still alive. ldrRsrcCache.put(clsName, found); } @@ -1190,8 +1208,6 @@ boolean hasParticipant(UUID nodeId, IgniteUuid ldrId) { assert nodeId != null; assert ldrId != null; - assert Thread.holdsLock(mux); - return classLoader().hasRegisteredNode(nodeId, ldrId); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java index 07e1e22750a5f..d529eaf47a1b9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java @@ -58,6 +58,12 @@ public interface GridDeploymentStore { */ @Nullable public GridDeployment getDeployment(GridDeploymentMetadata meta); + /** + * @param meta Deployment meatdata. + * @return Grid deployment instance if it was finded in cache, {@code null} otherwise. + */ + @Nullable public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta); + /** * Gets class loader based on ID. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java index 0fed03ce86542..748261608cc5c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java @@ -2600,6 +2600,23 @@ public void resolveCommunicationError(ClusterNode node, Exception err) { ((IgniteDiscoverySpi)spi).resolveCommunicationFailure(node, err); } + /** + * Resolves by ID cluster node which is alive or has recently left the cluster. + * + * @param nodeId Node id. + * @return resolved node, or null if node not found. + */ + public ClusterNode historicalNode(UUID nodeId) { + for (DiscoCache discoCache : discoCacheHist.descendingValues()) { + ClusterNode node = discoCache.node(nodeId); + + if (node != null) + return node; + } + + return null; + } + /** Worker for network segment checks. */ private class SegmentCheckWorker extends GridWorker { /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IncompleteDeserializationException.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IncompleteDeserializationException.java new file mode 100644 index 0000000000000..5a440cefa1cca --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IncompleteDeserializationException.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.discovery; + +import org.jetbrains.annotations.NotNull; + +/** + * Exception which can be used to access a message which failed to be deserialized completely using Java serialization. + * Throwed from deserialization methods it can be caught by a caller. + *

+ * Should be {@link RuntimeException} because of limitations of Java serialization mechanisms. + *

+ * Catching {@link ClassNotFoundException} inside deserialization methods cannot do the same trick because + * Java deserialization remembers such exception internally and will rethrow it anyway upon returing to a user. + */ +public class IncompleteDeserializationException extends RuntimeException { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private final DiscoveryCustomMessage m; + + /** + * @param m Message. + */ + public IncompleteDeserializationException(@NotNull DiscoveryCustomMessage m) { + super(null, null, false, false); + + this.m = m; + } + + /** + * @return Message. + */ + @NotNull public DiscoveryCustomMessage message() { + return m; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java index 793b4fc20a97e..92963403f97f6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java @@ -72,9 +72,12 @@ import static org.apache.ignite.events.EventType.EVTS_ALL; import static org.apache.ignite.events.EventType.EVTS_DISCOVERY_ALL; +import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.events.EventType.EVT_NODE_METRICS_UPDATED; +import static org.apache.ignite.events.EventType.EVT_TASK_FAILED; +import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED; import static org.apache.ignite.internal.GridTopic.TOPIC_EVENT; import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.PUBLIC_POOL; @@ -375,7 
+378,7 @@ public int[] enabledEvents() { public synchronized void enableEvents(int[] types) { assert types != null; - ctx.security().authorize(null, SecurityPermission.EVENTS_ENABLE, null); + ctx.security().authorize(SecurityPermission.EVENTS_ENABLE); boolean[] userRecordableEvts0 = userRecordableEvts; boolean[] recordableEvts0 = recordableEvts; @@ -418,7 +421,7 @@ public synchronized void enableEvents(int[] types) { public synchronized void disableEvents(int[] types) { assert types != null; - ctx.security().authorize(null, SecurityPermission.EVENTS_DISABLE, null); + ctx.security().authorize(SecurityPermission.EVENTS_DISABLE); boolean[] userRecordableEvts0 = userRecordableEvts; boolean[] recordableEvts0 = recordableEvts; @@ -507,7 +510,16 @@ private boolean isHiddenEvent(int type) { * @return {@code true} if this is an internal event. */ private boolean isInternalEvent(int type) { - return type == EVT_DISCOVERY_CUSTOM_EVT || F.contains(EVTS_DISCOVERY_ALL, type); + switch (type) { + case EVT_DISCOVERY_CUSTOM_EVT: + case EVT_TASK_FINISHED: + case EVT_TASK_FAILED: + case EVT_JOB_MAPPED: + return true; + + default: + return F.contains(EVTS_DISCOVERY_ALL, type); + } } /** @@ -562,13 +574,8 @@ public boolean hasListener(int type) { public boolean isAllUserRecordable(int[] types) { assert types != null; - boolean[] userRecordableEvts0 = userRecordableEvts; - for (int type : types) { - if (type < 0 || type >= len) - throw new IllegalArgumentException("Invalid event type: " + type); - - if (!userRecordableEvts0[type]) + if (!isUserRecordable(type)) return false; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/FullPageId.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/FullPageId.java index 17c552d9ddecf..fce50a90e3c8b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/FullPageId.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/FullPageId.java @@ -123,6 +123,13 @@ public long 
pageId() { return pageId; } + /** + * @return Effective page ID. + */ + public long effectivePageId() { + return effectivePageId; + } + /** * @return Cache group ID. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java index 56ddf5a1b5338..83c01d76d57cc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java @@ -72,8 +72,10 @@ public interface IgniteWriteAheadLogManager extends GridCacheSharedManager, Igni * @throws IgniteCheckedException If failed to write. * @throws StorageException If IO exception occurred during the write. If an exception is thrown from this * method, the WAL will be invalidated and the node will be stopped. + * @return Last WAL position which was flushed to WAL segment file. May be greater than or equal to a {@code ptr}. + * May be {@code null}, it means nothing has been flushed. */ - public void flush(WALPointer ptr, boolean explicitFsync) throws IgniteCheckedException, StorageException; + public WALPointer flush(WALPointer ptr, boolean explicitFsync) throws IgniteCheckedException, StorageException; /** * Reads WAL record by the specified pointer. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java index 1aa065e10df40..8957d9b6ec27b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java @@ -19,8 +19,6 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; -import java.util.Arrays; -import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.util.GridUnsafe; @@ -92,10 +90,6 @@ public FullPageId fullPageId() { + "],\nsuper = [" + super.toString() + "]]"; } - catch (IgniteCheckedException ignored) { - return "Error during call'toString' of PageSnapshot [fullPageId=" + fullPageId() + - ", pageData = " + Arrays.toString(pageData) + ", super=" + super.toString() + "]"; - } finally { GridUnsafe.cleanDirectBuffer(buf); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java index 0031b22efa478..87ca9d689f9c4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java @@ -32,126 +32,126 @@ public abstract class WALRecord { */ public enum RecordType { /** */ - TX_RECORD, + TX_RECORD (0), /** */ - PAGE_RECORD, + PAGE_RECORD (1), /** */ - DATA_RECORD, + DATA_RECORD (2), /** Checkpoint (begin) record */ - CHECKPOINT_RECORD, + CHECKPOINT_RECORD (3), /** WAL segment header record. */ - HEADER_RECORD, + HEADER_RECORD (4), // Delta records. 
/** */ - INIT_NEW_PAGE_RECORD, + INIT_NEW_PAGE_RECORD (5), /** */ - DATA_PAGE_INSERT_RECORD, + DATA_PAGE_INSERT_RECORD (6), /** */ - DATA_PAGE_INSERT_FRAGMENT_RECORD, + DATA_PAGE_INSERT_FRAGMENT_RECORD (7), /** */ - DATA_PAGE_REMOVE_RECORD, + DATA_PAGE_REMOVE_RECORD (8), /** */ - DATA_PAGE_SET_FREE_LIST_PAGE, + DATA_PAGE_SET_FREE_LIST_PAGE (9), /** */ - BTREE_META_PAGE_INIT_ROOT, + BTREE_META_PAGE_INIT_ROOT (10), /** */ - BTREE_META_PAGE_ADD_ROOT, + BTREE_META_PAGE_ADD_ROOT (11), /** */ - BTREE_META_PAGE_CUT_ROOT, + BTREE_META_PAGE_CUT_ROOT (12), /** */ - BTREE_INIT_NEW_ROOT, + BTREE_INIT_NEW_ROOT (13), /** */ - BTREE_PAGE_RECYCLE, + BTREE_PAGE_RECYCLE (14), /** */ - BTREE_PAGE_INSERT, + BTREE_PAGE_INSERT (15), /** */ - BTREE_FIX_LEFTMOST_CHILD, + BTREE_FIX_LEFTMOST_CHILD (16), /** */ - BTREE_FIX_COUNT, + BTREE_FIX_COUNT (17), /** */ - BTREE_PAGE_REPLACE, + BTREE_PAGE_REPLACE (18), /** */ - BTREE_PAGE_REMOVE, + BTREE_PAGE_REMOVE (19), /** */ - BTREE_PAGE_INNER_REPLACE, + BTREE_PAGE_INNER_REPLACE (20), /** */ - BTREE_FIX_REMOVE_ID, + BTREE_FIX_REMOVE_ID (21), /** */ - BTREE_FORWARD_PAGE_SPLIT, + BTREE_FORWARD_PAGE_SPLIT (22), /** */ - BTREE_EXISTING_PAGE_SPLIT, + BTREE_EXISTING_PAGE_SPLIT (23), /** */ - BTREE_PAGE_MERGE, + BTREE_PAGE_MERGE (24), /** */ - PAGES_LIST_SET_NEXT, + PAGES_LIST_SET_NEXT (25), /** */ - PAGES_LIST_SET_PREVIOUS, + PAGES_LIST_SET_PREVIOUS (26), /** */ - PAGES_LIST_INIT_NEW_PAGE, + PAGES_LIST_INIT_NEW_PAGE (27), /** */ - PAGES_LIST_ADD_PAGE, + PAGES_LIST_ADD_PAGE (28), /** */ - PAGES_LIST_REMOVE_PAGE, + PAGES_LIST_REMOVE_PAGE (29), /** */ - META_PAGE_INIT, + META_PAGE_INIT (30), /** */ - PARTITION_META_PAGE_UPDATE_COUNTERS, + PARTITION_META_PAGE_UPDATE_COUNTERS (31), /** Memory recovering start marker */ - MEMORY_RECOVERY, + MEMORY_RECOVERY (32), /** */ - TRACKING_PAGE_DELTA, + TRACKING_PAGE_DELTA (33), /** Meta page update last successful snapshot id. 
*/ - META_PAGE_UPDATE_LAST_SUCCESSFUL_SNAPSHOT_ID, + META_PAGE_UPDATE_LAST_SUCCESSFUL_SNAPSHOT_ID (34), /** Meta page update last successful full snapshot id. */ - META_PAGE_UPDATE_LAST_SUCCESSFUL_FULL_SNAPSHOT_ID, + META_PAGE_UPDATE_LAST_SUCCESSFUL_FULL_SNAPSHOT_ID (35), /** Meta page update next snapshot id. */ - META_PAGE_UPDATE_NEXT_SNAPSHOT_ID, + META_PAGE_UPDATE_NEXT_SNAPSHOT_ID (36), /** Meta page update last allocated index. */ - META_PAGE_UPDATE_LAST_ALLOCATED_INDEX, + META_PAGE_UPDATE_LAST_ALLOCATED_INDEX (37), /** Partition meta update state. */ - PART_META_UPDATE_STATE, + PART_META_UPDATE_STATE (38), /** Page list meta reset count record. */ - PAGE_LIST_META_RESET_COUNT_RECORD, + PAGE_LIST_META_RESET_COUNT_RECORD (39), /** Switch segment record. * Marker record for indicate end of segment. @@ -160,41 +160,74 @@ public enum RecordType { * that one byte in the end,then we write SWITCH_SEGMENT_RECORD as marker end of segment. * No need write CRC or WAL pointer for this record. It is byte marker record. * */ - SWITCH_SEGMENT_RECORD, + SWITCH_SEGMENT_RECORD (40), /** */ - DATA_PAGE_UPDATE_RECORD, + DATA_PAGE_UPDATE_RECORD (41), /** init */ - BTREE_META_PAGE_INIT_ROOT2, + BTREE_META_PAGE_INIT_ROOT2 (42), /** Partition destroy. */ - PARTITION_DESTROY, + PARTITION_DESTROY (43), /** Snapshot record. */ - SNAPSHOT, + SNAPSHOT (44), /** Metastore data record. */ - METASTORE_DATA_RECORD, + METASTORE_DATA_RECORD (45), /** Exchange record. */ - EXCHANGE, + EXCHANGE (46), /** Reserved for future record. */ - RESERVED, + RESERVED (47), /** Rollback tx record. */ - ROLLBACK_TX_RECORD, + ROLLBACK_TX_RECORD (57), /** */ - PARTITION_META_PAGE_UPDATE_COUNTERS_V2; + PARTITION_META_PAGE_UPDATE_COUNTERS_V2 (58), + + /** Init root meta page (with flags and created version) */ + BTREE_META_PAGE_INIT_ROOT_V3 (59); + + /** Index for serialization. Should be consistent throughout all versions. */ + private final int idx; + + /** + * @param idx Index for serialization. 
+ */ + RecordType(int idx) { + this.idx = idx; + } + + /** + * @return Index for serialization. + */ + public int index() { + return idx; + } /** */ - private static final RecordType[] VALS = RecordType.values(); + private static final RecordType[] VALS; + + static { + RecordType[] recordTypes = RecordType.values(); + + int maxIdx = 0; + for (RecordType recordType : recordTypes) + maxIdx = Math.max(maxIdx, recordType.idx); + + VALS = new RecordType[maxIdx + 1]; + + for (RecordType recordType : recordTypes) + VALS[recordType.idx] = recordType; + } /** */ - public static RecordType fromOrdinal(int ord) { - return ord < 0 || ord >= VALS.length ? null : VALS[ord]; + public static RecordType fromIndex(int idx) { + return idx < 0 || idx >= VALS.length ? null : VALS[idx]; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRootInlineFlagsCreatedVersionRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRootInlineFlagsCreatedVersionRecord.java new file mode 100644 index 0000000000000..1163f1f7e6d05 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRootInlineFlagsCreatedVersionRecord.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.wal.record.delta; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.IgniteVersionUtils; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.lang.IgniteProductVersion; + +/** + * + */ +public class MetaPageInitRootInlineFlagsCreatedVersionRecord extends MetaPageInitRootInlineRecord { + /** Created version. */ + private final long flags; + + /** Created version. */ + private final IgniteProductVersion createdVer; + + /** + * @param grpId Cache group ID. + * @param pageId Meta page ID. + * @param rootId Root id. + * @param inlineSize Inline size. + */ + public MetaPageInitRootInlineFlagsCreatedVersionRecord(int grpId, long pageId, long rootId, int inlineSize) { + super(grpId, pageId, rootId, inlineSize); + + createdVer = IgniteVersionUtils.VER; + flags = BPlusMetaIO.FLAGS_DEFAULT; + } + + /** + * @param grpId Cache group ID. + * @param pageId Meta page ID. + * @param rootId Root id. + * @param inlineSize Inline size. + * @param flags Flags. + * @param createdVer The version of ignite that creates this tree. 
+ */ + public MetaPageInitRootInlineFlagsCreatedVersionRecord(int grpId, long pageId, long rootId, int inlineSize, + long flags, IgniteProductVersion createdVer) { + super(grpId, pageId, rootId, inlineSize); + + this.flags = flags; + this.createdVer = createdVer; + } + + /** {@inheritDoc} */ + @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { + super.applyDelta(pageMem, pageAddr); + + BPlusMetaIO io = BPlusMetaIO.VERSIONS.forPage(pageAddr); + + io.initFlagsAndVersion(pageAddr, flags, createdVer); + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.BTREE_META_PAGE_INIT_ROOT_V3; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(MetaPageInitRootInlineFlagsCreatedVersionRecord.class, this, "super", super.toString()); + } + + /** + * @return Created version. + */ + public IgniteProductVersion createdVersion() { + return createdVer; + } + + /** + * @return Meta page flags. + */ + public long flags() { + return flags; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java index 2c02f26be6641..3b9119b6d5b92 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java @@ -117,10 +117,26 @@ public int minorTopologyVersion() { * @param upper Upper bound. * @return {@code True} if this topology version is within provided bounds (inclusive). */ - public boolean isBetween(AffinityTopologyVersion lower, AffinityTopologyVersion upper) { + public final boolean isBetween(AffinityTopologyVersion lower, AffinityTopologyVersion upper) { return compareTo(lower) >= 0 && compareTo(upper) <= 0; } + /** + * @param topVer Test version. 
+ * @return {@code True} if this topology happens strictly after than {@code topVer}. + */ + public final boolean after(AffinityTopologyVersion topVer) { + return compareTo(topVer) > 0; + } + + /** + * @param topVer Test version. + * @return {@code True} if this topology happens strictly before than {@code topVer}. + */ + public final boolean before(AffinityTopologyVersion topVer) { + return compareTo(topVer) < 0; + } + /** {@inheritDoc} */ @Override public void onAckReceived() { // No-op. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java index 6aee4b2b08967..fb5a7b9966ace 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java @@ -43,6 +43,7 @@ import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.processors.cache.ExchangeDiscoveryEvents; +import org.apache.ignite.internal.processors.cache.GridCachePartitionExchangeManager; import org.apache.ignite.internal.processors.cluster.BaselineTopology; import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.typedef.F; @@ -61,11 +62,27 @@ * Affinity cached function. */ public class GridAffinityAssignmentCache { - /** Cleanup history size. */ - private final int MAX_HIST_SIZE = getInteger(IGNITE_AFFINITY_HISTORY_SIZE, 50); + /** + * Affinity cache will shrink when total number of non-shallow (see {@link HistoryAffinityAssignmentImpl}) + * historical instances will be greater than value of this constant. 
+ */ + private final int MAX_NON_SHALLOW_HIST_SIZE = getInteger(IGNITE_AFFINITY_HISTORY_SIZE, 25); - /** Cleanup history links size (calculated by both real entries and shallow copies). */ - private final int MAX_HIST_LINKS_SIZE = MAX_HIST_SIZE * 10; + /** + * Affinity cache will also shrink when total number of both shallow ({@link HistoryAffinityAssignmentShallowCopy}) + * and non-shallow (see {@link HistoryAffinityAssignmentImpl}) historical instances will be greater than + * value of this constant. + */ + private final int MAX_TOTAL_HIST_SIZE = MAX_NON_SHALLOW_HIST_SIZE * 10; + + /** + * Independent of {@link #MAX_NON_SHALLOW_HIST_SIZE} and {@link #MAX_TOTAL_HIST_SIZE}, affinity cache will always + * keep this number of non-shallow (see {@link HistoryAffinityAssignmentImpl}) instances. + * We need at least one real instance, otherwise we won't be able to get affinity cache for + * {@link GridCachePartitionExchangeManager#lastAffinityChangedTopologyVersion} in case cluster has experienced + * too many client joins / client leaves / local cache starts. + */ + private final int MIN_NON_SHALLOW_HIST_SIZE = 2; /** Group name if specified or cache name. */ private final String cacheOrGrpName; @@ -118,8 +135,8 @@ public class GridAffinityAssignmentCache { /** Node stop flag. */ private volatile IgniteCheckedException stopErr; - /** Full history size. */ - private final AtomicInteger fullHistSize = new AtomicInteger(); + /** Numner of non-shallow (see {@link HistoryAffinityAssignmentImpl}) affinity cache instances. 
*/ + private final AtomicInteger nonShallowHistSize = new AtomicInteger(); /** */ private final Object similarAffKey; @@ -270,7 +287,7 @@ public void onReconnected() { affCache.clear(); - fullHistSize.set(0); + nonShallowHistSize.set(0); head.set(new GridAffinityAssignmentV2(AffinityTopologyVersion.NONE)); @@ -675,8 +692,8 @@ public AffinityAssignment cachedAffinity( cache = e.getValue(); if (cache == null) { - throw new IllegalStateException("Getting affinity for too old topology version that is already " + - "out of history [locNode=" + ctx.discovery().localNode() + + throw new IllegalStateException("Getting affinity for topology version earlier than affinity is " + + "calculated [locNode=" + ctx.discovery().localNode() + ", grp=" + cacheOrGrpName + ", topVer=" + topVer + ", lastAffChangeTopVer=" + lastAffChangeTopVer + @@ -686,8 +703,8 @@ public AffinityAssignment cachedAffinity( } if (cache.topologyVersion().compareTo(topVer) > 0) { - throw new IllegalStateException("Getting affinity for topology version earlier than affinity is " + - "calculated [locNode=" + ctx.discovery().localNode() + + throw new IllegalStateException("Getting affinity for too old topology version that is already " + + "out of history [locNode=" + ctx.discovery().localNode() + ", grp=" + cacheOrGrpName + ", topVer=" + topVer + ", lastAffChangeTopVer=" + lastAffChangeTopVer + @@ -803,58 +820,73 @@ private void onHistoryAdded( cleanupNeeded = true; if (added.requiresHistoryCleanup()) - fullHistSize.incrementAndGet(); + nonShallowHistSize.incrementAndGet(); } else { if (replaced.requiresHistoryCleanup() != added.requiresHistoryCleanup()) { if (added.requiresHistoryCleanup()) { cleanupNeeded = true; - fullHistSize.incrementAndGet(); + nonShallowHistSize.incrementAndGet(); } else - fullHistSize.decrementAndGet(); + nonShallowHistSize.decrementAndGet(); } } if (!cleanupNeeded) return; - int fullSize = fullHistSize.get(); - - int linksSize = affCache.size(); + int nonShallowSize = 
nonShallowHistSize.get(); - int fullRmvCnt = fullSize > MAX_HIST_SIZE ? (MAX_HIST_SIZE / 2) : 0; + int totalSize = affCache.size(); - int linksRmvCnt = linksSize > MAX_HIST_LINKS_SIZE ? (MAX_HIST_LINKS_SIZE / 2) : 0; + if (shouldContinueCleanup(nonShallowSize, totalSize)) { + int initNonShallowSize = nonShallowSize; - if (fullRmvCnt > 0 || linksRmvCnt > 0) { Iterator it = affCache.values().iterator(); - AffinityTopologyVersion topVerRmv = null; - - while (it.hasNext() && (fullRmvCnt > 0 || linksRmvCnt > 0)) { + while (it.hasNext()) { HistoryAffinityAssignment aff0 = it.next(); - if (aff0.requiresHistoryCleanup()) { // Don't decrement counter in case of fullHistoryCleanupRequired copy remove. - fullRmvCnt--; + if (aff0.requiresHistoryCleanup()) { + // We can stop cleanup only on non-shallow item. + // Keeping part of shallow items chain if corresponding real item is missing makes no sense. + if (!shouldContinueCleanup(nonShallowSize, totalSize)) { + nonShallowHistSize.getAndAdd(nonShallowSize - initNonShallowSize); + + // GridAffinityProcessor#affMap has the same size and instance set as #affCache. + ctx.affinity().removeCachedAffinity(aff0.topologyVersion()); - fullHistSize.decrementAndGet(); + return; + } + + nonShallowSize--; } - linksRmvCnt--; + totalSize--; it.remove(); - - topVerRmv = aff0.topologyVersion(); } - topVerRmv = it.hasNext() ? it.next().topologyVersion() : topVerRmv; - - ctx.affinity().removeCachedAffinity(topVerRmv); + assert false : "All elements have been removed from affinity cache during cleanup"; } } + /** + * Checks whether affinity cache size conditions are still unsatisfied. + * + * @param nonShallowSize Non shallow size. + * @param totalSize Total size. + * @return true if affinity cache cleanup is not finished yet. 
+ */ + private boolean shouldContinueCleanup(int nonShallowSize, int totalSize) { + if (nonShallowSize <= MIN_NON_SHALLOW_HIST_SIZE) + return false; + + return nonShallowSize > MAX_NON_SHALLOW_HIST_SIZE || totalSize > MAX_TOTAL_HIST_SIZE; + } + /** * @return All initialized versions. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java index 9594c40ce8c3b..24e6b195b7a20 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java @@ -57,11 +57,11 @@ import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; -import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridClientPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.processors.cluster.DiscoveryDataClusterState; +import org.apache.ignite.internal.util.GridConcurrentHashSet; import org.apache.ignite.internal.util.GridLongList; import org.apache.ignite.internal.util.GridPartitionStateMap; import org.apache.ignite.internal.util.future.GridCompoundFuture; @@ -750,17 +750,15 @@ private void scheduleClientChangeMessage(Map startedCaches, Se /** * @param fut Exchange future. - * @param crd Coordinator flag. 
* @param exchActions Exchange actions. */ public void onCustomMessageNoAffinityChange( GridDhtPartitionsExchangeFuture fut, - boolean crd, @Nullable final ExchangeActions exchActions ) { final ExchangeDiscoveryEvents evts = fut.context().events(); - forAllCacheGroups(crd, new IgniteInClosureX() { + forAllCacheGroups(new IgniteInClosureX() { @Override public void applyx(GridAffinityAssignmentCache aff) { if (exchActions != null && exchActions.cacheGroupStopping(aff.groupId())) return; @@ -845,7 +843,7 @@ public IgniteInternalFuture onCacheChangeRequest( IgniteInternalFuture res = cachesRegistry.update(exchActions); // Affinity did not change for existing caches. - onCustomMessageNoAffinityChange(fut, crd, exchActions); + onCustomMessageNoAffinityChange(fut, exchActions); fut.timeBag().finishGlobalStage("Update caches registry"); @@ -1085,12 +1083,10 @@ public void clearGroupHoldersAndRegistry() { * Called when received {@link CacheAffinityChangeMessage} which should complete exchange. * * @param exchFut Exchange future. - * @param crd Coordinator flag. * @param msg Affinity change message. */ public void onExchangeChangeAffinityMessage( GridDhtPartitionsExchangeFuture exchFut, - boolean crd, CacheAffinityChangeMessage msg ) { if (log.isDebugEnabled()) { @@ -1106,8 +1102,8 @@ public void onExchangeChangeAffinityMessage( assert assignment != null; - forAllCacheGroups(crd, new IgniteInClosureX() { - @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { + forAllCacheGroups(new IgniteInClosureX() { + @Override public void applyx(GridAffinityAssignmentCache aff) { List> idealAssignment = aff.idealAssignment(); assert idealAssignment != null; @@ -1137,13 +1133,10 @@ public void onExchangeChangeAffinityMessage( * Called on exchange initiated by {@link CacheAffinityChangeMessage} which sent after rebalance finished. * * @param exchFut Exchange future. - * @param crd Coordinator flag. * @param msg Message. 
- * @throws IgniteCheckedException If failed. */ public void onChangeAffinityMessage( final GridDhtPartitionsExchangeFuture exchFut, - boolean crd, final CacheAffinityChangeMessage msg ) { assert msg.topologyVersion() != null && msg.exchangeId() == null : msg; @@ -1161,8 +1154,8 @@ public void onChangeAffinityMessage( final Map deploymentIds = msg.cacheDeploymentIds(); - forAllCacheGroups(crd, new IgniteInClosureX() { - @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { + forAllCacheGroups(new IgniteInClosureX() { + @Override public void applyx(GridAffinityAssignmentCache aff) { AffinityTopologyVersion affTopVer = aff.lastVersion(); assert affTopVer.topologyVersion() > 0 : affTopVer; @@ -1222,14 +1215,13 @@ public void onChangeAffinityMessage( * Called on exchange initiated by client node join/fail. * * @param fut Exchange future. - * @param crd Coordinator flag. * @throws IgniteCheckedException If failed. */ - public void onClientEvent(final GridDhtPartitionsExchangeFuture fut, boolean crd) throws IgniteCheckedException { + public void onClientEvent(final GridDhtPartitionsExchangeFuture fut) throws IgniteCheckedException { boolean locJoin = fut.firstEvent().eventNode().isLocal(); if (!locJoin) { - forAllCacheGroups(crd, new IgniteInClosureX() { + forAllCacheGroups(new IgniteInClosureX() { @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { AffinityTopologyVersion topVer = fut.initialVersion(); @@ -1297,27 +1289,13 @@ private void forAllRegisteredCacheGroups(IgniteInClosureX } /** - * @param crd Coordinator flag. * @param c Closure. 
*/ - private void forAllCacheGroups(boolean crd, IgniteInClosureX c) { - Collection affinityCaches; - - Collection affinityCaches1 = grpHolders.values().stream() + private void forAllCacheGroups(IgniteInClosureX c) { + Collection affinityCaches = grpHolders.values().stream() .map(CacheGroupHolder::affinity) .collect(Collectors.toList()); - Collection affinityCaches2 = cctx.kernalContext().cache().cacheGroups().stream() - .filter(grp -> !grp.isLocal()) - .filter(grp -> !grp.isRecoveryMode()) - .map(CacheGroupContext::affinity) - .collect(Collectors.toList()); - - if (!cctx.localNode().isClient()) - affinityCaches = affinityCaches1; - else - affinityCaches = affinityCaches2; - try { U.doInParallel(cctx.kernalContext().getSystemExecutorService(), affinityCaches, t -> { c.applyx(t); @@ -1471,21 +1449,19 @@ public GridAffinityAssignmentCache affinity(Integer grpId) { * Applies affinity diff from the received full message. * * @param fut Current exchange future. - * @param msg Finish exchange message. + * @param idealAffDiff Map [Cache group id - Affinity distribution] which contains difference with ideal affinity. */ public void applyAffinityFromFullMessage( final GridDhtPartitionsExchangeFuture fut, - final GridDhtPartitionsFullMessage msg + final Map idealAffDiff ) { // Please do not use following pattern of code (nodesByOrder, affCache). NEVER. final Map nodesByOrder = new ConcurrentHashMap<>(); - forAllCacheGroups(false, new IgniteInClosureX() { - @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { + forAllCacheGroups(new IgniteInClosureX() { + @Override public void applyx(GridAffinityAssignmentCache aff) { ExchangeDiscoveryEvents evts = fut.context().events(); - Map idealAffDiff = msg.idealAffinityDiff(); - List> idealAssignment = aff.calculate(evts.topologyVersion(), evts, evts.discoveryCache()); CacheGroupAffinityMessage affMsg = idealAffDiff != null ? 
idealAffDiff.get(aff.groupId()) : null; @@ -1520,26 +1496,23 @@ public void applyAffinityFromFullMessage( /** * @param fut Current exchange future. - * @param msg Message finish message. + * @param receivedAff Map [Cache group id - Affinity distribution] received from coordinator to apply. * @param resTopVer Result topology version. + * @return Set of cache groups with no affinity localed in given {@code receivedAff}. */ - public void onLocalJoin( + public Set onLocalJoin( final GridDhtPartitionsExchangeFuture fut, - GridDhtPartitionsFullMessage msg, + final Map receivedAff, final AffinityTopologyVersion resTopVer ) { final Set affReq = fut.context().groupsAffinityRequestOnJoin(); - final Map receivedAff = msg.joinedNodeAffinity(); - - assert F.isEmpty(affReq) || (!F.isEmpty(receivedAff) && receivedAff.size() >= affReq.size()) - : ("Requested and received affinity are different " + - "[requestedCnt=" + (affReq != null ? affReq.size() : "none") + - ", receivedCnt=" + (receivedAff != null ? receivedAff.size() : "none") + - ", msg=" + msg + "]"); - final Map nodesByOrder = new ConcurrentHashMap<>(); + // Such cache group may exist if cache is already destroyed on server nodes + // and coordinator have no affinity for that group. + final Set noAffinityGroups = new GridConcurrentHashSet<>(); + forAllRegisteredCacheGroups(new IgniteInClosureX() { @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { ExchangeDiscoveryEvents evts = fut.context().events(); @@ -1555,7 +1528,14 @@ public void onLocalJoin( CacheGroupAffinityMessage affMsg = receivedAff.get(aff.groupId()); - assert affMsg != null; + if (affMsg == null) { + noAffinityGroups.add(aff.groupId()); + + // Use ideal affinity to resume cache initialize process. 
+ calculateAndInit(evts, aff, evts.topologyVersion()); + + return; + } List> assignments = affMsg.createAssignments(nodesByOrder, evts.discoveryCache()); @@ -1585,15 +1565,15 @@ else if (grp != null && fut.cacheGroupAddedOnExchange(aff.groupId(), grp.receive "[grp=" + aff.cacheOrGroupName() + "]"); } }); + + return noAffinityGroups; } /** * @param fut Current exchange future. * @param crd Coordinator flag. - * @throws IgniteCheckedException If failed. */ - public void onServerJoinWithExchangeMergeProtocol(GridDhtPartitionsExchangeFuture fut, boolean crd) - throws IgniteCheckedException { + public void onServerJoinWithExchangeMergeProtocol(GridDhtPartitionsExchangeFuture fut, boolean crd) { final ExchangeDiscoveryEvents evts = fut.context().events(); assert fut.context().mergeExchanges(); @@ -1616,7 +1596,6 @@ public void onServerJoinWithExchangeMergeProtocol(GridDhtPartitionsExchangeFutur /** * @param fut Current exchange future. * @return Computed difference with ideal affinity. - * @throws IgniteCheckedException If failed. */ public Map onServerLeftWithExchangeMergeProtocol( final GridDhtPartitionsExchangeFuture fut) throws IgniteCheckedException { @@ -1625,9 +1604,7 @@ public Map onServerLeftWithExchangeMergeProt assert fut.context().mergeExchanges(); assert evts.hasServerLeft(); - Map result = onReassignmentEnforced(fut); - - return result; + return onReassignmentEnforced(fut); } /** @@ -1819,9 +1796,6 @@ private void fetchAffinityOnJoin(GridDhtPartitionsExchangeFuture fut) throws Ign forAllRegisteredCacheGroups(new IgniteInClosureX() { @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { - if (cctx.kernalContext().clientNode() && cctx.cache().cacheGroup(desc.groupId()) == null) - return; // Skip non-started caches on client nodes. 
- CacheGroupHolder holder = getOrCreateGroupHolder(topVer, desc); if (fut.cacheGroupAddedOnExchange(desc.groupId(), desc.receivedFrom())) { @@ -2150,9 +2124,6 @@ private CacheGroupHolder createGroupHolder( forAllRegisteredCacheGroups(new IgniteInClosureX() { @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { - if (cctx.localNode().isClient() && cctx.cache().cacheGroup(desc.groupId()) == null) - return; - CacheGroupHolder grpHolder = getOrCreateGroupHolder(evts.topologyVersion(), desc); // Already calculated. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java index 3206bae3c100f..01a5d87ba76fb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java @@ -177,6 +177,9 @@ public class CacheGroupContext { /** */ private volatile boolean hasAtomicCaches; + /** Store cache group metrics. */ + private final CacheGroupMetricsImpl metrics; + /** * @param ctx Context. * @param grpId Group ID. 
@@ -236,6 +239,8 @@ public class CacheGroupContext { log = ctx.kernalContext().log(getClass()); + metrics = new CacheGroupMetricsImpl(); + mxBean = new CacheGroupMetricsMXBeanImpl(this); if (systemCache()) { @@ -931,8 +936,6 @@ public void onPartitionEvicted(int part) { cctx.dr().partitionEvicted(part); cctx.continuousQueries().onPartitionEvicted(part); - - cctx.dataStructures().onPartitionEvicted(part); } } @@ -1107,6 +1110,8 @@ private void processAffinityAssignmentRequest0(UUID nodeId, final GridDhtAffinit topVer, assignment.assignment()); + res.copyTimestamps(req); + if (aff.centralizedAffinityFunction()) { assert assignment.idealAssignment() != null; @@ -1192,8 +1197,9 @@ public boolean globalWalEnabled() { */ public void globalWalEnabled(boolean enabled) { if (globalWalEnabled != enabled) { - log.info("Global WAL state for group=" + cacheOrGroupName() + - " changed from " + globalWalEnabled + " to " + enabled); + if (log.isInfoEnabled()) + log.info("Global WAL state for group=" + cacheOrGroupName() + + " changed from " + globalWalEnabled + " to " + enabled); persistGlobalWalState(enabled); @@ -1206,8 +1212,9 @@ public void globalWalEnabled(boolean enabled) { */ public void localWalEnabled(boolean enabled) { if (localWalEnabled != enabled){ - log.info("Local WAL state for group=" + cacheOrGroupName() + - " changed from " + localWalEnabled + " to " + enabled); + if (log.isInfoEnabled()) + log.info("Local WAL state for group=" + cacheOrGroupName() + + " changed from " + localWalEnabled + " to " + enabled); persistLocalWalState(enabled); @@ -1236,6 +1243,13 @@ public boolean hasAtomicCaches() { return hasAtomicCaches; } + /** + * @return Metrics. + */ + public CacheGroupMetricsImpl metrics0() { + return metrics; + } + /** * @return Statistics holder to track cache IO operations. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CheckpointFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetrics.java similarity index 64% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CheckpointFuture.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetrics.java index 23287f195dc5b..6a79a54d4bda0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CheckpointFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetrics.java @@ -15,26 +15,16 @@ * limitations under the License. */ -package org.apache.ignite.internal.processors.cache.persistence; - -import org.apache.ignite.internal.util.future.GridFutureAdapter; +package org.apache.ignite.internal.processors.cache; /** - * Checkpoint futures. + * Cache group metrics. */ -public interface CheckpointFuture { - /** - * @return Begin future. - */ - public GridFutureAdapter beginFuture(); - - /** - * @return Finish future. - */ - public GridFutureAdapter finishFuture(); - +public interface CacheGroupMetrics { /** - * @return Checkpoint was already started. + * @return Number of partitions need processed for finished indexes create or rebuilding. + * It is calculated as the number of local partition minus the processed. + * A value of 0 indicates that the index is built. 
*/ - public boolean started(); + public long getIndexBuildCountPartitionsLeft(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java new file mode 100644 index 0000000000000..1eca7cf59f7a7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsImpl.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Cache group metrics. + */ +public class CacheGroupMetricsImpl implements CacheGroupMetrics { + /** Number of partitions need processed for finished indexes create or rebuilding. */ + private final AtomicLong idxBuildCntPartitionsLeft; + + /** */ + public CacheGroupMetricsImpl() { + idxBuildCntPartitionsLeft = new AtomicLong(); + } + + + /** {@inheritDoc} */ + @Override public long getIndexBuildCountPartitionsLeft() { + return idxBuildCntPartitionsLeft.get(); + } + + /** Set number of partitions need processed for finished indexes create or rebuilding. 
*/ + public void setIndexBuildCountPartitionsLeft(long idxBuildCntPartitionsLeft) { + this.idxBuildCntPartitionsLeft.set(idxBuildCntPartitionsLeft); + } + + /** + * Commit the complete index building for partition. + * + * @return Decrement number of partitions need processed for finished indexes create or rebuilding. + */ + public long decrementIndexBuildCountPartitionsLeft() { + return idxBuildCntPartitionsLeft.decrementAndGet(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java index 5ece77f57ba4d..5c6cdfaa4a1c9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java @@ -359,4 +359,9 @@ private Map> clusterPartitionsMapByState(GridDhtPartitionSt @Override public long getTotalAllocatedSize() { return getTotalAllocatedPages() * ctx.dataRegion().pageMemory().pageSize(); } + + /** {@inheritDoc} */ + @Override public long getIndexBuildCountPartitionsLeft() { + return ctx.metrics0().getIndexBuildCountPartitionsLeft(); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java index bb0b59bf9845e..c7a59a4606df7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java @@ -161,6 +161,13 @@ public boolean isStaticallyConfigured() { return staticallyConfigured; } + /** + * @return Long which bits represent some flags. 
+ */ + public long getFlags() { + return flags; + } + /** * @param ois ObjectInputStream. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java index aeca79e984809..329f141daee49 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectUtils.java @@ -20,6 +20,8 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Map; +import org.apache.ignite.IgniteException; +import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.internal.binary.BinaryUtils; import org.apache.ignite.internal.util.MutableSingletonList; import org.apache.ignite.internal.util.typedef.F; @@ -187,6 +189,32 @@ else if (o instanceof Object[]) return o; } + /** + * Checks the cache object is binary object. + * + * @param o Cache object. + * @return {@code true} if the key is binary object. Otherwise (key's type is a platform type) returns false. + */ + public static boolean isBinary(CacheObject o) { + return o instanceof BinaryObject + || (o instanceof KeyCacheObjectImpl + && o.value(null, false) instanceof BinaryObject); + } + + /** + * @param o Cache object. + * @return Binary object. + * @throws IgniteException is the object is not binary object (e.g. platform / primitive type) + */ + public static BinaryObject binary(CacheObject o) { + if (o instanceof BinaryObject) + return (BinaryObject)o; + else if (o instanceof KeyCacheObjectImpl && o.value(null, false) instanceof BinaryObject) + return o.value(null, false); + + throw new IgniteException("The object is not binary object [obj=" + o + ']'); + } + /** * Private constructor. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java index d37f69ca5f6ad..649b4d1ecd20f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java @@ -31,6 +31,7 @@ import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.lang.GridPlainRunnable; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.Nullable; @@ -257,7 +258,11 @@ private IgniteInternalFuture registerAllCachesAndGroups( if (cachesToPersist.isEmpty()) return cachesConfPersistFuture = new GridFinishedFuture<>(); - return cachesConfPersistFuture = persistCacheConfigurations(cachesToPersist); + List cacheConfigsToPersist = cacheDescriptors.stream() + .map(DynamicCacheDescriptor::toStoredData) + .collect(Collectors.toList()); + + return cachesConfPersistFuture = persistCacheConfigurations(cacheConfigsToPersist); } /** @@ -273,16 +278,12 @@ private boolean shouldPersist(CacheConfiguration cacheCfg) { } /** - * Persists cache configurations from given {@code cacheDescriptors}. + * Persists cache configurations. * - * @param cacheDescriptors Cache descriptors to retrieve cache configurations. + * @param cacheConfigsToPersist Cache configurations to persist. * @return Future that will be completed when all cache configurations will be persisted to cache work directory. 
*/ - private IgniteInternalFuture persistCacheConfigurations(List cacheDescriptors) { - List cacheConfigsToPersist = cacheDescriptors.stream() - .map(DynamicCacheDescriptor::toStoredData) - .collect(Collectors.toList()); - + private IgniteInternalFuture persistCacheConfigurations(List cacheConfigsToPersist) { // Pre-create cache work directories if they don't exist. for (StoredCacheData data : cacheConfigsToPersist) { try { @@ -297,13 +298,15 @@ private IgniteInternalFuture persistCacheConfigurations(List { - try { - for (StoredCacheData data : cacheConfigsToPersist) - cctx.pageStore().storeCacheData(data, false); - } - catch (IgniteCheckedException e) { - U.error(log, "Error while saving cache configurations on disk", e); + return cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() { + @Override public void run() { + try { + for (StoredCacheData data : cacheConfigsToPersist) + cctx.cache().saveCacheConfiguration(data, false); + } + catch (IgniteCheckedException e) { + U.error(log, "Error while saving cache configurations on disk", e); + } } }); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java index 81675bdd85111..a9d68aad0574e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java @@ -33,6 +33,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.stream.Collectors; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cache.CacheExistsException; @@ -136,15 +137,13 @@ public ClusterCachesInfo(GridKernalContext ctx) { * Filters all dynamic cache descriptors and groups that were not presented on 
node start * and were received with grid discovery data. * - * @param localConfigData node's local cache configurations - * (both from static config and stored with persistent caches). - * + * @param localCachesOnStart Caches which were already presented on node start. */ - public void filterDynamicCacheDescriptors(CacheJoinNodeDiscoveryData localConfigData) { + public void filterDynamicCacheDescriptors(Set localCachesOnStart) { if (ctx.isDaemon()) return; - filterRegisteredCachesAndCacheGroups(localConfigData.caches()); + filterRegisteredCachesAndCacheGroups(localCachesOnStart); List> locJoinStartCaches = locJoinCachesCtx.caches(); @@ -163,14 +162,14 @@ public void filterDynamicCacheDescriptors(CacheJoinNodeDiscoveryData localConfig * * @param locCaches Caches from local node configuration (static configuration and persistent caches). */ - private void filterRegisteredCachesAndCacheGroups(Map locCaches) { + private void filterRegisteredCachesAndCacheGroups(Set locCaches) { //filter registered caches Iterator> cachesIter = registeredCaches.entrySet().iterator(); while (cachesIter.hasNext()) { Map.Entry e = cachesIter.next(); - if (!locCaches.containsKey(e.getKey())) { + if (!locCaches.contains(e.getKey())) { cachesIter.remove(); ctx.discovery().removeCacheFilter(e.getKey()); @@ -1660,6 +1659,53 @@ else if (joiningNodeData instanceof CacheJoinNodeDiscoveryData) } } + /** + * @param data Joining node data. + * @return Message with error or null if everything was OK. 
+ */ + public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData data) { + if (data.hasJoiningNodeData()) { + Serializable joiningNodeData = data.joiningNodeData(); + + if (joiningNodeData instanceof CacheJoinNodeDiscoveryData) { + CacheJoinNodeDiscoveryData joinData = (CacheJoinNodeDiscoveryData)joiningNodeData; + + Set problemCaches = null; + + for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : joinData.caches().values()) { + CacheConfiguration cfg = cacheInfo.cacheData().config(); + + if (!registeredCaches.containsKey(cfg.getName())) { + String conflictErr = checkCacheConflict(cfg); + + if (conflictErr != null) { + U.warn(log, "Ignore cache received from joining node. " + conflictErr); + + continue; + } + + long flags = cacheInfo.getFlags(); + + if (flags == 1L) { + if (problemCaches == null) + problemCaches = new HashSet<>(); + + problemCaches.add(cfg.getName()); + } + } + } + + if (!F.isEmpty(problemCaches)) + return problemCaches.stream().collect(Collectors.joining(", ", + "Joining node has caches with data which are not presented on cluster, " + + "it could mean that they were already destroyed, to add the node to cluster - " + + "remove directories with the caches[", "]")); + } + } + + return null; + } + /** * @param clientData Discovery data. * @param clientNodeId Client node ID. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java index 268756238366b..3dbee2a9b140a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java @@ -394,7 +394,7 @@ public void receivedFrom(UUID nodeId) { /** * @return ID of node provided cache configuration in discovery data. 
*/ - @Nullable public UUID receivedFrom() { + public @Nullable UUID receivedFrom() { return rcvdFrom; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java index 7db59ca58c2fd..fa42caffda2a2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java @@ -678,14 +678,14 @@ public boolean isLocal() { * @return {@code True} if cache is replicated cache. */ public boolean isReplicated() { - return cacheCfg.getCacheMode() == CacheMode.REPLICATED; + return config().getCacheMode() == CacheMode.REPLICATED; } /** * @return {@code True} if cache is partitioned cache. */ public boolean isPartitioned() { - return cacheCfg.getCacheMode() == CacheMode.PARTITIONED; + return config().getCacheMode() == CacheMode.PARTITIONED; } /** @@ -699,7 +699,7 @@ public boolean isDrEnabled() { * @return {@code True} in case cache supports query. */ public boolean isQueryEnabled() { - return !F.isEmpty(cacheCfg.getQueryEntities()); + return !F.isEmpty(config().getQueryEntities()); } /** @@ -824,7 +824,7 @@ public void checkSecurity(SecurityPermission op) throws SecurityException { if (CU.isSystemCache(name())) return; - ctx.security().authorize(name(), op, null); + ctx.security().authorize(name(), op); } /** @@ -852,14 +852,16 @@ public boolean rebalanceEnabled() { * @return {@code True} if atomic. */ public boolean atomic() { - return cacheCfg.getAtomicityMode() == ATOMIC; + return config().getAtomicityMode() == ATOMIC; } /** * @return {@code True} if transactional. 
*/ public boolean transactional() { - return cacheCfg.getAtomicityMode() == TRANSACTIONAL; + CacheConfiguration cfg = config(); + + return cfg.getAtomicityMode() == TRANSACTIONAL; } /** @@ -1039,9 +1041,15 @@ public GridCacheAdapter cache() { /** * @return Cache configuration for given cache instance. + * @throws IllegalStateException If this cache context was cleaned up. */ public CacheConfiguration config() { - return cacheCfg; + CacheConfiguration res = cacheCfg; + + if (res == null) + throw new IllegalStateException((new CacheStoppedException(name()))); + + return res; } /** @@ -1050,7 +1058,7 @@ public CacheConfiguration config() { * are set to {@code true} or the store is local. */ public boolean writeToStoreFromDht() { - return store().isLocal() || cacheCfg.isWriteBehindEnabled(); + return store().isLocal() || config().isWriteBehindEnabled(); } /** @@ -1502,56 +1510,56 @@ public boolean deploymentEnabled() { * @return {@code True} if store read-through mode is enabled. */ public boolean readThrough() { - return cacheCfg.isReadThrough() && !skipStore(); + return config().isReadThrough() && !skipStore(); } /** * @return {@code True} if store and read-through mode are enabled in configuration. */ public boolean readThroughConfigured() { - return store().configured() && cacheCfg.isReadThrough(); + return store().configured() && config().isReadThrough(); } /** * @return {@code True} if {@link CacheConfiguration#isLoadPreviousValue()} flag is set. */ public boolean loadPreviousValue() { - return cacheCfg.isLoadPreviousValue(); + return config().isLoadPreviousValue(); } /** * @return {@code True} if store write-through is enabled. */ public boolean writeThrough() { - return cacheCfg.isWriteThrough() && !skipStore(); + return config().isWriteThrough() && !skipStore(); } /** * @return {@code True} if invalidation is enabled. 
*/ public boolean isInvalidate() { - return cacheCfg.isInvalidate(); + return config().isInvalidate(); } /** * @return {@code True} if synchronous commit is enabled. */ public boolean syncCommit() { - return cacheCfg.getWriteSynchronizationMode() == FULL_SYNC; + return config().getWriteSynchronizationMode() == FULL_SYNC; } /** * @return {@code True} if synchronous rollback is enabled. */ public boolean syncRollback() { - return cacheCfg.getWriteSynchronizationMode() == FULL_SYNC; + return config().getWriteSynchronizationMode() == FULL_SYNC; } /** * @return {@code True} if only primary node should be updated synchronously. */ public boolean syncPrimary() { - return cacheCfg.getWriteSynchronizationMode() == PRIMARY_SYNC; + return config().getWriteSynchronizationMode() == PRIMARY_SYNC; } /** @@ -1767,7 +1775,7 @@ public boolean keepBinary() { * of {@link CacheConfiguration#isCopyOnRead()}. */ public boolean needValueCopy() { - return affNode && cacheCfg.isCopyOnRead(); + return affNode && config().isCopyOnRead(); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java index 3df507a4434ba..56f1592f74a4c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java @@ -232,8 +232,16 @@ public void onUndeploy(final ClassLoader ldr, final GridCacheContext ctx) // Unwind immediately for local and replicate caches. // We go through preloader for proper synchronization. 
- if (ctx.isLocal()) - ctx.preloader().unwindUndeploys(); + if (ctx.isLocal()) { + ctx.preloader().pause(); + + try { + ctx.group().unwindUndeploys(); + } + finally { + ctx.preloader().resume(); + } + } } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java index 1ae1b8d9269d3..4d9bf4795d5cf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java @@ -865,12 +865,11 @@ public Collection localCandidates(@Nullable GridCacheVer /** * Update index from within entry lock, passing key, value, and expiration time to provided closure. * - * @param filter Row filter. * @param clo Closure to apply to key, value, and expiration time. * @throws IgniteCheckedException If failed. * @throws GridCacheEntryRemovedException If entry was removed. 
*/ - public void updateIndex(SchemaIndexCacheFilter filter, SchemaIndexCacheVisitorClosure clo) + public void updateIndex(SchemaIndexCacheVisitorClosure clo) throws IgniteCheckedException, GridCacheEntryRemovedException; /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java index 726a6c88c1804..c095ebe27fdc7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java @@ -20,6 +20,7 @@ import java.util.Collection; import java.util.UUID; import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.events.CacheEvent; import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; @@ -388,11 +389,18 @@ public boolean isRecordable(int type) { GridCacheContext cctx0 = cctx; // Event recording is impossible in recovery mode. - if (cctx0 != null && cctx0.kernalContext().recoveryMode()) + if (cctx0 == null || cctx0.kernalContext().recoveryMode()) return false; - return cctx0 != null && cctx0.userCache() && cctx0.gridEvents().isRecordable(type) - && !cctx0.config().isEventsDisabled(); + try { + CacheConfiguration cfg = cctx0.config(); + + return cctx0.userCache() && cctx0.gridEvents().isRecordable(type) && !cfg.isEventsDisabled(); + } + catch (IllegalStateException e) { + // Cache context was cleaned up. 
+ return false; + } } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java index 2853f00d0dd86..33f529279477b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java @@ -27,8 +27,8 @@ import org.apache.ignite.internal.processors.cache.version.GridCacheVersionManager; import org.apache.ignite.internal.util.GridBusyLock; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.events.EventType.EVT_CACHE_ENTRY_EVICTED; @@ -313,8 +313,20 @@ private void notifyPolicy(GridCacheEntryEx e) { if (log.isDebugEnabled()) log.debug("Notifying eviction policy with entry: " + e); - if (filter == null || filter.evictAllowed(e.wrapLazyValue(cctx.keepBinary()))) - plc.onEntryAccessed(e.obsoleteOrDeleted(), e.wrapEviction()); + if (filter == null || filter.evictAllowed(e.wrapLazyValue(cctx.keepBinary()))) { + try { + plc.onEntryAccessed(e.obsoleteOrDeleted(), e.wrapEviction()); + } + catch (RuntimeException ex) { + if (!e.obsoleteOrDeleted()) { + U.ignoreRuntimeException(() -> plc.onEntryAccessed(true, e.wrapEviction())); + e.wrapEviction().evict(); + } + + LT.warn(log, "Eviction manager caught an error [msg=" + ex.getMessage() + "]." 
+ + " Entry [key=" + e.key() + "] wasn't inserted."); + } + } } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java index 658ca2a8de039..7e1d867c4ec21 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java @@ -133,6 +133,8 @@ public void leaveNoLock() { ctx.tm().resetContext(); ctx.mvcc().contextReset(); + ctx.tm().leaveNearTxSystemSection(); + // Unwind eviction notifications. if (!ctx.shared().closed(ctx)) CU.unwindEvicts(ctx); @@ -172,6 +174,8 @@ public void leave() { onEnter(); + ctx.tm().enterNearTxSystemSection(); + Lock lock = rwLock.readLock(); lock.lock(); @@ -239,6 +243,8 @@ public void leaveNoLock(CacheOperationContext prev) { // Unwind eviction notifications. CU.unwindEvicts(ctx); + ctx.tm().leaveNearTxSystemSection(); + // Return back previous thread local operation context per call. 
ctx.operationContextPerCall(prev); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIdMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIdMessage.java index e0944397ecf3d..84d890c56dc18 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIdMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIdMessage.java @@ -112,6 +112,6 @@ public void cacheId(int cacheId) { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(GridCacheIdMessage.class, this); + return S.toString(GridCacheIdMessage.class, this, "super", super.toString()); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java index 08af2a39693c1..34495ac012829 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java @@ -229,7 +229,7 @@ else if (desc.receivedFromStartVersion() != null) else { AffinityTopologyVersion locAffVer = cctx.exchange().readyAffinityVersion(); - if (locAffVer.compareTo(lastAffChangedVer) < 0) { + if (locAffVer.before(lastAffChangedVer)) { IgniteLogger log = cacheMsg.messageLogger(cctx); if (log.isDebugEnabled()) { @@ -413,6 +413,8 @@ private boolean processMissedHandler(UUID nodeId, GridCacheMessage cacheMsg) { dhtRes.nearEvicted(nearEvicted); + dhtRes.copyTimestamps(req); + sendMessageForMissedHandler(cacheMsg, nodeId, dhtRes, @@ -752,6 +754,8 @@ private void processFailedMessage(UUID nodeId, 0, false); + res.copyTimestamps(req); + sendResponseOnFailedMessage(nodeId, res, cctx, plc); } @@ -767,6 +771,8 @@ private void processFailedMessage(UUID nodeId, req.miniId(), req.deployInfo() != null); 
+ res.copyTimestamps(req); + res.error(req.classError()); sendResponseOnFailedMessage(nodeId, res, cctx, req.policy()); @@ -783,6 +789,8 @@ private void processFailedMessage(UUID nodeId, req.futureId(), false); + res.copyTimestamps(req); + res.onError(req.classError()); sendResponseOnFailedMessage(nodeId, res, cctx, plc); @@ -813,6 +821,8 @@ private void processFailedMessage(UUID nodeId, false, false); + res.copyTimestamps(req); + res.error(req.classError()); sendResponseOnFailedMessage(nodeId, res, cctx, plc); @@ -830,6 +840,8 @@ private void processFailedMessage(UUID nodeId, false ); + res.copyTimestamps(req); + res.error(req.classError()); sendResponseOnFailedMessage(nodeId, res, cctx, plc); @@ -847,6 +859,8 @@ private void processFailedMessage(UUID nodeId, req.version(), req.deployInfo() != null); + res.copyTimestamps(req); + res.error(req.classError()); sendResponseOnFailedMessage(nodeId, res, cctx, plc); @@ -885,8 +899,11 @@ private void processFailedMessage(UUID nodeId, 0, req.classError(), null, + false, false); + res.copyTimestamps(req); + sendResponseOnFailedMessage(nodeId, res, cctx, plc); } @@ -910,6 +927,8 @@ private void processFailedMessage(UUID nodeId, res.error(req.classError()); + res.copyTimestamps(req); + sendResponseOnFailedMessage(nodeId, res, cctx, req.policy()); } @@ -954,6 +973,8 @@ private void processFailedMessage(UUID nodeId, res.error(req.classError()); + res.copyTimestamps(req); + sendResponseOnFailedMessage(nodeId, res, cctx, plc); } @@ -990,6 +1011,8 @@ private void processFailedMessage(UUID nodeId, false, false); + res.copyTimestamps(req); + res.error(req.classError()); sendResponseOnFailedMessage(nodeId, res, cctx, plc); @@ -1008,6 +1031,8 @@ private void processFailedMessage(UUID nodeId, false, false); + res.copyTimestamps(req); + res.error(req.classError()); sendResponseOnFailedMessage(nodeId, res, cctx, plc); @@ -1026,6 +1051,8 @@ private void processFailedMessage(UUID nodeId, false, false); + res.copyTimestamps(req); + 
res.error(req.classError()); sendResponseOnFailedMessage(nodeId, res, cctx, plc); @@ -1042,6 +1069,8 @@ private void processFailedMessage(UUID nodeId, req.futureId(), false); + res.copyTimestamps(req); + res.onError(req.classError()); sendResponseOnFailedMessage(nodeId, res, cctx, plc); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java index 1fdbed59742e5..f0945d3f804ac 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java @@ -96,6 +96,7 @@ import static org.apache.ignite.internal.processors.cache.GridCacheOperation.DELETE; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.TRANSFORM; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.UPDATE; +import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.RENTING; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; /** @@ -1124,8 +1125,6 @@ else if (interceptorVal != val0) null, topVer); } - - cctx.dataStructures().onEntryUpdated(key, false, keepBinary); } finally { unlockEntry(); @@ -1348,8 +1347,6 @@ else if (log.isDebugEnabled()) topVer); } - cctx.dataStructures().onEntryUpdated(key, true, keepBinary); - deferred = cctx.deferredDelete() && !detached() && !isInternal(); if (intercept) @@ -1730,8 +1727,6 @@ else if (ttl != CU.TTL_ZERO) onUpdateFinished(updateCntr); } - cctx.dataStructures().onEntryUpdated(key, op == DELETE, keepBinary); - if (intercept) { if (op == UPDATE) cctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(cctx, key, key0, updated, updated0, keepBinary, 0L)); @@ -2018,8 +2013,6 @@ else if (ttl != CU.TTL_ZERO) topVer); } - cctx.dataStructures().onEntryUpdated(key, c.op == 
DELETE, keepBinary); - if (intercept && c.wasIntercepted) { assert c.op == UPDATE || c.op == DELETE : c.op; @@ -2821,7 +2814,7 @@ private boolean skipInterceptor(@Nullable GridCacheVersion explicitVer) { val = cctx.kernalContext().cacheObjects().prepareForCache(val, cctx); - final boolean unswapped = ((flags & IS_UNSWAPPED_MASK) != 0); + final boolean unswapped = (flags & IS_UNSWAPPED_MASK) != 0; boolean update; @@ -2829,16 +2822,16 @@ private boolean skipInterceptor(@Nullable GridCacheVersion explicitVer) { @Override public boolean apply(@Nullable CacheDataRow row) { boolean update0; - GridCacheVersion currentVer = row != null ? row.version() : GridCacheMapEntry.this.ver; + GridCacheVersion currVer = row != null ? row.version() : GridCacheMapEntry.this.ver; - boolean isStartVer = cctx.shared().versions().isStartVersion(currentVer); + boolean isStartVer = cctx.shared().versions().isStartVersion(currVer); if (cctx.group().persistenceEnabled()) { if (!isStartVer) { if (cctx.atomic()) - update0 = ATOMIC_VER_COMPARATOR.compare(currentVer, ver) < 0; + update0 = ATOMIC_VER_COMPARATOR.compare(currVer, ver) < 0; else - update0 = currentVer.compareTo(ver) < 0; + update0 = currVer.compareTo(ver) < 0; } else update0 = true; @@ -2873,8 +2866,14 @@ else if (val == null) storeValue(val, expTime, ver); } } - else // Optimization to access storage only once. - update = storeValue(val, expTime, ver, p); + else { + // Optimization to access storage only once. + UpdateClosure c = storeValue(val, expTime, ver, p); + + // Update if tree is changed or removal is replicated from supplier node and is absent locally. 
+ update = c.operationType() != IgniteTree.OperationType.NOOP || + preload && val == null && !c.filtered() && c.oldRow() == null; + } if (update) { update(val, expTime, ttl, ver, true); @@ -2923,8 +2922,6 @@ else if (deletedUnlocked()) updateCntr, null, topVer); - - cctx.dataStructures().onEntryUpdated(key, false, false); } onUpdateFinished(updateCntr); @@ -3697,10 +3694,10 @@ private IgniteTxLocalAdapter currentTx() { * @param ver New entry version. * @throws IgniteCheckedException If update failed. */ - protected boolean storeValue(@Nullable CacheObject val, + protected void storeValue(@Nullable CacheObject val, long expireTime, GridCacheVersion ver) throws IgniteCheckedException { - return storeValue(val, expireTime, ver, null); + storeValue(val, expireTime, ver, null); } /** @@ -3709,23 +3706,26 @@ protected boolean storeValue(@Nullable CacheObject val, * @param val Value. * @param expireTime Expire time. * @param ver New entry version. - * @param predicate Optional predicate. + * @param pred Optional predicate. * * @return {@code True} if storage was modified. + * @param pred Optional predicate. + * @return Update closure containing invocation context. * @throws IgniteCheckedException If update failed. 
*/ - protected boolean storeValue( + protected UpdateClosure storeValue( @Nullable CacheObject val, long expireTime, GridCacheVersion ver, - @Nullable IgnitePredicate predicate) throws IgniteCheckedException { + @Nullable IgnitePredicate pred) throws IgniteCheckedException { assert lock.isHeldByCurrentThread(); + assert localPartition() == null || localPartition().state() != RENTING : localPartition(); - UpdateClosure closure = new UpdateClosure(this, val, ver, expireTime, predicate); + UpdateClosure c = new UpdateClosure(this, val, ver, expireTime, pred); - cctx.offheap().invoke(cctx, key, localPartition(), closure); + cctx.offheap().invoke(cctx, key, localPartition(), c); - return closure.treeOp != IgniteTree.OperationType.NOOP; + return c; } /** @@ -3800,6 +3800,7 @@ protected WALPointer logTxUpdate(IgniteInternalTx tx, CacheObject val, long expi protected void removeValue() throws IgniteCheckedException { assert lock.isHeldByCurrentThread(); + // Removals are possible from RENTING partition on clearing/evicting. cctx.offheap().remove(cctx, key, partition(), localPartition()); } @@ -3870,8 +3871,8 @@ protected void removeValue() throws IgniteCheckedException { } /** {@inheritDoc} */ - @Override public void updateIndex(SchemaIndexCacheFilter filter, SchemaIndexCacheVisitorClosure clo) - throws IgniteCheckedException, GridCacheEntryRemovedException { + @Override public void updateIndex(SchemaIndexCacheVisitorClosure clo) throws IgniteCheckedException, + GridCacheEntryRemovedException { lockEntry(); try { @@ -3882,7 +3883,7 @@ protected void removeValue() throws IgniteCheckedException { CacheDataRow row = cctx.offheap().read(this); - if (row != null && (filter == null || filter.apply(row))) + if (row != null) clo.apply(row); } finally { @@ -4270,9 +4271,25 @@ private void obsoleteVersionExtras(@Nullable GridCacheVersion obsoleteVer, GridC * @param owners Current owners. * @param val Entry value. 
*/ - protected final void checkOwnerChanged(@Nullable CacheLockCandidates prevOwners, + protected final void checkOwnerChanged( + @Nullable CacheLockCandidates prevOwners, + @Nullable CacheLockCandidates owners, + CacheObject val + ) { + checkOwnerChanged(prevOwners, owners, val, false); + } + /** + * @param prevOwners Previous owners. + * @param owners Current owners. + * @param val Entry value. + * @param inThreadChain {@code True} if called during thread chain checking. + */ + protected final void checkOwnerChanged( + @Nullable CacheLockCandidates prevOwners, @Nullable CacheLockCandidates owners, - CacheObject val) { + CacheObject val, + boolean inThreadChain + ) { assert !lock.isHeldByCurrentThread(); if (prevOwners != null && owners == null) { @@ -4308,7 +4325,7 @@ protected final void checkOwnerChanged(@Nullable CacheLockCandidates prevOwners, if (locked) { cctx.mvcc().callback().onOwnerChanged(this, owner); - if (owner.local()) + if (owner.local() && !inThreadChain) checkThreadChain(owner); if (cctx.events().isRecordable(EVT_CACHE_OBJECT_LOCKED)) { @@ -4547,6 +4564,9 @@ private static class UpdateClosure implements IgniteCacheOffheapManager.OffheapI /** */ private IgniteTree.OperationType treeOp = IgniteTree.OperationType.PUT; + /** */ + private boolean filtered; + /** * @param entry Entry. * @param val New value. @@ -4574,6 +4594,8 @@ private static class UpdateClosure implements IgniteCacheOffheapManager.OffheapI this.oldRow = oldRow; if (predicate != null && !predicate.apply(oldRow)) { + filtered = true; + treeOp = IgniteTree.OperationType.NOOP; return; @@ -4610,6 +4632,13 @@ private static class UpdateClosure implements IgniteCacheOffheapManager.OffheapI return oldRow; } + /** + * @return {@code True} if update was filtered by predicate. + */ + protected boolean filtered() { + return filtered; + } + /** * Checks row for expiration and fire expire events if needed. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvcc.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvcc.java index 9ba8c63342059..6a471e98f2287 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvcc.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvcc.java @@ -122,8 +122,7 @@ public GridCacheMvcc(GridCacheContext cctx) { } /** - * @return Remote candidate only if it's first in the list and is marked - * as 'used'. + * @return Remote candidate only if it's first in the list and is marked as 'used'. */ @Nullable private GridCacheMvccCandidate remoteOwner() { if (rmts != null) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java index 108bc21cf1d0f..2d5baffd11b33 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java @@ -96,6 +96,7 @@ import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.latch.ExchangeLatchManager; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridClientPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; +import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal; import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; import org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; @@ -107,6 +108,7 @@ import 
org.apache.ignite.internal.processors.timeout.GridTimeoutObject; import org.apache.ignite.internal.util.GridListSet; import org.apache.ignite.internal.util.GridPartitionStateMap; +import org.apache.ignite.internal.util.GridStringBuilder; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; @@ -447,18 +449,18 @@ else if (exchangeInProgress()) { if (grp != null) { if (m instanceof GridDhtPartitionSupplyMessage) { - grp.preloader().handleSupplyMessage(idx, id, (GridDhtPartitionSupplyMessage) m); + grp.preloader().handleSupplyMessage(id, (GridDhtPartitionSupplyMessage)m); return; } else if (m instanceof GridDhtPartitionDemandMessage) { - grp.preloader().handleDemandMessage(idx, id, (GridDhtPartitionDemandMessage) m); + grp.preloader().handleDemandMessage(idx, id, (GridDhtPartitionDemandMessage)m); return; } else if (m instanceof GridDhtPartitionDemandLegacyMessage) { grp.preloader().handleDemandMessage(idx, id, - new GridDhtPartitionDemandMessage((GridDhtPartitionDemandLegacyMessage) m)); + new GridDhtPartitionDemandMessage((GridDhtPartitionDemandLegacyMessage)m)); return; } @@ -939,7 +941,7 @@ public void lastFinishedFuture(GridDhtPartitionsExchangeFuture fut) { * @param ver Topology version. * @return Future or {@code null} is future is already completed. */ - @Nullable public IgniteInternalFuture affinityReadyFuture(AffinityTopologyVersion ver) { + @NotNull public IgniteInternalFuture affinityReadyFuture(AffinityTopologyVersion ver) { GridDhtPartitionsExchangeFuture lastInitializedFut0 = lastInitializedFut; if (lastInitializedFut0 != null && lastInitializedFut0.initialVersion().compareTo(ver) == 0 @@ -1979,6 +1981,41 @@ public void dumpDebugInfo(@Nullable GridDhtPartitionsExchangeFuture exchFut) thr diagCtx.send(cctx.kernalContext(), null); } + /** + * Builds warning string for long running transaction. + * + * @param tx Transaction. 
+ * @param curTime Current timestamp. + * @return Warning string. + */ + private String longRunningTransactionWarning(IgniteInternalTx tx, long curTime) { + GridStringBuilder warning = new GridStringBuilder() + .a(">>> Transaction [startTime=") + .a(formatTime(tx.startTime())) + .a(", curTime=") + .a(formatTime(curTime)); + + if (tx instanceof GridNearTxLocal) { + GridNearTxLocal nearTxLoc = (GridNearTxLocal)tx; + + long sysTimeCurr = nearTxLoc.systemTimeCurrent(); + + //in some cases totalTimeMillis can be less than systemTimeMillis, as they are calculated with different precision + long userTime = Math.max(curTime - nearTxLoc.startTime() - sysTimeCurr, 0); + + warning.a(", systemTime=") + .a(sysTimeCurr) + .a(", userTime=") + .a(userTime); + } + + warning.a(", tx=") + .a(tx) + .a("]"); + + return warning.toString(); + } + /** * @param timeout Operation timeout. * @return {@code True} if found long running operations. @@ -2005,8 +2042,7 @@ private boolean dumpLongRunningOperations0(long timeout) { found = true; if (warnings.canAddMessage()) { - warnings.add(">>> Transaction [startTime=" + formatTime(tx.startTime()) + - ", curTime=" + formatTime(curTime) + ", tx=" + tx + ']'); + warnings.add(longRunningTransactionWarning(tx, curTime)); if (ltrDumpLimiter.allowAction(tx)) dumpLongRunningTransaction(tx); @@ -3044,7 +3080,7 @@ else if (task instanceof ForceRebalanceExchangeTask) { ? Math.min(curTimeout, dumpTimeout) : dumpTimeout; - blockingSectionEnd(); + blockingSectionBegin(); try { resVer = exchFut.get(exchTimeout, TimeUnit.MILLISECONDS); @@ -3106,7 +3142,6 @@ else if (task instanceof ForceRebalanceExchangeTask) { // Just pick first worker to do this, so we don't // invoke topology callback more than once for the // same event. 
- boolean changed = false; for (CacheGroupContext grp : cctx.cache().cacheGroups()) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePreloader.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePreloader.java index 3eac9b0900db6..825d573e349ab 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePreloader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePreloader.java @@ -162,19 +162,13 @@ public GridDhtFuture request(GridCacheContext cctx, */ public IgniteInternalFuture forceRebalance(); - /** - * Unwinds undeploys. - */ - public void unwindUndeploys(); - /** * Handles Supply message. * - * @param idx Index. * @param id Node Id. * @param s Supply message. */ - public void handleSupplyMessage(int idx, UUID id, final GridDhtPartitionSupplyMessage s); + public void handleSupplyMessage(UUID id, final GridDhtPartitionSupplyMessage s); /** * Handles Demand message. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePreloaderAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePreloaderAdapter.java index 3e52a23a86eaa..571198f5cc252 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePreloaderAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePreloaderAdapter.java @@ -125,12 +125,7 @@ public GridCachePreloaderAdapter(CacheGroupContext grp) { } /** {@inheritDoc} */ - @Override public void unwindUndeploys() { - grp.unwindUndeploys(); - } - - /** {@inheritDoc} */ - @Override public void handleSupplyMessage(int idx, UUID id, GridDhtPartitionSupplyMessage s) { + @Override public void handleSupplyMessage(UUID id, GridDhtPartitionSupplyMessage s) { // No-op. 
} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java index 429b12090b97a..909911c842210 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java @@ -17,15 +17,14 @@ package org.apache.ignite.internal.processors.cache; +import javax.management.MBeanServer; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; -import java.util.Deque; import java.util.HashMap; import java.util.HashSet; import java.util.IdentityHashMap; -import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Map; @@ -35,11 +34,15 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import java.util.stream.Collectors; -import javax.management.MBeanServer; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteCompute; import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteInterruptedException; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cache.CacheExistsException; import org.apache.ignite.cache.CacheMode; @@ -80,7 +83,6 @@ import org.apache.ignite.internal.processors.GridProcessorAdapter; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache; -import org.apache.ignite.internal.processors.cache.CacheJoinNodeDiscoveryData.CacheInfo; import 
org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; import org.apache.ignite.internal.processors.cache.datastructures.CacheDataStructuresManager; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCache; @@ -97,7 +99,6 @@ import org.apache.ignite.internal.processors.cache.jta.CacheJtaManagerAdapter; import org.apache.ignite.internal.processors.cache.local.GridLocalCache; import org.apache.ignite.internal.processors.cache.local.atomic.GridLocalAtomicCache; -import org.apache.ignite.internal.processors.cache.persistence.CheckpointFuture; import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.DatabaseLifecycleListener; import org.apache.ignite.internal.processors.cache.persistence.DbCheckpointListener; @@ -135,11 +136,13 @@ import org.apache.ignite.internal.processors.query.schema.SchemaNodeLeaveExchangeWorkerTask; import org.apache.ignite.internal.processors.query.schema.message.SchemaAbstractDiscoveryMessage; import org.apache.ignite.internal.processors.query.schema.message.SchemaProposeDiscoveryMessage; +import org.apache.ignite.internal.processors.security.OperationSecurityContext; import org.apache.ignite.internal.processors.security.SecurityContext; import org.apache.ignite.internal.processors.timeout.GridTimeoutObject; import org.apache.ignite.internal.suggestions.GridPerformanceSuggestions; import org.apache.ignite.internal.util.F0; import org.apache.ignite.internal.util.InitializationProtector; +import org.apache.ignite.internal.util.StripedExecutor; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -158,6 +161,7 @@ import org.apache.ignite.lang.IgniteClosure; import org.apache.ignite.lang.IgniteFuture; import org.apache.ignite.lang.IgnitePredicate; +import 
org.apache.ignite.lang.IgniteRunnable; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.lifecycle.LifecycleAware; import org.apache.ignite.marshaller.Marshaller; @@ -190,11 +194,13 @@ import static org.apache.ignite.configuration.DeploymentMode.SHARED; import static org.apache.ignite.internal.GridComponent.DiscoveryDataExchangeType.CACHE_PROC; import static org.apache.ignite.internal.IgniteComponentType.JTA; +import static org.apache.ignite.internal.IgniteFeatures.LRT_SYSTEM_USER_TIME_DUMP_SETTINGS; import static org.apache.ignite.internal.IgniteFeatures.TRANSACTION_OWNER_THREAD_DUMP_PROVIDING; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_CONSISTENCY_CHECK_SKIPPED; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_TX_CONFIG; import static org.apache.ignite.internal.processors.cache.GridCacheUtils.isNearEnabled; import static org.apache.ignite.internal.processors.cache.GridCacheUtils.isPersistentCache; +import static org.apache.ignite.internal.processors.security.SecurityUtils.nodeSecurityContext; import static org.apache.ignite.internal.util.IgniteUtils.doInParallel; /** @@ -211,9 +217,12 @@ public class GridCacheProcessor extends GridProcessorAdapter { "(the config of the cache '%s' has to be merged which is impossible on active grid). 
" + "Deactivate grid and retry node join or clean the joining node."; /** */ - private final boolean startClientCaches = - IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_START_CACHES_ON_JOIN, false); + private static final String CACHE_NAME_AND_OPERATION_FORMAT = "[cacheName=%s, operation=%s]"; + + /** */ + private static final String CACHE_NAMES_AND_OPERATION_FORMAT = "[cacheNames=%s, operation=%s]"; + /** */ private final boolean walFsyncWithDedicatedWorker = IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_WAL_FSYNC_WITH_DEDICATED_WORKER, false); @@ -231,6 +240,9 @@ public class GridCacheProcessor extends GridProcessorAdapter { /** */ private final ConcurrentMap cacheGrps = new ConcurrentHashMap<>(); + /** Flag that caches were already filtered out. */ + private final AtomicBoolean alreadyFiltered = new AtomicBoolean(); + /** */ private final Map> caches; @@ -240,9 +252,6 @@ public class GridCacheProcessor extends GridProcessorAdapter { /** Map of proxies. */ private final ConcurrentHashMap> jCacheProxies; - /** Caches stop sequence. */ - private final Deque stopSeq; - /** Transaction interface implementation. */ private IgniteTransactionsImpl transactions; @@ -262,6 +271,9 @@ public class GridCacheProcessor extends GridProcessorAdapter { /** */ private ClusterCachesInfo cachesInfo; + /** */ + private GridLocalConfigManager locCfgMgr; + /** */ private IdentityHashMap sesHolders = new IdentityHashMap<>(); @@ -297,24 +309,11 @@ public GridCacheProcessor(GridKernalContext ctx) { caches = new ConcurrentHashMap<>(); jCacheProxies = new ConcurrentHashMap<>(); - stopSeq = new LinkedList<>(); internalCaches = new HashSet<>(); marsh = MarshallerUtils.jdkMarshaller(ctx.igniteInstanceName()); } - /** - * @param cfg Initializes cache configuration with proper defaults. - * @param cacheObjCtx Cache object context. - * @throws IgniteCheckedException If configuration is not valid. 
- */ - private void initialize(CacheConfiguration cfg, CacheObjectContext cacheObjCtx) - throws IgniteCheckedException { - CU.initializeConfigDefaults(log, cfg, cacheObjCtx); - - ctx.igfsHelper().preProcessCacheConfiguration(cfg); - } - /** * @param cfg Configuration to check for possible performance issues. * @param hasStore {@code True} if store is configured. @@ -710,31 +709,6 @@ private void cleanup(CacheConfiguration cfg, @Nullable Object rsrc, boolean near } } - /** - * @throws IgniteCheckedException If failed. - */ - private void restoreCacheConfigurations() throws IgniteCheckedException { - if (ctx.isDaemon()) - return; - - Map caches = new HashMap<>(); - - Map templates = new HashMap<>(); - - addCacheOnJoinFromConfig(caches, templates); - - CacheJoinNodeDiscoveryData discoData = new CacheJoinNodeDiscoveryData( - IgniteUuid.randomUuid(), - caches, - templates, - startAllCachesOnClientStart() - ); - - localConfigs = discoData; - - cachesInfo.onStart(discoData); - } - /** {@inheritDoc} */ @SuppressWarnings({"unchecked"}) @Override public void start() throws IgniteCheckedException { @@ -758,14 +732,20 @@ private void restoreCacheConfigurations() throws IgniteCheckedException { sharedCtx = createSharedContext(ctx, sessionListeners); + locCfgMgr = new GridLocalConfigManager(this, ctx); + transactions = new IgniteTransactionsImpl(sharedCtx, null); // Start shared managers. 
for (GridCacheSharedManager mgr : sharedCtx.managers()) mgr.start(sharedCtx); - if (!ctx.isDaemon() && (!CU.isPersistenceEnabled(ctx.config())) || ctx.config().isClientMode()) - restoreCacheConfigurations(); + if (!ctx.isDaemon() && (!CU.isPersistenceEnabled(ctx.config())) || ctx.config().isClientMode()) { + CacheJoinNodeDiscoveryData data = locCfgMgr.restoreCacheConfigurations(); + + if (data != null) + cachesInfo.onStart(data); + } if (log.isDebugEnabled()) log.debug("Started cache processor."); @@ -774,123 +754,16 @@ private void restoreCacheConfigurations() throws IgniteCheckedException { ctx.authentication().cacheProcessorStarted(); } - /** - * @param cfg Cache configuration. - * @param sql SQL flag. - * @param caches Caches map. - * @param templates Templates map. - * @throws IgniteCheckedException If failed. - */ - private void addCacheOnJoin(CacheConfiguration cfg, boolean sql, - Map caches, - Map templates) throws IgniteCheckedException { - String cacheName = cfg.getName(); - - CU.validateCacheName(cacheName); - - cloneCheckSerializable(cfg); - - CacheObjectContext cacheObjCtx = ctx.cacheObjects().contextForCache(cfg); - - // Initialize defaults. - initialize(cfg, cacheObjCtx); - - StoredCacheData cacheData = new StoredCacheData(cfg); - - cacheData.sql(sql); - - if (GridCacheUtils.isCacheTemplateName(cacheName)) - templates.put(cacheName, new CacheInfo(cacheData, CacheType.USER, false, 0, true)); - else { - if (caches.containsKey(cacheName)) { - throw new IgniteCheckedException("Duplicate cache name found (check configuration and " + - "assign unique name to each cache): " + cacheName); - } - - CacheType cacheType = cacheType(cacheName); - - if (cacheType != CacheType.USER && cfg.getDataRegionName() == null) - cfg.setDataRegionName(sharedCtx.database().systemDateRegionName()); - - addStoredCache(caches, cacheData, cacheName, cacheType, true); - } - } /** - * Add stored cache data to caches storage. - * - * @param caches Cache storage. 
- * @param cacheData Cache data to add. - * @param cacheName Cache name. - * @param cacheType Cache type. - * @param isStaticalyConfigured Statically configured flag. - */ - private void addStoredCache(Map caches, StoredCacheData cacheData, String cacheName, - CacheType cacheType, boolean isStaticalyConfigured) { - if (!caches.containsKey(cacheName)) { - if (!cacheType.userCache()) - stopSeq.addLast(cacheName); - else - stopSeq.addFirst(cacheName); - } - - caches.put(cacheName, new CacheInfo(cacheData, cacheType, cacheData.sql(), 0, isStaticalyConfigured)); - } - - /** - * @param caches Caches map. - * @param templates Templates map. - * @throws IgniteCheckedException If failed. + * @param cfg Initializes cache configuration with proper defaults. + * @param cacheObjCtx Cache object context. + * @throws IgniteCheckedException If configuration is not valid. */ - private void addCacheOnJoinFromConfig( - Map caches, - Map templates - ) throws IgniteCheckedException { - assert !ctx.config().isDaemon(); - - CacheConfiguration[] cfgs = ctx.config().getCacheConfiguration(); - - for (int i = 0; i < cfgs.length; i++) { - CacheConfiguration cfg = new CacheConfiguration(cfgs[i]); - - // Replace original configuration value. - cfgs[i] = cfg; - - addCacheOnJoin(cfg, false, caches, templates); - } - - if (CU.isPersistenceEnabled(ctx.config()) && ctx.cache().context().pageStore() != null) { - Map storedCaches = ctx.cache().context().pageStore().readCacheConfigurations(); - - if (!F.isEmpty(storedCaches)) { - List skippedConfigs = new ArrayList<>(); - - for (StoredCacheData storedCacheData : storedCaches.values()) { - String cacheName = storedCacheData.config().getName(); - - CacheType type = cacheType(cacheName); - - if (!caches.containsKey(cacheName)) - // No static cache - add the configuration. - addStoredCache(caches, storedCacheData, cacheName, type, false); - else { - // A static cache with the same name already exists. 
- if (!keepStaticCacheConfiguration) { - addStoredCache(caches, storedCacheData, cacheName, type, false); - - if (type == CacheType.USER) - skippedConfigs.add(cacheName); - } - } - } + void initialize(CacheConfiguration cfg, CacheObjectContext cacheObjCtx) throws IgniteCheckedException { + CU.initializeConfigDefaults(log, cfg, cacheObjCtx); - if (!F.isEmpty(skippedConfigs)) - U.warn(log, "Static configuration for the following caches will be ignored because a persistent " + - "cache with the same name already exist (see " + - "https://apacheignite.readme.io/docs/cache-configuration for more information): " + - skippedConfigs); - } - } + ctx.igfsHelper().preProcessCacheConfiguration(cfg); } /** @@ -1041,7 +914,7 @@ private void checkConsistency() throws IgniteCheckedException { * @param cancel Cancel. */ public void stopCaches(boolean cancel) { - for (String cacheName : stopSeq) { + for (String cacheName : locCfgMgr.stopSequence()) { GridCacheAdapter cache = stoppedCaches.remove(cacheName); if (cache != null) @@ -1065,6 +938,28 @@ public void blockGateways() { proxy.context0().gate().onStopped(); } + /** + * Blocks (stops) cache gateway for caches according to given {@code cacheGroupIds}. + * + * @param cacheGrpIds Cache group ids for which cache gateway should be stopped. + * @return Caches for which cache gateway is blocked (stopped). + */ + public List blockGateways(Collection cacheGrpIds) { + List affectedCaches = internalCaches().stream() + .filter(cache -> cacheGrpIds.contains(cache.context().groupId())) + .collect(Collectors.toList()); + + affectedCaches.forEach(cache -> { + // Add proxy if it's not initialized. + addjCacheProxy(cache.context().name(), new IgniteCacheProxyImpl(cache.context(), cache, false)); + + // Stop proxy. 
+ blockGateway(cache.context().name(), true, false); + }); + + return affectedCaches; + } + /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public void onKernalStop(boolean cancel) { @@ -1109,7 +1004,7 @@ public void onKernalStopCaches(boolean cancel) { aff.cancelFutures(affErr); } - for (String cacheName : stopSeq) { + for (String cacheName : locCfgMgr.stopSequence()) { GridCacheAdapter cache = caches.remove(cacheName); if (cache != null) { @@ -1312,16 +1207,26 @@ private void stopCache(GridCacheAdapter cache, boolean cancel, boolean des U.stopLifecycleAware(log, lifecycleAwares(ctx.group(), cache.configuration(), ctx.store().configuredStore())); - IgnitePageStoreManager pageStore; + try { + IgniteWriteAheadLogManager wal; - if (destroy && (pageStore = sharedCtx.pageStore()) != null) { - try { + if ((wal = sharedCtx.wal()) != null) + wal.flush(null, false); + } + catch (IgniteCheckedException e) { + U.error(log, "Failed to flush WAL data while destroying cache" + + "[cache=" + ctx.name() + "]", e); + } + + try { + IgnitePageStoreManager pageStore; + + if (destroy && (pageStore = sharedCtx.pageStore()) != null) pageStore.removeCacheData(new StoredCacheData(ctx.config())); - } - catch (IgniteCheckedException e) { - U.error(log, "Failed to delete cache configuration data while destroying cache" + - "[cache=" + ctx.name() + "]", e); - } + } + catch (IgniteCheckedException e) { + U.error(log, "Failed to delete cache configuration data while destroying cache" + + "[cache=" + ctx.name() + "]", e); } if (log.isInfoEnabled()) { @@ -1899,10 +1804,9 @@ public CacheMode cacheMode(String cacheName) { * @return Caches to be started when this node starts. 
*/ @Nullable public LocalJoinCachesContext localJoinCachesContext() { - if (ctx.discovery().localNode().order() == 1 && localConfigs != null) - cachesInfo.filterDynamicCacheDescriptors(localConfigs); - - localConfigs = null; + if (ctx.discovery().localNode().order() == 1 && alreadyFiltered.compareAndSet(false, true)) { + cachesInfo.filterDynamicCacheDescriptors(locCfgMgr.localCachesOnStart()); + } return cachesInfo.localJoinCachesContext(); } @@ -2662,7 +2566,7 @@ private CacheGroupContext startCacheGroup( * @param stop {@code True} for stop cache, {@code false} for close cache. * @param restart Restart flag. */ - void blockGateway(String cacheName, boolean stop, boolean restart) { + public void blockGateway(String cacheName, boolean stop, boolean restart) { // Break the proxy before exchange future is done. IgniteCacheProxyImpl proxy = jcacheProxy(cacheName, false); @@ -2869,6 +2773,8 @@ private void processCacheStopRequestOnExchangeDone(ExchangeActions exchActions) .map(a -> F.t(cacheGrps.get(a.descriptor().groupId()), a.destroy())) .collect(Collectors.toList()); + grpToStop.forEach(t -> sharedCtx.evict().onCacheGroupStopped(t.get1())); + if (!exchActions.cacheStopRequests().isEmpty()) removeOffheapListenerAfterCheckpoint(grpToStop); @@ -2900,6 +2806,14 @@ private void processCacheStopRequestOnExchangeDone(ExchangeActions exchActions) for (ExchangeActions.CacheActionData action: cachesToStopByGrp.getValue()) { stopGateway(action.request()); + context().tm().rollbackTransactionsForStoppingCache(action.descriptor().cacheId()); + + // TTL manager has to be unregistered before the checkpointReadLock is acquired. + GridCacheAdapter cache = caches.get(action.request().cacheName()); + + if (cache != null) + cache.context().ttl().unregister(); + sharedCtx.database().checkpointReadLock(); try { @@ -2943,28 +2857,14 @@ private void processCacheStopRequestOnExchangeDone(ExchangeActions exchActions) * @param grpToStop Cache group to stop. 
*/ private void removeOffheapListenerAfterCheckpoint(List> grpToStop) { - CheckpointFuture checkpointFut; - do { - do { - checkpointFut = sharedCtx.database().forceCheckpoint("caches stop"); - } - while (checkpointFut != null && checkpointFut.started()); - - if (checkpointFut != null) - checkpointFut.finishFuture().listen((fut) -> removeOffheapCheckpointListener(grpToStop)); + try { + sharedCtx.database().waitForCheckpoint( + "caches stop", (fut) -> removeOffheapCheckpointListener(grpToStop) + ); } - while (checkpointFut != null && checkpointFut.finishFuture().isDone()); - - if (checkpointFut != null) { - try { - checkpointFut.finishFuture().get(); - } - catch (IgniteCheckedException e) { - U.error(log, "Failed to wait for checkpoint finish during cache stop.", e); - } + catch (IgniteCheckedException e) { + U.error(log, "Failed to wait for checkpoint finish during cache stop.", e); } - else - removeOffheapCheckpointListener(grpToStop); } /** @@ -3012,39 +2912,10 @@ public void onExchangeDone( ctx.service().updateUtilityCache(); } - rollbackCoveredTx(exchActions); - if (err == null) processCacheStopRequestOnExchangeDone(exchActions); } - /** - * Rollback tx covered by stopped caches. - * - * @param exchActions Change requests. 
- */ - private void rollbackCoveredTx(ExchangeActions exchActions) { - if (!exchActions.cacheGroupsToStop().isEmpty() || !exchActions.cacheStopRequests().isEmpty()) { - Set cachesToStop = new HashSet<>(); - - for (ExchangeActions.CacheGroupActionData act : exchActions.cacheGroupsToStop()) { - @Nullable CacheGroupContext grpCtx = context().cache().cacheGroup(act.descriptor().groupId()); - - if (grpCtx != null && grpCtx.sharedGroup()) - cachesToStop.addAll(grpCtx.cacheIds()); - } - - for (ExchangeActions.CacheActionData act : exchActions.cacheStopRequests()) - cachesToStop.add(act.descriptor().cacheId()); - - if (!cachesToStop.isEmpty()) { - IgniteTxManager tm = context().tm(); - - tm.rollbackTransactionsForCaches(cachesToStop); - } - } - } - /** * @param grpId Group ID. */ @@ -3218,12 +3089,17 @@ private GridCacheSharedContext createSharedContext( } /** {@inheritDoc} */ - @Nullable @Override public IgniteNodeValidationResult validateNode( + @Override public @Nullable IgniteNodeValidationResult validateNode( ClusterNode node, JoiningNodeDiscoveryData discoData ) { if(!cachesInfo.isMergeConfigSupports(node)) return null; + String validationRes = cachesInfo.validateJoiningNodeData(discoData); + + if (validationRes != null) + return new IgniteNodeValidationResult(node.id(), validationRes, validationRes); + if (discoData.hasJoiningNodeData() && discoData.joiningNodeData() instanceof CacheJoinNodeDiscoveryData) { CacheJoinNodeDiscoveryData nodeData = (CacheJoinNodeDiscoveryData)discoData.joiningNodeData(); @@ -3231,22 +3107,28 @@ private GridCacheSharedContext createSharedContext( StringBuilder errorMessage = new StringBuilder(); - for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : nodeData.caches().values()) { - try { - byte[] secCtxBytes = node.attribute(IgniteNodeAttributes.ATTR_SECURITY_SUBJECT_V2); + SecurityContext secCtx = null; - if (secCtxBytes != null) { - SecurityContext secCtx = U.unmarshal(marsh, secCtxBytes, U.resolveClassLoader(ctx.config())); + if 
(ctx.security().enabled()) { + try { + secCtx = nodeSecurityContext(marsh, U.resolveClassLoader(ctx.config()), node); + } + catch (SecurityException se) { + errorMessage.append(se.getMessage()); + } + } - if (secCtx != null && cacheInfo.cacheType() == CacheType.USER) - authorizeCacheCreate(cacheInfo.cacheData().config(), secCtx); + for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : nodeData.caches().values()) { + if (secCtx != null && cacheInfo.cacheType() == CacheType.USER) { + try (OperationSecurityContext s = ctx.security().withContext(secCtx)) { + authorizeCacheCreate(cacheInfo.cacheData().config()); } - } - catch (SecurityException | IgniteCheckedException ex) { - if (errorMessage.length() > 0) - errorMessage.append("\n"); + catch (SecurityException ex) { + if (errorMessage.length() > 0) + errorMessage.append("\n"); - errorMessage.append(ex.getMessage()); + errorMessage.append(ex.getMessage()); + } } DynamicCacheDescriptor localDesc = cacheDescriptor(cacheInfo.cacheData().config().getName()); @@ -3460,13 +3342,6 @@ private void stopCachesOnClientReconnect(Collection stoppedCac } } - /** - * @return {@code True} if need locally start all existing caches on client node start. - */ - private boolean startAllCachesOnClientStart() { - return startClientCaches && ctx.clientNode(); - } - /** * Dynamically starts cache using template configuration. 
* @@ -3691,8 +3566,10 @@ public IgniteInternalFuture dynamicStartCache( ) { assert cacheName != null; - if (checkThreadTx) - checkEmptyTransactions(); + if (checkThreadTx) { + checkEmptyTransactionsEx(() -> String.format(CACHE_NAME_AND_OPERATION_FORMAT, cacheName, + "dynamicStartCache")); + } try { DynamicCacheChangeRequest req = prepareCacheChangeRequest( @@ -3786,8 +3663,16 @@ public IgniteInternalFuture dynamicStartCachesByStoredConf( boolean disabledAfterStart, IgniteUuid restartId ) { - if (checkThreadTx) - checkEmptyTransactions(); + if (checkThreadTx) { + checkEmptyTransactionsEx(() -> { + List cacheNames = storedCacheDataList.stream() + .map(StoredCacheData::config) + .map(CacheConfiguration::getName) + .collect(Collectors.toList()); + + return String.format(CACHE_NAMES_AND_OPERATION_FORMAT, cacheNames, "dynamicStartCachesByStoredConf"); + }); + } List srvReqs = null; Map clientReqs = null; @@ -3879,8 +3764,10 @@ public IgniteInternalFuture dynamicDestroyCache( ) { assert cacheName != null; - if (checkThreadTx) - checkEmptyTransactions(); + if (checkThreadTx) { + checkEmptyTransactionsEx(() -> String.format(CACHE_NAME_AND_OPERATION_FORMAT, cacheName, + "dynamicDestroyCache")); + } DynamicCacheChangeRequest req = DynamicCacheChangeRequest.stopRequest(ctx, cacheName, sql, true); @@ -3912,8 +3799,10 @@ public IgniteInternalFuture dynamicDestroyCaches( boolean checkThreadTx, boolean destroy ) { - if (checkThreadTx) - checkEmptyTransactions(); + if (checkThreadTx) { + checkEmptyTransactionsEx(() -> String.format(CACHE_NAMES_AND_OPERATION_FORMAT, cacheNames, + "dynamicDestroyCaches")); + } List reqs = new ArrayList<>(cacheNames.size()); @@ -3999,7 +3888,7 @@ IgniteInternalFuture dynamicCloseCache(String cacheName) { if (proxy == null || proxy.isProxyClosed()) return new GridFinishedFuture<>(); // No-op. 
- checkEmptyTransactions(); + checkEmptyTransactionsEx(() -> String.format(CACHE_NAME_AND_OPERATION_FORMAT, cacheName, "dynamicCloseCache")); if (proxy.context().isLocal()) return dynamicDestroyCache(cacheName, false, true, false, null); @@ -4014,11 +3903,14 @@ IgniteInternalFuture dynamicCloseCache(String cacheName) { * @return Future that will be completed when state is changed for all caches. */ public IgniteInternalFuture resetCacheState(Collection cacheNames) { - checkEmptyTransactions(); - if (F.isEmpty(cacheNames)) cacheNames = cachesInfo.registeredCaches().keySet(); + Collection forCheckCacheNames = cacheNames; + + checkEmptyTransactionsEx(() -> String.format(CACHE_NAME_AND_OPERATION_FORMAT, forCheckCacheNames, + "resetCacheState")); + Collection reqs = new ArrayList<>(cacheNames.size()); for (String cacheName : cacheNames) { @@ -4068,9 +3960,19 @@ else if (DataStructuresProcessor.isDataStructureCache(cacheName)) public void saveCacheConfiguration(DynamicCacheDescriptor desc) throws IgniteCheckedException { assert desc != null; - if (sharedCtx.pageStore() != null && !sharedCtx.kernalContext().clientNode() && - isPersistentCache(desc.cacheConfiguration(), sharedCtx.gridConfig().getDataStorageConfiguration())) - sharedCtx.pageStore().storeCacheData(desc.toStoredData(), true); + locCfgMgr.saveCacheConfiguration(desc.toStoredData(), true); + } + + /** + * Save cache configuration to persistent store if necessary. + * + * @param storedCacheData Stored cache data. + * @param overwrite Overwrite existing. + */ + public void saveCacheConfiguration(StoredCacheData storedCacheData, boolean overwrite) throws IgniteCheckedException { + assert storedCacheData != null; + + locCfgMgr.saveCacheConfiguration(storedCacheData, overwrite); } /** @@ -4169,28 +4071,28 @@ private Collection initiateCacheChanges( * Authorize creating cache. * * @param cfg Cache configuration. - * @param secCtx Optional security context. 
*/ - private void authorizeCacheCreate(CacheConfiguration cfg, SecurityContext secCtx) { - ctx.security().authorize(null, SecurityPermission.CACHE_CREATE, secCtx); + void authorizeCacheCreate(CacheConfiguration cfg) { + if(cfg != null) { + ctx.security().authorize(cfg.getName(), SecurityPermission.CACHE_CREATE); - if (cfg != null && cfg.isOnheapCacheEnabled() && - IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_DISABLE_ONHEAP_CACHE)) - throw new SecurityException("Authorization failed for enabling on-heap cache."); + if (cfg.isOnheapCacheEnabled() && + IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_DISABLE_ONHEAP_CACHE)) + throw new SecurityException("Authorization failed for enabling on-heap cache."); + } } /** - * Authorize dynamic cache management for this node. + * Authorize dynamic cache management. * * @param req start/stop cache request. */ private void authorizeCacheChange(DynamicCacheChangeRequest req) { - // Null security context means authorize this node. if (req.cacheType() == null || req.cacheType() == CacheType.USER) { if (req.stop()) - ctx.security().authorize(null, SecurityPermission.CACHE_DESTROY, null); + ctx.security().authorize(req.cacheName(), SecurityPermission.CACHE_DESTROY); else - authorizeCacheCreate(req.startCacheConfiguration(), null); + authorizeCacheCreate(req.startCacheConfiguration()); } } @@ -4610,23 +4512,16 @@ public IgniteInternalCache getOrStartCache( * @return All configured cache instances. */ public Collection> caches() { - return F.viewReadOnly(jCacheProxies.values(), new IgniteClosure, - IgniteInternalCache>() { - @Override public IgniteInternalCache apply(IgniteCacheProxy entries) { - return entries.internalProxy(); - } - }); + return F.viewReadOnly(jCacheProxies.values(), + (IgniteClosure, IgniteInternalCache>)IgniteCacheProxy::internalProxy); } /** * @return All configured cache instances. 
*/ public Collection> jcaches() { - return F.viewReadOnly(jCacheProxies.values(), new IgniteClosure, IgniteCacheProxy>() { - @Override public IgniteCacheProxy apply(IgniteCacheProxyImpl proxy) { - return proxy.gatewayWrapper(); - } - }); + return F.viewReadOnly(jCacheProxies.values(), + (IgniteClosure, IgniteCacheProxy>)IgniteCacheProxyImpl::gatewayWrapper); } /** @@ -4718,7 +4613,7 @@ public IgniteCacheProxy publicJCache(String cacheName) throws Ignit * @throws IgniteCheckedException If failed. */ @SuppressWarnings({"unchecked", "ConstantConditions"}) - @Nullable public IgniteCacheProxy publicJCache(String cacheName, + public @Nullable IgniteCacheProxy publicJCache(String cacheName, boolean failIfNotStarted, boolean checkThreadTx) throws IgniteCheckedException { assert cacheName != null; @@ -4744,10 +4639,13 @@ public IgniteCacheProxy publicJCache(String cacheName) throws Ignit } /** - * Get configuration for the given cache. + * Get configuration for the given cache. Fails if cache does not exist or restarting. * * @param name Cache name. * @return Cache configuration. + * @throws org.apache.ignite.IgniteCacheRestartingException If the cache with the given name + * is currently restarting. + * @throws IllegalStateException If the cache with the given name does not exist. */ public CacheConfiguration cacheConfiguration(String name) { assert name != null; @@ -4772,6 +4670,20 @@ public CacheConfiguration cacheConfiguration(String name) { return desc.cacheConfiguration(); } + /** + * Get configuration for the given cache. If a cache with the given name does not exist, will return {@code null}. + * + * @param name Cache name. + * @return Cache configuration or {@code null}. + */ + public CacheConfiguration cacheConfigurationNoProxyCheck(String name) { + assert name != null; + + DynamicCacheDescriptor desc = cacheDescriptor(name); + + return desc == null ? null : desc.cacheConfiguration(); + } + /** * Get registered cache descriptor. 
* @@ -4820,7 +4732,7 @@ public Map cacheGroupDescriptors() { * @param cacheId Cache ID. * @return Cache descriptor. */ - @Nullable public DynamicCacheDescriptor cacheDescriptor(int cacheId) { + public @Nullable DynamicCacheDescriptor cacheDescriptor(int cacheId) { for (DynamicCacheDescriptor cacheDesc : cacheDescriptors().values()) { CacheConfiguration ccfg = cacheDesc.cacheConfiguration(); @@ -4917,7 +4829,7 @@ public IgniteCacheProxy jcache(String name) { * @param awaitInit Await proxy initialization. * @return Cache proxy. */ - @Nullable public IgniteCacheProxyImpl jcacheProxy(String name, boolean awaitInit) { + public @Nullable IgniteCacheProxyImpl jcacheProxy(String name, boolean awaitInit) { IgniteCacheProxyImpl cache = jCacheProxies.get(name); if (awaitInit) @@ -4931,7 +4843,7 @@ public IgniteCacheProxy jcache(String name) { * @param proxy Cache proxy. * @return Previous cache proxy. */ - @Nullable public IgniteCacheProxyImpl addjCacheProxy(String name, IgniteCacheProxyImpl proxy) { + public @Nullable IgniteCacheProxyImpl addjCacheProxy(String name, IgniteCacheProxyImpl proxy) { return jCacheProxies.putIfAbsent(name, proxy); } @@ -5196,12 +5108,29 @@ public void checkEmptyTransactions() throws IgniteException { throw new IgniteException("Cannot start/stop cache within lock or transaction."); } + /** + * Method invoke {@link #checkEmptyTransactions()} and add message in case exception. + * + * @param eMsgSupplier supplier additional text message + * @throws IgniteException If {@link #checkEmptyTransactions()} throw {@link IgniteException} + * */ + private void checkEmptyTransactionsEx(final Supplier eMsgSupplier) throws IgniteException { + assert eMsgSupplier != null; + + try { + checkEmptyTransactions(); + } + catch (IgniteException e) { + throw new IgniteException(e.getMessage() + ' ' + eMsgSupplier.get(), e); + } + } + /** * @param val Object to check. * @return Configuration copy. * @throws IgniteCheckedException If validation failed. 
*/ - private CacheConfiguration cloneCheckSerializable(final CacheConfiguration val) throws IgniteCheckedException { + CacheConfiguration cloneCheckSerializable(final CacheConfiguration val) throws IgniteCheckedException { if (val == null) return null; @@ -5528,6 +5457,71 @@ public void setTxOwnerDumpRequestsAllowed(boolean allowed) { compute.broadcast(new TxOwnerDumpRequestAllowedSettingClosure(allowed)); } + /** + * Sets threshold timeout in milliseconds for long transactions, if transaction exceeds it, + * it will be dumped in log with information about how much time did + * it spent in system time (time while aquiring locks, preparing, commiting, etc.) + * and user time (time when client node runs some code while holding transaction). + * Can be set to 0 - no transactions will be dumped in log in this case. + * + * @param threshold Threshold timeout in milliseconds. + */ + public void longTransactionTimeDumpThreshold(long threshold) { + assert threshold >= 0 : "Threshold timeout must be greater than or equal to 0."; + + broadcastToNodesSupportingFeature( + new LongRunningTxTimeDumpSettingsClosure(threshold, null, null), + LRT_SYSTEM_USER_TIME_DUMP_SETTINGS + ); + } + + /** + * Sets the coefficient for samples of long running transactions that will be dumped in log, if + * {@link #longTransactionTimeDumpThreshold} is set to non-zero value." + * + * @param coefficient Coefficient, must be value between 0.0 and 1.0 inclusively. + */ + public void transactionTimeDumpSamplesCoefficient(double coefficient) { + assert coefficient >= 0.0 && coefficient <= 1.0 : "Percentage value must be between 0.0 and 1.0 inclusively."; + + broadcastToNodesSupportingFeature( + new LongRunningTxTimeDumpSettingsClosure(null, coefficient, null), + LRT_SYSTEM_USER_TIME_DUMP_SETTINGS + ); + } + + /** + * Sets the limit of samples of completed transactions that will be dumped in log per second, + * if {@link #transactionTimeDumpSamplesCoefficient} is above 0.0. 
+ * Must be integer value greater than 0. + * + * @param limit Limit value. + */ + public void longTransactionTimeDumpSamplesPerSecondLimit(int limit) { + assert limit > 0 : "Limit value must be greater than 0."; + + broadcastToNodesSupportingFeature( + new LongRunningTxTimeDumpSettingsClosure(null, null, limit), + LRT_SYSTEM_USER_TIME_DUMP_SETTINGS + ); + } + + /** + * Broadcasts given job to nodes that support ignite feature. + * + * @param job Ignite job. + * @param feature Ignite feature. + */ + private void broadcastToNodesSupportingFeature(IgniteRunnable job, IgniteFeatures feature) { + ClusterGroup grp = ctx.grid() + .cluster() + .forPredicate(node -> IgniteFeatures.nodeSupports(node, feature)); + + IgniteCompute compute = ctx.grid().compute(grp); + + compute.broadcast(job); + } + /** * Recovery lifecycle for caches. */ @@ -5537,7 +5531,9 @@ private class CacheRecoveryLifecycle implements MetastorageLifecycleListener, Da /** {@inheritDoc} */ @Override public void onReadyForRead(ReadOnlyMetastorage metastorage) throws IgniteCheckedException { - restoreCacheConfigurations(); + CacheJoinNodeDiscoveryData data = locCfgMgr.restoreCacheConfigurations(); + + cachesInfo.onStart(data); } /** {@inheritDoc} */ @@ -5575,15 +5571,53 @@ private void restorePartitionStates( if (log.isInfoEnabled()) log.info("Restoring partition state for local groups."); - long totalProcessed = 0; + AtomicLong totalProcessed = new AtomicLong(); + + AtomicReference restoreStateError = new AtomicReference<>(); + + StripedExecutor stripedExec = ctx.getStripedExecutorService(); + + int roundRobin = 0; + + for (CacheGroupContext grp : forGroups) { + stripedExec.execute(roundRobin % stripedExec.stripes(), () -> { + try { + long processed = grp.offheap().restorePartitionStates(partitionStates); + + totalProcessed.addAndGet(processed); + } + catch (IgniteCheckedException | RuntimeException | Error e) { + U.error(log, "Failed to restore partition state for " + + "groupName=" + grp.name() + " 
groupId=" + grp.groupId(), e); + + restoreStateError.compareAndSet( + null, + e instanceof IgniteCheckedException + ? ((IgniteCheckedException)e) + : new IgniteCheckedException(e) + ); + } + }); + + roundRobin++; + } + + try { + // Await completion restore state tasks in all stripes. + stripedExec.awaitComplete(); + } + catch (InterruptedException e) { + throw new IgniteInterruptedException(e); + } - for (CacheGroupContext grp : forGroups) - totalProcessed += grp.offheap().restorePartitionStates(partitionStates); + // Checking error after all task applied. + if (restoreStateError.get() != null) + throw restoreStateError.get(); if (log.isInfoEnabled()) log.info("Finished restoring partition state for local groups [" + "groupsProcessed=" + forGroups.size() + - ", partitionsProcessed=" + totalProcessed + + ", partitionsProcessed=" + totalProcessed.get() + ", time=" + (U.currentTimeMillis() - startRestorePart) + "ms]"); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java index efa844de3df06..8a78dc15a8b69 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java @@ -102,7 +102,11 @@ public GridCacheProxyImpl( gate = ctx.gate(); - aff = new GridCacheAffinityProxy<>(ctx, ctx.cache().affinity()); + GridCacheAdapter adapter = ctx.cache(); + if (adapter == null) + throw new IllegalStateException(new CacheStoppedException(ctx.name())); + + aff = new GridCacheAffinityProxy<>(ctx, adapter.affinity()); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedTtlCleanupManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedTtlCleanupManager.java index 2fba52daf87da..bd9b6756fd716 
100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedTtlCleanupManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedTtlCleanupManager.java @@ -17,8 +17,10 @@ package org.apache.ignite.internal.processors.cache; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReentrantLock; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.internal.IgniteInterruptedCheckedException; @@ -44,17 +46,15 @@ public class GridCacheSharedTtlCleanupManager extends GridCacheSharedManagerAdap /** Cleanup worker. */ private CleanupWorker cleanupWorker; - /** Mutex on worker thread creation. */ - private final Object mux = new Object(); + /** Lock on worker thread creation. */ + private final ReentrantLock lock = new ReentrantLock(); - /** List of registered ttl managers. */ - private List mgrs = new CopyOnWriteArrayList<>(); + /** Map of registered ttl managers, where the cache id is used as the key. */ + private final Map mgrs = new ConcurrentHashMap<>(); /** {@inheritDoc} */ @Override protected void onKernalStop0(boolean cancel) { - synchronized (mux) { - stopCleanupWorker(); - } + stopCleanupWorker(); } /** @@ -63,12 +63,10 @@ public class GridCacheSharedTtlCleanupManager extends GridCacheSharedManagerAdap * @param mgr ttl manager of cache. * */ public void register(GridCacheTtlManager mgr) { - synchronized (mux) { - if (cleanupWorker == null) - startCleanupWorker(); + if (mgrs.isEmpty()) + startCleanupWorker(); - mgrs.add(mgr); - } + mgrs.put(mgr.context().cacheId(), mgr); } /** @@ -77,12 +75,10 @@ public void register(GridCacheTtlManager mgr) { * @param mgr ttl manager of cache. 
* */ public void unregister(GridCacheTtlManager mgr) { - synchronized (mux) { - mgrs.remove(mgr); + mgrs.remove(mgr.context().cacheId()); - if (mgrs.isEmpty()) - stopCleanupWorker(); - } + if (mgrs.isEmpty()) + stopCleanupWorker(); } /** @@ -91,27 +87,51 @@ public void unregister(GridCacheTtlManager mgr) { public boolean eagerTtlEnabled() { assert cctx != null : "Manager is not started"; - return cleanupWorker != null; + lock.lock(); + + try { + return cleanupWorker != null; + } + finally { + lock.unlock(); + } } /** * */ private void startCleanupWorker() { - cleanupWorker = new CleanupWorker(); + lock.lock(); + + try { + if (cleanupWorker != null) + return; + + cleanupWorker = new CleanupWorker(); - new IgniteThread(cleanupWorker).start(); + new IgniteThread(cleanupWorker).start(); + } + finally { + lock.unlock(); + } } /** * */ private void stopCleanupWorker() { - if (null != cleanupWorker) { - U.cancel(cleanupWorker); - U.join(cleanupWorker, log); + lock.lock(); + + try { + if (null != cleanupWorker) { + U.cancel(cleanupWorker); + U.join(cleanupWorker, log); - cleanupWorker = null; + cleanupWorker = null; + } + } + finally { + lock.unlock(); } } @@ -143,14 +163,24 @@ private class CleanupWorker extends GridWorker { assert !cctx.kernalContext().recoveryMode(); + final AtomicBoolean expiredRemains = new AtomicBoolean(); + while (!isCancelled()) { - boolean expiredRemains = false; + expiredRemains.set(false); - for (GridCacheTtlManager mgr : mgrs) { + for (Map.Entry mgr : mgrs.entrySet()) { updateHeartbeat(); - if (mgr.expire(CLEANUP_WORKER_ENTRIES_PROCESS_LIMIT)) - expiredRemains = true; + Integer processedCacheID = mgr.getKey(); + + // Need to be sure that the cache to be processed will not be unregistered and, + // therefore, stopped during the process of expiration is in progress. 
+ mgrs.computeIfPresent(processedCacheID, (id, m) -> { + if (m.expire(CLEANUP_WORKER_ENTRIES_PROCESS_LIMIT)) + expiredRemains.set(true); + + return m; + }); if (isCancelled()) return; @@ -158,7 +188,7 @@ private class CleanupWorker extends GridWorker { updateHeartbeat(); - if (!expiredRemains) + if (!expiredRemains.get()) U.sleep(CLEANUP_WORKER_SLEEP_INTERVAL); onIdle(); @@ -171,8 +201,12 @@ private class CleanupWorker extends GridWorker { return; } - if (!(t instanceof IgniteInterruptedCheckedException)) + if (!(t instanceof IgniteInterruptedCheckedException || t instanceof InterruptedException)) { + if (isCancelled) + return; + err = t; + } throw t; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java index 65d500cf97637..d588eb60d2b3b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java @@ -121,6 +121,15 @@ public boolean eagerTtlEnabled() { @Override protected void onKernalStop0(boolean cancel) { if (pendingEntries != null) pendingEntries.clear(); + } + + /** + * Unregister this TTL manager of cache from periodical check on expired entries. + */ + public void unregister() { + // Ignoring attempt to unregister manager that has never been started. 
+ if (!starting.get()) + return; cctx.shared().ttl().unregister(this); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index 8a643369d983d..6e11d82f3b24b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -813,6 +813,13 @@ public static GridNearTxLocal txStartInternal(GridCacheContext ctx, IgniteIntern return prj.txStartEx(concurrency, isolation); } + /** + * Alias for {@link #txString(IgniteInternalTx)}. + */ + public static String txDump(@Nullable IgniteInternalTx tx) { + return txString(tx); + } + /** * @param tx Transaction. * @return String view of all safe-to-print transaction properties. @@ -832,6 +839,7 @@ public static String txString(@Nullable IgniteInternalTx tx) { ", rollbackOnly=" + tx.isRollbackOnly() + ", nodeId=" + tx.nodeId() + ", timeout=" + tx.timeout() + + ", startTime=" + tx.startTime() + ", duration=" + (U.currentTimeMillis() - tx.startTime()) + (tx instanceof GridNearTxLocal ? ", label=" + tx.label() : "") + ']'; @@ -1151,6 +1159,14 @@ public static boolean isIgfsCache(IgniteConfiguration cfg, @Nullable String cach return IgfsUtils.isIgfsCache(cfg, cacheName); } + /** + * @param cacheName Cache name. + * @return {@code True} in this is IGFS data or meta cache. + */ + public static boolean isIgfsCache(@Nullable String cacheName) { + return IgfsUtils.isIgfsCache(cacheName); + } + /** * Convert TTL to expire time. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridLocalConfigManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridLocalConfigManager.java new file mode 100644 index 0000000000000..5654e20de1316 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridLocalConfigManager.java @@ -0,0 +1,274 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteUuid; + +import static org.apache.ignite.internal.processors.cache.GridCacheUtils.isPersistentCache; + +/** + * Responsible for restoring local cache configurations (both from static configuration and persistence). + * Keep stop sequence of caches and caches which were presented on node before node join. + */ +public class GridLocalConfigManager { + /** */ + private final boolean startClientCaches = + IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_START_CACHES_ON_JOIN, false); + + /** Caches stop sequence. */ + private final Deque stopSeq = new LinkedList<>(); + + /** Logger. */ + private final IgniteLogger log; + + /** Node's local caches on start (both from static configuration and from persistent caches). */ + private Set localCachesOnStart; + + /** Cache processor. */ + private final GridCacheProcessor cacheProcessor; + + /** Context. */ + private final GridKernalContext ctx; + + /** + * @param cacheProcessor Cache processor. + * @param kernalCtx Kernal context. 
+ */ + public GridLocalConfigManager( + GridCacheProcessor cacheProcessor, + GridKernalContext kernalCtx + ) { + this.cacheProcessor = cacheProcessor; + ctx = kernalCtx; + log = ctx.log(getClass()); + } + + /** + * Save cache configuration to persistent store if necessary. + * + * @param storedCacheData Stored cache data. + * @param overwrite Overwrite existing. + */ + public void saveCacheConfiguration(StoredCacheData storedCacheData, boolean overwrite) throws IgniteCheckedException { + assert storedCacheData != null; + + GridCacheSharedContext sharedContext = cacheProcessor.context(); + + if (sharedContext.pageStore() != null + && !sharedContext.kernalContext().clientNode() + && isPersistentCache(storedCacheData.config(), sharedContext.gridConfig().getDataStorageConfiguration())) + sharedContext.pageStore().storeCacheData(storedCacheData, overwrite); + } + + /** + * + */ + public Collection stopSequence() { + return stopSeq; + } + + /** + * @return Caches to be started when this node starts. + */ + public Set localCachesOnStart() { + return localCachesOnStart; + } + + /** + * @throws IgniteCheckedException If failed. + */ + public CacheJoinNodeDiscoveryData restoreCacheConfigurations() throws IgniteCheckedException { + if (ctx.isDaemon()) + return null; + + Map caches = new HashMap<>(); + + Map templates = new HashMap<>(); + + restoreCaches(caches, templates, ctx.config(), ctx.cache().context().pageStore()); + + CacheJoinNodeDiscoveryData discoData = new CacheJoinNodeDiscoveryData( + IgniteUuid.randomUuid(), + caches, + templates, + startAllCachesOnClientStart() + ); + + localCachesOnStart = new HashSet<>(discoData.caches().keySet()); + + return discoData; + } + + /** + * @return {@code True} if need locally start all existing caches on client node start. + */ + private boolean startAllCachesOnClientStart() { + return startClientCaches && ctx.clientNode(); + } + + /** + * @param caches Caches accumulator. + * @param templates Templates accumulator. 
+ * @param config Ignite configuration. + * @param pageStoreManager Page store manager. + */ + private void restoreCaches( + Map caches, + Map templates, + IgniteConfiguration config, + IgnitePageStoreManager pageStoreManager + ) throws IgniteCheckedException { + assert !config.isDaemon() : "Trying to restore cache configurations on daemon node."; + + CacheConfiguration[] cfgs = config.getCacheConfiguration(); + + for (int i = 0; i < cfgs.length; i++) { + CacheConfiguration cfg = new CacheConfiguration(cfgs[i]); + + // Replace original configuration value. + cfgs[i] = cfg; + + addCacheFromConfiguration(cfg, false, caches, templates); + } + + if (CU.isPersistenceEnabled(config) && pageStoreManager != null) { + Map storedCaches = pageStoreManager.readCacheConfigurations(); + + if (!F.isEmpty(storedCaches)) { + List skippedConfigs = new ArrayList<>(); + + for (StoredCacheData storedCacheData : storedCaches.values()) { + String cacheName = storedCacheData.config().getName(); + + CacheType type = ctx.cache().cacheType(cacheName); + + if (!caches.containsKey(cacheName)) + // No static cache - add the configuration. + addStoredCache(caches, storedCacheData, cacheName, type, true, false); + else { + addStoredCache(caches, storedCacheData, cacheName, type, true, + cacheProcessor.keepStaticCacheConfiguration()); + + if (!cacheProcessor.keepStaticCacheConfiguration() && type == CacheType.USER) + skippedConfigs.add(cacheName); + + } + } + + if (!F.isEmpty(skippedConfigs)) { + U.warn(log, "Static configuration for the following caches will be ignored because a persistent " + + "cache with the same name already exist (see " + + "https://apacheignite.readme.io/docs/cache-configuration for more information): " + + skippedConfigs); + } + } + } + } + + /** + * Add stored cache data to caches storage. + * + * @param caches Cache storage. + * @param cacheData Cache data to add. + * @param cacheName Cache name. + * @param cacheType Cache type. 
+ * @param isStaticallyConfigured Statically configured flag. + */ + private void addStoredCache( + Map caches, + StoredCacheData cacheData, + String cacheName, + CacheType cacheType, + boolean persistedBefore, + boolean isStaticallyConfigured + ) { + if (!caches.containsKey(cacheName)) { + if (!cacheType.userCache()) + stopSeq.addLast(cacheName); + else + stopSeq.addFirst(cacheName); + } + + caches.put(cacheName, new CacheJoinNodeDiscoveryData.CacheInfo(cacheData, cacheType, cacheData.sql(), + persistedBefore ? 1 : 0, isStaticallyConfigured)); + } + + /** + * @param cfg Cache configuration. + * @param sql SQL flag. + * @param caches Caches map. + * @param templates Templates map. + * @throws IgniteCheckedException If failed. + */ + private void addCacheFromConfiguration( + CacheConfiguration cfg, + boolean sql, + Map caches, + Map templates + ) throws IgniteCheckedException { + String cacheName = cfg.getName(); + + CU.validateCacheName(cacheName); + + cacheProcessor.cloneCheckSerializable(cfg); + + CacheObjectContext cacheObjCtx = ctx.cacheObjects().contextForCache(cfg); + + // Initialize defaults. 
+ cacheProcessor.initialize(cfg, cacheObjCtx); + + StoredCacheData cacheData = new StoredCacheData(cfg); + + cacheData.sql(sql); + + if (GridCacheUtils.isCacheTemplateName(cacheName)) + templates.put(cacheName, new CacheJoinNodeDiscoveryData.CacheInfo(cacheData, CacheType.USER, false, 0, true)); + else { + if (caches.containsKey(cacheName)) { + throw new IgniteCheckedException("Duplicate cache name found (check configuration and " + + "assign unique name to each cache): " + cacheName); + } + + CacheType cacheType = ctx.cache().cacheType(cacheName); + + if (cacheType != CacheType.USER && cfg.getDataRegionName() == null) + cfg.setDataRegionName(cacheProcessor.context().database().systemDateRegionName()); + + addStoredCache(caches, cacheData, cacheName, cacheType, false, true); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java index 2e3253bd5a578..df766c4ff1ce7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java @@ -55,7 +55,6 @@ import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; import org.apache.ignite.internal.processors.cache.persistence.partstorage.PartitionMetaStorage; import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree; -import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; @@ -1022,7 +1021,7 @@ protected CacheDataStore createCacheDataStore0(int p) throws 
IgniteCheckedExcept } /** {@inheritDoc} */ - @Override public final void destroyCacheDataStore(CacheDataStore store) throws IgniteCheckedException { + @Override public void destroyCacheDataStore(CacheDataStore store) throws IgniteCheckedException { int p = store.partId(); partStoreLock.lock(p); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LongRunningTxTimeDumpSettingsClosure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LongRunningTxTimeDumpSettingsClosure.java new file mode 100644 index 0000000000000..95c6ea0b1ed72 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LongRunningTxTimeDumpSettingsClosure.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache; + +import org.apache.ignite.Ignite; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager; +import org.apache.ignite.lang.IgniteRunnable; +import org.apache.ignite.resources.IgniteInstanceResource; + +/** + * Closure that is sent on all server nodes in order to change configuration parameters + * of dumping long running transactions' system and user time values. + */ +public class LongRunningTxTimeDumpSettingsClosure implements IgniteRunnable { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private final Long timeoutThreshold; + + /** */ + private final Double samplesCoefficient; + + /** */ + private final Integer samplesPerSecondLimit; + + /** + * Auto-inject Ignite instance + */ + @IgniteInstanceResource + private Ignite ignite; + + /** */ + public LongRunningTxTimeDumpSettingsClosure( + Long timeoutThreshold, + Double samplesCoefficient, + Integer samplesPerSecondLimit + ) { + this.timeoutThreshold = timeoutThreshold; + this.samplesCoefficient = samplesCoefficient; + this.samplesPerSecondLimit = samplesPerSecondLimit; + } + + /** {@inheritDoc} */ + @Override + public void run() { + IgniteTxManager tm = ((IgniteEx) ignite).context().cache().context().tm(); + + if (timeoutThreshold != null) + tm.longTransactionTimeDumpThreshold(timeoutThreshold); + + if (samplesCoefficient != null) + tm.transactionTimeDumpSamplesCoefficient(samplesCoefficient); + + if (samplesPerSecondLimit != null) + tm.transactionTimeDumpSamplesPerSecondLimit(samplesPerSecondLimit); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionTxUpdateCounterImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionTxUpdateCounterImpl.java index fd58b29cf477f..14eb004f38969 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionTxUpdateCounterImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionTxUpdateCounterImpl.java @@ -74,6 +74,9 @@ public class PartitionTxUpdateCounterImpl implements PartitionUpdateCounter { /** HWM. */ protected final AtomicLong reserveCntr = new AtomicLong(); + /** */ + private boolean first = true; + /** * Initial counter points to last sequential update after WAL recovery. * @deprecated TODO FIXME https://issues.apache.org/jira/browse/IGNITE-11794 @@ -84,7 +87,7 @@ public class PartitionTxUpdateCounterImpl implements PartitionUpdateCounter { @Override public void init(long initUpdCntr, @Nullable byte[] cntrUpdData) { cntr.set(initUpdCntr); - initCntr = initUpdCntr; + reserveCntr.set(initCntr = initUpdCntr); queue = fromBytes(cntrUpdData); } @@ -120,11 +123,14 @@ protected synchronized long highestAppliedCounter() { // Reserved update counter is updated only on exchange. long cur = get(); - // Special case: single node in topology. - if (val == 0) - reserveCntr.set(cur); + // Always set reserved counter equal to max known counter. + long max = Math.max(val, cur); - if (val < cur) // Outdated counter (txs are possible before current topology future is finished). + if (reserveCntr.get() < max) + reserveCntr.set(max); + + // Outdated counter (txs are possible before current topology future is finished if primary is not changed). + if (val < cur) return; // Absolute counter should be not less than last applied update. @@ -133,15 +139,17 @@ protected synchronized long highestAppliedCounter() { if (val < highestAppliedCounter()) throw new IgniteCheckedException("Failed to update the counter [newVal=" + val + ", curState=" + this + ']'); - if (reserveCntr.get() < val) - reserveCntr.set(val); // Adjust counter on new primary. 
- cntr.set(val); - // If some holes are present at this point, that means some update were missed on recovery and will be restored - // during rebalance. All gaps are safe to "forget". - if (!queue.isEmpty()) - queue.clear(); + /** If some holes are present at this point, thar means some update were missed on recovery and will be restored + * during rebalance. All gaps are safe to "forget". + * Should only do it for first PME (later missed updates on node left are reset in {@link #finalizeUpdateCounters}. */ + if (first) { + if (!queue.isEmpty()) + queue.clear(); + + first = false; + } } /** {@inheritDoc} */ @@ -220,7 +228,10 @@ else if (last.within(start) && last.within(start + delta - 1)) @Override public void updateInitial(long start, long delta) { update(start, delta); - reserveCntr.set(initCntr = get()); + initCntr = get(); + + if (reserveCntr.get() < initCntr) + reserveCntr.set(initCntr); } /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/StateChangeRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/StateChangeRequest.java index 30a42bbb059ba..cd37016f3196b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/StateChangeRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/StateChangeRequest.java @@ -84,6 +84,13 @@ public boolean activate() { return msg.activate(); } + /** + * @return Read-only mode flag. + */ + public boolean readOnly() { + return msg.readOnly(); + } + /** * @return {@code True} if active state was changed. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/WalStateManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/WalStateManager.java index a077f598cafec..e901abc8c0542 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/WalStateManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/WalStateManager.java @@ -41,7 +41,7 @@ import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; -import org.apache.ignite.internal.processors.cache.persistence.CheckpointFuture; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.CheckpointProgress; import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageLifecycleListener; import org.apache.ignite.internal.processors.cache.persistence.metastorage.ReadOnlyMetastorage; @@ -67,6 +67,8 @@ import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SYSTEM_POOL; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.CheckpointProgress.State.FINISHED; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.CheckpointProgress.State.LOCK_RELEASED; /** * Write-ahead log state manager. Manages WAL enable and disable. 
@@ -426,7 +428,7 @@ else if (!grp.localWalEnabled()) try { if (hasNonEmptyOwning && !grpsToEnableWal.isEmpty()) - triggerCheckpoint("wal-local-state-change-" + topVer).finishFuture().get(); + triggerCheckpoint("wal-local-state-change-" + topVer).futureFor(FINISHED).get(); } catch (IgniteCheckedException ex) { throw new IgniteException(ex); @@ -472,12 +474,12 @@ public void onGroupRebalanceFinished(int grpId, AffinityTopologyVersion topVer) // Pending updates in groups with disabled WAL are not protected from crash. // Need to trigger checkpoint for attempt to persist them. - CheckpointFuture cpFut = triggerCheckpoint("wal-local-state-changed-rebalance-finished-" + topVer); + CheckpointProgress cpFut = triggerCheckpoint("wal-local-state-changed-rebalance-finished-" + topVer); assert cpFut != null; // It's safe to switch partitions to owning state only if checkpoint was successfully finished. - cpFut.finishFuture().listen(new IgniteInClosureX() { + cpFut.futureFor(FINISHED).listen(new IgniteInClosureX() { @Override public void applyx(IgniteInternalFuture future) { for (Integer grpId0 : session0.disabledGrps) { CacheGroupContext grp = cctx.cache().cacheGroup(grpId0); @@ -631,12 +633,12 @@ public void onProposeExchange(WalStateProposeMessage msg) { res = new WalStateResult(msg, false); else { // Initiate a checkpoint. - CheckpointFuture cpFut = triggerCheckpoint("wal-state-change-grp-" + msg.groupId()); + CheckpointProgress cpFut = triggerCheckpoint("wal-state-change-grp-" + msg.groupId()); if (cpFut != null) { try { // Wait for checkpoint mark synchronously before releasing the control. - cpFut.beginFuture().get(); + cpFut.futureFor(LOCK_RELEASED).get(); if (msg.enable()) { grpCtx.globalWalEnabled(true); @@ -1025,7 +1027,7 @@ private void addResult(WalStateResult res) { * @param msg Message. * @return Checkpoint future or {@code null} if failed to get checkpointer. 
*/ - @Nullable private CheckpointFuture triggerCheckpoint(String msg) { + @Nullable private CheckpointProgress triggerCheckpoint(String msg) { return cctx.database().forceCheckpoint(msg); } @@ -1036,14 +1038,14 @@ private void addResult(WalStateResult res) { * @param msg Orignial message which triggered the process. * @return Result. */ - private WalStateResult awaitCheckpoint(CheckpointFuture cpFut, WalStateProposeMessage msg) { + private WalStateResult awaitCheckpoint(CheckpointProgress cpFut, WalStateProposeMessage msg) { WalStateResult res; try { assert msg.affinityNode(); if (cpFut != null) - cpFut.finishFuture().get(); + cpFut.futureFor(FINISHED).get(); res = new WalStateResult(msg, true); } @@ -1099,14 +1101,14 @@ private class WalStateChangeWorker extends GridWorker { private final WalStateProposeMessage msg; /** Checkpoint future. */ - private final CheckpointFuture cpFut; + private final CheckpointProgress cpFut; /** * Constructor. * * @param msg Propose message. */ - private WalStateChangeWorker(WalStateProposeMessage msg, CheckpointFuture cpFut) { + private WalStateChangeWorker(WalStateProposeMessage msg, CheckpointProgress cpFut) { super(cctx.igniteInstanceName(), "wal-state-change-worker-" + msg.groupId(), WalStateManager.this.log); this.msg = msg; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java index bee40994bc63b..3c0f74a687a3a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataFileStore.java @@ -14,23 +14,34 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + package org.apache.ignite.internal.processors.cache.binary; import java.io.File; import java.io.FileInputStream; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.LinkedBlockingQueue; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.binary.BinaryType; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.binary.BinaryMetadata; import org.apache.ignite.internal.binary.BinaryUtils; import org.apache.ignite.internal.processors.cache.persistence.file.FileIO; import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; +import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.util.worker.GridWorker; +import org.apache.ignite.thread.IgniteThread; import org.jetbrains.annotations.Nullable; /** @@ -55,6 +66,15 @@ class BinaryMetadataFileStore { /** */ private final IgniteLogger log; + /** */ + private BinaryMetadataAsyncWriter writer; + + /** */ + private final ConcurrentMap writeOpFutures = new ConcurrentHashMap<>(); + + /** Flag to indicate that node is stopping due to detected critical error. */ + private volatile boolean stopOnCriticalError = false; + /** * @param metadataLocCache Metadata locale cache. * @param ctx Context. 
@@ -90,6 +110,16 @@ class BinaryMetadataFileStore { } U.ensureDirectory(workDir, "directory for serialized binary metadata", log); + + writer = new BinaryMetadataAsyncWriter(); + new IgniteThread(writer).start(); + } + + /** + * Stops worker for async writing of binary metadata. + */ + void stop() { + U.cancel(writer); } /** @@ -118,6 +148,19 @@ void writeMetadata(BinaryMetadata binMeta) { U.error(log, msg); + stopOnCriticalError = true; + + for (Map.Entry entry : writeOpFutures.entrySet()) { + if (log.isDebugEnabled()) + log.debug( + "Cancelling future for write operation for" + + " [typeId=" + entry.getKey().typeId + + ", typeVer=" + entry.getKey().typeVer + ']' + ); + + entry.getValue().onDone(entry); + } + ctx.failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); throw new IgniteException(msg, e); @@ -183,4 +226,235 @@ private BinaryMetadata readMetadata(int typeId) { return null; } + + /** + * @param meta Binary metadata to be written. + * @param typeVer Type version. + */ + void writeMetadataAsync(BinaryMetadata meta, int typeVer) { + if (!CU.isPersistenceEnabled(ctx.config())) + return; + + if (log.isDebugEnabled()) + log.debug( + "Submitting task for async write for" + + " [typeName=" + meta.typeName() + + ", typeId=" + meta.typeId() + + ", typeVersion=" + typeVer + ']' + ); + + writer.submit(new WriteOperationTask(meta, typeVer)); + } + + /** + * {@code typeVer} parameter is always non-negative except one special case + * (see {@link CacheObjectBinaryProcessorImpl#addMeta(int, BinaryType, boolean)} for context): + * if request for bin meta update arrives right at the moment when node is stopping + * {@link MetadataUpdateResult} of special type is generated: UPDATE_DISABLED. + * + * At this moment type version is unknown and blocking thread adds risk of deadlock so wait is skipped. + * + * @param typeId Type ID. + * @param typeVer Type version. + * @throws IgniteCheckedException If write operation failed. 
+ */ + void waitForWriteCompletion(int typeId, int typeVer) throws IgniteCheckedException { + //special case, see javadoc + if (typeVer == -1) { + if (log.isDebugEnabled()) + log.debug("No need to wait for " + typeId + ", negative typeVer was passed."); + + return; + } + + GridFutureAdapter fut = writeOpFutures.get(new OperationSyncKey(typeId, typeVer)); + + if (fut != null) { + if (log.isDebugEnabled()) + log.debug( + "Waiting for write completion of" + + " [typeId=" + typeId + + ", typeVer=" + typeVer + ']' + ); + + fut.get(); + } + } + + /** + * + */ + private class BinaryMetadataAsyncWriter extends GridWorker { + /** */ + private final BlockingQueue queue = new LinkedBlockingQueue<>(); + + /** */ + BinaryMetadataAsyncWriter() { + super(ctx.igniteInstanceName(), "binary-metadata-writer", BinaryMetadataFileStore.this.log, ctx.workersRegistry()); + } + + /** + * @param task Write operation task. + */ + void submit(WriteOperationTask task) { + if (isCancelled()) + return; + + GridFutureAdapter writeOpFuture = new GridFutureAdapter(); + + writeOpFutures.put(new OperationSyncKey(task.meta.typeId(), task.typeVer), writeOpFuture); + + if (stopOnCriticalError) { + writeOpFuture.onDone(new Exception("The node is in invalid state due to a critical error. 
" + + "See logs for more details.")); + + return; + } + + queue.add(task); + } + + /** {@inheritDoc} */ + @Override public void cancel() { + super.cancel(); + + queue.clear(); + + IgniteCheckedException err = new IgniteCheckedException("Operation has been cancelled (node is stopping)."); + + for (Map.Entry e : writeOpFutures.entrySet()) { + if (log.isDebugEnabled()) + log.debug( + "Cancelling future for write operation for" + + " [typeId=" + e.getKey().typeId + + ", typeVer=" + e.getKey().typeVer + ']' + ); + + e.getValue().onDone(err); + } + + writeOpFutures.clear(); + } + + /** {@inheritDoc} */ + @Override protected void body() throws InterruptedException, IgniteInterruptedCheckedException { + while (!isCancelled()) { + try { + body0(); + } + catch (InterruptedException e) { + if (!isCancelled) { + ctx.failure().process(new FailureContext(FailureType.SYSTEM_WORKER_TERMINATION, e)); + + throw e; + } + } + } + } + + /** */ + private void body0() throws InterruptedException { + WriteOperationTask task; + + blockingSectionBegin(); + + try { + task = queue.take(); + + if (log.isDebugEnabled()) + log.debug( + "Starting write operation for" + + " [typeId=" + task.meta.typeId() + + ", typeVer=" + task.typeVer + ']' + ); + + writeMetadata(task.meta); + } + finally { + blockingSectionEnd(); + } + + GridFutureAdapter fut = writeOpFutures.remove(new OperationSyncKey(task.meta.typeId(), task.typeVer)); + + if (fut != null) { + if (log.isDebugEnabled()) + log.debug( + "Future for write operation for" + + " [typeId=" + task.meta.typeId() + + ", typeVer=" + task.typeVer + ']' + + " completed." + ); + + fut.onDone(); + } + else { + if (log.isDebugEnabled()) + log.debug( + "Future for write operation for" + + " [typeId=" + task.meta.typeId() + + ", typeVer=" + task.typeVer + ']' + + " not found." 
+ ); + } + } + } + + /** + * + */ + private static final class WriteOperationTask { + /** */ + private final BinaryMetadata meta; + /** */ + private final int typeVer; + + /** + * @param meta Metadata for binary type. + * @param ver Version of type. + */ + private WriteOperationTask(BinaryMetadata meta, int ver) { + this.meta = meta; + typeVer = ver; + } + } + + /** + * + */ + private static final class OperationSyncKey { + /** */ + private final int typeId; + + /** */ + private final int typeVer; + + /** + * @param typeId Type Id. + * @param typeVer Type version. + */ + private OperationSyncKey(int typeId, int typeVer) { + this.typeId = typeId; + this.typeVer = typeVer; + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return 31 * typeId + typeVer; + } + + /** {@inheritDoc} */ + @Override public boolean equals(Object obj) { + if (!(obj instanceof OperationSyncKey)) + return false; + + OperationSyncKey that = (OperationSyncKey)obj; + + return (that.typeId == typeId) && (that.typeVer == typeVer); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(OperationSyncKey.class, this); + } + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java index 0d2f6f9fb9bf4..e70866e28f0ef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/BinaryMetadataTransport.java @@ -167,7 +167,7 @@ void addBinaryMetadataUpdateListener(BinaryMetadataUpdatedListener lsnr) { * @param newMeta Metadata proposed for update. * @return Future to wait for update result on. 
*/ - GridFutureAdapter requestMetadataUpdate(BinaryMetadata newMeta) { + GridFutureAdapter requestMetadataUpdate(BinaryMetadata newMeta) { int typeId = newMeta.typeId(); MetadataUpdateResultFuture resFut; @@ -204,7 +204,7 @@ GridFutureAdapter requestMetadataUpdate(BinaryMetadata ne BinaryMetadata mergedMeta = mergeMetadata(oldMeta, newMeta, changedSchemas); if (mergedMeta == oldMeta) { - resFut.onDone(MetadataUpdateResult.createSuccessfulResult()); + resFut.onDone(MetadataUpdateResult.createSuccessfulResult(-1)); return null; } @@ -282,7 +282,7 @@ GridFutureAdapter awaitMetadataUpdate(int typeId, int ver) BinaryMetadataHolder holder = metaLocCache.get(typeId); if (holder.acceptedVersion() >= ver) - resFut.onDone(MetadataUpdateResult.createSuccessfulResult()); + resFut.onDone(MetadataUpdateResult.createSuccessfulResult(-1)); return resFut; } @@ -435,7 +435,7 @@ private final class MetadataUpdateProposedListener implements CustomEventListene acceptedVer)) { obsoleteUpd = true; - fut.onDone(MetadataUpdateResult.createSuccessfulResult()); + fut.onDone(MetadataUpdateResult.createSuccessfulResult(-1)); break; } @@ -580,7 +580,7 @@ private final class MetadataUpdateAcceptedListener implements CustomEventListene return; } - metadataFileStore.writeMetadata(holder.metadata()); + metadataFileStore.writeMetadataAsync(holder.metadata(), holder.pendingVersion()); metaLocCache.put(typeId, new BinaryMetadataHolder(holder.metadata(), holder.pendingVersion(), newAcceptedVer)); } @@ -612,7 +612,7 @@ private final class MetadataUpdateAcceptedListener implements CustomEventListene } if (fut != null) - fut.onDone(MetadataUpdateResult.createSuccessfulResult()); + fut.onDone(MetadataUpdateResult.createSuccessfulResult(newAcceptedVer)); } } @@ -657,6 +657,11 @@ void key(SyncKey key) { this.key = key; } + /** */ + public int typeVersion() { + return key.ver; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(MetadataUpdateResultFuture.class, this); @@ -795,7 
+800,7 @@ private final class MetadataResponseListener implements GridMessageListener { return; if (msg0.metadataNotFound()) { - fut.onDone(MetadataUpdateResult.createSuccessfulResult()); + fut.onDone(MetadataUpdateResult.createSuccessfulResult(-1)); return; } @@ -820,7 +825,7 @@ private final class MetadataResponseListener implements GridMessageListener { while (!metaLocCache.replace(typeId, oldHolder, newHolder)); } - fut.onDone(MetadataUpdateResult.createSuccessfulResult()); + fut.onDone(MetadataUpdateResult.createSuccessfulResult(-1)); } catch (IgniteCheckedException e) { fut.onDone(MetadataUpdateResult.createFailureResult(new BinaryObjectException(e))); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java index b1decfc6695b6..3a4efea95c575 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java @@ -280,6 +280,9 @@ public void addBinaryMetadataUpdateListener(BinaryMetadataUpdatedListener lsnr) @Override public void stop(boolean cancel) { if (transport != null) transport.stop(); + + if (metadataFileStore != null) + metadataFileStore.stop(); } /** {@inheritDoc} */ @@ -511,6 +514,8 @@ public GridBinaryMarshaller marshaller() { if (res.rejected()) throw res.error(); + else if (!ctx.clientNode()) + metadataFileStore.waitForWriteCompletion(typeId, res.typeVersion()); } catch (IgniteCheckedException e) { throw new BinaryObjectException("Failed to update metadata for type: " + newMeta.typeName(), e); @@ -576,6 +581,30 @@ private void failIfUnregistered(int typeId, BinaryMetadata newMeta0) { return meta != null ? 
meta.wrap(binaryCtx) : null; } + /** + * Forces caller thread to wait for binary metadata write operation for given type ID. + * + * In case of in-memory mode this method becomes a No-op as no binary metadata is written to disk in this mode. + * + * @param typeId ID of binary type to wait for metadata write operation. + */ + public void waitMetadataWriteIfNeeded(final int typeId) { + if (metadataFileStore == null) + return; + + BinaryMetadataHolder hldr = metadataLocCache.get(typeId); + + if (hldr != null) { + try { + metadataFileStore.waitForWriteCompletion(typeId, hldr.acceptedVersion()); + } + catch (IgniteCheckedException e) { + log.warning("Failed to wait for metadata write operation for [typeId=" + typeId + + ", typeVer=" + hldr.acceptedVersion() + ']', e); + } + } + } + /** * @param typeId Type ID. * @return Metadata. @@ -619,6 +648,17 @@ private void failIfUnregistered(int typeId, BinaryMetadata newMeta0) { // No-op. } } + else if (metadataFileStore != null) { + try { + metadataFileStore.waitForWriteCompletion(typeId, holder.acceptedVersion()); + } + catch (IgniteCheckedException e) { + log.warning("Failed to wait for metadata write operation for [typeId=" + typeId + + ", typeVer=" + holder.acceptedVersion() + ']', e); + + return null; + } + } return holder.metadata(); } @@ -733,6 +773,18 @@ else if (holder == null || !holder.metadata().hasSchema(schemaId)) { } } + if (holder != null && metadataFileStore != null) { + try { + metadataFileStore.waitForWriteCompletion(typeId, holder.acceptedVersion()); + } + catch (IgniteCheckedException e) { + log.warning("Failed to wait for metadata write operation for [typeId=" + typeId + + ", typeVer=" + holder.acceptedVersion() + ']', e); + + return null; + } + } + return holder != null ? 
holder.metadata().wrap(binaryCtx) : null; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateResult.java index 6c299abc732d6..f6d0d9de69a1e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/MetadataUpdateResult.java @@ -28,13 +28,18 @@ final class MetadataUpdateResult { /** */ private final BinaryObjectException error; + /** */ + private final int typeVer; + /** * @param resType Response type. * @param error Error. + * @param typeVer Accepted version of updated type. */ - private MetadataUpdateResult(ResultType resType, BinaryObjectException error) { + private MetadataUpdateResult(ResultType resType, BinaryObjectException error, int typeVer) { this.resType = resType; this.error = error; + this.typeVer = typeVer; } /** @@ -51,11 +56,16 @@ BinaryObjectException error() { return error; } + /** */ + int typeVersion() { + return typeVer; + } + /** - * + * @param typeVer Accepted version of updated BinaryMetadata type or -1 if not applicable. 
*/ - static MetadataUpdateResult createSuccessfulResult() { - return new MetadataUpdateResult(ResultType.SUCCESS, null); + static MetadataUpdateResult createSuccessfulResult(int typeVer) { + return new MetadataUpdateResult(ResultType.SUCCESS, null, typeVer); } /** @@ -64,14 +74,14 @@ static MetadataUpdateResult createSuccessfulResult() { static MetadataUpdateResult createFailureResult(BinaryObjectException err) { assert err != null; - return new MetadataUpdateResult(ResultType.REJECT, err); + return new MetadataUpdateResult(ResultType.REJECT, err, -1); } /** * */ static MetadataUpdateResult createUpdateDisabledResult() { - return new MetadataUpdateResult(ResultType.UPDATE_DISABLED, null); + return new MetadataUpdateResult(ResultType.UPDATE_DISABLED, null, -1); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java index 14e47b1bc6156..f73fec11a4b25 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; -import java.util.Iterator; import java.util.Map; import java.util.UUID; import java.util.concurrent.Callable; @@ -33,6 +32,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import javax.cache.Cache; import javax.cache.event.CacheEntryEvent; import javax.cache.event.CacheEntryUpdatedListener; import org.apache.ignite.Ignite; @@ -40,6 +40,7 @@ import org.apache.ignite.IgniteSet; import org.apache.ignite.binary.BinaryObject; import 
org.apache.ignite.cache.CacheEntryEventSerializableFilter; +import org.apache.ignite.cache.CachePeekMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; @@ -49,7 +50,6 @@ import org.apache.ignite.internal.processors.cache.GridCacheGateway; import org.apache.ignite.internal.processors.cache.GridCacheManagerAdapter; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; -import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; import org.apache.ignite.internal.processors.datastructures.GridAtomicCacheQueueImpl; import org.apache.ignite.internal.processors.datastructures.GridCacheQueueHeader; @@ -62,7 +62,6 @@ import org.apache.ignite.internal.processors.datastructures.GridTransactionalCacheQueueImpl; import org.apache.ignite.internal.processors.datastructures.SetItemKey; import org.apache.ignite.internal.processors.task.GridInternal; -import org.apache.ignite.internal.util.GridConcurrentHashSet; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; @@ -71,7 +70,6 @@ import org.apache.ignite.resources.IgniteInstanceResource; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import java.util.concurrent.ConcurrentHashMap; import static javax.cache.event.EventType.REMOVED; import static org.apache.ignite.cache.CacheMode.PARTITIONED; @@ -107,10 +105,6 @@ public class CacheDataStructuresManager extends GridCacheManagerAdapter { /** Sets map. */ private final ConcurrentMap setsMap; - /** Set keys used for set iteration. */ - private ConcurrentMap> setDataMap = - new ConcurrentHashMap<>(); - /** Queues map. 
*/ private final ConcurrentMap queuesMap; @@ -347,45 +341,6 @@ private void waitInitialization() throws IgniteCheckedException { } } - /** - * Entry update callback. - * - * @param key Key. - * @param rmv {@code True} if entry was removed. - * @param keepBinary Keep binary flag. - */ - public void onEntryUpdated(KeyCacheObject key, boolean rmv, boolean keepBinary) { - // No need to notify data structures manager for a user cache since all DS objects are stored - // in system caches. - if (cctx.userCache()) - return; - - Object key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false); - - if (key0 instanceof SetItemKey) - onSetItemUpdated((SetItemKey)key0, rmv); - } - - /** - * Partition evicted callback. - * - * @param part Partition number. - */ - public void onPartitionEvicted(int part) { - GridCacheAffinityManager aff = cctx.affinity(); - - for (GridConcurrentHashSet set : setDataMap.values()) { - Iterator iter = set.iterator(); - - while (iter.hasNext()) { - SetItemKey key = iter.next(); - - if (aff.partition(key) == part) - iter.remove(); - } - } - } - /** * @param name Set name. * @param colloc Collocated flag. @@ -466,14 +421,6 @@ public boolean knownType(Object obj) { return obj == null || KNOWN_CLS.contains(obj.getClass()); } - /** - * @param id Set ID. - * @return Data for given set. - */ - @Nullable public GridConcurrentHashSet setData(IgniteUuid id) { - return setDataMap.get(id); - } - /** * @param setId Set ID. * @param topVer Topology version. 
@@ -491,22 +438,19 @@ private void removeSetData(IgniteUuid setId, AffinityTopologyVersion topVer) thr cctx.preloader().syncFuture().get(); } - GridConcurrentHashSet set = setDataMap.get(setId); - - if (set == null) - return; - - IgniteInternalCache cache = cctx.cache(); + IgniteInternalCache cache = cctx.cache(); final int BATCH_SIZE = 100; Collection keys = new ArrayList<>(BATCH_SIZE); - for (SetItemKey key : set) { - if (!loc && !aff.primaryByKey(cctx.localNode(), key, topVer)) + for (Cache.Entry entry : cache.localEntries(new CachePeekMode[] {CachePeekMode.PRIMARY})) { + Object obj = entry.getKey(); + + if (!(obj instanceof SetItemKey && setId.equals(((SetItemKey)obj).setId()))) continue; - keys.add(key); + keys.add((SetItemKey)obj); if (keys.size() == BATCH_SIZE) { retryRemoveAll(cache, keys); @@ -517,8 +461,6 @@ private void removeSetData(IgniteUuid setId, AffinityTopologyVersion topVer) thr if (!keys.isEmpty()) retryRemoveAll(cache, keys); - - setDataMap.remove(setId); } /** @@ -608,30 +550,6 @@ private boolean pingNodes(Collection nodes) throws IgniteCheckedExc return true; } - /** - * @param key Set item key. - * @param rmv {@code True} if item was removed. - */ - private void onSetItemUpdated(SetItemKey key, boolean rmv) { - GridConcurrentHashSet set = setDataMap.get(key.setId()); - - if (set == null) { - if (rmv) - return; - - GridConcurrentHashSet old = setDataMap.putIfAbsent(key.setId(), - set = new GridConcurrentHashSet<>()); - - if (old != null) - set = old; - } - - if (rmv) - set.remove(key); - else - set.add(key); - } - /** * @param setId Set ID. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryRequest.java index 90ce2344d77ab..4b762facba58f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryRequest.java @@ -20,6 +20,7 @@ import java.io.Externalizable; import java.nio.ByteBuffer; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; @@ -27,11 +28,14 @@ import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableRequest; + +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; /** * Message sent to check that transactions related to transaction were prepared on remote node. */ -public class GridCacheTxRecoveryRequest extends GridDistributedBaseMessage { +public class GridCacheTxRecoveryRequest extends GridDistributedBaseMessage implements TimeLoggableRequest { /** */ private static final long serialVersionUID = 0L; @@ -53,6 +57,13 @@ public class GridCacheTxRecoveryRequest extends GridDistributedBaseMessage { /** {@code True} if should check only tx on near node. */ private boolean nearTxCheck; + /** @see TimeLoggableRequest#sendTimestamp(). 
*/ + private long sendTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableRequest#receiveTimestamp(). */ + @GridDirectTransient + private long receiveTimestamp = INVALID_TIMESTAMP; + /** * Empty constructor required by {@link Externalizable} */ @@ -133,6 +144,26 @@ public boolean system() { return ctx.txRecoveryMessageLogger(); } + /** {@inheritDoc} */ + @Override public long sendTimestamp() { + return sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public void sendTimestamp(long sendTimestamp) { + this.sendTimestamp = sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public long receiveTimestamp() { + return receiveTimestamp; + } + + /** {@inheritDoc} */ + @Override public void receiveTimestamp(long receiveTimestamp) { + this.receiveTimestamp = receiveTimestamp; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -173,12 +204,18 @@ public boolean system() { writer.incrementState(); case 12: - if (!writer.writeBoolean("sys", sys)) + if (!writer.writeLong("sendTimestamp", sendTimestamp)) return false; writer.incrementState(); case 13: + if (!writer.writeBoolean("sys", sys)) + return false; + + writer.incrementState(); + + case 14: if (!writer.writeInt("txNum", txNum)) return false; @@ -233,7 +270,7 @@ public boolean system() { reader.incrementState(); case 12: - sys = reader.readBoolean("sys"); + sendTimestamp = reader.readLong("sendTimestamp"); if (!reader.isLastRead()) return false; @@ -241,6 +278,14 @@ public boolean system() { reader.incrementState(); case 13: + sys = reader.readBoolean("sys"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 14: txNum = reader.readInt("txNum"); if (!reader.isLastRead()) @@ -260,7 +305,7 @@ public boolean system() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 14; + return 15; } /** {@inheritDoc} */ diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryResponse.java index 1ef44a8f21f6b..d37abeaba69f4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryResponse.java @@ -29,11 +29,13 @@ import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; /** * Transactions recovery check response. */ -public class GridCacheTxRecoveryResponse extends GridDistributedBaseMessage implements IgniteTxStateAware { +public class GridCacheTxRecoveryResponse extends GridDistributedBaseMessage implements IgniteTxStateAware, ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -50,6 +52,17 @@ public class GridCacheTxRecoveryResponse extends GridDistributedBaseMessage impl @GridDirectTransient private IgniteTxState txState; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). 
*/ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Empty constructor required by {@link Externalizable} */ @@ -114,6 +127,36 @@ public boolean success() { return ctx.txRecoveryMessageLogger(); } + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -142,6 +185,12 @@ public boolean success() { writer.incrementState(); case 10: + if (!writer.writeLong("reqTimeData", reqTimeData)) + return false; + + writer.incrementState(); + + case 11: if (!writer.writeBoolean("success", success)) return false; @@ -180,6 +229,14 @@ public boolean success() { reader.incrementState(); case 10: + reqTimeData = reader.readLong("reqTimeData"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 11: success = reader.readBoolean("success"); if (!reader.isLastRead()) @@ -199,7 +256,7 @@ public boolean success() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 11; + return 12; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedCacheEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedCacheEntry.java index 
d4dc59d05221a..812ed2e922d7a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedCacheEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedCacheEntry.java @@ -641,8 +641,12 @@ public void doneRemote( /** * Rechecks if lock should be reassigned. + * + * @param ver Thread chain version. + * + * @return {@code True} if thread chain processing must be stopped. */ - public void recheck() { + public boolean recheck(GridCacheVersion ver) { CacheLockCandidates prev = null; CacheLockCandidates owner = null; @@ -675,7 +679,9 @@ public void recheck() { } // This call must be made outside of synchronization. - checkOwnerChanged(prev, owner, val); + checkOwnerChanged(prev, owner, val, true); + + return owner == null || !owner.hasCandidate(ver); // Will return false if locked by thread chain version. } /** {@inheritDoc} */ @@ -748,15 +754,17 @@ protected void checkCallbacks(boolean emptyBefore, boolean emptyAfter) { // Allow next lock in the thread to proceed. if (!cand.used()) { + if (cand.owner()) + break; + GridCacheContext cctx0 = cand.parent().context(); GridDistributedCacheEntry e = (GridDistributedCacheEntry)cctx0.cache().peekEx(cand.parent().key()); - if (e != null) - e.recheck(); - - break; + // At this point candidate may have been removed and entry destroyed, so we check for null. 
+ if (e == null || e.recheck(owner.version())) + break; } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedLockRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedLockRequest.java index ca78763fc2148..49bcad67f2202 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedLockRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedLockRequest.java @@ -35,13 +35,16 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableRequest; import org.apache.ignite.transactions.TransactionIsolation; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; + /** * Lock request message. */ -public class GridDistributedLockRequest extends GridDistributedBaseMessage { +public class GridDistributedLockRequest extends GridDistributedBaseMessage implements TimeLoggableRequest { /** */ private static final long serialVersionUID = 0L; @@ -99,6 +102,13 @@ public class GridDistributedLockRequest extends GridDistributedBaseMessage { /** Additional flags. */ private byte flags; + /** @see TimeLoggableRequest#sendTimestamp(). */ + private long sendTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableRequest#receiveTimestamp(). */ + @GridDirectTransient + private long receiveTimestamp = INVALID_TIMESTAMP; + /** * Empty constructor. 
*/ @@ -351,6 +361,26 @@ public long timeout() { finishUnmarshalCacheObjects(keys, cctx, ldr); } + /** {@inheritDoc} */ + @Override public long sendTimestamp() { + return sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public void sendTimestamp(long sendTimestamp) { + this.sendTimestamp = sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public long receiveTimestamp() { + return receiveTimestamp; + } + + /** {@inheritDoc} */ + @Override public void receiveTimestamp(long receiveTimestamp) { + this.receiveTimestamp = receiveTimestamp; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -427,18 +457,24 @@ public long timeout() { writer.incrementState(); case 18: - if (!writer.writeLong("threadId", threadId)) + if (!writer.writeLong("sendTimestamp", sendTimestamp)) return false; writer.incrementState(); case 19: - if (!writer.writeLong("timeout", timeout)) + if (!writer.writeLong("threadId", threadId)) return false; writer.incrementState(); case 20: + if (!writer.writeLong("timeout", timeout)) + return false; + + writer.incrementState(); + + case 21: if (!writer.writeInt("txSize", txSize)) return false; @@ -545,7 +581,7 @@ public long timeout() { reader.incrementState(); case 18: - threadId = reader.readLong("threadId"); + sendTimestamp = reader.readLong("sendTimestamp"); if (!reader.isLastRead()) return false; @@ -553,7 +589,7 @@ public long timeout() { reader.incrementState(); case 19: - timeout = reader.readLong("timeout"); + threadId = reader.readLong("threadId"); if (!reader.isLastRead()) return false; @@ -561,6 +597,14 @@ public long timeout() { reader.incrementState(); case 20: + timeout = reader.readLong("timeout"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 21: txSize = reader.readInt("txSize"); if (!reader.isLastRead()) @@ -580,7 +624,7 @@ public long timeout() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 21; 
+ return 22; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxFinishRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxFinishRequest.java index a1af470c56deb..dd433935a0eba 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxFinishRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxFinishRequest.java @@ -231,7 +231,7 @@ public boolean system() { /** * @return IO policy. */ - public byte policy() { + @Override public byte policy() { return plc; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxPrepareRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxPrepareRequest.java index 2d6da9c33d9a7..f22f829197db2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxPrepareRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxPrepareRequest.java @@ -48,15 +48,18 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableRequest; import org.apache.ignite.transactions.TransactionConcurrency; import org.apache.ignite.transactions.TransactionIsolation; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; + /** * Transaction prepare request for optimistic and eventually consistent * transactions. 
*/ -public class GridDistributedTxPrepareRequest extends GridDistributedBaseMessage implements IgniteTxStateAware { +public class GridDistributedTxPrepareRequest extends GridDistributedBaseMessage implements IgniteTxStateAware, TimeLoggableRequest { /** */ private static final long serialVersionUID = 0L; @@ -157,6 +160,13 @@ public class GridDistributedTxPrepareRequest extends GridDistributedBaseMessage @GridToStringExclude private byte flags; + /** @see TimeLoggableRequest#sendTimestamp(). */ + private long sendTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableRequest#receiveTimestamp(). */ + @GridDirectTransient + private long receiveTimestamp = INVALID_TIMESTAMP; + /** * Required by {@link Externalizable}. */ @@ -255,7 +265,7 @@ public void storeWriteThrough(boolean storeWriteThrough) { /** * @return IO policy. */ - public byte policy() { + @Override public byte policy() { return plc; } @@ -448,6 +458,26 @@ public boolean last() { return ctx.txPrepareMessageLogger(); } + /** {@inheritDoc} */ + @Override public long sendTimestamp() { + return sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public void sendTimestamp(long sendTimestamp) { + this.sendTimestamp = sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public long receiveTimestamp() { + return receiveTimestamp; + } + + /** {@inheritDoc} */ + @Override public void receiveTimestamp(long receiveTimestamp) { + this.receiveTimestamp = receiveTimestamp; + } + /** * Sets flag mask. 
* @@ -526,36 +556,42 @@ private boolean isFlag(int mask) { writer.incrementState(); case 15: - if (!writer.writeLong("threadId", threadId)) + if (!writer.writeLong("sendTimestamp", sendTimestamp)) return false; writer.incrementState(); case 16: - if (!writer.writeLong("timeout", timeout)) + if (!writer.writeLong("threadId", threadId)) return false; writer.incrementState(); case 17: - if (!writer.writeMap("txNodesMsg", txNodesMsg, MessageCollectionItemType.UUID, MessageCollectionItemType.MSG)) + if (!writer.writeLong("timeout", timeout)) return false; writer.incrementState(); case 18: - if (!writer.writeInt("txSize", txSize)) + if (!writer.writeMap("txNodesMsg", txNodesMsg, MessageCollectionItemType.UUID, MessageCollectionItemType.MSG)) return false; writer.incrementState(); case 19: - if (!writer.writeMessage("writeVer", writeVer)) + if (!writer.writeInt("txSize", txSize)) return false; writer.incrementState(); case 20: + if (!writer.writeMessage("writeVer", writeVer)) + return false; + + writer.incrementState(); + + case 21: if (!writer.writeCollection("writes", writes, MessageCollectionItemType.MSG)) return false; @@ -642,7 +678,7 @@ private boolean isFlag(int mask) { reader.incrementState(); case 15: - threadId = reader.readLong("threadId"); + sendTimestamp = reader.readLong("sendTimestamp"); if (!reader.isLastRead()) return false; @@ -650,7 +686,7 @@ private boolean isFlag(int mask) { reader.incrementState(); case 16: - timeout = reader.readLong("timeout"); + threadId = reader.readLong("threadId"); if (!reader.isLastRead()) return false; @@ -658,7 +694,7 @@ private boolean isFlag(int mask) { reader.incrementState(); case 17: - txNodesMsg = reader.readMap("txNodesMsg", MessageCollectionItemType.UUID, MessageCollectionItemType.MSG, false); + timeout = reader.readLong("timeout"); if (!reader.isLastRead()) return false; @@ -666,7 +702,7 @@ private boolean isFlag(int mask) { reader.incrementState(); case 18: - txSize = reader.readInt("txSize"); + txNodesMsg = 
reader.readMap("txNodesMsg", MessageCollectionItemType.UUID, MessageCollectionItemType.MSG, false); if (!reader.isLastRead()) return false; @@ -674,7 +710,7 @@ private boolean isFlag(int mask) { reader.incrementState(); case 19: - writeVer = reader.readMessage("writeVer"); + txSize = reader.readInt("txSize"); if (!reader.isLastRead()) return false; @@ -682,6 +718,14 @@ private boolean isFlag(int mask) { reader.incrementState(); case 20: + writeVer = reader.readMessage("writeVer"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 21: writes = reader.readCollection("writes", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -701,7 +745,7 @@ private boolean isFlag(int mask) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 21; + return 22; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java index c4f54e5af7f93..5742b9f16803e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -50,6 +51,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.GridCacheUpdateTxResult; import org.apache.ignite.internal.processors.cache.KeyCacheObject; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException; import 
org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheEntry; @@ -72,6 +74,7 @@ import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.transactions.TransactionConcurrency; @@ -85,6 +88,8 @@ import static org.apache.ignite.internal.processors.cache.GridCacheOperation.READ; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.RELOAD; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.UPDATE; +import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.RENTING; +import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.EVICTED; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_BACKUP; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; import static org.apache.ignite.transactions.TransactionState.COMMITTED; @@ -269,11 +274,15 @@ public void clearEntry(IgniteTxKey key) { * @param baseVer Base version. * @param committedVers Committed versions. * @param rolledbackVers Rolled back versions. + * @param pendingVers Pending versions. + * + * @throws GridDhtInvalidPartitionException If partition was invalidated. 
*/ @Override public void doneRemote(GridCacheVersion baseVer, Collection committedVers, Collection rolledbackVers, - Collection pendingVers) { + Collection pendingVers + ) throws GridDhtInvalidPartitionException { Map readMap = txState.readMap(); if (readMap != null && !readMap.isEmpty()) { @@ -310,12 +319,15 @@ public void clearEntry(IgniteTxKey key) { * @param committedVers Completed versions relative to base version. * @param rolledbackVers Rolled back versions relative to base version. * @param pendingVers Pending versions. + * + * @throws GridDhtInvalidPartitionException If entry partition was invalidated. */ private void doneRemote(IgniteTxEntry txEntry, GridCacheVersion baseVer, Collection committedVers, Collection rolledbackVers, - Collection pendingVers) { + Collection pendingVers + ) throws GridDhtInvalidPartitionException { while (true) { GridDistributedCacheEntry entry = (GridDistributedCacheEntry)txEntry.cached(); @@ -456,7 +468,12 @@ private void commitIfLocked() throws IgniteCheckedException { if (log.isDebugEnabled()) log.debug("Got removed entry while committing (will retry): " + txEntry); - txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), topologyVersion())); + try { + txEntry.cached(txEntry.context().cache().entryEx(txEntry.key(), topologyVersion())); + } + catch (GridDhtInvalidPartitionException e) { + break; + } } } } @@ -496,6 +513,9 @@ private void commitIfLocked() throws IgniteCheckedException { cctx.database().checkpointReadLock(); + // Reserved partitions (necessary to prevent race due to updates in RENTING state). + Set reservedParts = new HashSet<>(); + try { Collection entries = near() || cctx.snapshot().needTxReadLogging() ? allEntries() : writeEntries(); @@ -509,7 +529,29 @@ private void commitIfLocked() throws IgniteCheckedException { for (IgniteTxEntry txEntry : entries) { GridCacheContext cacheCtx = txEntry.context(); - boolean replicate = cacheCtx.isDrEnabled(); + // Prevent stale updates. 
+ GridDhtLocalPartition locPart = + cacheCtx.group().topology().localPartition(txEntry.cached().partition()); + + if (!near()) { + if (locPart == null) + continue; + + if (!reservedParts.contains(locPart) && locPart.reserve()) { + assert locPart.state() != EVICTED && locPart.reservations() > 0 : locPart; + + reservedParts.add(locPart); + } + + if (locPart.state() == RENTING || locPart.state() == EVICTED) { + LT.warn(log(), "Skipping update to partition that is concurrently evicting " + + "[grp=" + cacheCtx.group().cacheOrGroupName() + ", part=" + locPart + "]"); + + continue; + } + } + + boolean replicate = cacheCtx.isDrEnabled(); while (true) { try { @@ -778,7 +820,7 @@ else if (!near()){ .map(tuple -> tuple.get1().partitionCounter(tuple.get2().updateCounter())) .collect(Collectors.toList()); - cctx.wal().log(new DataRecord(entriesWithCounters)); + ptr = cctx.wal().log(new DataRecord(entriesWithCounters)); } if (ptr != null && !cctx.tm().logTxRecords()) @@ -807,6 +849,9 @@ else if (!near()){ } } finally { + for (GridDhtLocalPartition locPart : reservedParts) + locPart.release(); + cctx.database().checkpointReadUnlock(); if (wrapper != null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtAffinityAssignmentRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtAffinityAssignmentRequest.java index cf7018a15c895..fa2635d540caf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtAffinityAssignmentRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtAffinityAssignmentRequest.java @@ -18,16 +18,20 @@ package org.apache.ignite.internal.processors.cache.distributed.dht; import java.nio.ByteBuffer; +import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import 
org.apache.ignite.internal.processors.cache.GridCacheGroupIdMessage; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableRequest; + +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; /** * Affinity assignment request. */ -public class GridDhtAffinityAssignmentRequest extends GridCacheGroupIdMessage { +public class GridDhtAffinityAssignmentRequest extends GridCacheGroupIdMessage implements TimeLoggableRequest { /** */ private static final long serialVersionUID = 0L; @@ -43,6 +47,13 @@ public class GridDhtAffinityAssignmentRequest extends GridCacheGroupIdMessage { /** Topology version being queried. */ private AffinityTopologyVersion topVer; + /** @see TimeLoggableRequest#sendTimestamp(). */ + private long sendTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableRequest#receiveTimestamp(). */ + @GridDirectTransient + private long receiveTimestamp = INVALID_TIMESTAMP; + /** * Empty constructor. 
*/ @@ -102,6 +113,26 @@ public long futureId() { return topVer; } + /** {@inheritDoc} */ + @Override public long sendTimestamp() { + return sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public void sendTimestamp(long sendTimestamp) { + this.sendTimestamp = sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public long receiveTimestamp() { + return receiveTimestamp; + } + + /** {@inheritDoc} */ + @Override public void receiveTimestamp(long receiveTimestamp) { + this.receiveTimestamp = receiveTimestamp; + } + /** {@inheritDoc} */ @Override public short directType() { return 28; @@ -109,7 +140,7 @@ public long futureId() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 7; + return 8; } /** {@inheritDoc} */ @@ -140,6 +171,12 @@ public long futureId() { writer.incrementState(); case 6: + if (!writer.writeLong("sendTimestamp", sendTimestamp)) + return false; + + writer.incrementState(); + + case 7: if (!writer.writeAffinityTopologyVersion("topVer", topVer)) return false; @@ -178,6 +215,14 @@ public long futureId() { reader.incrementState(); case 6: + sendTimestamp = reader.readLong("sendTimestamp"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 7: topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtAffinityAssignmentResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtAffinityAssignmentResponse.java index e8b40e9cd8c15..b17a27cbd0d4e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtAffinityAssignmentResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtAffinityAssignmentResponse.java @@ -33,13 +33,15 @@ import org.apache.ignite.internal.util.typedef.internal.U; import 
org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** * Affinity assignment response. */ -public class GridDhtAffinityAssignmentResponse extends GridCacheGroupIdMessage { +public class GridDhtAffinityAssignmentResponse extends GridCacheGroupIdMessage implements ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -70,6 +72,17 @@ public class GridDhtAffinityAssignmentResponse extends GridCacheGroupIdMessage { /** */ private byte[] partBytes; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). */ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Empty constructor. 
*/ @@ -95,6 +108,36 @@ public GridDhtAffinityAssignmentResponse( affAssignmentIds = ids(affAssignment); } + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + /** * @return Future ID. */ @@ -215,7 +258,7 @@ private List> ids(List> assignments) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 9; + return 10; } /** @@ -297,6 +340,12 @@ private List> ids(List> assignments) { writer.incrementState(); case 8: + if (!writer.writeLong("reqTimeData", reqTimeData)) + return false; + + writer.incrementState(); + + case 9: if (!writer.writeAffinityTopologyVersion("topVer", topVer)) return false; @@ -351,6 +400,14 @@ private List> ids(List> assignments) { reader.incrementState(); case 8: + reqTimeData = reader.readLong("reqTimeData"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 9: topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java index 4c00300e590b8..259dc0de70de5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java @@ -220,6 +220,8 @@ private void processForceKeysRequest0(ClusterNode node, GridDhtForceKeysRequest msg.miniId(), ctx.deploymentEnabled()); + res.copyTimestamps(msg); + GridDhtPartitionTopology top = ctx.topology(); for (KeyCacheObject k : msg.keys()) { @@ -1312,6 +1314,8 @@ else if (req.needVersion()) res.error(e); } + res.copyTimestamps(req); + try { ctx.io().send(nodeId, res, ctx.ioPolicy()); } @@ -1360,6 +1364,8 @@ protected void processNearGetRequest(final UUID nodeId, final GridNearGetRequest req.version(), req.deployInfo() != null); + res.copyTimestamps(req); + GridDhtFuture> fut = (GridDhtFuture>)f; @@ -1575,8 +1581,7 @@ private void updateTtl(GridCacheAdapter cache, * @param curVer Current topology version. * @return {@code True} if cache affinity changed and operation should be remapped. */ - protected final boolean needRemap(AffinityTopologyVersion expVer, AffinityTopologyVersion curVer, - Collection keys) { + protected final boolean needRemap(AffinityTopologyVersion expVer, AffinityTopologyVersion curVer) { if (curVer.equals(expVer)) return false; @@ -1585,24 +1590,21 @@ protected final boolean needRemap(AffinityTopologyVersion expVer, AffinityTopolo if (curVer.compareTo(lastAffChangedTopVer) >= 0 && curVer.compareTo(expVer) <= 0) return false; - // TODO IGNITE-7164 check mvcc crd for mvcc enabled txs. 
+ Collection cacheNodes0 = ctx.discovery().cacheGroupAffinityNodes(ctx.groupId(), expVer); + Collection cacheNodes1 = ctx.discovery().cacheGroupAffinityNodes(ctx.groupId(), curVer); - for (KeyCacheObject key : keys) { - assert key.partition() != -1; + if (!cacheNodes0.equals(cacheNodes1) || ctx.affinity().affinityTopologyVersion().compareTo(curVer) < 0) + return true; - try { - List aff1 = ctx.affinity().assignments(expVer).get(key.partition()); - List aff2 = ctx.affinity().assignments(curVer).get(key.partition()); + try { + List> aff1 = ctx.affinity().assignments(expVer); + List> aff2 = ctx.affinity().assignments(curVer); - if (!aff1.containsAll(aff2) || aff2.isEmpty() || !aff1.get(0).equals(aff2.get(0))) - return true; - } - catch (IllegalStateException ignored) { - return true; - } + return !aff1.equals(aff2); + } + catch (IllegalStateException ignored) { + return true; } - - return false; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java index c91aab80f024b..d5a48fded5ac7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java @@ -314,7 +314,7 @@ void addInvalidPartition(GridCacheContext cacheCtx, int invalidPart) { // Register invalid partitions with transaction. 
if (tx != null) - tx.addInvalidPartition(cacheCtx, invalidPart); + tx.addInvalidPartition(cacheCtx.cacheId(), invalidPart); if (log.isDebugEnabled()) log.debug("Added invalid partition to future [invalidPart=" + invalidPart + ", fut=" + this + ']'); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockRequest.java index 95786be854a85..6b340b19bcd57 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockRequest.java @@ -377,67 +377,67 @@ public long accessTtl() { } switch (writer.state()) { - case 21: + case 22: if (!writer.writeLong("accessTtl", accessTtl)) return false; writer.incrementState(); - case 22: + case 23: if (!writer.writeBitSet("invalidateEntries", invalidateEntries)) return false; writer.incrementState(); - case 23: + case 24: if (!writer.writeIgniteUuid("miniId", miniId)) return false; writer.incrementState(); - case 24: + case 25: if (!writer.writeCollection("nearKeys", nearKeys, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 25: + case 26: if (!writer.writeObjectArray("ownedKeys", ownedKeys, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 26: + case 27: if (!writer.writeObjectArray("ownedValues", ownedValues, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 27: + case 28: if (!writer.writeBitSet("preloadKeys", preloadKeys)) return false; writer.incrementState(); - case 28: + case 29: if (!writer.writeUuid("subjId", subjId)) return false; writer.incrementState(); - case 29: + case 30: if (!writer.writeInt("taskNameHash", taskNameHash)) return false; writer.incrementState(); - case 30: + case 31: if 
(!writer.writeAffinityTopologyVersion("topVer", topVer)) return false; writer.incrementState(); - case 31: + case 32: if (!writer.writeString("txLbl", txLbl)) return false; @@ -459,7 +459,7 @@ public long accessTtl() { return false; switch (reader.state()) { - case 21: + case 22: accessTtl = reader.readLong("accessTtl"); if (!reader.isLastRead()) @@ -467,7 +467,7 @@ public long accessTtl() { reader.incrementState(); - case 22: + case 23: invalidateEntries = reader.readBitSet("invalidateEntries"); if (!reader.isLastRead()) @@ -475,7 +475,7 @@ public long accessTtl() { reader.incrementState(); - case 23: + case 24: miniId = reader.readIgniteUuid("miniId"); if (!reader.isLastRead()) @@ -483,7 +483,7 @@ public long accessTtl() { reader.incrementState(); - case 24: + case 25: nearKeys = reader.readCollection("nearKeys", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -491,7 +491,7 @@ public long accessTtl() { reader.incrementState(); - case 25: + case 26: ownedKeys = reader.readObjectArray("ownedKeys", MessageCollectionItemType.MSG, KeyCacheObject.class); if (!reader.isLastRead()) @@ -499,7 +499,7 @@ public long accessTtl() { reader.incrementState(); - case 26: + case 27: ownedValues = reader.readObjectArray("ownedValues", MessageCollectionItemType.MSG, GridCacheVersion.class); if (!reader.isLastRead()) @@ -507,7 +507,7 @@ public long accessTtl() { reader.incrementState(); - case 27: + case 28: preloadKeys = reader.readBitSet("preloadKeys"); if (!reader.isLastRead()) @@ -515,7 +515,7 @@ public long accessTtl() { reader.incrementState(); - case 28: + case 29: subjId = reader.readUuid("subjId"); if (!reader.isLastRead()) @@ -523,7 +523,7 @@ public long accessTtl() { reader.incrementState(); - case 29: + case 30: taskNameHash = reader.readInt("taskNameHash"); if (!reader.isLastRead()) @@ -531,7 +531,7 @@ public long accessTtl() { reader.incrementState(); - case 30: + case 31: topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) @@ 
-539,7 +539,7 @@ public long accessTtl() { reader.incrementState(); - case 31: + case 32: txLbl = reader.readString("txLbl"); if (!reader.isLastRead()) @@ -559,7 +559,7 @@ public long accessTtl() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 32; + return 33; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockResponse.java index 63c07e82906f4..fa2220257ed27 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockResponse.java @@ -26,6 +26,7 @@ import java.util.List; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.GridDirectCollection; +import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; @@ -38,11 +39,13 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; /** * DHT cache lock response. 
*/ -public class GridDhtLockResponse extends GridDistributedLockResponse { +public class GridDhtLockResponse extends GridDistributedLockResponse implements ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -63,6 +66,17 @@ public class GridDhtLockResponse extends GridDistributedLockResponse { @GridDirectCollection(GridCacheEntryInfo.class) private List preloadEntries; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). */ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Empty constructor (required by {@link Externalizable}). */ @@ -192,6 +206,37 @@ public Collection preloadEntries() { unmarshalInfos(preloadEntries, ctx.cacheContext(cacheId), ldr); } + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -231,6 +276,12 @@ public Collection preloadEntries() { writer.incrementState(); + case 15: + if (!writer.writeLong("reqTimeData", reqTimeData)) + return false; + + 
writer.incrementState(); + } return true; @@ -279,6 +330,14 @@ public Collection preloadEntries() { reader.incrementState(); + case 15: + reqTimeData = reader.readLong("reqTimeData"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + } return reader.afterMessageRead(GridDhtLockResponse.class); @@ -291,7 +350,7 @@ public Collection preloadEntries() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 15; + return 16; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java index 60ab62fef5743..e0b3b8196a708 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java @@ -30,12 +30,15 @@ import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.cache.PartitionLossPolicy.READ_ONLY_ALL; import static org.apache.ignite.cache.PartitionLossPolicy.READ_ONLY_SAFE; import static org.apache.ignite.cache.PartitionLossPolicy.READ_WRITE_SAFE; +import static org.apache.ignite.internal.processors.cache.GridCacheUtils.isSystemCache; import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFutureAdapter.OperationType.WRITE; +import static org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor.DEFAULT_VOLATILE_DS_GROUP_NAME; /** * @@ -98,8 +101,11 @@ protected final CacheGroupValidation 
validateCacheGroup(CacheGroupContext grp, C PartitionLossPolicy lossPlc = grp.config().getPartitionLossPolicy(); - if (cctx.shared().readOnlyMode() && opType == WRITE) - return new IgniteCheckedException("Failed to perform cache operation (cluster is in read only mode)"); + if (cctx.shared().readOnlyMode() && opType == WRITE && !isSystemCache(cctx.name()) + && cctx.group().groupId() != CU.cacheId(DEFAULT_VOLATILE_DS_GROUP_NAME)) { + return new IgniteClusterReadOnlyException("Failed to perform cache operation (cluster is in " + + "read-only mode) [cacheGrp=" + cctx.group().name() + ", cache=" + cctx.name() + ']'); + } if (grp.needsRecovery() && !recovery) { if (opType == WRITE && (lossPlc == READ_ONLY_SAFE || lossPlc == READ_ONLY_ALL)) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java index 35f2ceafef80a..493a88a75e3a2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java @@ -546,6 +546,8 @@ private void processDhtLockRequest0(UUID nodeId, GridDhtLockRequest req) { if (res != null) { try { // Reply back to sender. 
+ res.copyTimestamps(req); + ctx.io().send(nodeId, res, ctx.ioPolicy()); if (txLockMsgLog.isDebugEnabled()) { @@ -938,7 +940,7 @@ public IgniteInternalFuture lockAllAsync( } try { - if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion(), req.keys())) { + if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion())) { if (log.isDebugEnabled()) { log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + @@ -1043,7 +1045,7 @@ public IgniteInternalFuture lockAllAsync( } try { - if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion(), req.keys())) { + if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion())) { if (log.isDebugEnabled()) { log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + @@ -1226,7 +1228,10 @@ private GridNearLockResponse sendClientLockRemapResponse(ClusterNode nearNode, 0, null, topVer, - ctx.deploymentEnabled()); + ctx.deploymentEnabled(), + false); + + res.copyTimestamps(req); try { ctx.io().send(nearNode, res, ctx.ioPolicy()); @@ -1263,6 +1268,12 @@ private GridNearLockResponse createLockReply( assert tx == null || tx.xidVersion().equals(mappedVer); try { + // All subsequent lock requests must use actual topology version to avoid mapping on invalid primaries. + AffinityTopologyVersion clienRemapVer = req.firstClientRequest() && + tx != null && + topology().readyTopologyVersion().after(req.topologyVersion()) ? + topology().readyTopologyVersion() : null; + // Send reply back to originating near node. 
GridNearLockResponse res = new GridNearLockResponse(ctx.cacheId(), req.version(), @@ -1271,8 +1282,11 @@ private GridNearLockResponse createLockReply( tx != null && tx.onePhaseCommit(), entries.size(), err, - null, - ctx.deploymentEnabled()); + clienRemapVer, + ctx.deploymentEnabled(), + clienRemapVer != null); + + res.copyTimestamps(req); if (err == null) { res.pending(localDhtPendingVersions(entries, mappedVer)); @@ -1302,7 +1316,7 @@ private GridNearLockResponse createLockReply( CacheObject val = null; - if (ret) + if (ret) { val = e.innerGet( null, tx, @@ -1314,6 +1328,7 @@ private GridNearLockResponse createLockReply( tx != null ? tx.resolveTaskName() : null, null, req.keepBinary()); + } assert e.lockedBy(mappedVer) || ctx.mvcc().isRemoved(e.context(), mappedVer) || @@ -1374,7 +1389,7 @@ private GridNearLockResponse createLockReply( U.error(log, "Failed to get value for lock reply message for node [node=" + U.toShortString(nearNode) + ", req=" + req + ']', e); - return new GridNearLockResponse(ctx.cacheId(), + GridNearLockResponse res = new GridNearLockResponse(ctx.cacheId(), req.version(), req.futureId(), req.miniId(), @@ -1382,7 +1397,12 @@ private GridNearLockResponse createLockReply( entries.size(), e, null, - ctx.deploymentEnabled()); + ctx.deploymentEnabled(), + false); + + res.copyTimestamps(req); + + return res; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishFuture.java index ec4398166e5c5..c3430e3617b69 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishFuture.java @@ -49,6 +49,7 @@ import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteUuid; 
+import static java.util.Objects.isNull; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.PRIMARY_SYNC; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.CREATE; @@ -472,18 +473,26 @@ private boolean finish(boolean commit, req.writeVersion(tx.writeVersion() != null ? tx.writeVersion() : tx.xidVersion()); try { - cctx.io().send(n, req, tx.ioPolicy()); + if (isNull(cctx.discovery().getAlive(n.id()))) { + log.error("Unable to send message (node left topology): " + n); - if (msgLog.isDebugEnabled()) { - msgLog.debug("DHT finish fut, sent request dht [txId=" + tx.nearXidVersion() + - ", dhtTxId=" + tx.xidVersion() + - ", node=" + n.id() + ']'); + fut.onNodeLeft(new ClusterTopologyCheckedException("Node left grid while sending message to: " + + n.id())); } + else { + cctx.io().send(n, req, tx.ioPolicy()); - if (sync) - res = true; - else - fut.onDone(); + if (msgLog.isDebugEnabled()) { + msgLog.debug("DHT finish fut, sent request dht [txId=" + tx.nearXidVersion() + + ", dhtTxId=" + tx.xidVersion() + + ", node=" + n.id() + ']'); + } + + if (sync) + res = true; + else + fut.onDone(); + } } catch (IgniteCheckedException e) { // Fail the whole thing. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishRequest.java index 0fa2e69b03623..b821647e95e67 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishRequest.java @@ -23,6 +23,7 @@ import java.util.UUID; import org.apache.ignite.cache.CacheWriteSynchronizationMode; import org.apache.ignite.internal.GridDirectCollection; +import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxFinishRequest; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; @@ -34,14 +35,17 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableRequest; import org.apache.ignite.transactions.TransactionIsolation; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; + /** * Near transaction finish request. 
*/ -public class GridDhtTxFinishRequest extends GridDistributedTxFinishRequest { +public class GridDhtTxFinishRequest extends GridDistributedTxFinishRequest implements TimeLoggableRequest { /** */ private static final long serialVersionUID = 0L; @@ -71,6 +75,13 @@ public class GridDhtTxFinishRequest extends GridDistributedTxFinishRequest { @GridDirectCollection(PartitionUpdateCountersMessage.class) private Collection updCntrs; + /** @see TimeLoggableRequest#sendTimestamp(). */ + private long sendTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableRequest#receiveTimestamp(). */ + @GridDirectTransient + private long receiveTimestamp = INVALID_TIMESTAMP; + /** * Empty constructor required for {@link Externalizable}. */ @@ -357,6 +368,26 @@ public Collection updateCounters() { return updCntrs; } + /** {@inheritDoc} */ + @Override public long sendTimestamp() { + return sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public void sendTimestamp(long sendTimestamp) { + this.sendTimestamp = sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public long receiveTimestamp() { + return receiveTimestamp; + } + + /** {@inheritDoc} */ + @Override public void receiveTimestamp(long receiveTimestamp) { + this.receiveTimestamp = receiveTimestamp; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -403,12 +434,18 @@ public Collection updateCounters() { writer.incrementState(); case 27: - if (!writer.writeCollection("updCntrs", updCntrs, MessageCollectionItemType.MSG)) + if (!writer.writeLong("sendTimestamp", sendTimestamp)) return false; writer.incrementState(); case 28: + if (!writer.writeCollection("updCntrs", updCntrs, MessageCollectionItemType.MSG)) + return false; + + writer.incrementState(); + + case 29: if (!writer.writeMessage("writeVer", writeVer)) return false; @@ -475,7 +512,7 @@ public Collection updateCounters() { reader.incrementState(); case 27: - updCntrs = 
reader.readCollection("updCntrs", MessageCollectionItemType.MSG); + sendTimestamp = reader.readLong("sendTimestamp"); if (!reader.isLastRead()) return false; @@ -483,6 +520,14 @@ public Collection updateCounters() { reader.incrementState(); case 28: + updCntrs = reader.readCollection("updCntrs", MessageCollectionItemType.MSG); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 29: writeVer = reader.readMessage("writeVer"); if (!reader.isLastRead()) @@ -502,7 +547,7 @@ public Collection updateCounters() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 29; + return 30; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishResponse.java index d777a2201a149..b72b6af4a8776 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishResponse.java @@ -31,11 +31,13 @@ import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; /** * DHT transaction finish response. */ -public class GridDhtTxFinishResponse extends GridDistributedTxFinishResponse { +public class GridDhtTxFinishResponse extends GridDistributedTxFinishResponse implements ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -55,6 +57,17 @@ public class GridDhtTxFinishResponse extends GridDistributedTxFinishResponse { /** Cache return value. 
*/ private GridCacheReturn retVal; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). */ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Empty constructor required by {@link Externalizable}. */ @@ -158,6 +171,36 @@ public GridCacheReturn returnValue() { return retVal; } + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -186,6 +229,12 @@ public GridCacheReturn returnValue() { writer.incrementState(); case 9: + if (!writer.writeLong("reqTimeData", reqTimeData)) + return false; + + writer.incrementState(); + + case 10: if (!writer.writeMessage("retVal", retVal)) return false; @@ -224,6 +273,14 @@ public GridCacheReturn returnValue() { reader.incrementState(); case 9: + reqTimeData = reader.readLong("reqTimeData"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 10: retVal = reader.readMessage("retVal"); if (!reader.isLastRead()) @@ -243,7 +300,7 @@ public GridCacheReturn 
returnValue() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 10; + return 11; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java index 2665cf3ee5532..bd4ee14ac88a5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java @@ -459,7 +459,7 @@ private void addMapping( } /** {@inheritDoc} */ - @Override public void addInvalidPartition(GridCacheContext ctx, int part) { + @Override public void addInvalidPartition(int cacheId, int part) { assert false : "DHT transaction encountered invalid partition [part=" + part + ", tx=" + this + ']'; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java index 38c532b93a3b3..3e85a41f55a27 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java @@ -73,6 +73,7 @@ import org.apache.ignite.internal.processors.dr.GridDrType; import org.apache.ignite.internal.processors.timeout.GridTimeoutObjectAdapter; import org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException; +import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException; import org.apache.ignite.internal.util.GridLeanSet; import org.apache.ignite.internal.util.future.GridCompoundFuture; import 
org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -100,6 +101,7 @@ import static org.apache.ignite.internal.processors.cache.GridCacheOperation.READ; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.TRANSFORM; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.UPDATE; +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; import static org.apache.ignite.transactions.TransactionState.PREPARED; /** @@ -694,7 +696,12 @@ private boolean mapIfLocked() { } if (forceKeysFut == null || (forceKeysFut.isDone() && forceKeysFut.error() == null)) - prepare0(); + try { + prepare0(); + } + catch (IgniteTxRollbackCheckedException e) { + onError(e); + } else { forceKeysFut.listen(new CI1>() { @Override public void apply(IgniteInternalFuture f) { @@ -894,6 +901,8 @@ private GridNearTxPrepareResponse createPrepareResponse(@Nullable Throwable prep tx.onePhaseCommit(), tx.activeCachesDeploymentEnabled()); + copyReqTimestamps(res); + if (prepErr == null) { if (tx.needReturnValue() || tx.nearOnOriginatingNode() || tx.hasInterceptor()) addDhtValues(res); @@ -916,6 +925,12 @@ private GridNearTxPrepareResponse createPrepareResponse(@Nullable Throwable prep return res; } + /** */ + private void copyReqTimestamps(GridNearTxPrepareResponse res) { + res.reqReceivedTimestamp(req == null ? INVALID_TIMESTAMP : req.receiveTimestamp()); + res.reqSentTimestamp(req == null ? INVALID_TIMESTAMP :req.sendTimestamp()); + } + /** * @param res Response being sent. 
*/ @@ -1218,7 +1233,7 @@ private IgniteTxOptimisticCheckedException versionCheckError(IgniteTxEntry entry /** * */ - private void prepare0() { + private void prepare0() throws IgniteTxRollbackCheckedException { boolean error = false; try { @@ -1391,8 +1406,12 @@ private void sendPrepareRequests() { break; } - catch (GridCacheEntryRemovedException ignore) { - assert false : "Got removed exception on entry with dht local candidate: " + entry; + catch (GridCacheEntryRemovedException e) { + log.error("Got removed exception on entry with dht local candidate. Transaction will be " + + "rolled back. Entry: " + entry + " tx: " + CU.txDump(tx), e); + + // Entry was unlocked by concurrent rollback. + onError(tx.rollbackException()); } idx++; @@ -1413,8 +1432,12 @@ private void sendPrepareRequests() { break; } - catch (GridCacheEntryRemovedException ignore) { - assert false : "Got removed exception on entry with dht local candidate: " + entry; + catch (GridCacheEntryRemovedException e) { + log.error("Got removed exception on entry with dht local candidate. Transaction will be " + + "rolled back. Entry: " + entry + " tx: " + CU.txDump(tx), e); + + // Entry was unlocked by concurrent rollback. + onError(tx.rollbackException()); } } } @@ -1488,8 +1511,13 @@ private void sendPrepareRequests() { } break; - } catch (GridCacheEntryRemovedException ignore) { - assert false : "Got removed exception on entry with dht local candidate: " + entry; + } + catch (GridCacheEntryRemovedException e) { + log.error("Got removed exception on entry with dht local candidate. Transaction will be " + + "rolled back. Entry: " + entry + " tx: " + CU.txDump(tx), e); + + // Entry was unlocked by concurrent rollback. 
+ onError(tx.rollbackException()); } } } @@ -1557,7 +1585,7 @@ private void map(IgniteTxEntry entry) { List dhtNodes = dht.topology().nodes(cached.partition(), tx.topologyVersion()); assert !dhtNodes.isEmpty() && dhtNodes.get(0).id().equals(cctx.localNodeId()) : - "localNode = " + cctx.localNodeId() + ", dhtNodes = " + dhtNodes; + "cacheId=" + cacheCtx.cacheId() + ", localNode = " + cctx.localNodeId() + ", dhtNodes = " + dhtNodes; if (log.isDebugEnabled()) log.debug("Mapping entry to DHT nodes [nodes=" + U.toShortString(dhtNodes) + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareRequest.java index d14586fc4ccc2..270fcf9241e17 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareRequest.java @@ -421,91 +421,91 @@ public boolean skipCompletedVersion() { } switch (writer.state()) { - case 21: + case 22: if (!writer.writeIgniteUuid("futId", futId)) return false; writer.incrementState(); - case 22: + case 23: if (!writer.writeBitSet("invalidateNearEntries", invalidateNearEntries)) return false; writer.incrementState(); - case 23: + case 24: if (!writer.writeInt("miniId", miniId)) return false; writer.incrementState(); - case 24: + case 25: if (!writer.writeUuid("nearNodeId", nearNodeId)) return false; writer.incrementState(); - case 25: + case 26: if (!writer.writeCollection("nearWrites", nearWrites, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 26: + case 27: if (!writer.writeMessage("nearXidVer", nearXidVer)) return false; writer.incrementState(); - case 27: + case 28: if (!writer.writeCollection("ownedKeys", ownedKeys, MessageCollectionItemType.MSG)) return false; 
writer.incrementState(); - case 28: + case 29: if (!writer.writeCollection("ownedVals", ownedVals, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 29: + case 30: if (!writer.writeBitSet("preloadKeys", preloadKeys)) return false; writer.incrementState(); - case 30: + case 31: if (!writer.writeBoolean("skipCompletedVers", skipCompletedVers)) return false; writer.incrementState(); - case 31: + case 32: if (!writer.writeUuid("subjId", subjId)) return false; writer.incrementState(); - case 32: + case 33: if (!writer.writeInt("taskNameHash", taskNameHash)) return false; writer.incrementState(); - case 33: + case 34: if (!writer.writeAffinityTopologyVersion("topVer", topVer)) return false; writer.incrementState(); - case 34: + case 35: if (!writer.writeString("txLbl", txLbl)) return false; writer.incrementState(); - case 35: + case 36: if (!writer.writeCollection("updCntrs", updCntrs, MessageCollectionItemType.MSG)) return false; @@ -527,7 +527,7 @@ public boolean skipCompletedVersion() { return false; switch (reader.state()) { - case 21: + case 22: futId = reader.readIgniteUuid("futId"); if (!reader.isLastRead()) @@ -535,7 +535,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 22: + case 23: invalidateNearEntries = reader.readBitSet("invalidateNearEntries"); if (!reader.isLastRead()) @@ -543,7 +543,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 23: + case 24: miniId = reader.readInt("miniId"); if (!reader.isLastRead()) @@ -551,7 +551,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 24: + case 25: nearNodeId = reader.readUuid("nearNodeId"); if (!reader.isLastRead()) @@ -559,7 +559,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 25: + case 26: nearWrites = reader.readCollection("nearWrites", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -567,7 +567,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); 
- case 26: + case 27: nearXidVer = reader.readMessage("nearXidVer"); if (!reader.isLastRead()) @@ -575,7 +575,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 27: + case 28: ownedKeys = reader.readCollection("ownedKeys", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -583,7 +583,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 28: + case 29: ownedVals = reader.readCollection("ownedVals", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -591,7 +591,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 29: + case 30: preloadKeys = reader.readBitSet("preloadKeys"); if (!reader.isLastRead()) @@ -599,7 +599,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 30: + case 31: skipCompletedVers = reader.readBoolean("skipCompletedVers"); if (!reader.isLastRead()) @@ -607,7 +607,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 31: + case 32: subjId = reader.readUuid("subjId"); if (!reader.isLastRead()) @@ -615,7 +615,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 32: + case 33: taskNameHash = reader.readInt("taskNameHash"); if (!reader.isLastRead()) @@ -623,7 +623,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 33: + case 34: topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) @@ -631,7 +631,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 34: + case 35: txLbl = reader.readString("txLbl"); if (!reader.isLastRead()) @@ -639,7 +639,7 @@ public boolean skipCompletedVersion() { reader.incrementState(); - case 35: + case 36: updCntrs = reader.readCollection("updCntrs", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -659,7 +659,7 @@ public boolean skipCompletedVersion() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 36; + return 37; } /** {@inheritDoc} */ diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareResponse.java index fcb14a34c58e0..b0ee0fc9f8561 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareResponse.java @@ -28,6 +28,7 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.GridDirectCollection; import org.apache.ignite.internal.GridDirectMap; +import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; @@ -41,11 +42,13 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; /** * DHT transaction prepare response. */ -public class GridDhtTxPrepareResponse extends GridDistributedTxPrepareResponse { +public class GridDhtTxPrepareResponse extends GridDistributedTxPrepareResponse implements ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -68,6 +71,18 @@ public class GridDhtTxPrepareResponse extends GridDistributedTxPrepareResponse { @GridDirectCollection(GridCacheEntryInfo.class) private List preloadEntries; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). 
*/ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). */ + private long reqTimeData = INVALID_TIMESTAMP; + + /** * Empty constructor required by {@link Externalizable}. */ @@ -230,6 +245,37 @@ public void addPreloadEntry(GridCacheEntryInfo info) { } } + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -275,6 +321,12 @@ public void addPreloadEntry(GridCacheEntryInfo info) { writer.incrementState(); + case 16: + if (!writer.writeLong("reqTimeData", reqTimeData)) + return false; + + writer.incrementState(); + } return true; @@ -331,6 +383,14 @@ public void addPreloadEntry(GridCacheEntryInfo info) { reader.incrementState(); + case 16: + reqTimeData = reader.readLong("reqTimeData"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + } return reader.afterMessageRead(GridDhtTxPrepareResponse.class); @@ -343,7 +403,7 @@ public void addPreloadEntry(GridCacheEntryInfo info) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 16; + return 17; 
} /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxRemote.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxRemote.java index 9518a014b5029..583c4d710d34f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxRemote.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxRemote.java @@ -302,10 +302,10 @@ IgniteUuid remoteFutureId() { } /** {@inheritDoc} */ - @Override public void addInvalidPartition(GridCacheContext cacheCtx, int part) { - super.addInvalidPartition(cacheCtx, part); + @Override public void addInvalidPartition(int cacheId, int part) { + super.addInvalidPartition(cacheId, part); - txState.invalidPartition(part); + txState.invalidPartition(cacheId, part, xidVersion()); } /** @@ -331,7 +331,7 @@ public void addWrite(IgniteTxEntry entry, ClassLoader ldr) throws IgniteCheckedE addExplicit(entry); } catch (GridDhtInvalidPartitionException e) { - addInvalidPartition(cacheCtx, e.partition()); + addInvalidPartition(cacheCtx.cacheId(), e.partition()); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/IgniteClusterReadOnlyException.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/IgniteClusterReadOnlyException.java new file mode 100644 index 0000000000000..bfafdaa2b1383 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/IgniteClusterReadOnlyException.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.distributed.dht; + +import org.apache.ignite.IgniteCheckedException; +import org.jetbrains.annotations.Nullable; + +/** + This exception is used to indicate that the cluster is in a read-only state + */ +public class IgniteClusterReadOnlyException extends IgniteCheckedException { + /** */ + private static final long serialVersionUID = 0L; + + /** + * Create empty exception. + */ + public IgniteClusterReadOnlyException() { + } + + /** + * Creates new exception with given error message. + * + * @param msg Error message. + */ + public IgniteClusterReadOnlyException(String msg) { + super(msg); + } + + /** + * Creates new exception with given cause. + * + * @param cause Cause. + */ + public IgniteClusterReadOnlyException(Throwable cause) { + super(cause); + } + + /** + * Creates new exception with given error message and cause. + * + * @param msg Error message. + * @param cause Cause. 
+ */ + public IgniteClusterReadOnlyException(String msg, @Nullable Throwable cause) { + super(msg, cause); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastRequest.java index 7ce0a879b4b0f..63ae825cea763 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastRequest.java @@ -20,12 +20,13 @@ import java.nio.ByteBuffer; import java.util.Collection; import org.apache.ignite.internal.GridDirectCollection; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.GridCacheIdMessage; -import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionUpdateCountersMessage; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.jetbrains.annotations.NotNull; /** */ public class PartitionCountersNeighborcastRequest extends GridCacheIdMessage { @@ -39,15 +40,27 @@ public class PartitionCountersNeighborcastRequest extends GridCacheIdMessage { /** */ private IgniteUuid futId; + /** Topology version. 
*/ + private AffinityTopologyVersion topVer; + /** */ public PartitionCountersNeighborcastRequest() { } /** */ public PartitionCountersNeighborcastRequest( - Collection updCntrs, IgniteUuid futId) { + Collection updCntrs, + IgniteUuid futId, + @NotNull AffinityTopologyVersion topVer + ) { this.updCntrs = updCntrs; this.futId = futId; + this.topVer = topVer; + } + + /** {@inheritDoc} */ + @Override public AffinityTopologyVersion topologyVersion() { + return topVer; } /** @@ -86,6 +99,12 @@ public IgniteUuid futId() { writer.incrementState(); case 5: + if (!writer.writeAffinityTopologyVersion("topVer", topVer)) + return false; + + writer.incrementState(); + + case 6: if (!writer.writeCollection("updCntrs", updCntrs, MessageCollectionItemType.MSG)) return false; @@ -116,6 +135,14 @@ public IgniteUuid futId() { reader.incrementState(); case 5: + topVer = reader.readAffinityTopologyVersion("topVer"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 6: updCntrs = reader.readCollection("updCntrs", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -135,7 +162,7 @@ public IgniteUuid futId() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 6; + return 7; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastResponse.java index 093c40925e212..e21472ecee481 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastResponse.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache.distributed.dht; import java.nio.ByteBuffer; +import 
org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.GridCacheIdMessage; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.extensions.communication.MessageReader; @@ -31,13 +32,25 @@ public class PartitionCountersNeighborcastResponse extends GridCacheIdMessage { /** */ private IgniteUuid futId; + /** Topology version. */ + private AffinityTopologyVersion topVer; + /** */ public PartitionCountersNeighborcastResponse() { } /** */ - public PartitionCountersNeighborcastResponse(IgniteUuid futId) { + public PartitionCountersNeighborcastResponse( + IgniteUuid futId, + AffinityTopologyVersion topVer + ) { this.futId = futId; + this.topVer = topVer; + } + + /** {@inheritDoc} */ + @Override public AffinityTopologyVersion topologyVersion() { + return topVer; } /** @@ -68,6 +81,12 @@ public IgniteUuid futId() { writer.incrementState(); + case 5: + if (!writer.writeAffinityTopologyVersion("topVer", topVer)) + return false; + + writer.incrementState(); + } return true; @@ -92,6 +111,14 @@ public IgniteUuid futId() { reader.incrementState(); + case 5: + topVer = reader.readAffinityTopologyVersion("topVer"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + } return reader.afterMessageRead(PartitionCountersNeighborcastResponse.class); @@ -104,7 +131,7 @@ public IgniteUuid futId() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 5; + return 6; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicAbstractUpdateRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicAbstractUpdateRequest.java index 3e2f971e11aae..760305574b0fa 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicAbstractUpdateRequest.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicAbstractUpdateRequest.java @@ -34,13 +34,16 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableRequest; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; + /** * */ -public abstract class GridDhtAtomicAbstractUpdateRequest extends GridCacheIdMessage implements GridCacheDeployable { +public abstract class GridDhtAtomicAbstractUpdateRequest extends GridCacheIdMessage implements GridCacheDeployable, TimeLoggableRequest { /** Skip store flag bit mask. */ protected static final int DHT_ATOMIC_SKIP_STORE_FLAG_MASK = 0x01; @@ -97,6 +100,13 @@ public abstract class GridDhtAtomicAbstractUpdateRequest extends GridCacheIdMess /** Additional flags. */ protected byte flags; + /** @see TimeLoggableRequest#sendTimestamp(). */ + private long sendTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableRequest#receiveTimestamp(). */ + @GridDirectTransient + private long receiveTimestamp = INVALID_TIMESTAMP; + /** * Empty constructor required by {@link Externalizable}. 
*/ @@ -450,6 +460,26 @@ public final CacheWriteSynchronizationMode writeSynchronizationMode() { */ @Nullable public abstract Object[] invokeArguments(); + /** {@inheritDoc} */ + @Override public long sendTimestamp() { + return sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public void sendTimestamp(long sendTimestamp) { + this.sendTimestamp = sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public long receiveTimestamp() { + return receiveTimestamp; + } + + /** {@inheritDoc} */ + @Override public void receiveTimestamp(long receiveTimestamp) { + this.receiveTimestamp = receiveTimestamp; + } + /** * Sets flag mask. * @@ -472,7 +502,7 @@ final boolean isFlag(int mask) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 13; + return 14; } /** {@inheritDoc} */ @@ -515,30 +545,36 @@ final boolean isFlag(int mask) { writer.incrementState(); case 8: - if (!writer.writeUuid("subjId", subjId)) + if (!writer.writeLong("sendTimestamp", sendTimestamp)) return false; writer.incrementState(); case 9: - if (!writer.writeByte("syncMode", syncMode != null ? (byte)syncMode.ordinal() : -1)) + if (!writer.writeUuid("subjId", subjId)) return false; writer.incrementState(); case 10: - if (!writer.writeInt("taskNameHash", taskNameHash)) + if (!writer.writeByte("syncMode", syncMode != null ? 
(byte)syncMode.ordinal() : -1)) return false; writer.incrementState(); case 11: - if (!writer.writeAffinityTopologyVersion("topVer", topVer)) + if (!writer.writeInt("taskNameHash", taskNameHash)) return false; writer.incrementState(); case 12: + if (!writer.writeAffinityTopologyVersion("topVer", topVer)) + return false; + + writer.incrementState(); + + case 13: if (!writer.writeMessage("writeVer", writeVer)) return false; @@ -593,7 +629,7 @@ final boolean isFlag(int mask) { reader.incrementState(); case 8: - subjId = reader.readUuid("subjId"); + sendTimestamp = reader.readLong("sendTimestamp"); if (!reader.isLastRead()) return false; @@ -601,6 +637,14 @@ final boolean isFlag(int mask) { reader.incrementState(); case 9: + subjId = reader.readUuid("subjId"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 10: byte syncModeOrd; syncModeOrd = reader.readByte("syncMode"); @@ -612,7 +656,7 @@ final boolean isFlag(int mask) { reader.incrementState(); - case 10: + case 11: taskNameHash = reader.readInt("taskNameHash"); if (!reader.isLastRead()) @@ -620,7 +664,7 @@ final boolean isFlag(int mask) { reader.incrementState(); - case 11: + case 12: topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) @@ -628,7 +672,7 @@ final boolean isFlag(int mask) { reader.incrementState(); - case 12: + case 13: writeVer = reader.readMessage("writeVer"); if (!reader.isLastRead()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java index 5cdb2009207d9..d6c868ca91ea5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java @@ -120,6 +120,7 @@ 
import static org.apache.ignite.IgniteSystemProperties.IGNITE_ATOMIC_DEFERRED_ACK_BUFFER_SIZE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_ATOMIC_DEFERRED_ACK_TIMEOUT; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_ASYNC; +import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.PRIMARY_SYNC; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.DELETE; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.TRANSFORM; @@ -1675,6 +1676,10 @@ private void onForceKeysError(final UUID nodeId, false, ctx.deploymentEnabled()); + // For full sync mode response can be sent to node that didn't send request. + if (req.syncMode != FULL_SYNC) + res.copyTimestamps(req); + res.addFailedKeys(req.keys(), e); completionCb.apply(req, res); @@ -1699,6 +1704,10 @@ private void updateAllAsyncInternal0( false, ctx.deploymentEnabled()); + // For full sync mode response can be sent to node that didn't send request. + if (req.syncMode != FULL_SYNC) + res.copyTimestamps(req); + assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1); GridDhtAtomicAbstractUpdateFuture dhtFut = null; @@ -1712,7 +1721,7 @@ private void updateAllAsyncInternal0( // If batch store update is enabled, we need to lock all entries. // First, need to acquire locks on cache entries, then check filter. - List locked = lockEntries(req, req.topologyVersion());; + List locked = lockEntries(req, req.topologyVersion()); Collection> deleted = null; @@ -1745,7 +1754,7 @@ private void updateAllAsyncInternal0( // Can not wait for topology future since it will break // GridNearAtomicCheckUpdateRequest processing. 
remap = !top.topologyVersionFuture().exchangeDone() || - needRemap(req.topologyVersion(), top.readyTopologyVersion(), req.keys()); + needRemap(req.topologyVersion(), top.readyTopologyVersion()); } if (!remap) { @@ -1776,7 +1785,7 @@ private void updateAllAsyncInternal0( assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl; ((CacheObjectBinaryProcessorImpl)cacheObjProc) - .binaryContext().descriptorForClass(ex.cls(), false, false); + .binaryContext().registerClass(ex.cls(), true, false); } catch (UnregisteredBinaryTypeException ex) { if (ex.future() != null) { @@ -3170,6 +3179,8 @@ private void processCheckUpdateRequest(UUID nodeId, GridNearAtomicCheckUpdateReq false, false); + res.copyTimestamps(checkReq); + GridCacheReturn ret = new GridCacheReturn(false, true); res.returnValue(ret); @@ -3330,6 +3341,8 @@ else if (req.nearSize() > 0) { ctx.deploymentEnabled()); dhtRes.nearEvicted(nearEvicted); + + dhtRes.copyTimestamps(req); } } @@ -3362,12 +3375,16 @@ else if (req.nearSize() > 0) { req.partition(), req.futureId(), ctx.deploymentEnabled()); + + dhtRes.copyTimestamps(req); } if (dhtRes != null) sendDhtPrimaryResponse(nodeId, req, dhtRes); - else - sendDeferredUpdateResponse(req.partition(), nodeId, req.futureId()); + else { + sendDeferredUpdateResponse(req.partition(), nodeId, req.futureId(), req.sendTimestamp(), + req.receiveTimestamp()); + } } /** @@ -3405,8 +3422,10 @@ private void sendDhtPrimaryResponse(UUID nodeId, * @param part Partition. * @param primaryId Primary ID. * @param futId Future ID. + * @param reqSendTs Request send timestamp. + * @param reqReceiveTs Request receive timestamp. 
*/ - private void sendDeferredUpdateResponse(int part, UUID primaryId, long futId) { + private void sendDeferredUpdateResponse(int part, UUID primaryId, long futId, long reqSendTs, long reqReceiveTs) { Map resMap = defRes.get(); GridDhtAtomicDeferredUpdateResponse msg = resMap.get(primaryId); @@ -3435,6 +3454,9 @@ private void sendDeferredUpdateResponse(int part, UUID primaryId, long futId) { if (futIds.size() >= DEFERRED_UPDATE_RESPONSE_BUFFER_SIZE) { resMap.remove(primaryId); + msg.reqSentTimestamp(reqSendTs); + msg.reqReceivedTimestamp(reqReceiveTs); + sendDeferredUpdateResponse(primaryId, msg); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicDeferredUpdateResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicDeferredUpdateResponse.java index ee5eac15a6c45..898ff51e33650 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicDeferredUpdateResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicDeferredUpdateResponse.java @@ -30,12 +30,14 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; import org.jetbrains.annotations.Nullable; /** * Deferred dht atomic update response. 
*/ -public class GridDhtAtomicDeferredUpdateResponse extends GridCacheIdMessage implements GridCacheDeployable { +public class GridDhtAtomicDeferredUpdateResponse extends GridCacheIdMessage implements GridCacheDeployable, ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -50,6 +52,17 @@ public class GridDhtAtomicDeferredUpdateResponse extends GridCacheIdMessage impl @GridToStringExclude private GridTimeoutObject timeoutSnd; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). */ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Empty constructor required by {@link Externalizable} */ @@ -104,6 +117,36 @@ GridLongList futureIds() { return ctx.atomicMessageLogger(); } + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -125,6 +168,12 @@ GridLongList futureIds() { writer.incrementState(); + case 5: + if (!writer.writeLong("reqTimeData", reqTimeData)) + return false; + + 
writer.incrementState(); + } return true; @@ -149,6 +198,14 @@ GridLongList futureIds() { reader.incrementState(); + case 5: + reqTimeData = reader.readLong("reqTimeData"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + } return reader.afterMessageRead(GridDhtAtomicDeferredUpdateResponse.class); @@ -161,7 +218,7 @@ GridLongList futureIds() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 5; + return 6; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicSingleUpdateRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicSingleUpdateRequest.java index 392ffc54b9bec..f8e6298586872 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicSingleUpdateRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicSingleUpdateRequest.java @@ -366,25 +366,25 @@ private void near(boolean near) { } switch (writer.state()) { - case 13: + case 14: if (!writer.writeMessage("key", key)) return false; writer.incrementState(); - case 14: + case 15: if (!writer.writeMessage("prevVal", prevVal)) return false; writer.incrementState(); - case 15: + case 16: if (!writer.writeLong("updateCntr", updateCntr)) return false; writer.incrementState(); - case 16: + case 17: if (!writer.writeMessage("val", val)) return false; @@ -406,7 +406,7 @@ private void near(boolean near) { return false; switch (reader.state()) { - case 13: + case 14: key = reader.readMessage("key"); if (!reader.isLastRead()) @@ -414,7 +414,7 @@ private void near(boolean near) { reader.incrementState(); - case 14: + case 15: prevVal = reader.readMessage("prevVal"); if (!reader.isLastRead()) @@ -422,7 +422,7 @@ private void near(boolean near) { reader.incrementState(); - case 15: + case 16: 
updateCntr = reader.readLong("updateCntr"); if (!reader.isLastRead()) @@ -430,7 +430,7 @@ private void near(boolean near) { reader.incrementState(); - case 16: + case 17: val = reader.readMessage("val"); if (!reader.isLastRead()) @@ -480,7 +480,7 @@ private void finishUnmarshalObject(@Nullable CacheObject obj, GridCacheContext c /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 17; + return 18; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicUpdateRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicUpdateRequest.java index 56e0058084d28..ca8b98c782856 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicUpdateRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicUpdateRequest.java @@ -557,97 +557,97 @@ else if (conflictVers != null) } switch (writer.state()) { - case 13: + case 14: if (!writer.writeMessage("conflictExpireTimes", conflictExpireTimes)) return false; writer.incrementState(); - case 14: + case 15: if (!writer.writeCollection("conflictVers", conflictVers, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 15: + case 16: if (!writer.writeCollection("entryProcessorsBytes", entryProcessorsBytes, MessageCollectionItemType.BYTE_ARR)) return false; writer.incrementState(); - case 16: + case 17: if (!writer.writeBoolean("forceTransformBackups", forceTransformBackups)) return false; writer.incrementState(); - case 17: + case 18: if (!writer.writeObjectArray("invokeArgsBytes", invokeArgsBytes, MessageCollectionItemType.BYTE_ARR)) return false; writer.incrementState(); - case 18: + case 19: if (!writer.writeCollection("keys", keys, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 19: + case 
20: if (!writer.writeCollection("nearEntryProcessorsBytes", nearEntryProcessorsBytes, MessageCollectionItemType.BYTE_ARR)) return false; writer.incrementState(); - case 20: + case 21: if (!writer.writeMessage("nearExpireTimes", nearExpireTimes)) return false; writer.incrementState(); - case 21: + case 22: if (!writer.writeCollection("nearKeys", nearKeys, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 22: + case 23: if (!writer.writeMessage("nearTtls", nearTtls)) return false; writer.incrementState(); - case 23: + case 24: if (!writer.writeCollection("nearVals", nearVals, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 24: + case 25: if (!writer.writeMessage("obsoleteIndexes", obsoleteIndexes)) return false; writer.incrementState(); - case 25: + case 26: if (!writer.writeCollection("prevVals", prevVals, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 26: + case 27: if (!writer.writeMessage("ttls", ttls)) return false; writer.incrementState(); - case 27: + case 28: if (!writer.writeMessage("updateCntrs", updateCntrs)) return false; writer.incrementState(); - case 28: + case 29: if (!writer.writeCollection("vals", vals, MessageCollectionItemType.MSG)) return false; @@ -669,7 +669,7 @@ else if (conflictVers != null) return false; switch (reader.state()) { - case 13: + case 14: conflictExpireTimes = reader.readMessage("conflictExpireTimes"); if (!reader.isLastRead()) @@ -677,7 +677,7 @@ else if (conflictVers != null) reader.incrementState(); - case 14: + case 15: conflictVers = reader.readCollection("conflictVers", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -685,7 +685,7 @@ else if (conflictVers != null) reader.incrementState(); - case 15: + case 16: entryProcessorsBytes = reader.readCollection("entryProcessorsBytes", MessageCollectionItemType.BYTE_ARR); if (!reader.isLastRead()) @@ -693,7 +693,7 @@ else if (conflictVers != null) reader.incrementState(); - case 
16: + case 17: forceTransformBackups = reader.readBoolean("forceTransformBackups"); if (!reader.isLastRead()) @@ -701,7 +701,7 @@ else if (conflictVers != null) reader.incrementState(); - case 17: + case 18: invokeArgsBytes = reader.readObjectArray("invokeArgsBytes", MessageCollectionItemType.BYTE_ARR, byte[].class); if (!reader.isLastRead()) @@ -709,7 +709,7 @@ else if (conflictVers != null) reader.incrementState(); - case 18: + case 19: keys = reader.readCollection("keys", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -717,7 +717,7 @@ else if (conflictVers != null) reader.incrementState(); - case 19: + case 20: nearEntryProcessorsBytes = reader.readCollection("nearEntryProcessorsBytes", MessageCollectionItemType.BYTE_ARR); if (!reader.isLastRead()) @@ -725,7 +725,7 @@ else if (conflictVers != null) reader.incrementState(); - case 20: + case 21: nearExpireTimes = reader.readMessage("nearExpireTimes"); if (!reader.isLastRead()) @@ -733,7 +733,7 @@ else if (conflictVers != null) reader.incrementState(); - case 21: + case 22: nearKeys = reader.readCollection("nearKeys", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -741,7 +741,7 @@ else if (conflictVers != null) reader.incrementState(); - case 22: + case 23: nearTtls = reader.readMessage("nearTtls"); if (!reader.isLastRead()) @@ -749,7 +749,7 @@ else if (conflictVers != null) reader.incrementState(); - case 23: + case 24: nearVals = reader.readCollection("nearVals", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -757,7 +757,7 @@ else if (conflictVers != null) reader.incrementState(); - case 24: + case 25: obsoleteIndexes = reader.readMessage("obsoleteIndexes"); if (!reader.isLastRead()) @@ -765,7 +765,7 @@ else if (conflictVers != null) reader.incrementState(); - case 25: + case 26: prevVals = reader.readCollection("prevVals", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -773,7 +773,7 @@ else if (conflictVers != null) reader.incrementState(); - case 26: + 
case 27: ttls = reader.readMessage("ttls"); if (!reader.isLastRead()) @@ -781,7 +781,7 @@ else if (conflictVers != null) reader.incrementState(); - case 27: + case 28: updateCntrs = reader.readMessage("updateCntrs"); if (!reader.isLastRead()) @@ -789,7 +789,7 @@ else if (conflictVers != null) reader.incrementState(); - case 28: + case 29: vals = reader.readCollection("vals", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -815,7 +815,7 @@ else if (conflictVers != null) /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 29; + return 30; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicUpdateResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicUpdateResponse.java index 21efbb1350b3d..ce4e231d3cc27 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicUpdateResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicUpdateResponse.java @@ -24,6 +24,7 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.GridDirectCollection; +import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheDeployable; import org.apache.ignite.internal.processors.cache.GridCacheIdMessage; @@ -34,11 +35,13 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import 
org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; /** * DHT atomic cache backup update response. */ -public class GridDhtAtomicUpdateResponse extends GridCacheIdMessage implements GridCacheDeployable { +public class GridDhtAtomicUpdateResponse extends GridCacheIdMessage implements GridCacheDeployable, ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -59,6 +62,17 @@ public class GridDhtAtomicUpdateResponse extends GridCacheIdMessage implements G /** */ private int partId; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). */ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Empty constructor required by {@link Externalizable}. */ @@ -164,6 +178,36 @@ public void nearEvicted(List nearEvicted) { return ctx.atomicMessageLogger(); } + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -203,6 +247,12 @@ public void nearEvicted(List nearEvicted) { 
writer.incrementState(); + case 8: + if (!writer.writeLong("reqTimeData", reqTimeData)) + return false; + + writer.incrementState(); + } return true; @@ -251,6 +301,14 @@ public void nearEvicted(List nearEvicted) { reader.incrementState(); + case 8: + reqTimeData = reader.readLong("reqTimeData"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + } return reader.afterMessageRead(GridDhtAtomicUpdateResponse.class); @@ -263,7 +321,7 @@ public void nearEvicted(List nearEvicted) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 8; + return 9; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateFuture.java index 63f103801531f..3132a38a7dbf1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateFuture.java @@ -463,6 +463,8 @@ final GridNearAtomicUpdateResponse primaryFailedResponse(GridNearAtomicAbstractU true, cctx.deploymentEnabled()); + copyReqTimestamp(req, res); + ClusterTopologyCheckedException e = new ClusterTopologyCheckedException("Primary node left grid " + "before response is received: " + req.nodeId()); @@ -485,11 +487,20 @@ final void onSendError(GridNearAtomicAbstractUpdateRequest req, IgniteCheckedExc e instanceof ClusterTopologyCheckedException, cctx.deploymentEnabled()); + copyReqTimestamp(req, res); + res.addFailedKeys(req.keys(), e); onPrimaryResponse(req.nodeId(), res, true); } + /** */ + private void copyReqTimestamp(GridNearAtomicAbstractUpdateRequest req, GridNearAtomicUpdateResponse res) { + // For full sync mode response can be sent to node that 
didn't send request. + if (req.syncMode != FULL_SYNC) + res.copyTimestamps(req); + } + /** * @param req Request. * @param e Error. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateRequest.java index 64fe1eef8fe6c..0ad05de501f79 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicAbstractUpdateRequest.java @@ -38,13 +38,16 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableRequest; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; + /** * */ -public abstract class GridNearAtomicAbstractUpdateRequest extends GridCacheIdMessage implements GridCacheDeployable { +public abstract class GridNearAtomicAbstractUpdateRequest extends GridCacheIdMessage implements GridCacheDeployable, TimeLoggableRequest { /** Message index. */ public static final int CACHE_MSG_IDX = nextIndexId(); @@ -102,6 +105,13 @@ public abstract class GridNearAtomicAbstractUpdateRequest extends GridCacheIdMes @GridDirectTransient private GridNearAtomicUpdateResponse res; + /** @see TimeLoggableRequest#sendTimestamp(). */ + private long sendTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableRequest#receiveTimestamp(). 
*/ + @GridDirectTransient + private long receiveTimestamp = INVALID_TIMESTAMP; + /** * */ @@ -526,9 +536,29 @@ abstract void addUpdateEntry(KeyCacheObject key, */ public abstract KeyCacheObject key(int idx); + /** {@inheritDoc} */ + @Override public long sendTimestamp() { + return sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public void sendTimestamp(long sendTimestamp) { + this.sendTimestamp = sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public long receiveTimestamp() { + return receiveTimestamp; + } + + /** {@inheritDoc} */ + @Override public void receiveTimestamp(long receiveTimestamp) { + this.receiveTimestamp = receiveTimestamp; + } + /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 11; + return 12; } /** {@inheritDoc} */ @@ -565,24 +595,30 @@ abstract void addUpdateEntry(KeyCacheObject key, writer.incrementState(); case 7: - if (!writer.writeUuid("subjId", subjId)) + if (!writer.writeLong("sendTimestamp", sendTimestamp)) return false; writer.incrementState(); case 8: - if (!writer.writeByte("syncMode", syncMode != null ? (byte)syncMode.ordinal() : -1)) + if (!writer.writeUuid("subjId", subjId)) return false; writer.incrementState(); case 9: - if (!writer.writeInt("taskNameHash", taskNameHash)) + if (!writer.writeByte("syncMode", syncMode != null ? 
(byte)syncMode.ordinal() : -1)) return false; writer.incrementState(); case 10: + if (!writer.writeInt("taskNameHash", taskNameHash)) + return false; + + writer.incrementState(); + + case 11: if (!writer.writeAffinityTopologyVersion("topVer", topVer)) return false; @@ -633,7 +669,7 @@ abstract void addUpdateEntry(KeyCacheObject key, reader.incrementState(); case 7: - subjId = reader.readUuid("subjId"); + sendTimestamp = reader.readLong("sendTimestamp"); if (!reader.isLastRead()) return false; @@ -641,6 +677,14 @@ abstract void addUpdateEntry(KeyCacheObject key, reader.incrementState(); case 8: + subjId = reader.readUuid("subjId"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 9: byte syncModeOrd; syncModeOrd = reader.readByte("syncMode"); @@ -652,7 +696,7 @@ abstract void addUpdateEntry(KeyCacheObject key, reader.incrementState(); - case 9: + case 10: taskNameHash = reader.readInt("taskNameHash"); if (!reader.isLastRead()) @@ -660,7 +704,7 @@ abstract void addUpdateEntry(KeyCacheObject key, reader.incrementState(); - case 10: + case 11: topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicCheckUpdateRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicCheckUpdateRequest.java index a19e28029b89f..16c256f8d357c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicCheckUpdateRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicCheckUpdateRequest.java @@ -23,11 +23,14 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import 
org.apache.ignite.plugin.extensions.communication.TimeLoggableRequest; + +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; /** * */ -public class GridNearAtomicCheckUpdateRequest extends GridCacheIdMessage { +public class GridNearAtomicCheckUpdateRequest extends GridCacheIdMessage implements TimeLoggableRequest { /** */ private static final long serialVersionUID = 0L; @@ -44,6 +47,13 @@ public class GridNearAtomicCheckUpdateRequest extends GridCacheIdMessage { /** */ private long futId; + /** @see TimeLoggableRequest#sendTimestamp(). */ + private long sendTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableRequest#receiveTimestamp(). */ + @GridDirectTransient + private long receiveTimestamp = INVALID_TIMESTAMP; + /** * */ @@ -94,6 +104,26 @@ GridNearAtomicAbstractUpdateRequest updateRequest() { return false; } + /** {@inheritDoc} */ + @Override public long sendTimestamp() { + return sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public void sendTimestamp(long sendTimestamp) { + this.sendTimestamp = sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public long receiveTimestamp() { + return receiveTimestamp; + } + + /** {@inheritDoc} */ + @Override public void receiveTimestamp(long receiveTimestamp) { + this.receiveTimestamp = receiveTimestamp; + } + /** {@inheritDoc} */ @Override public short directType() { return -50; @@ -101,7 +131,7 @@ GridNearAtomicAbstractUpdateRequest updateRequest() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 6; + return 7; } /** {@inheritDoc} */ @@ -131,6 +161,12 @@ GridNearAtomicAbstractUpdateRequest updateRequest() { writer.incrementState(); + case 6: + if (!writer.writeLong("sendTimestamp", sendTimestamp)) + return false; + + writer.incrementState(); + } return true; @@ -163,6 +199,14 @@ GridNearAtomicAbstractUpdateRequest updateRequest() { reader.incrementState(); + case 6: + sendTimestamp = reader.readLong("sendTimestamp"); + + 
if (!reader.isLastRead()) + return false; + + reader.incrementState(); + } return reader.afterMessageRead(GridNearAtomicCheckUpdateRequest.class); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicFullUpdateRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicFullUpdateRequest.java index 170586b22d589..d644f1dba2f62 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicFullUpdateRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicFullUpdateRequest.java @@ -435,55 +435,55 @@ else if (conflictVers != null) } switch (writer.state()) { - case 11: + case 12: if (!writer.writeMessage("conflictExpireTimes", conflictExpireTimes)) return false; writer.incrementState(); - case 12: + case 13: if (!writer.writeMessage("conflictTtls", conflictTtls)) return false; writer.incrementState(); - case 13: + case 14: if (!writer.writeCollection("conflictVers", conflictVers, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 14: + case 15: if (!writer.writeCollection("entryProcessorsBytes", entryProcessorsBytes, MessageCollectionItemType.BYTE_ARR)) return false; writer.incrementState(); - case 15: + case 16: if (!writer.writeByteArray("expiryPlcBytes", expiryPlcBytes)) return false; writer.incrementState(); - case 16: + case 17: if (!writer.writeObjectArray("filter", filter, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 17: + case 18: if (!writer.writeObjectArray("invokeArgsBytes", invokeArgsBytes, MessageCollectionItemType.BYTE_ARR)) return false; writer.incrementState(); - case 18: + case 19: if (!writer.writeCollection("keys", keys, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 19: + case 20: if 
(!writer.writeCollection("vals", vals, MessageCollectionItemType.MSG)) return false; @@ -505,7 +505,7 @@ else if (conflictVers != null) return false; switch (reader.state()) { - case 11: + case 12: conflictExpireTimes = reader.readMessage("conflictExpireTimes"); if (!reader.isLastRead()) @@ -513,7 +513,7 @@ else if (conflictVers != null) reader.incrementState(); - case 12: + case 13: conflictTtls = reader.readMessage("conflictTtls"); if (!reader.isLastRead()) @@ -521,7 +521,7 @@ else if (conflictVers != null) reader.incrementState(); - case 13: + case 14: conflictVers = reader.readCollection("conflictVers", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -529,7 +529,7 @@ else if (conflictVers != null) reader.incrementState(); - case 14: + case 15: entryProcessorsBytes = reader.readCollection("entryProcessorsBytes", MessageCollectionItemType.BYTE_ARR); if (!reader.isLastRead()) @@ -537,7 +537,7 @@ else if (conflictVers != null) reader.incrementState(); - case 15: + case 16: expiryPlcBytes = reader.readByteArray("expiryPlcBytes"); if (!reader.isLastRead()) @@ -545,7 +545,7 @@ else if (conflictVers != null) reader.incrementState(); - case 16: + case 17: filter = reader.readObjectArray("filter", MessageCollectionItemType.MSG, CacheEntryPredicate.class); if (!reader.isLastRead()) @@ -553,7 +553,7 @@ else if (conflictVers != null) reader.incrementState(); - case 17: + case 18: invokeArgsBytes = reader.readObjectArray("invokeArgsBytes", MessageCollectionItemType.BYTE_ARR, byte[].class); if (!reader.isLastRead()) @@ -561,7 +561,7 @@ else if (conflictVers != null) reader.incrementState(); - case 18: + case 19: keys = reader.readCollection("keys", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -569,7 +569,7 @@ else if (conflictVers != null) reader.incrementState(); - case 19: + case 20: vals = reader.readCollection("vals", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -601,7 +601,7 @@ else if (conflictVers != null) /** {@inheritDoc} 
*/ @Override public byte fieldsCount() { - return 20; + return 21; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFilterRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFilterRequest.java index c7076988a659f..88365ca861913 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFilterRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFilterRequest.java @@ -28,7 +28,6 @@ import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheOperation; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; -import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; @@ -155,7 +154,7 @@ public GridNearAtomicSingleUpdateFilterRequest() { } switch (writer.state()) { - case 13: + case 14: if (!writer.writeObjectArray("filter", filter, MessageCollectionItemType.MSG)) return false; @@ -177,7 +176,7 @@ public GridNearAtomicSingleUpdateFilterRequest() { return false; switch (reader.state()) { - case 13: + case 14: filter = reader.readObjectArray("filter", MessageCollectionItemType.MSG, CacheEntryPredicate.class); if (!reader.isLastRead()) @@ -197,7 +196,7 @@ public GridNearAtomicSingleUpdateFilterRequest() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 14; + return 15; } /** {@inheritDoc} */ diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateInvokeRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateInvokeRequest.java index ee3d2a4fe036a..1106e006b80cc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateInvokeRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateInvokeRequest.java @@ -225,13 +225,13 @@ public GridNearAtomicSingleUpdateInvokeRequest() { } switch (writer.state()) { - case 13: + case 14: if (!writer.writeByteArray("entryProcessorBytes", entryProcessorBytes)) return false; writer.incrementState(); - case 14: + case 15: if (!writer.writeObjectArray("invokeArgsBytes", invokeArgsBytes, MessageCollectionItemType.BYTE_ARR)) return false; @@ -253,7 +253,7 @@ public GridNearAtomicSingleUpdateInvokeRequest() { return false; switch (reader.state()) { - case 13: + case 14: entryProcessorBytes = reader.readByteArray("entryProcessorBytes"); if (!reader.isLastRead()) @@ -261,7 +261,7 @@ public GridNearAtomicSingleUpdateInvokeRequest() { reader.incrementState(); - case 14: + case 15: invokeArgsBytes = reader.readObjectArray("invokeArgsBytes", MessageCollectionItemType.BYTE_ARR, byte[].class); if (!reader.isLastRead()) @@ -276,7 +276,7 @@ public GridNearAtomicSingleUpdateInvokeRequest() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 15; + return 16; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateRequest.java index 83ec4565f49ce..a843493dc2b07 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateRequest.java @@ -247,13 +247,13 @@ public GridNearAtomicSingleUpdateRequest() { } switch (writer.state()) { - case 11: + case 12: if (!writer.writeMessage("key", key)) return false; writer.incrementState(); - case 12: + case 13: if (!writer.writeMessage("val", val)) return false; @@ -275,7 +275,7 @@ public GridNearAtomicSingleUpdateRequest() { return false; switch (reader.state()) { - case 11: + case 12: key = reader.readMessage("key"); if (!reader.isLastRead()) @@ -283,7 +283,7 @@ public GridNearAtomicSingleUpdateRequest() { reader.incrementState(); - case 12: + case 13: val = reader.readMessage("val"); if (!reader.isLastRead()) @@ -311,7 +311,7 @@ public GridNearAtomicSingleUpdateRequest() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 13; + return 14; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateResponse.java index 6dccd8b9cdc9b..9fb71e03a54f8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateResponse.java @@ -40,12 +40,14 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import 
org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; import org.jetbrains.annotations.Nullable; /** * DHT atomic cache near update response. */ -public class GridNearAtomicUpdateResponse extends GridCacheIdMessage implements GridCacheDeployable { +public class GridNearAtomicUpdateResponse extends GridCacheIdMessage implements GridCacheDeployable, ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -84,6 +86,17 @@ public class GridNearAtomicUpdateResponse extends GridCacheIdMessage implements @GridDirectTransient private boolean nodeLeft; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). */ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Empty constructor required by {@link Externalizable}. 
*/ @@ -390,6 +403,37 @@ synchronized void addFailedKeys(Collection keys, Throwable e) { return ctx.atomicMessageLogger(); } + + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -442,6 +486,12 @@ synchronized void addFailedKeys(Collection keys, Throwable e) { writer.incrementState(); case 10: + if (!writer.writeLong("reqTimeData", reqTimeData)) + return false; + + writer.incrementState(); + + case 11: if (!writer.writeMessage("ret", ret)) return false; @@ -512,6 +562,14 @@ synchronized void addFailedKeys(Collection keys, Throwable e) { reader.incrementState(); case 10: + reqTimeData = reader.readLong("reqTimeData"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 11: ret = reader.readMessage("ret"); if (!reader.isLastRead()) @@ -531,7 +589,7 @@ synchronized void addFailedKeys(Collection keys, Throwable e) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 11; + return 12; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedCache.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedCache.java index 02493dfc23c56..aafd9b57f9620 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedCache.java @@ -131,7 +131,7 @@ public GridDhtColocatedCache(GridCacheContext ctx, GridCacheConcurrentMap ctx.io().addCacheHandler(ctx.cacheId(), GridNearLockResponse.class, new CI2() { @Override public void apply(UUID nodeId, GridNearLockResponse res) { - processLockResponse(nodeId, res); + processNearLockResponse(nodeId, res); } }); } @@ -1106,7 +1106,7 @@ else if (!b) * @param nodeId Node ID. * @param res Response. */ - private void processLockResponse(UUID nodeId, GridNearLockResponse res) { + private void processNearLockResponse(UUID nodeId, GridNearLockResponse res) { if (txLockMsgLog.isDebugEnabled()) txLockMsgLog.debug("Received near lock response [txId=" + res.version() + ", node=" + nodeId + ']'); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java index d8dea400242f3..d281a5dac7bea 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java @@ -156,9 +156,6 @@ public final class GridDhtColocatedLockFuture extends GridCacheCompoundIdentityF @SuppressWarnings("UnusedDeclaration") private volatile int done; - /** Trackable flag (here may be non-volatile). */ - private boolean trackable; - /** TTL for create operation. 
*/ private final long createTtl; @@ -183,6 +180,9 @@ public final class GridDhtColocatedLockFuture extends GridCacheCompoundIdentityF /** {@code True} when mappings are ready for processing. */ private boolean mappingsReady; + /** */ + private boolean trackable = true; + /** * @param cctx Registry. * @param keys Keys to lock. @@ -777,8 +777,23 @@ void map() { topVer = tx.topologyVersionSnapshot(); if (topVer != null) { + AffinityTopologyVersion lastChangeVer = cctx.shared().exchange().lastAffinityChangedTopologyVersion(topVer); + + IgniteInternalFuture affFut = cctx.shared().exchange().affinityReadyFuture(lastChangeVer); + + if (!affFut.isDone()) { + try { + affFut.get(); + } + catch (IgniteCheckedException e) { + onDone(err); + + return; + } + } + for (GridDhtTopologyFuture fut : cctx.shared().exchange().exchangeFutures()) { - if (fut.exchangeDone() && fut.topologyVersion().equals(topVer)) { + if (fut.exchangeDone() && fut.topologyVersion().equals(lastChangeVer)) { Throwable err = fut.validateCache(cctx, recovery, read, null, keys); if (err != null) { @@ -844,7 +859,7 @@ private void mapOnTopology(final boolean remap, @Nullable final Runnable c) { if (remap) { if (tx != null) - tx.onRemap(topVer); + tx.onRemap(topVer, true); synchronized (this) { this.topVer = topVer; @@ -1694,94 +1709,113 @@ void onResult(GridNearLockResponse res) { if (res.clientRemapVersion() != null) { assert cctx.kernalContext().clientNode(); - IgniteInternalFuture affFut = - cctx.shared().exchange().affinityReadyFuture(res.clientRemapVersion()); + if (res.compatibleRemapVersion()) { + if (tx != null) { + tx.onRemap(res.clientRemapVersion(), false); + + // Use remapped version for all subsequent mappings. + synchronized (GridDhtColocatedLockFuture.this) { + for (GridNearLockMapping mapping : mappings) { + GridNearLockRequest req = mapping.request(); - cctx.time().waitAsync(affFut, tx == null ? 
0 : tx.remainingTime(), (e, timedOut) -> { - if (errorOrTimeoutOnTopologyVersion(e, timedOut)) - return; + assert req != null : mapping; - try { - remap(); - } - finally { - cctx.shared().txContextReset(); + req.topologyVersion(res.clientRemapVersion()); + } + } } - }); - } - else { - int i = 0; + } + else { + IgniteInternalFuture affFut = + cctx.shared().exchange().affinityReadyFuture(res.clientRemapVersion()); - for (KeyCacheObject k : keys) { - IgniteBiTuple oldValTup = valMap.get(k); + cctx.time().waitAsync(affFut, tx == null ? 0 : tx.remainingTime(), (e, timedOut) -> { + if (errorOrTimeoutOnTopologyVersion(e, timedOut)) + return; - CacheObject newVal = res.value(i); + try { + remap(); + } + finally { + cctx.shared().txContextReset(); + } + }); - GridCacheVersion dhtVer = res.dhtVersion(i); + return; + } + } - if (newVal == null) { - if (oldValTup != null) { - if (oldValTup.get1().equals(dhtVer)) - newVal = oldValTup.get2(); - } - } + int i = 0; - if (inTx()) { - IgniteTxEntry txEntry = tx.entry(cctx.txKey(k)); + for (KeyCacheObject k : keys) { + IgniteBiTuple oldValTup = valMap.get(k); - // In colocated cache we must receive responses only for detached entries. - assert txEntry.cached().detached() : txEntry; + CacheObject newVal = res.value(i); - txEntry.markLocked(); + GridCacheVersion dhtVer = res.dhtVersion(i); - GridDhtDetachedCacheEntry entry = (GridDhtDetachedCacheEntry)txEntry.cached(); + if (newVal == null) { + if (oldValTup != null) { + if (oldValTup.get1().equals(dhtVer)) + newVal = oldValTup.get2(); + } + } - if (res.dhtVersion(i) == null) { - onDone(new IgniteCheckedException("Failed to receive DHT version from remote node " + - "(will fail the lock): " + res)); + if (inTx()) { + IgniteTxEntry txEntry = tx.entry(cctx.txKey(k)); - return; - } + // In colocated cache we must receive responses only for detached entries. + assert txEntry.cached().detached() : txEntry; - // Set value to detached entry. 
- entry.resetFromPrimary(newVal, dhtVer); + txEntry.markLocked(); - tx.hasRemoteLocks(true); + GridDhtDetachedCacheEntry entry = (GridDhtDetachedCacheEntry)txEntry.cached(); - if (log.isDebugEnabled()) - log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']'); - } - else - cctx.mvcc().markExplicitOwner(cctx.txKey(k), threadId); - - if (retval && cctx.events().isRecordable(EVT_CACHE_OBJECT_READ)) { - cctx.events().addEvent(cctx.affinity().partition(k), - k, - tx, - null, - EVT_CACHE_OBJECT_READ, - newVal, - newVal != null, - null, - false, - CU.subjectId(tx, cctx.shared()), - null, - tx == null ? null : tx.resolveTaskName(), - keepBinary); + if (res.dhtVersion(i) == null) { + onDone(new IgniteCheckedException("Failed to receive DHT version from remote node " + + "(will fail the lock): " + res)); + + return; } - i++; - } + // Set value to detached entry. + entry.resetFromPrimary(newVal, dhtVer); - try { - proceedMapping(); + tx.hasRemoteLocks(true); + + if (log.isDebugEnabled()) + log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']'); } - catch (IgniteCheckedException e) { - onDone(e); + else + cctx.mvcc().markExplicitOwner(cctx.txKey(k), threadId); + + if (retval && cctx.events().isRecordable(EVT_CACHE_OBJECT_READ)) { + cctx.events().addEvent(cctx.affinity().partition(k), + k, + tx, + null, + EVT_CACHE_OBJECT_READ, + newVal, + newVal != null, + null, + false, + CU.subjectId(tx, cctx.shared()), + null, + tx == null ? 
null : tx.resolveTaskName(), + keepBinary); } - onDone(true); + i++; + } + + try { + proceedMapping(); } + catch (IgniteCheckedException e) { + onDone(e); + } + + onDone(true); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtDetachedCacheEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtDetachedCacheEntry.java index 346a992f3ac88..622055992856c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtDetachedCacheEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtDetachedCacheEntry.java @@ -65,10 +65,9 @@ public void resetFromPrimary(CacheObject val, GridCacheVersion ver) { } /** {@inheritDoc} */ - @Override protected boolean storeValue(CacheObject val, + @Override protected void storeValue(CacheObject val, long expireTime, GridCacheVersion ver) throws IgniteCheckedException { - return false; // No-op for detached entries, index is updated on primary nodes. } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CacheGroupAffinityMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CacheGroupAffinityMessage.java index c6abe892b0c53..26b726f560f13 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CacheGroupAffinityMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CacheGroupAffinityMessage.java @@ -158,9 +158,8 @@ static void createAffinityMessages( if (aff == null) { CacheGroupContext grp = cctx.cache().cacheGroup(grpId); - assert grp != null : "No cache group holder or cache group to create AffinityMessage" - + ". Requested group id: " + grpId - + ". 
Topology version: " + topVer; + if (grp == null) + return null; aff = grp.affinity(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysRequest.java index 80c45efc76805..14c6042069827 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysRequest.java @@ -22,6 +22,7 @@ import java.util.Collection; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.GridDirectCollection; +import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheDeployable; @@ -35,12 +36,15 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableRequest; + +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; /** * Force keys request. This message is sent by node while preloading to force * another node to put given keys into the next batch of transmitting entries. 
*/ -public class GridDhtForceKeysRequest extends GridCacheIdMessage implements GridCacheDeployable { +public class GridDhtForceKeysRequest extends GridCacheIdMessage implements GridCacheDeployable, TimeLoggableRequest { /** */ private static final long serialVersionUID = 0L; @@ -58,6 +62,13 @@ public class GridDhtForceKeysRequest extends GridCacheIdMessage implements GridC /** Topology version for which keys are requested. */ private AffinityTopologyVersion topVer; + /** @see TimeLoggableRequest#sendTimestamp(). */ + private long sendTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableRequest#receiveTimestamp(). */ + @GridDirectTransient + private long receiveTimestamp = INVALID_TIMESTAMP; + /** * Required by {@link Externalizable}. */ @@ -152,6 +163,26 @@ private int keyCount() { return keys.size(); } + /** {@inheritDoc} */ + @Override public long sendTimestamp() { + return sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public void sendTimestamp(long sendTimestamp) { + this.sendTimestamp = sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public long receiveTimestamp() { + return receiveTimestamp; + } + + /** {@inheritDoc} */ + @Override public void receiveTimestamp(long receiveTimestamp) { + this.receiveTimestamp = receiveTimestamp; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -186,6 +217,12 @@ private int keyCount() { writer.incrementState(); case 7: + if (!writer.writeLong("sendTimestamp", sendTimestamp)) + return false; + + writer.incrementState(); + + case 8: if (!writer.writeAffinityTopologyVersion("topVer", topVer)) return false; @@ -232,6 +269,14 @@ private int keyCount() { reader.incrementState(); case 7: + sendTimestamp = reader.readLong("sendTimestamp"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 8: topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) @@ -251,7 +296,7 @@ private int 
keyCount() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 8; + return 9; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysResponse.java index ab85df3e94622..6bfe389e18365 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysResponse.java @@ -39,11 +39,13 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; /** * Force keys response. Contains absent keys. */ -public class GridDhtForceKeysResponse extends GridCacheIdMessage implements GridCacheDeployable { +public class GridDhtForceKeysResponse extends GridCacheIdMessage implements GridCacheDeployable, ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -70,6 +72,17 @@ public class GridDhtForceKeysResponse extends GridCacheIdMessage implements Grid @GridDirectCollection(GridCacheEntryInfo.class) private List infos; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). 
*/ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Required by {@link Externalizable}. */ @@ -198,6 +211,36 @@ public void addInfo(GridCacheEntryInfo info) { return addDepInfo; } + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -243,6 +286,12 @@ public void addInfo(GridCacheEntryInfo info) { writer.incrementState(); + case 9: + if (!writer.writeLong("reqTimeData", reqTimeData)) + return false; + + writer.incrementState(); + } return true; @@ -299,6 +348,14 @@ public void addInfo(GridCacheEntryInfo info) { reader.incrementState(); + case 9: + reqTimeData = reader.readLong("reqTimeData"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + } return reader.afterMessageRead(GridDhtForceKeysResponse.class); @@ -311,7 +368,7 @@ public void addInfo(GridCacheEntryInfo info) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 9; + return 10; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemandMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemandMessage.java index 
bae326424d0fb..07eb4339bc914 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemandMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemandMessage.java @@ -22,6 +22,7 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.IgniteCodeGeneratingFail; +import org.apache.ignite.internal.managers.communication.GridIoPolicy; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.GridCacheGroupIdMessage; import org.apache.ignite.internal.processors.cache.GridCacheMessage; @@ -179,7 +180,9 @@ Object topic() { /** * @param topic Topic. + * @deprecated Obsolete (Kept to solve compatibility issues). */ + @Deprecated void topic(Object topic) { this.topic = topic; } @@ -376,6 +379,11 @@ public GridCacheMessage convertIfNeeded(IgniteProductVersion target) { return 10; } + /** {@inheritDoc} */ + @Override public byte policy() { + return GridIoPolicy.REBALANCE_POOL; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(GridDhtPartitionDemandMessage.class, this, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java index d0a63211489ea..96c1a9badcf71 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java @@ -17,19 +17,19 @@ package org.apache.ignite.internal.processors.cache.distributed.dht.preloader; -import 
java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; -import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.stream.Stream; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cache.CacheRebalanceMode; @@ -39,7 +39,6 @@ import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.IgniteInterruptedCheckedException; -import org.apache.ignite.internal.IgniteNodeAttributes; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.processors.affinity.AffinityAssignment; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; @@ -52,6 +51,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException; import org.apache.ignite.internal.processors.cache.GridCachePartitionExchangeManager; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.RebalanceStatisticsUtils.RebalanceFutureStatistics; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; @@ -73,10 +73,17 @@ import org.apache.ignite.spi.IgniteSpiException; import org.jetbrains.annotations.Nullable; +import static 
java.lang.System.currentTimeMillis; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.Objects.nonNull; +import static java.util.stream.Collectors.toMap; +import static java.util.stream.Collectors.toSet; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_OBJECT_LOADED; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_PART_LOADED; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STARTED; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STOPPED; +import static org.apache.ignite.internal.processors.cache.distributed.dht.preloader.RebalanceStatisticsUtils.rebalanceStatistics; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_PRELOAD; @@ -112,8 +119,12 @@ public class GridDhtPartitionDemander { /** Last exchange future. */ private volatile GridDhtPartitionsExchangeFuture lastExchangeFut; - /** Cached rebalance topics. */ - private final Map rebalanceTopics; + /** Cache rebalance topic. */ + private final Object rebalanceTopic; + + /** Futures involved in the last rebalance. For statistics. */ + @GridToStringExclude + private final Collection lastStatFutures = new ConcurrentLinkedQueue<>(); /** * @param grp Ccahe group. 
@@ -137,12 +148,7 @@ public GridDhtPartitionDemander(CacheGroupContext grp) { syncFut.onDone(); } - Map tops = new HashMap<>(); - - for (int idx = 0; idx < grp.shared().kernalContext().config().getRebalanceThreadPoolSize(); idx++) - tops.put(idx, GridCachePartitionExchangeManager.rebalanceTopic(idx)); - - rebalanceTopics = tops; + rebalanceTopic = GridCachePartitionExchangeManager.rebalanceTopic(0); } /** @@ -361,17 +367,18 @@ Runnable addAssignments( } return () -> { - if (next != null) - fut.listen(f -> { - try { - if (f.get()) // Not cancelled. - next.run(); // Starts next cache rebalancing (according to the order). - } - catch (IgniteCheckedException e) { - if (log.isDebugEnabled()) - log.debug(e.getMessage()); - } - }); + fut.listen(f -> { + try { + printRebalanceStatistics(); + + if (f.get() && nonNull(next)) + next.run(); + } + catch (IgniteCheckedException e) { + if (log.isDebugEnabled()) + log.debug(e.getMessage()); + } + }); requestPartitions(fut, assignments); }; @@ -453,103 +460,71 @@ private void requestPartitions(final RebalanceFuture fut, GridDhtPreloaderAssign final CacheConfiguration cfg = grp.config(); - int locStripes = ctx.gridConfig().getRebalanceThreadPoolSize(); - for (Map.Entry e : assignments.entrySet()) { final ClusterNode node = e.getKey(); GridDhtPartitionDemandMessage d = e.getValue(); - int rmtStripes = Optional.ofNullable((Integer) node.attribute(IgniteNodeAttributes.ATTR_REBALANCE_POOL_SIZE)) - .orElse(1); - - int rmtTotalStripes = rmtStripes <= locStripes ? rmtStripes : locStripes; - - int stripes = rmtTotalStripes; - final IgniteDhtDemandedPartitionsMap parts; + synchronized (fut) { // Synchronized to prevent consistency issues in case of parallel cancellation. 
if (fut.isDone()) break; parts = fut.remaining.get(node.id()).get2(); - - U.log(log, "Prepared rebalancing [grp=" + grp.cacheOrGroupName() - + ", mode=" + cfg.getRebalanceMode() + ", supplier=" + node.id() - + ", full=" + parts.fullSet() + ", hist=" + parts.historicalSet() - + ", topVer=" + fut.topologyVersion() + ", localParallelism=" + locStripes - + ", rmtParallelism=" + rmtStripes + ", parallelism=" + rmtTotalStripes + "]"); } - final List stripePartitions = new ArrayList<>(stripes); - for (int i = 0; i < stripes; i++) - stripePartitions.add(new IgniteDhtDemandedPartitionsMap()); - - // Reserve one stripe for historical partitions. - if (parts.hasHistorical()) { - stripePartitions.set(stripes - 1, new IgniteDhtDemandedPartitionsMap(parts.historicalMap(), null)); - - if (stripes > 1) - stripes--; - } - - // Distribute full partitions across other stripes. - Iterator it = parts.fullSet().iterator(); - for (int i = 0; it.hasNext(); i++) - stripePartitions.get(i % stripes).addFull(it.next()); - - for (int stripe = 0; stripe < rmtTotalStripes; stripe++) { - if (!stripePartitions.get(stripe).isEmpty()) { - // Create copy of demand message with new striped partitions map. - final GridDhtPartitionDemandMessage demandMsg = d.withNewPartitionsMap(stripePartitions.get(stripe)); - - demandMsg.topic(rebalanceTopics.get(stripe)); - demandMsg.rebalanceId(fut.rebalanceId); - demandMsg.timeout(grp.preloader().timeout()); - - final int topicId = stripe; + U.log(log, "Prepared rebalancing [grp=" + grp.cacheOrGroupName() + + ", mode=" + cfg.getRebalanceMode() + ", supplier=" + node.id() + ", partitionsCount=" + parts.size() + + ", topVer=" + fut.topologyVersion() + "]"); - IgniteInternalFuture clearAllFuture = clearFullPartitions(fut, demandMsg.partitions().fullSet()); + if (!parts.isEmpty()) { + d.topic(rebalanceTopic); + d.rebalanceId(fut.rebalanceId); + d.timeout(grp.preloader().timeout()); - // Start rebalancing after clearing full partitions is finished. 
- clearAllFuture.listen(f -> ctx.kernalContext().closure().runLocalSafe(() -> { - if (fut.isDone()) - return; + IgniteInternalFuture clearAllFuture = clearFullPartitions(fut, d.partitions().fullSet()); - try { - ctx.io().sendOrderedMessage(node, rebalanceTopics.get(topicId), - demandMsg.convertIfNeeded(node.version()), grp.ioPolicy(), demandMsg.timeout()); + // Start rebalancing after clearing full partitions is finished. + clearAllFuture.listen(f -> ctx.kernalContext().closure().runLocalSafe(() -> { + if (fut.isDone()) + return; - // Cleanup required in case partitions demanded in parallel with cancellation. - synchronized (fut) { - if (fut.isDone()) - fut.cleanupRemoteContexts(node.id()); - } - - if (log.isInfoEnabled()) - log.info("Started rebalance routine [" + grp.cacheOrGroupName() + - ", topVer=" + fut.topologyVersion() + - ", supplier=" + node.id() + ", topic=" + topicId + - ", fullPartitions=" + S.compact(stripePartitions.get(topicId).fullSet()) + - ", histPartitions=" + S.compact(stripePartitions.get(topicId).historicalSet()) + "]"); + try { + if (log.isInfoEnabled()) + log.info("Starting rebalance routine [" + grp.cacheOrGroupName() + + ", topVer=" + fut.topologyVersion() + + ", supplier=" + node.id() + + ", fullPartitions=" + S.compact(parts.fullSet()) + + ", histPartitions=" + S.compact(parts.historicalSet()) + "]"); + + fut.stat.addMessageStatistics(node); + + ctx.io().sendOrderedMessage(node, rebalanceTopic, + d.convertIfNeeded(node.version()), grp.ioPolicy(), d.timeout()); + + // Cleanup required in case partitions demanded in parallel with cancellation. 
+ synchronized (fut) { + if (fut.isDone()) + fut.cleanupRemoteContexts(node.id()); } - catch (IgniteCheckedException e1) { - ClusterTopologyCheckedException cause = e1.getCause(ClusterTopologyCheckedException.class); + } + catch (IgniteCheckedException e1) { + ClusterTopologyCheckedException cause = e1.getCause(ClusterTopologyCheckedException.class); - if (cause != null) - log.warning("Failed to send initial demand request to node. " + e1.getMessage()); - else - log.error("Failed to send initial demand request to node.", e1); + if (cause != null) + log.warning("Failed to send initial demand request to node. " + e1.getMessage()); + else + log.error("Failed to send initial demand request to node.", e1); - fut.cancel(); - } - catch (Throwable th) { - log.error("Runtime error caught during initial demand request sending.", th); + fut.cancel(); + } + catch (Throwable th) { + log.error("Runtime error caught during initial demand request sending.", th); - fut.cancel(); - } - }, true)); - } + fut.cancel(); + } + }, true)); } } } @@ -650,6 +625,23 @@ private IgniteInternalFuture clearFullPartitions(RebalanceFuture fut, Set clearFullPartitions(RebalanceFuture fut, Set infos = e.getValue().infos().iterator(); - // Loop through all received entries and try to preload them. 
- while (infos.hasNext()) { - ctx.database().checkpointReadLock(); - - try { - for (int i = 0; i < 100; i++) { - if (!infos.hasNext()) - break; - - GridCacheEntryInfo entry = infos.next(); - - if (!preloadEntry(node, p, entry, topVer)) { - if (log.isTraceEnabled()) - log.trace("Got entries for invalid partition during " + - "preloading (will skip) [p=" + p + ", entry=" + entry + ']'); - - break; - } - - for (GridCacheContext cctx : grp.caches()) { - if (cctx.statisticsEnabled()) - cctx.cache().metrics0().onRebalanceKeyReceived(); - } - } - } - finally { - ctx.database().checkpointReadUnlock(); - } - } + preloadEntries(topVer, node, p, infos); + fut.processed.get(p).increment(); + // If message was last for this partition, // then we take ownership. - if (last) { - fut.partitionDone(nodeId, p, true); - - if (log.isDebugEnabled()) - log.debug("Finished rebalancing partition: " + - "[" + demandRoutineInfo(topicId, nodeId, supplyMsg) + ", p=" + p + "]"); - } + if (last) + ownPartition(fut, part, nodeId, supplyMsg); } finally { part.release(); @@ -835,7 +796,7 @@ public void handleSupplyMessage( if (log.isDebugEnabled()) log.debug("Skipping rebalancing partition (state is not MOVING): " + - "[" + demandRoutineInfo(topicId, nodeId, supplyMsg) + ", p=" + p + "]"); + "[" + demandRoutineInfo(nodeId, supplyMsg) + ", p=" + p + "]"); } } else { @@ -843,7 +804,7 @@ public void handleSupplyMessage( if (log.isDebugEnabled()) log.debug("Skipping rebalancing partition (affinity changed): " + - "[" + demandRoutineInfo(topicId, nodeId, supplyMsg) + ", p=" + p + "]"); + "[" + demandRoutineInfo(nodeId, supplyMsg) + ", p=" + p + "]"); } } @@ -863,31 +824,33 @@ public void handleSupplyMessage( d.timeout(grp.preloader().timeout()); - d.topic(rebalanceTopics.get(topicId)); + d.topic(rebalanceTopic); if (!topologyChanged(fut) && !fut.isDone()) { // Send demand message. 
try { - ctx.io().sendOrderedMessage(node, rebalanceTopics.get(topicId), + ctx.io().sendOrderedMessage(node, rebalanceTopic, d.convertIfNeeded(node.version()), grp.ioPolicy(), grp.preloader().timeout()); if (log.isDebugEnabled()) - log.debug("Send next demand message [" + demandRoutineInfo(topicId, nodeId, supplyMsg) + "]"); + log.debug("Send next demand message [" + demandRoutineInfo(nodeId, supplyMsg) + "]"); } catch (ClusterTopologyCheckedException e) { if (log.isDebugEnabled()) - log.debug("Supplier has left [" + demandRoutineInfo(topicId, nodeId, supplyMsg) + + log.debug("Supplier has left [" + demandRoutineInfo(nodeId, supplyMsg) + ", errMsg=" + e.getMessage() + ']'); } } else { if (log.isDebugEnabled()) - log.debug("Will not request next demand message [" + demandRoutineInfo(topicId, nodeId, supplyMsg) + + log.debug("Will not request next demand message [" + demandRoutineInfo(nodeId, supplyMsg) + ", topChanged=" + topologyChanged(fut) + ", rebalanceFuture=" + fut + "]"); } } catch (IgniteSpiException | IgniteCheckedException e) { - LT.error(log, e, "Error during rebalancing [" + demandRoutineInfo(topicId, nodeId, supplyMsg) + + fut.cancel(nodeId); + + LT.error(log, e, "Error during rebalancing [" + demandRoutineInfo(nodeId, supplyMsg) + ", err=" + e + ']'); } } @@ -896,6 +859,97 @@ public void handleSupplyMessage( } } + /** + * @param fut Future. + * @param part Partition. + * @param nodeId Node id. + * @param supplyMsg Supply message. 
+ */ + private void ownPartition( + final RebalanceFuture fut, + GridDhtLocalPartition part, + final UUID nodeId, + final GridDhtPartitionSupplyMessage supplyMsg + ) { + if (topologyChanged(fut) || !fut.isActual(supplyMsg.rebalanceId())) + return; + + int id = part.id(); + + long queued = fut.queued.get(id).sum(); + long processed = fut.processed.get(id).sum(); + + if (processed == queued) { + fut.partitionDone(nodeId, id, true); + + if (log.isDebugEnabled()) + log.debug("Finished rebalancing partition: " + + "[" + demandRoutineInfo(nodeId, supplyMsg) + ", id=" + id + "]"); + } + else { + if (log.isDebugEnabled()) + log.debug("Retrying partition owning: " + + "[" + demandRoutineInfo(nodeId, supplyMsg) + ", id=" + id + + ", processed=" + processed + ", queued=" + queued + "]"); + + ctx.kernalContext().getRebalanceExecutorService().execute(() -> ownPartition(fut, part, nodeId, supplyMsg)); + } + } + + /** + * Adds entries with theirs history to partition p. + * + * @param node Node which sent entry. + * @param p Partition id. + * @param infos Entries info for preload. + * @param topVer Topology version. + * @throws IgniteInterruptedCheckedException If interrupted. + */ + private void preloadEntries(AffinityTopologyVersion topVer, ClusterNode node, int p, + Iterator infos) throws IgniteCheckedException { + GridCacheContext cctx = null; + + // Loop through all received entries and try to preload them. + while (infos.hasNext()) { + ctx.database().checkpointReadLock(); + + try { + for (int i = 0; i < 100; i++) { + if (!infos.hasNext()) + break; + + GridCacheEntryInfo entry = infos.next(); + + if (cctx == null || (grp.sharedGroup() && entry.cacheId() != cctx.cacheId())) { + cctx = grp.sharedGroup() ? 
grp.shared().cacheContext(entry.cacheId()) : grp.singleCacheContext(); + + if (cctx == null) + continue; + else if (cctx.isNear()) + cctx = cctx.dhtCache().context(); + } + + if (!preloadEntry(node, p, entry, topVer, cctx)) { + if (log.isTraceEnabled()) + log.trace("Got entries for invalid partition during " + + "preloading (will skip) [p=" + p + ", entry=" + entry + ']'); + + return; + } + + //TODO: IGNITE-11330: Update metrics for touched cache only. + for (GridCacheContext ctx : grp.caches()) { + if (ctx.statisticsEnabled()) + ctx.cache().metrics0().onRebalanceKeyReceived(); + } + } + } + finally { + ctx.database().checkpointReadUnlock(); + } + } + } + /** * Adds {@code entry} to partition {@code p}. * @@ -903,6 +957,7 @@ public void handleSupplyMessage( * @param p Partition id. * @param entry Preloaded entry. * @param topVer Topology version. + * @param cctx Cache context. * @return {@code False} if partition has become invalid during preloading. * @throws IgniteInterruptedCheckedException If interrupted. */ @@ -910,7 +965,8 @@ private boolean preloadEntry( ClusterNode from, int p, GridCacheEntryInfo entry, - AffinityTopologyVersion topVer + AffinityTopologyVersion topVer, + GridCacheContext cctx ) throws IgniteCheckedException { assert ctx.database().checkpointLockIsHeldByThread(); @@ -918,15 +974,7 @@ private boolean preloadEntry( GridCacheEntryEx cached = null; try { - GridCacheContext cctx = grp.sharedGroup() ? ctx.cacheContext(entry.cacheId()) : grp.singleCacheContext(); - - if (cctx == null) - return true; - - if (cctx.isNear()) - cctx = cctx.dhtCache().context(); - - cached = cctx.cache().entryEx(entry.key()); + cached = cctx.cache().entryEx(entry.key(), topVer); if (log.isTraceEnabled()) { log.trace("Rebalancing key [key=" + entry.key() + ", part=" + p + ", fromNode=" + @@ -988,12 +1036,11 @@ else if (log.isTraceEnabled()) /** * String representation of demand routine. * - * @param topicId Topic id. * @param supplier Supplier. 
* @param supplyMsg Supply message. */ - private String demandRoutineInfo(int topicId, UUID supplier, GridDhtPartitionSupplyMessage supplyMsg) { - return "grp=" + grp.cacheOrGroupName() + ", topVer=" + supplyMsg.topologyVersion() + ", supplier=" + supplier + ", topic=" + topicId; + private String demandRoutineInfo(UUID supplier, GridDhtPartitionSupplyMessage supplyMsg) { + return "grp=" + grp.cacheOrGroupName() + ", topVer=" + supplyMsg.topologyVersion() + ", supplier=" + supplier; } /** {@inheritDoc} */ @@ -1033,10 +1080,24 @@ public static class RebalanceFuture extends GridFutureAdapter { /** The number of rebalance routines. */ private final long routines; - /** Used to enforce the condition: after rebalance future cancellation no more supply messages could be applied - * to partition. */ + /** Used to order rebalance cancellation and supply message processing, they should not overlap. + * Otherwise partition clearing could start on still rebalancing partition resulting in eviction of + * partition in OWNING state. */ private final ReentrantReadWriteLock cancelLock; + /** Rebalance statistics */ + @GridToStringExclude + final RebalanceFutureStatistics stat = new RebalanceFutureStatistics(); + + /** Entries batches queued. */ + private final Map queued = new HashMap<>(); + + /** Entries batches processed. */ + private final Map processed = new HashMap<>(); + + /** Historical rebalance set. */ + private final Set historical = new HashSet<>(); + /** * @param grp Cache group. * @param assignments Assignments. 
@@ -1047,7 +1108,8 @@ public static class RebalanceFuture extends GridFutureAdapter { CacheGroupContext grp, GridDhtPreloaderAssignments assignments, IgniteLogger log, - long rebalanceId) { + long rebalanceId + ) { assert assignments != null; exchId = assignments.exchangeId(); @@ -1058,6 +1120,15 @@ public static class RebalanceFuture extends GridFutureAdapter { "Partitions are null [grp=" + grp.cacheOrGroupName() + ", fromNode=" + k.id() + "]"; remaining.put(k.id(), new T2<>(U.currentTimeMillis(), v.partitions())); + + historical.addAll(v.partitions().historicalSet()); + + Stream.concat(v.partitions().historicalSet().stream(), v.partitions().fullSet().stream()) + .forEach( + p -> { + queued.put(p, new LongAdder()); + processed.put(p, new LongAdder()); + }); }); this.routines = remaining.size(); @@ -1082,7 +1153,7 @@ public static class RebalanceFuture extends GridFutureAdapter { this.log = null; this.rebalanceId = -1; this.routines = 0; - this.cancelLock = null; + this.cancelLock = new ReentrantReadWriteLock(); } /** @@ -1103,7 +1174,7 @@ private boolean isActual(long rebalanceId) { /** * @return Is initial (created at demander creation). */ - private boolean isInitial() { + public boolean isInitial() { return topVer == null; } @@ -1113,11 +1184,11 @@ private boolean isInitial() { * @return {@code True}. */ @Override public boolean cancel() { - try { - // Cancel lock is needed only for case when some message might be on the fly while rebalancing is - // cancelled. - cancelLock.writeLock().lock(); + // Cancel lock is needed only for case when some message might be on the fly while rebalancing is + // cancelled. 
+ cancelLock.writeLock().lock(); + try { synchronized (this) { if (isDone()) return true; @@ -1198,12 +1269,12 @@ private void cleanupRemoteContexts(UUID nodeId) { d.timeout(grp.preloader().timeout()); try { - for (int idx = 0; idx < ctx.gridConfig().getRebalanceThreadPoolSize(); idx++) { - d.topic(GridCachePartitionExchangeManager.rebalanceTopic(idx)); + Object rebalanceTopic = GridCachePartitionExchangeManager.rebalanceTopic(0); - ctx.io().sendOrderedMessage(node, GridCachePartitionExchangeManager.rebalanceTopic(idx), - d.convertIfNeeded(node.version()), grp.ioPolicy(), grp.preloader().timeout()); - } + d.topic(rebalanceTopic); + + ctx.io().sendOrderedMessage(node, rebalanceTopic, + d.convertIfNeeded(node.version()), grp.ioPolicy(), grp.preloader().timeout()); } catch (IgniteCheckedException ignored) { if (log.isDebugEnabled()) @@ -1349,4 +1420,74 @@ public String toString() { return S.toString(RebalanceFuture.class, this); } } + + /** + * Collect demander per cache groups. For print statistics. + * + * @return List demanders. + * */ + private Set demanders(){ + return ctx.cacheContexts().stream() + .map(GridCacheContext::preloader) + .filter(GridDhtPreloader.class::isInstance) + .map(GridDhtPreloader.class::cast) + .map(GridDhtPreloader::demander) + .collect(toSet()); + } + + /** + * Print rebalance statistics into log. + * Statistic will print if + * {@link RebalanceStatisticsUtils#printRebalanceStatistics() + * printRebalanceStatistics()} == true. + * To use correctly you need to call this method exactly once right after + * {@code RebalanceFuture} was completed (successfully or not). + *

+ * If {@link #rebalanceFut} was done successfully, prints statistics + * for cache group. + *

+ * If the whole rebalance is over, print statistics for all cache groups. + * The end of the rebalance is determined by the successful done all + * {@code RebalanceFuture}'s. + * + * @throws IgniteCheckedException when get result {@code RebalanceFuture} + * @see RebalanceFuture RebalanceFuture + */ + private void printRebalanceStatistics() throws IgniteCheckedException { + if (!RebalanceStatisticsUtils.printRebalanceStatistics()) + return; + + RebalanceFuture currRebFut = rebalanceFut; + assert currRebFut.isDone() : "RebalanceFuture should be done."; + + currRebFut.stat.endTime(currentTimeMillis()); + lastStatFutures.add(currRebFut); + + if (currRebFut.get()) //Success rebalance for current cache group + log.info(rebalanceStatistics(false, singletonMap(grp, singletonList(currRebFut)))); + else + return; + + for (GridCacheContext gridCacheContext : ctx.cacheContexts()) { + IgniteInternalFuture rebalanceFuture = gridCacheContext.preloader().rebalanceFuture(); + + if (!rebalanceFuture.isDone() || !rebalanceFuture.get()) //Rebalance not done or not success + return; + } + + Set demanders = demanders(); + + Map> rebFuts = demanders.stream() + .collect(toMap(demander -> demander.grp, demander -> demander.lastStatFutures)); + + try { + log.info(rebalanceStatistics(true, rebFuts)); + } + finally { + demanders.forEach(demander -> { + demander.rebalanceFut.stat.clear(); + demander.lastStatFutures.clear(); + }); + } + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java index 3fa9de854cad7..1d99a2c4b7b49 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java @@ -30,6 +30,9 @@ import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.failure.FailureContext; +import org.apache.ignite.failure.FailureType; +import org.apache.ignite.internal.IgniteNodeAttributes; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheGroupContext; @@ -54,7 +57,7 @@ /** * Class for supplying partitions to demanding nodes. */ -class GridDhtPartitionSupplier { +public class GridDhtPartitionSupplier { /** */ private final CacheGroupContext grp; @@ -184,14 +187,14 @@ public void handleDemandMessage(int topicId, UUID nodeId, GridDhtPartitionDemand assert demandMsg != null; assert nodeId != null; - T3 contextId = new T3<>(nodeId, topicId, demandMsg.topologyVersion()); + T3 ctxId = new T3<>(nodeId, topicId, demandMsg.topologyVersion()); if (demandMsg.rebalanceId() < 0) { // Demand node requested context cleanup. synchronized (scMap) { - SupplyContext sctx = scMap.get(contextId); + SupplyContext sctx = scMap.get(ctxId); if (sctx != null && sctx.rebalanceId == -demandMsg.rebalanceId()) { - clearContext(scMap.remove(contextId), log); + clearContext(scMap.remove(ctxId), log); if (log.isDebugEnabled()) log.debug("Supply context cleaned [" + supplyRoutineInfo(topicId, nodeId, demandMsg) @@ -223,11 +226,11 @@ public void handleDemandMessage(int topicId, UUID nodeId, GridDhtPartitionDemand try { synchronized (scMap) { - sctx = scMap.remove(contextId); + sctx = scMap.remove(ctxId); if (sctx != null && demandMsg.rebalanceId() < sctx.rebalanceId) { // Stale message, return context back and return. 
- scMap.put(contextId, sctx); + scMap.put(ctxId, sctx); if (log.isDebugEnabled()) log.debug("Stale demand message [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + @@ -250,9 +253,16 @@ public void handleDemandMessage(int topicId, UUID nodeId, GridDhtPartitionDemand log.debug("Demand message accepted [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + "]"); - assert !(sctx != null && !demandMsg.partitions().isEmpty()); + assert sctx == null || demandMsg.partitions().isEmpty() : + "sctx=" + sctx + ", topicId=" + topicId + ", demanderId=" + nodeId + ", msg=" + demandMsg; - long maxBatchesCnt = grp.preloader().batchesPrefetchCount(); + // Saturate remote thread pool for first demand request. + Integer rmtThreadPoolSize = demanderNode.attribute(IgniteNodeAttributes.ATTR_REBALANCE_POOL_SIZE); + + if (rmtThreadPoolSize == null) + rmtThreadPoolSize = 1; + + long maxBatchesCnt = grp.preloader().batchesPrefetchCount() * rmtThreadPoolSize; if (sctx == null) { if (log.isDebugEnabled()) @@ -319,18 +329,18 @@ public void handleDemandMessage(int topicId, UUID nodeId, GridDhtPartitionDemand while (iter.hasNext()) { if (supplyMsg.messageSize() >= msgMaxSize) { if (++batchesCnt >= maxBatchesCnt) { - saveSupplyContext(contextId, + saveSupplyContext(ctxId, iter, remainingParts, demandMsg.rebalanceId() ); - reply(topicId, demanderNode, demandMsg, supplyMsg, contextId); + reply(topicId, demanderNode, demandMsg, supplyMsg, ctxId); return; } else { - if (!reply(topicId, demanderNode, demandMsg, supplyMsg, contextId)) + if (!reply(topicId, demanderNode, demandMsg, supplyMsg, ctxId)) return; supplyMsg = new GridDhtPartitionSupplyMessage(demandMsg.rebalanceId(), @@ -367,13 +377,10 @@ public void handleDemandMessage(int topicId, UUID nodeId, GridDhtPartitionDemand if (!remainingParts.contains(part)) continue; - GridCacheEntryInfo info = new GridCacheEntryInfo(); + GridCacheEntryInfo info = extractEntryInfo(row); - info.key(row.key()); - info.expireTime(row.expireTime()); - 
info.version(row.version()); - info.value(row.value()); - info.cacheId(row.cacheId()); + if (info == null) + continue; if (preloadPred == null || preloadPred.apply(info)) supplyMsg.addEntry0(part, iter.historical(part), info, grp.shared(), grp.cacheObjectContext()); @@ -429,7 +436,7 @@ else if (iter.isPartitionMissing(p)) { else iter.close(); - reply(topicId, demanderNode, demandMsg, supplyMsg, contextId); + reply(topicId, demanderNode, demandMsg, supplyMsg, ctxId); if (log.isInfoEnabled()) log.info("Finished supplying rebalancing [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + "]"); @@ -484,15 +491,37 @@ else if (iter != null) t ); - reply(topicId, demanderNode, demandMsg, errMsg, contextId); + reply(topicId, demanderNode, demandMsg, errMsg, ctxId); } catch (Throwable t1) { U.error(log, "Failed to send supply error message [" + supplyRoutineInfo(topicId, nodeId, demandMsg) + "]", t1); } + + grp.shared().kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, + new IgniteCheckedException("Failed to continue supplying [" + + supplyRoutineInfo(topicId, nodeId, demandMsg) + "]", t) + )); } } + /** + * Extracts entry info from row. + * @param row Cache data row. + * @return Entry info. + */ + private GridCacheEntryInfo extractEntryInfo(CacheDataRow row) { + GridCacheEntryInfo info = new GridCacheEntryInfo(); + + info.key(row.key()); + info.cacheId(row.cacheId()); + info.value(row.value()); + info.version(row.version()); + info.expireTime(row.expireTime()); + + return info; + } + /** * Sends supply message to demand node. * @@ -544,7 +573,10 @@ else if (grp.preloader().throttle() > 0) * @param demandMsg Demand message. 
*/ private String supplyRoutineInfo(int topicId, UUID demander, GridDhtPartitionDemandMessage demandMsg) { - return "grp=" + grp.cacheOrGroupName() + ", demander=" + demander + ", topVer=" + demandMsg.topologyVersion() + ", topic=" + topicId; + return "grp=" + grp.cacheOrGroupName() + + ", demander=" + demander + + ", topVer=" + demandMsg.topologyVersion() + + (topicId > 0 ? ", topic=" + topicId : ""); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java index 5f3188fc3145a..0b066422b7ee7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java @@ -42,6 +42,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.stream.Collectors; import java.util.stream.Stream; +import javax.cache.expiry.EternalExpiryPolicy; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; @@ -80,6 +81,7 @@ import org.apache.ignite.internal.processors.cache.ExchangeActions; import org.apache.ignite.internal.processors.cache.ExchangeContext; import org.apache.ignite.internal.processors.cache.ExchangeDiscoveryEvents; +import org.apache.ignite.internal.processors.cache.GridCacheAdapter; import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate; import org.apache.ignite.internal.processors.cache.GridCacheProcessor; @@ -143,6 +145,9 @@ public class GridDhtPartitionsExchangeFuture extends GridDhtTopologyFutureAdapte /** */ public static final String EXCHANGE_LOG = 
"org.apache.ignite.internal.exchange.time"; + /** Partition state failed message. */ + public static final String PARTITION_STATE_FAILED_MSG = "Partition states validation has failed for group: %s, msg: %s"; + /** */ private static final int RELEASE_FUTURE_DUMP_THRESHOLD = IgniteSystemProperties.getInteger(IGNITE_PARTITION_RELEASE_FUTURE_DUMP_THRESHOLD, 0); @@ -198,6 +203,9 @@ public class GridDhtPartitionsExchangeFuture extends GridDhtTopologyFutureAdapte /** */ private AtomicBoolean added = new AtomicBoolean(false); + /** Exchange type. */ + private volatile ExchangeType exchangeType; + /** * Discovery event receive latch. There is a race between discovery event processing and single message * processing, so it is possible to create an exchange future before the actual discovery event is received. @@ -348,8 +356,8 @@ public class GridDhtPartitionsExchangeFuture extends GridDhtTopologyFutureAdapte /** Discovery lag / Clocks discrepancy, calculated on coordinator when all single messages are received. */ private T2 discoveryLag; - /** Partitions scheduled for historical reblanace for this topology version. */ - private Map> histPartitions; + /** Partitions scheduled for clearing before rebalance for this topology version. */ + private Map> clearingPartitions; /** * @param cctx Cache context. @@ -478,6 +486,13 @@ public void affinityChangeMessage(CacheAffinityChangeMessage affChangeMsg) { return isDone() ? result() : exchCtx.events().topologyVersion(); } + /** + * @return Exchange type or null if not determined yet. + */ + public ExchangeType exchangeType() { + return exchangeType; + } + /** * Retreives the node which has WAL history since {@code cntrSince}. 
* @@ -781,15 +796,14 @@ else if (msg instanceof DynamicCacheChangeBatch) { exchange = onCacheChangeRequest(crdNode); } - else if (msg instanceof SnapshotDiscoveryMessage) { - exchange = onCustomMessageNoAffinityChange(crdNode); - } + else if (msg instanceof SnapshotDiscoveryMessage) + exchange = onCustomMessageNoAffinityChange(); else if (msg instanceof WalStateAbstractMessage) - exchange = onCustomMessageNoAffinityChange(crdNode); + exchange = onCustomMessageNoAffinityChange(); else { assert affChangeMsg != null : this; - exchange = onAffinityChangeRequest(crdNode); + exchange = onAffinityChangeRequest(); } if (forceAffReassignment) @@ -815,7 +829,7 @@ else if (msg instanceof WalStateAbstractMessage) if (exchCtx.mergeExchanges()) { if (localJoinExchange()) { if (cctx.kernalContext().clientNode()) { - onClientNodeEvent(crdNode); + onClientNodeEvent(); exchange = ExchangeType.CLIENT; } @@ -827,7 +841,7 @@ else if (msg instanceof WalStateAbstractMessage) } else { if (firstDiscoEvt.eventNode().isClient()) - exchange = onClientNodeEvent(crdNode); + exchange = onClientNodeEvent(); else exchange = cctx.kernalContext().clientNode() ? ExchangeType.CLIENT : ExchangeType.ALL; } @@ -836,13 +850,15 @@ else if (msg instanceof WalStateAbstractMessage) onLeft(); } else { - exchange = firstDiscoEvt.eventNode().isClient() ? onClientNodeEvent(crdNode) : + exchange = firstDiscoEvt.eventNode().isClient() ? onClientNodeEvent() : onServerNodeEvent(crdNode); } } cctx.cache().registrateProxyRestart(resolveCacheRequests(exchActions), afterLsnrCompleteFut); + exchangeType = exchange; + for (PartitionsExchangeAware comp : cctx.exchange().exchangeAwareComponents()) comp.onInitBeforeTopologyLock(this); @@ -1270,25 +1286,22 @@ private ExchangeType onCacheChangeRequest(boolean crd) throws IgniteCheckedExcep } /** - * @param crd Coordinator flag. * @return Exchange type. 
*/ - private ExchangeType onCustomMessageNoAffinityChange(boolean crd) { + private ExchangeType onCustomMessageNoAffinityChange() { if (!forceAffReassignment) - cctx.affinity().onCustomMessageNoAffinityChange(this, crd, exchActions); + cctx.affinity().onCustomMessageNoAffinityChange(this, exchActions); return cctx.kernalContext().clientNode() ? ExchangeType.CLIENT : ExchangeType.ALL; } /** - * @param crd Coordinator flag. - * @throws IgniteCheckedException If failed. * @return Exchange type. */ - private ExchangeType onAffinityChangeRequest(boolean crd) throws IgniteCheckedException { + private ExchangeType onAffinityChangeRequest() { assert affChangeMsg != null : this; - cctx.affinity().onChangeAffinityMessage(this, crd, affChangeMsg); + cctx.affinity().onChangeAffinityMessage(this, affChangeMsg); if (cctx.kernalContext().clientNode()) return ExchangeType.CLIENT; @@ -1297,11 +1310,10 @@ private ExchangeType onAffinityChangeRequest(boolean crd) throws IgniteCheckedEx } /** - * @param crd Coordinator flag. * @throws IgniteCheckedException If failed. * @return Exchange type. */ - private ExchangeType onClientNodeEvent(boolean crd) throws IgniteCheckedException { + private ExchangeType onClientNodeEvent() throws IgniteCheckedException { assert firstDiscoEvt.eventNode().isClient() : this; if (firstDiscoEvt.type() == EVT_NODE_LEFT || firstDiscoEvt.type() == EVT_NODE_FAILED) { @@ -1312,7 +1324,7 @@ private ExchangeType onClientNodeEvent(boolean crd) throws IgniteCheckedExceptio else assert firstDiscoEvt.type() == EVT_NODE_JOINED || firstDiscoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT : firstDiscoEvt; - cctx.affinity().onClientEvent(this, crd); + cctx.affinity().onClientEvent(this); return firstDiscoEvt.eventNode().isLocal() ? 
ExchangeType.CLIENT : ExchangeType.NONE; } @@ -1420,7 +1432,7 @@ private void distributedExchange() throws IgniteCheckedException { cctx.exchange().exchangerBlockingSectionEnd(); } - histPartitions = new HashMap(); + clearingPartitions = new HashMap(); timeBag.finishGlobalStage("WAL history reservation"); @@ -1803,7 +1815,14 @@ private void onLeft() { if (grp.isLocal()) continue; - grp.preloader().unwindUndeploys(); + grp.preloader().pause(); + + try { + grp.unwindUndeploys(); + } + finally { + grp.preloader().resume(); + } cctx.exchange().exchangerUpdateHeartbeat(); } @@ -2161,7 +2180,7 @@ private String exchangeTimingsLogMessage(String header, List timings) { if (drCacheCtx.isDrEnabled()) { try { - drCacheCtx.dr().onExchange(res, exchId.isLeft(), activateCluster()); + drCacheCtx.dr().onExchange(res, exchId.isLeft()); } catch (IgniteCheckedException e) { U.error(log, "Failed to notify DR: " + e, e); @@ -3458,12 +3477,13 @@ private void finishExchangeOnCoordinator(@Nullable Collection sndRe if (discoveryCustomMessage instanceof DynamicCacheChangeBatch) { if (exchActions != null) { - assignPartitionsStates(); Set caches = exchActions.cachesToResetLostPartitions(); if (!F.isEmpty(caches)) resetLostPartitions(caches); + + assignPartitionsStates(); } } else if (discoveryCustomMessage instanceof SnapshotDiscoveryMessage @@ -3686,13 +3706,16 @@ private void validatePartitionsState() { // Do not validate read or write through caches or caches with disabled rebalance // or ExpiryPolicy is set or validation is disabled. 
+ boolean eternalExpiryPolicy = grpCtx != null && (grpCtx.config().getExpiryPolicyFactory() == null + || grpCtx.config().getExpiryPolicyFactory().create() instanceof EternalExpiryPolicy); + if (grpCtx == null || grpCtx.config().isReadThrough() || grpCtx.config().isWriteThrough() || grpCtx.config().getCacheStoreFactory() != null || grpCtx.config().getRebalanceDelay() == -1 || grpCtx.config().getRebalanceMode() == CacheRebalanceMode.NONE - || grpCtx.config().getExpiryPolicyFactory() == null + || !eternalExpiryPolicy || SKIP_PARTITION_SIZE_VALIDATION) return null; @@ -3700,7 +3723,7 @@ private void validatePartitionsState() { validator.validatePartitionCountersAndSizes(GridDhtPartitionsExchangeFuture.this, top, msgs); } catch (IgniteCheckedException ex) { - log.warning("Partition states validation has failed for group: " + grpCtx.cacheOrGroupName() + ". " + ex.getMessage()); + log.warning(String.format(PARTITION_STATE_FAILED_MSG, grpCtx.cacheOrGroupName(), ex.getMessage())); // TODO: Handle such errors https://issues.apache.org/jira/browse/IGNITE-7833 } @@ -4115,11 +4138,22 @@ private void processFullMessage(boolean checkCrd, ClusterNode node, GridDhtParti exchCtx.events().processEvents(this); - if (localJoinExchange()) - cctx.affinity().onLocalJoin(this, msg, resTopVer); + if (localJoinExchange()) { + Set noAffinityGroups = cctx.affinity().onLocalJoin(this, msg.joinedNodeAffinity(), resTopVer); + + // Prevent cache usage by a user. + if (!noAffinityGroups.isEmpty()) { + List closedCaches = cctx.cache().blockGateways(noAffinityGroups); + + closedCaches.forEach(cache -> log.warning("Affinity for cache " + cache.context().name() + + " has not received from coordinator during local join. " + + " Probably cache is already stopped but not processed on local node yet." 
+ + " Cache proxy will be closed for user interactions for safety.")); + } + } else { if (exchCtx.events().hasServerLeft()) - cctx.affinity().applyAffinityFromFullMessage(this, msg); + cctx.affinity().applyAffinityFromFullMessage(this, msg.idealAffinityDiff()); else cctx.affinity().onServerJoinWithExchangeMergeProtocol(this, false); @@ -4132,9 +4166,9 @@ private void processFullMessage(boolean checkCrd, ClusterNode node, GridDhtParti } } else if (localJoinExchange() && !exchCtx.fetchAffinityOnJoin()) - cctx.affinity().onLocalJoin(this, msg, resTopVer); + cctx.affinity().onLocalJoin(this, msg.joinedNodeAffinity(), resTopVer); else if (forceAffReassignment) - cctx.affinity().applyAffinityFromFullMessage(this, msg); + cctx.affinity().applyAffinityFromFullMessage(this, msg.idealAffinityDiff()); timeBag.finishGlobalStage("Affinity recalculation"); @@ -4313,9 +4347,7 @@ public void onAffinityChangeMessage(final ClusterNode node, final CacheAffinityC if (crd.equals(node)) { AffinityTopologyVersion resTopVer = initialVersion(); - cctx.affinity().onExchangeChangeAffinityMessage(GridDhtPartitionsExchangeFuture.this, - crd.isLocal(), - msg); + cctx.affinity().onExchangeChangeAffinityMessage(GridDhtPartitionsExchangeFuture.this, msg); IgniteCheckedException err = !F.isEmpty(msg.partitionsMessage().getErrorsMap()) ? new IgniteCheckedException("Cluster state change failed.") : null; @@ -5000,34 +5032,35 @@ public static long nextDumpTimeout(int step, long timeout) { * * @param grp Group. * @param part Partition. - * @return {@code True} if partition is historical. + * @return {@code True} if partition has to be cleared before rebalance. 
*/ - public boolean isHistoryPartition(CacheGroupContext grp, int part) { + public boolean isClearingPartition(CacheGroupContext grp, int part) { if (!grp.persistenceEnabled()) return false; synchronized (mux) { - if (histPartitions == null) + if (clearingPartitions == null) return false; - Set parts = histPartitions.get(grp.groupId()); + Set parts = clearingPartitions.get(grp.groupId()); return parts != null && parts.contains(part); } } /** - * Marks a partition for historical rebalance. + * Marks a partition for clearing before rebalance. + * Fully cleared partitions should never be historically rebalanced. * * @param grp Group. * @param part Partition. */ - public void addHistoryPartition(CacheGroupContext grp, int part) { + public void addClearingPartition(CacheGroupContext grp, int part) { if (!grp.persistenceEnabled()) return; synchronized (mux) { - histPartitions.computeIfAbsent(grp.groupId(), k -> new HashSet()).add(part); + clearingPartitions.computeIfAbsent(grp.groupId(), k -> new HashSet()).add(part); } } @@ -5067,7 +5100,7 @@ public void cleanUp() { /** * */ - enum ExchangeType { + public enum ExchangeType { /** */ CLIENT, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java index b1297aadd72ed..8505bd7f3e561 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java @@ -20,16 +20,15 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; -import java.util.Queue; import java.util.UUID; -import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.locks.ReadWriteLock; import 
java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.failure.FailureContext; +import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.NodeStoppingException; -import org.apache.ignite.internal.managers.communication.GridIoPolicy; import org.apache.ignite.internal.processors.affinity.AffinityAssignment; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheGroupContext; @@ -45,9 +44,8 @@ import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.lang.GridPlainRunnable; -import org.apache.ignite.internal.util.lang.GridTuple3; import org.apache.ignite.internal.util.typedef.CI1; -import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgnitePredicate; import org.jetbrains.annotations.Nullable; @@ -81,15 +79,6 @@ public class GridDhtPreloader extends GridCachePreloaderAdapter { /** Busy lock to prevent activities from accessing exchanger while it's stopping. */ private final ReadWriteLock busyLock = new ReentrantReadWriteLock(); - /** Demand lock. */ - private final ReadWriteLock demandLock = new ReentrantReadWriteLock(); - - /** */ - private boolean paused; - - /** */ - private Queue> pausedDemanderQueue = new ConcurrentLinkedQueue<>(); - /** */ private boolean stopped; @@ -131,8 +120,7 @@ public GridDhtPreloader(CacheGroupContext grp) { if (log.isDebugEnabled()) log.debug("DHT rebalancer onKernalStop callback."); - // Acquire write busy lock. 
- busyLock.writeLock().lock(); + pause(); try { if (supplier != null) @@ -146,7 +134,7 @@ public GridDhtPreloader(CacheGroupContext grp) { stopped = true; } finally { - busyLock.writeLock().unlock(); + resume(); } } @@ -186,7 +174,7 @@ private IgniteCheckedException stopError() { if (!grp.affinity().cachedVersions().contains(rebTopVer)) { assert rebTopVer.compareTo(grp.localStartVersion()) <= 0 : - "Empty hisroty allowed only for newly started cache group [rebTopVer=" + rebTopVer + + "Empty history allowed only for newly started cache group [rebTopVer=" + rebTopVer + ", localStartTopVer=" + grp.localStartVersion() + ']'; return true; // Required, since no history info available. @@ -200,11 +188,12 @@ private IgniteCheckedException stopError() { AffinityTopologyVersion lastAffChangeTopVer = ctx.exchange().lastAffinityChangedTopologyVersion(exchFut.topologyVersion()); - return lastAffChangeTopVer.compareTo(rebTopVer) > 0; + return lastAffChangeTopVer.after(rebTopVer); } /** {@inheritDoc} */ - @Override public GridDhtPreloaderAssignments generateAssignments(GridDhtPartitionExchangeId exchId, GridDhtPartitionsExchangeFuture exchFut) { + @Override public GridDhtPreloaderAssignments generateAssignments(GridDhtPartitionExchangeId exchId, + GridDhtPartitionsExchangeFuture exchFut) { assert exchFut == null || exchFut.isDone(); // No assignments for disabled preloader. 
@@ -281,7 +270,7 @@ private IgniteCheckedException stopError() { histSupplier = ctx.discovery().node(nodeId); } - if (histSupplier != null && exchFut.isHistoryPartition(grp, p)) { + if (histSupplier != null && !exchFut.isClearingPartition(grp, p)) { assert grp.persistenceEnabled(); assert remoteOwners(p, topVer).contains(histSupplier) : remoteOwners(p, topVer); @@ -299,6 +288,11 @@ private IgniteCheckedException stopError() { msg.partitions().addHistorical(p, part.initialUpdateCounter(), countersMap.updateCounter(p), partitions); } else { + // If for some reason (for example if supplier fails and new supplier is elected) partition is + // assigned for full rebalance force clearing if not yet set. + if (grp.persistenceEnabled() && exchFut != null && !exchFut.isClearingPartition(grp, p)) + part.clearAsync(); + List picked = remoteOwners(p, topVer); if (picked.isEmpty()) { @@ -365,35 +359,39 @@ private List remoteOwners(int p, AffinityTopologyVersion topVer) { } /** {@inheritDoc} */ - @Override public void handleSupplyMessage(int idx, UUID id, final GridDhtPartitionSupplyMessage s) { - if (!enterBusy()) - return; - - try { - demandLock.readLock().lock(); + @Override public void handleSupplyMessage(UUID nodeId, final GridDhtPartitionSupplyMessage msg) { + demander.registerSupplyMessage(msg, () -> { + if (!enterBusy()) + return; try { - if (paused) - pausedDemanderQueue.add(F.t(idx, id, s)); - else - demander.handleSupplyMessage(idx, id, s); + demander.handleSupplyMessage(nodeId, msg); + } + catch (Throwable t) { + try { + U.error(log, "Failed processing message [senderId=" + nodeId + ", msg=" + msg + ']', t); + } + catch (Throwable e0) { + U.error(log, "Failed processing message [senderId=" + nodeId + ", msg=(failed to log message)", t); + + U.error(log, "Failed to log message due to an error: ", e0); + } + + ctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, t)); } finally { - demandLock.readLock().unlock(); + leaveBusy(); } - } - 
finally { - leaveBusy(); - } + }); } /** {@inheritDoc} */ - @Override public void handleDemandMessage(int idx, UUID id, GridDhtPartitionDemandMessage d) { + @Override public void handleDemandMessage(int idx, UUID nodeId, GridDhtPartitionDemandMessage d) { if (!enterBusy()) return; try { - supplier.handleDemandMessage(idx, id, d); + supplier.handleDemandMessage(idx, nodeId, d); } finally { leaveBusy(); @@ -431,9 +429,9 @@ private List remoteOwners(int p, AffinityTopologyVersion topVer) { /** * @return {@code true} if entered to busy state. */ + @SuppressWarnings("LockAcquiredButNotSafelyReleased") private boolean enterBusy() { - if (!busyLock.readLock().tryLock()) - return false; + busyLock.readLock().lock(); if (stopped) { busyLock.readLock().unlock(); @@ -523,7 +521,8 @@ public void onPartitionEvicted(GridDhtLocalPartition part, boolean updateSeq) { * @return Future for request. */ @SuppressWarnings({"unchecked", "RedundantCast"}) - private GridDhtFuture request0(GridCacheContext cctx, Collection keys, AffinityTopologyVersion topVer) { + private GridDhtFuture request0(GridCacheContext cctx, Collection keys, + AffinityTopologyVersion topVer) { if (cctx.isNear()) cctx = cctx.near().dht().context(); @@ -570,54 +569,45 @@ private GridDhtFuture request0(GridCacheContext cctx, Collection> msgToProc = - new ArrayList<>(pausedDemanderQueue); - - pausedDemanderQueue.clear(); + busyLock.writeLock().unlock(); + } - final GridDhtPreloader preloader = this; + /** + * Return supplier. + * + * @return Supplier. + * */ + public GridDhtPartitionSupplier supplier() { + return supplier; + } - ctx.kernalContext().closure().runLocalSafe(() -> msgToProc.forEach( - m -> preloader.handleSupplyMessage(m.get1(), m.get2(), m.get3()) - ), GridIoPolicy.SYSTEM_POOL); + /** + * @param supplier Supplier. 
+ */ + public void supplier(GridDhtPartitionSupplier supplier) { + this.supplier = supplier; + } - paused = false; - } - finally { - demandLock.writeLock().unlock(); - } + /** + * Return demander. + * + * @return Demander. + * */ + public GridDhtPartitionDemander demander() { + return demander; } - /** {@inheritDoc} */ - @Override public void dumpDebugInfo() { - // No-op + /** + * @param demander Demander. + */ + public void demander(GridDhtPartitionDemander demander) { + this.demander = demander; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/RebalanceStatisticsUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/RebalanceStatisticsUtils.java new file mode 100644 index 0000000000000..3030458aa0c12 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/RebalanceStatisticsUtils.java @@ -0,0 +1,615 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.cache.distributed.dht.preloader; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.processors.affinity.AffinityAssignment; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionDemander.RebalanceFuture; + +import java.time.LocalDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Comparator; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.StringJoiner; +import java.util.TreeMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.ToLongFunction; +import java.util.stream.Stream; +import org.jetbrains.annotations.NotNull; + +import static java.lang.String.valueOf; +import static java.lang.System.currentTimeMillis; +import static java.time.ZoneId.systemDefault; +import static java.time.format.DateTimeFormatter.ofPattern; +import static java.util.Comparator.comparingInt; +import static java.util.Comparator.comparingLong; +import static java.util.Objects.nonNull; +import static java.util.function.Function.identity; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.joining; +import static java.util.stream.Collectors.mapping; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toMap; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_QUIET; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS; +import static 
org.apache.ignite.IgniteSystemProperties.IGNITE_WRITE_REBALANCE_STATISTICS; +import static org.apache.ignite.IgniteSystemProperties.getBoolean; + +/** + * Utility class for rebalance statistics. + */ +class RebalanceStatisticsUtils { + /** To format the date and time. */ + private static final DateTimeFormatter REBALANCE_STATISTICS_DTF = ofPattern("YYYY-MM-dd HH:mm:ss,SSS"); + + /** Text for successful or not rebalances. */ + private static final String SUCCESSFUL_OR_NOT_REBALANCE_TEXT = "including successful and not rebalances"; + + /** Text successful rebalance. */ + private static final String SUCCESSFUL_REBALANCE_TEXT = "successful rebalance"; + + /** + * Private constructor. + */ + private RebalanceStatisticsUtils() { + throw new RuntimeException("don't create"); + } + + /** Rebalance future statistics. */ + static class RebalanceFutureStatistics { + /** Start rebalance time in mills. */ + private final long startTime = currentTimeMillis(); + + /** End rebalance time in mills. */ + private volatile long endTime = startTime; + + /** Per node stats. */ + private final Map msgStats = new ConcurrentHashMap<>(); + + /** Is needed or not to print rebalance statistics. */ + private final boolean printRebalanceStatistics = printRebalanceStatistics(); + + /** + * Add new message statistics. + * Requires to be invoked before demand message sending. + * This method required for {@code addReceivePartitionStatistics}. + * This method add new message statistics if + * {@link #printRebalanceStatistics} == true. + * + * @param supplierNode Supplier node, require not null. 
+ * @see RebalanceMessageStatistics + * @see #addReceivePartitionStatistics(ClusterNode, GridDhtPartitionSupplyMessage) + */ + public void addMessageStatistics(final @NotNull ClusterNode supplierNode) { + if (!printRebalanceStatistics) + return; + + msgStats.putIfAbsent(supplierNode, new RebalanceMessageStatistics(currentTimeMillis())); + } + + /** + * Add new statistics by receive message with partitions from supplier + * node. Require invoke {@code addMessageStatistics} before send + * demand message. This method add new message statistics if + * {@link #printRebalanceStatistics} == true. + * + * @param supplierNode Supplier node, require not null. + * @param supplyMsg Supply message, require not null. + * @see ReceivePartitionStatistics + * @see #addMessageStatistics(ClusterNode) + */ + public void addReceivePartitionStatistics( + final ClusterNode supplierNode, + final GridDhtPartitionSupplyMessage supplyMsg + ) { + assert nonNull(supplierNode); + assert nonNull(supplyMsg); + + if (!printRebalanceStatistics) + return; + + List partStats = supplyMsg.infos().entrySet().stream() + .map(entry -> new PartitionStatistics(entry.getKey(), entry.getValue().infos().size())) + .collect(toList()); + + msgStats.get(supplierNode).receivePartStats + .add(new ReceivePartitionStatistics(currentTimeMillis(), supplyMsg.messageSize(), partStats)); + } + + /** + * Clear statistics. + */ + public void clear() { + msgStats.clear(); + } + + /** + * Set end rebalance time in mills. + * + * @param endTime End rebalance time in mills. + */ + public void endTime(final long endTime) { + this.endTime = endTime; + } + } + + /** Rebalance messages statistics. */ + static class RebalanceMessageStatistics { + /** Time send demand message in mills. */ + private final long sndMsgTime; + + /** Statistics by received partitions. */ + private final Collection receivePartStats = new ConcurrentLinkedQueue<>(); + + /** + * Constructor. + * + * @param sndMsgTime time send demand message. 
+ */ + public RebalanceMessageStatistics(final long sndMsgTime) { + this.sndMsgTime = sndMsgTime; + } + } + + /** Receive partition statistics. */ + static class ReceivePartitionStatistics { + /** Time receive message(on demand message) with partition in mills. */ + private final long rcvMsgTime; + + /** Size receive message in bytes. */ + private final long msgSize; + + /** Received partitions. */ + private final List parts; + + /** + * Constructor. + * + * @param rcvMsgTime time receive message in mills. + * @param msgSize message size in bytes. + * @param parts received partitions, require not null. + */ + public ReceivePartitionStatistics( + final long rcvMsgTime, + final long msgSize, + final List parts + ) { + assert nonNull(parts); + + this.rcvMsgTime = rcvMsgTime; + this.msgSize = msgSize; + this.parts = parts; + } + } + + /** Received partition info. */ + static class PartitionStatistics { + /** Partition id. */ + private final int id; + + /** Count entries in partition. */ + private final int entryCount; + + /** + * Constructor. + * + * @param id partition id. + * @param entryCount count entries in partitions. + */ + public PartitionStatistics(final int id, final int entryCount) { + this.id = id; + this.entryCount = entryCount; + } + } + + /** + * Finds out if statistics can be printed regarding + * {@link IgniteSystemProperties#IGNITE_QUIET}, + * {@link IgniteSystemProperties#IGNITE_WRITE_REBALANCE_STATISTICS}. + * + * @return Is print statistics enabled. + */ + public static boolean printRebalanceStatistics() { + return !getBoolean(IGNITE_QUIET, true) && getBoolean(IGNITE_WRITE_REBALANCE_STATISTICS, false); + } + + /** + * Finds out if partitions distribution can be printed regarding + * {@link IgniteSystemProperties#IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS}. + * + * @return Is print partitions distribution enabled. 
+ */ + public static boolean printPartitionsDistribution() { + return getBoolean(IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS, false); + } + + /** + * Return rebalance statistics. Required to call this method if + * {@link #printRebalanceStatistics()} == true. + *

+ * Flag {@code finish} should reflect was full rebalance finished or not. + *
+ * If {@code finish} == true then expected {@code rebFutrs} contains + * successful or not {@code RebalanceFuture} per cache group, else expected + * {@code rebFutrs} contains only one successful {@code RebalanceFuture}. + *
+ * If {@code finish} == true then print total statistics. + *

+ * Partition distribution is printed only for last success rebalance, + * per cache group. + * + * @param finish Is the whole rebalance finished or not. + * @param rebFutrs Involved in rebalance, require not null. + * @return String with printed rebalance statistics. + * @throws IgniteCheckedException Could be thrown while getting result of + * {@code RebalanceFuture}. + * @see RebalanceFuture RebalanceFuture + */ + public static String rebalanceStatistics( + final boolean finish, + final Map> rebFutrs + ) throws IgniteCheckedException { + assert nonNull(rebFutrs); + assert printRebalanceStatistics() : "Can't print statistics"; + + AtomicInteger nodeCnt = new AtomicInteger(); + + Map nodeAliases = toRebalanceFutureStream(rebFutrs) + .flatMap(future -> future.stat.msgStats.keySet().stream()) + .distinct() + .collect(toMap(identity(), node -> nodeCnt.getAndIncrement())); + + StringJoiner joiner = new StringJoiner(" "); + + if (finish) + writeTotalRebalanceStatistics(rebFutrs, nodeAliases, joiner); + + writeCacheGroupsRebalanceStatistics(rebFutrs, nodeAliases, finish, joiner); + writeAliasesRebalanceStatistics("p - partitions, e - entries, b - bytes, d - duration", nodeAliases, joiner); + writePartitionsDistributionRebalanceStatistics(rebFutrs, nodeAliases, nodeCnt, joiner); + + return joiner.toString(); + } + + /** + * Write total statistics for rebalance. + * + * @param rebFutrs Participating in successful and not rebalances, require not null. + * @param nodeAliases For print nodeId=1 instead long string, require not null. + * @param joiner For write statistics, require not null. 
+ */ + private static void writeTotalRebalanceStatistics( + final Map> rebFutrs, + final Map nodeAliases, + final StringJoiner joiner + ) { + assert nonNull(rebFutrs); + assert nonNull(nodeAliases); + assert nonNull(joiner); + + long minStartTime = minStartTime(toRebalanceFutureStream(rebFutrs)); + long maxEndTime = maxEndTime(toRebalanceFutureStream(rebFutrs)); + + joiner.add("Total information (" + SUCCESSFUL_OR_NOT_REBALANCE_TEXT + "):") + .add("[" + toStartEndDuration(minStartTime, maxEndTime) + "]"); + + Map> supplierStat = + toSupplierStatistics(toRebalanceFutureStream(rebFutrs)); + + writeSupplierRebalanceStatistics(supplierStat, nodeAliases, joiner); + } + + /** + * Write rebalance statistics per cache group. + *

+ * If {@code finish} == true then add {@link #SUCCESSFUL_OR_NOT_REBALANCE_TEXT} else add {@link + * #SUCCESSFUL_REBALANCE_TEXT} into header. + * + * @param rebFuts Participating in successful and not rebalances, require not null. + * @param nodeAliases For print nodeId=1 instead long string, require not null. + * @param joiner For write statistics, require not null. + * @param finish Is finish rebalance. + */ + private static void writeCacheGroupsRebalanceStatistics( + final Map> rebFuts, + final Map nodeAliases, + final boolean finish, + final StringJoiner joiner + ) { + assert nonNull(rebFuts); + assert nonNull(nodeAliases); + assert nonNull(joiner); + + joiner.add("Information per cache group (" + + (finish ? SUCCESSFUL_OR_NOT_REBALANCE_TEXT : SUCCESSFUL_REBALANCE_TEXT) + "):"); + + rebFuts.forEach((context, futures) -> { + long minStartTime = minStartTime(futures.stream()); + long maxEndTime = maxEndTime(futures.stream()); + + joiner.add("[id=" + context.groupId() + ",") + .add("name=" + context.cacheOrGroupName() + ",") + .add(toStartEndDuration(minStartTime, maxEndTime) + "]"); + + Map> supplierStat = toSupplierStatistics(futures.stream()); + writeSupplierRebalanceStatistics(supplierStat, nodeAliases, joiner); + }); + } + + /** + * Write partitions distribution per cache group. Only for last success rebalance. + * Works if {@link #printPartitionsDistribution()} return true. + * + * @param rebFutrs Participating in successful and not rebalances, require not null. + * @param nodeAliases For print nodeId=1 instead long string, require not null. + * @param nodeCnt For adding new nodes into {@code nodeAliases}, require not null. + * @param joiner For write statistics, require not null. + * @throws IgniteCheckedException When get result of + * {@link RebalanceFuture}. 
+ */ + private static void writePartitionsDistributionRebalanceStatistics( + final Map> rebFutrs, + final Map nodeAliases, + final AtomicInteger nodeCnt, + final StringJoiner joiner + ) throws IgniteCheckedException { + assert nonNull(rebFutrs); + assert nonNull(nodeAliases); + assert nonNull(nodeCnt); + assert nonNull(joiner); + + if (!printPartitionsDistribution()) + return; + + joiner.add("Partitions distribution per cache group (" + SUCCESSFUL_REBALANCE_TEXT + "):"); + + Comparator startTimeCmp = comparingLong(fut -> fut.stat.startTime); + Comparator startTimeCmpReversed = startTimeCmp.reversed(); + + Comparator partIdCmp = comparingInt(value -> value.id); + Comparator nodeAliasesCmp = comparingInt(nodeAliases::get); + + for (Entry> rebFutrsEntry : rebFutrs.entrySet()) { + CacheGroupContext cacheGrpCtx = rebFutrsEntry.getKey(); + + joiner.add("[id=" + cacheGrpCtx.groupId() + ",") + .add("name=" + cacheGrpCtx.cacheOrGroupName() + "]"); + + List successFutures = new ArrayList<>(); + + for (RebalanceFuture rebalanceFuture : rebFutrsEntry.getValue()) { + if (rebalanceFuture.isDone() && rebalanceFuture.get()) + successFutures.add(rebalanceFuture); + } + + if (successFutures.isEmpty()) + return; + + successFutures.sort(startTimeCmpReversed); + + RebalanceFuture lastSuccessFuture = successFutures.get(0); + + AffinityAssignment affinity = cacheGrpCtx.affinity().cachedAffinity(lastSuccessFuture.topologyVersion()); + + Map supplierNodeRcvParts = new TreeMap<>(partIdCmp); + + for (Entry entry : lastSuccessFuture.stat.msgStats.entrySet()) { + for (ReceivePartitionStatistics receivePartStat : entry.getValue().receivePartStats) { + for (PartitionStatistics partStat : receivePartStat.parts) + supplierNodeRcvParts.put(partStat, entry.getKey()); + } + } + + affinity.nodes().forEach(node -> nodeAliases.computeIfAbsent(node, node1 -> nodeCnt.getAndIncrement())); + + for (Entry supplierNodeRcvPart : supplierNodeRcvParts.entrySet()) { + int partId = supplierNodeRcvPart.getKey().id; 
+ + String nodes = affinity.get(partId).stream() + .sorted(nodeAliasesCmp) + .map(node -> "[" + nodeAliases.get(node) + + (affinity.primaryPartitions(node.id()).contains(partId) ? ",pr" : ",bu") + + (node.equals(supplierNodeRcvPart.getValue()) ? ",su" : "") + "]" + ) + .collect(joining(",")); + + joiner.add(valueOf(partId)).add("=").add(nodes); + } + } + + writeAliasesRebalanceStatistics("pr - primary, bu - backup, su - supplier node", nodeAliases, joiner); + } + + /** + * Write stattistics per supplier node. + * + * @param supplierStat Statistics by supplier (in successful and not rebalances), require not null. + * @param nodeAliases For print nodeId=1 instead long string, require not null. + * @param joiner For write statistics, require not null. + */ + private static void writeSupplierRebalanceStatistics( + final Map> supplierStat, + final Map nodeAliases, + final StringJoiner joiner + ) { + assert nonNull(supplierStat); + assert nonNull(nodeAliases); + assert nonNull(joiner); + + joiner.add("Supplier statistics:"); + + supplierStat.forEach((supplierNode, msgStats) -> { + long partCnt = sum(msgStats, rps -> rps.parts.size()); + long byteSum = sum(msgStats, rps -> rps.msgSize); + long entryCount = sum(msgStats, rps -> rps.parts.stream().mapToLong(ps -> ps.entryCount).sum()); + + long durationSum = msgStats.stream() + .flatMapToLong(msgStat -> msgStat.receivePartStats.stream() + .mapToLong(rps -> rps.rcvMsgTime - msgStat.sndMsgTime) + ) + .sum(); + + joiner.add("[nodeId=" + nodeAliases.get(supplierNode) + ",") + .add(toPartitionsEntriesBytes(partCnt, entryCount, byteSum) + ",") + .add("d=" + durationSum + " ms]"); + }); + } + + /** + * Write statistics aliases, for reducing output string. + * + * @param nodeAliases for print nodeId=1 instead long string, require not null. + * @param abbreviations Abbreviations ex. b - bytes, require not null. + * @param joiner For write statistics, require not null. 
+ */ + private static void writeAliasesRebalanceStatistics( + final String abbreviations, + final Map nodeAliases, + final StringJoiner joiner + ) { + assert nonNull(abbreviations); + assert nonNull(nodeAliases); + assert nonNull(joiner); + + String nodes = nodeAliases.entrySet().stream() + .sorted(comparingInt(Entry::getValue)) + .map(entry -> "[" + entry.getValue() + "=" + entry.getKey().id() + "," + entry.getKey().consistentId() + "]") + .collect(joining(", ")); + + joiner.add("Aliases:").add(abbreviations + ",").add("nodeId mapping (nodeId=id,consistentId)").add(nodes); + } + + /** + * Convert time in millis to local date time. + * + * @param time Time in mills. + * @return The local date-time. + */ + private static LocalDateTime toLocalDateTime(final long time) { + return new Date(time).toInstant().atZone(systemDefault()).toLocalDateTime(); + } + + /** + * Get min {@link RebalanceFutureStatistics#startTime} in stream rebalance future's. + * + * @param stream Stream rebalance future, require not null. + * @return Min start time. + */ + private static long minStartTime(final Stream stream) { + assert nonNull(stream); + + return stream.mapToLong(value -> value.stat.startTime).min().orElse(0); + } + + /** + * Get max {@link RebalanceFutureStatistics#endTime} in stream rebalance future's. + * + * @param stream Stream rebalance future's, require not null. + * @return Max end time. + */ + private static long maxEndTime(final Stream stream) { + assert nonNull(stream); + + return stream.mapToLong(value -> value.stat.endTime).max().orElse(0); + } + + /** + * Prepare stream rebalance future's of each cache groups. + * + * @param rebFutrs Rebalance future's by cache groups, require not null. + * @return Stream rebalance future's. 
+ */ + private static Stream toRebalanceFutureStream( + final Map> rebFutrs + ) { + assert nonNull(rebFutrs); + + return rebFutrs.entrySet().stream().flatMap(entry -> entry.getValue().stream()); + } + + /** + * Aggregates statistics by supplier node. + * + * @param stream Stream rebalance future's, require not null. + * @return Statistic by supplier. + */ + private static Map> toSupplierStatistics( + final Stream stream + ) { + assert nonNull(stream); + + return stream.flatMap(future -> future.stat.msgStats.entrySet().stream()) + .collect(groupingBy(Entry::getKey, mapping(Entry::getValue, toList()))); + } + + /** + * Creates a string containing the beginning, end, and duration of the rebalance. + * + * @param start Start time in ms. + * @param end End time in ms. + * @return Formatted string of rebalance time. + * @see #REBALANCE_STATISTICS_DTF + */ + private static String toStartEndDuration(final long start, final long end) { + return "startTime=" + REBALANCE_STATISTICS_DTF.format(toLocalDateTime(start)) + ", finishTime=" + + REBALANCE_STATISTICS_DTF.format(toLocalDateTime(end)) + ", d=" + (end - start) + " ms"; + } + + /** + * Summarizes long values. + * + * @param msgStats Message statistics, require not null. + * @param longExtractor Long extractor, require not null. + * @return Sum of long values. + */ + private static long sum( + final List msgStats, + final ToLongFunction longExtractor + ) { + assert nonNull(msgStats); + assert nonNull(longExtractor); + + return msgStats.stream() + .flatMap(msgStat -> msgStat.receivePartStats.stream()) + .mapToLong(longExtractor) + .sum(); + } + + /** + * Create a string containing count received partitions, + * count received entries and sum received bytes. + * + * @param parts Count received partitions. + * @param entries Count received entries. + * @param bytes Sum received bytes. + * @return Formatted string of received rebalance partitions. 
+ */ + private static String toPartitionsEntriesBytes(final long parts, final long entries, final long bytes) { + return "p=" + parts + ", e=" + entries + ", b=" + bytes; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtLocalPartition.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtLocalPartition.java index 354a7851a61b1..d6a4fde75265d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtLocalPartition.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtLocalPartition.java @@ -78,6 +78,7 @@ import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.RENTING; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.CheckpointProgress.State.FINISHED; /** * Key partition. @@ -162,9 +163,6 @@ public class GridDhtLocalPartition extends GridCacheConcurrentMapImpl implements * reservation is released. */ private volatile boolean delayedRenting; - /** Set if partition must be cleared in MOVING state. */ - private volatile boolean clear; - /** Set if topology update sequence should be updated on partition destroy. 
*/ private boolean updateSeqOnDestroy; @@ -488,7 +486,7 @@ private void release0(int sizeChange) { if (reservations == 0) return; - assert getPartState(state) != EVICTED : getPartState(state); + assert getPartState(state) != EVICTED : this; long newState = setReservations(state, --reservations); newState = setSize(newState, getSize(newState) + sizeChange); @@ -498,12 +496,14 @@ private void release0(int sizeChange) { // Decrement reservations. if (this.state.compareAndSet(state, newState)) { // If no more reservations try to continue delayed renting. - if (reservations == 0 && delayedRenting) - rent(true); + if (reservations == 0) { + if (delayedRenting) + rent(true); + else if (getPartState(state) == RENTING) + tryContinueClearing(); + } - // Partition could be only reserved in OWNING state so no further actions - // are required. - break; + return; } } } @@ -551,6 +551,8 @@ private boolean casState(long state, GridDhtPartitionState toState) { boolean update = this.state.compareAndSet(state, setPartState(state, toState)); if (update) { + assert toState != EVICTED || reservations() == 0 : this; + try { ctx.wal().log(new PartitionMetaStateRecord(grp.groupId(), id, toState, 0)); } @@ -574,6 +576,8 @@ private boolean casState(long state, GridDhtPartitionState toState) { boolean update = this.state.compareAndSet(state, setPartState(state, toState)); if (update) { + assert toState != EVICTED || reservations() == 0 : this; + if (log.isDebugEnabled()) log.debug("Partition changed state [grp=" + grp.cacheOrGroupName() + ", p=" + id + ", prev=" + prevState + ", to=" + toState + "]"); @@ -591,7 +595,6 @@ public boolean own() { long state = this.state.get(); GridDhtPartitionState partState = getPartState(state); - if (partState == RENTING || partState == EVICTED) return false; @@ -679,18 +682,43 @@ private void clearAsync0(boolean updateSeq) { GridDhtPartitionState partState = getPartState(state); - boolean evictionRequested = partState == RENTING || delayedRenting; - boolean 
clearingRequested = partState == MOVING && clear; + boolean evictionRequested = partState == RENTING; + boolean clearingRequested = partState == MOVING; if (!evictionRequested && !clearingRequested) return; boolean reinitialized = clearFuture.initialize(updateSeq, evictionRequested); - // Clearing process is already running at the moment. No needs to run it again. + // Clearing process is already running at the moment. No need to run it again. if (!reinitialized) return; + // Make sure current rebalance future is finished before start clearing + // to avoid clearing currently rebalancing partition (except "initial" dummy rebalance). + if (clearingRequested) { + GridDhtPartitionDemander.RebalanceFuture rebFut = + (GridDhtPartitionDemander.RebalanceFuture)grp.preloader().rebalanceFuture(); + + if (!rebFut.isInitial() && !rebFut.isDone()) { + rebFut.listen(fut -> { + // Partition could be owned after rebalance future is done. Skip clearing in such case. + // Otherwise continue clearing. + if (fut.error() == null && state() == MOVING) { + if (freeAndEmpty(state) && !grp.queriesEnabled() && !groupReserved()) { + clearFuture.finish(); + + return; + } + + ctx.evict().evictPartitionAsync(grp, GridDhtLocalPartition.this); + } + }); + + return; + } + } + // Try fast eviction. if (freeAndEmpty(state) && !grp.queriesEnabled() && !groupReserved()) { if (partState == RENTING && casState(state, EVICTED) || clearingRequested) { @@ -702,7 +730,7 @@ private void clearAsync0(boolean updateSeq) { destroy(); } - if (log.isDebugEnabled()) + if (log.isDebugEnabled() && evictionRequested) log.debug("Partition has been fast evicted [grp=" + grp.cacheOrGroupName() + ", p=" + id + ", state=" + state() + "]"); @@ -716,6 +744,9 @@ private void clearAsync0(boolean updateSeq) { /** * Initiates single clear process if partition is in MOVING state or continues cleaning for RENTING state. * Method does nothing if clear process is already running. 
+ * + * IMPORTANT: if clearing is required when after return from method call clear future must be initialized. + * This enforces clearing happens before sending demand requests. */ public void clearAsync() { GridDhtPartitionState state0 = state(); @@ -723,18 +754,7 @@ public void clearAsync() { if (state0 != MOVING && state0 != RENTING) return; - clear = true; - - GridDhtPartitionDemander.RebalanceFuture rebFut = - (GridDhtPartitionDemander.RebalanceFuture)grp.preloader().rebalanceFuture(); - - // Make sure current rebalance future finishes before clearing - // to avoid clearing currently rebalancing partition. - // NOTE: this invariant is not true for initial rebalance future. - if (rebFut.topologyVersion() != null && state0 == MOVING && !rebFut.isDone()) - rebFut.listen(fut -> clearAsync0(false)); - else - clearAsync0(false); + clearAsync0(false); } /** @@ -827,7 +847,10 @@ private void finishEviction(boolean updateSeq) { GridDhtPartitionState state = getPartState(state0); - if (state == EVICTED || (freeAndEmpty(state0) && state == RENTING && casState(state0, EVICTED))) + // Some entries still might be present in partition cache maps due to concurrent updates on backup nodes, + // but it's safe to finish eviction because no physical updates are possible. + if (state == EVICTED || + (store.isEmpty() && getReservations(state0) == 0 && state == RENTING && casState(state0, EVICTED))) updateSeqOnDestroy = updateSeq; } @@ -1133,8 +1156,8 @@ private long clearAll(EvictionContext evictionCtx) throws NodeStoppingException CacheDataRow row = it0.next(); // Do not clear fresh rows in case of partition reloading. - // This is required because updates are possible to moving partition which is currently cleared. - if (row.version().compareTo(clearVer) >= 0 && (state() == MOVING && clear)) + // This is required because normal updates are possible to moving partition which is currently cleared. 
+ if (row.version().compareTo(clearVer) >= 0 && state() == MOVING) continue; if (grp.sharedGroup() && (hld == null || hld.cctx.cacheId() != row.cacheId())) @@ -1190,7 +1213,7 @@ private long clearAll(EvictionContext evictionCtx) throws NodeStoppingException if (forceTestCheckpointOnEviction) { if (partWhereTestCheckpointEnforced == null && cleared >= fullSize()) { - ctx.database().forceCheckpoint("test").finishFuture().get(); + ctx.database().forceCheckpoint("test").futureFor(FINISHED).get(); log.warning("Forced checkpoint by test reasons for partition: " + this); @@ -1280,13 +1303,20 @@ private void clearDeferredDeletes() { /** {@inheritDoc} */ @Override public int hashCode() { - return id; + return 31 * id + grp.groupId(); } /** {@inheritDoc} */ - @SuppressWarnings({"OverlyStrongTypeCast"}) - @Override public boolean equals(Object obj) { - return obj instanceof GridDhtLocalPartition && (obj == this || ((GridDhtLocalPartition)obj).id() == id); + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + GridDhtLocalPartition part = (GridDhtLocalPartition)o; + + return id == part.id && grp.groupId() == part.group().groupId(); } /** {@inheritDoc} */ @@ -1302,6 +1332,7 @@ private void clearDeferredDeletes() { "reservations", reservations(), "empty", isEmpty(), "createTime", U.format(createTime), + "fullSize", fullSize(), "cntr", dataStore().partUpdateCounter()); } @@ -1438,7 +1469,9 @@ public GridLongList finalizeUpdateCounters() { } /** - * @param last {@code True} is last batch for partition. + * Called before next batch is about to be applied during rebalance. Currently used for tests. + * + * @param last {@code True} if last batch for partition. */ public void beforeApplyBatch(boolean last) { // No-op. @@ -1579,8 +1612,6 @@ private void registerClearingCallback() { // Recreate cache data store in case of allowed fast eviction, and reset clear flag. 
listen(f -> { - clear = false; - clearingCbRegistered = false; }); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtPartitionTopologyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtPartitionTopologyImpl.java index d51f053597857..662f8dd175ddb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtPartitionTopologyImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtPartitionTopologyImpl.java @@ -66,15 +66,16 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.CU; -import org.apache.ignite.internal.util.typedef.internal.LT; -import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST; +import static org.apache.ignite.events.EventType.EVT_NODE_JOINED; import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT; +import static org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.ExchangeType.ALL; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.EVICTED; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.LOST; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; @@ -764,58 +765,62 @@ private boolean 
partitionLocalNode(int p, AffinityTopologyVersion topVer) { long updateSeq = this.updateSeq.incrementAndGet(); - for (int p = 0; p < partitions; p++) { - GridDhtLocalPartition locPart = localPartition0(p, topVer, false, true); + // Skip partition updates in case of not real exchange. + if (!ctx.localNode().isClient() && exchFut.exchangeType() == ALL) { + for (int p = 0; p < partitions; p++) { + GridDhtLocalPartition locPart = localPartition0(p, topVer, false, true); - if (partitionLocalNode(p, topVer)) { - // Prepare partition to rebalance if it's not happened on full map update phase. - if (locPart == null || locPart.state() == RENTING || locPart.state() == EVICTED) - locPart = rebalancePartition(p, true, exchFut); + if (partitionLocalNode(p, topVer)) { + // Prepare partition to rebalance if it's not happened on full map update phase. + if (locPart == null || locPart.state() == RENTING || locPart.state() == EVICTED) + locPart = rebalancePartition(p, true, exchFut); - GridDhtPartitionState state = locPart.state(); + GridDhtPartitionState state = locPart.state(); - if (state == MOVING) { - if (grp.rebalanceEnabled()) { - Collection owners = owners(p); + if (state == MOVING) { + if (grp.rebalanceEnabled()) { + Collection owners = owners(p); + + // If an owner node left during exchange, then new exchange should be started with detecting lost partitions. + if (!F.isEmpty(owners)) { + if (log.isDebugEnabled()) + log.debug("Will not own partition (there are owners to rebalance from) " + + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + ", owners = " + owners + ']'); + } - // If an owner node left during exchange, then new exchange should be started with detecting lost partitions. - if (!F.isEmpty(owners)) { - if (log.isDebugEnabled()) - log.debug("Will not own partition (there are owners to rebalance from) " + - "[grp=" + grp.cacheOrGroupName() + ", p=" + p + ", owners = " + owners + ']'); + // It's important to clear non empty moving partitions before full rebalancing. 
+ // Consider the scenario: + // Node1 has keys k1 and k2 in the same partition. + // Node2 started rebalancing from Node1. + // Node2 received k1, k2 and failed before moving partition to OWNING state. + // Node1 removes k2 but update has not been delivered to Node1 because of failure. + // After new full rebalance Node1 will only send k1 to Node2 causing lost removal. + // NOTE: avoid calling clearAsync for partition twice per topology version. + if (grp.persistenceEnabled() && + exchFut.isClearingPartition(grp, locPart.id()) && + !locPart.isClearing() && + !locPart.isEmpty()) + locPart.clearAsync(); } - - // It's important to clear non empty moving partitions before full rebalancing. - // Consider the scenario: - // Node1 has keys k1 and k2 in the same partition. - // Node2 started rebalancing from Node1. - // Node2 received k1, k2 and failed before moving partition to OWNING state. - // Node1 removes k2 but update has not been delivered to Node1 because of failure. - // After new full rebalance Node1 will only send k1 to Node2 causing lost removal. - // NOTE: avoid calling clearAsync for partition twice per topology version. 
- // TODO FIXME clearing is not always needed see IGNITE-11799 - if (grp.persistenceEnabled() && !exchFut.isHistoryPartition(grp, locPart.id()) && - !locPart.isClearing() && !locPart.isEmpty()) - locPart.clearAsync(); + else + updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer); } - else - updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer); } - } - else { - if (locPart != null) { - GridDhtPartitionState state = locPart.state(); + else { + if (locPart != null) { + GridDhtPartitionState state = locPart.state(); - if (state == MOVING) { - locPart.rent(false); + if (state == MOVING) { + locPart.rent(false); - updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer); + updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer); - changed = true; + changed = true; - if (log.isDebugEnabled()) { - log.debug("Evicting " + state + " partition (it does not belong to affinity) [" + - "grp=" + grp.cacheOrGroupName() + ", p=" + locPart.id() + ']'); + if (log.isDebugEnabled()) { + log.debug("Evicting " + state + " partition (it does not belong to affinity) [" + + "grp=" + grp.cacheOrGroupName() + ", p=" + locPart.id() + ']'); + } } } } @@ -867,7 +872,7 @@ private boolean partitionLocalNode(int p, AffinityTopologyVersion topVer) { * @param p Partition number. * @return Partition. 
*/ - private GridDhtLocalPartition getOrCreatePartition(int p) { + public GridDhtLocalPartition getOrCreatePartition(int p) { assert lock.isWriteLockedByCurrentThread(); assert ctx.database().checkpointLockIsHeldByThread(); @@ -1163,9 +1168,11 @@ else if (loc != null && state == RENTING && !showRenting) { AffinityTopologyVersion diffVer = diffFromAffinityVer; if (!diffVer.equals(topVer)) { - LT.warn(log, "Requested topology version does not match calculated diff, need to check if " + - "affinity has changed [grp=" + grp.cacheOrGroupName() + ", topVer=" + topVer + - ", diffVer=" + diffVer + "]"); + if (log.isDebugEnabled()) { + log.debug("Requested topology version does not match calculated diff, need to check if " + + "affinity has changed [grp=" + grp.cacheOrGroupName() + ", topVer=" + topVer + + ", diffVer=" + diffVer + "]"); + } boolean affChanged; @@ -1175,9 +1182,11 @@ else if (loc != null && state == RENTING && !showRenting) { affChanged = ctx.exchange().affinityChanged(topVer, diffVer); if (affChanged) { - LT.warn(log, "Requested topology version does not match calculated diff, will require full iteration to" + - "calculate mapping [grp=" + grp.cacheOrGroupName() + ", topVer=" + topVer + - ", diffVer=" + diffVer + "]"); + if (log.isDebugEnabled()) { + log.debug("Requested topology version does not match calculated diff, will require full iteration to" + + "calculate mapping [grp=" + grp.cacheOrGroupName() + ", topVer=" + topVer + + ", diffVer=" + diffVer + "]"); + } nodes = new ArrayList<>(); @@ -1587,9 +1596,8 @@ private boolean shouldOverridePartitionMap(GridDhtPartitionMap currentMap, GridD if (exchangeVer != null && nodeMap != null && grp.persistenceEnabled() && - readyTopVer.initialized()) { - - assert exchFut != null; + readyTopVer.initialized() && + exchFut != null) { for (Map.Entry e : nodeMap.entrySet()) { int p = e.getKey(); @@ -1609,7 +1617,10 @@ private boolean shouldOverridePartitionMap(GridDhtPartitionMap currentMap, GridD } } else if (state == 
MOVING) { - rebalancePartition(p, partsToReload.contains(p), exchFut); + GridDhtLocalPartition locPart = locParts.get(p); + + rebalancePartition(p, partsToReload.contains(p) || + locPart != null && locPart.state() == MOVING && exchFut.localJoinExchange(), exchFut); changed = true; } @@ -2227,17 +2238,30 @@ else if (plc != PartitionLossPolicy.IGNORE) { /** {@inheritDoc} */ @Override public Map> resetOwners(Map> ownersByUpdCounters, - Set haveHistory, + Set haveHist, GridDhtPartitionsExchangeFuture exchFut) { - Map> result = new HashMap<>(); + Map> res = new HashMap<>(); + + List evts = exchFut.events().events(); + + Set joinedNodes = U.newHashSet(evts.size()); + + for (DiscoveryEvent evt : evts) { + if (evt.type() == EVT_NODE_JOINED) + joinedNodes.add(evt.eventNode().id()); + } ctx.database().checkpointReadLock(); try { + Map> addToWaitGroups = new HashMap<>(); + lock.writeLock().lock(); try { // First process local partitions. + UUID locNodeId = ctx.localNodeId(); + for (Map.Entry> entry : ownersByUpdCounters.entrySet()) { int part = entry.getKey(); Set newOwners = entry.getValue(); @@ -2247,10 +2271,11 @@ else if (plc != PartitionLossPolicy.IGNORE) { if (locPart == null || locPart.state() != OWNING) continue; - if (!newOwners.contains(ctx.localNodeId())) { - rebalancePartition(part, !haveHistory.contains(part), exchFut); + // Partition state should be mutated only on joining nodes if they are exists for the exchange. 
+ if (joinedNodes.isEmpty() && !newOwners.contains(locNodeId)) { + rebalancePartition(part, !haveHist.contains(part), exchFut); - result.computeIfAbsent(ctx.localNodeId(), n -> new HashSet<>()).add(part); + res.computeIfAbsent(locNodeId, n -> new HashSet<>()).add(part); } } @@ -2261,11 +2286,15 @@ else if (plc != PartitionLossPolicy.IGNORE) { for (Map.Entry remotes : node2part.entrySet()) { UUID remoteNodeId = remotes.getKey(); + + if (!joinedNodes.isEmpty() && !joinedNodes.contains(remoteNodeId)) + continue; + GridDhtPartitionMap partMap = remotes.getValue(); GridDhtPartitionState state = partMap.get(part); - if (state == null || state != OWNING) + if (state != OWNING) continue; if (!newOwners.contains(remoteNodeId)) { @@ -2273,25 +2302,23 @@ else if (plc != PartitionLossPolicy.IGNORE) { partMap.updateSequence(partMap.updateSequence() + 1, partMap.topologyVersion()); - if (partMap.nodeId().equals(ctx.localNodeId())) + if (partMap.nodeId().equals(locNodeId)) updateSeq.setIfGreater(partMap.updateSequence()); - result.computeIfAbsent(remoteNodeId, n -> new HashSet<>()).add(part); + res.computeIfAbsent(remoteNodeId, n -> new HashSet<>()).add(part); } } } - for (Map.Entry> entry : result.entrySet()) { + for (Map.Entry> entry : res.entrySet()) { UUID nodeId = entry.getKey(); Set rebalancedParts = entry.getValue(); - // Add to wait groups to ensure late assignment switch after all partitions are rebalanced. - for (Integer part : rebalancedParts) - ctx.cache().context().affinity().addToWaitGroup(groupId(), part, nodeId, topologyVersionFuture().initialVersion()); + addToWaitGroups.put(nodeId, new HashSet<>(rebalancedParts)); if (!rebalancedParts.isEmpty()) { Set historical = rebalancedParts.stream() - .filter(haveHistory::contains) + .filter(haveHist::contains) .collect(Collectors.toSet()); // Filter out partitions having WAL history. 
@@ -2306,15 +2333,28 @@ else if (plc != PartitionLossPolicy.IGNORE) { } node2part = new GridDhtPartitionFullMap(node2part, updateSeq.incrementAndGet()); - } finally { + } + finally { lock.writeLock().unlock(); } + + for (Map.Entry> entry : addToWaitGroups.entrySet()) { + // Add to wait groups to ensure late assignment switch after all partitions are rebalanced. + for (Integer part : entry.getValue()) { + ctx.cache().context().affinity().addToWaitGroup( + groupId(), + part, + entry.getKey(), + topologyVersionFuture().initialVersion() + ); + } + } } finally { ctx.database().checkpointReadUnlock(); } - return result; + return res; } /** @@ -2346,8 +2386,8 @@ private GridDhtLocalPartition rebalancePartition(int p, boolean clear, GridDhtPa if (part.state() != MOVING) part.moving(); - if (!clear) - exchFut.addHistoryPartition(grp, part.id()); + if (clear) + exchFut.addClearingPartition(grp, part.id()); assert part.state() == MOVING : part; @@ -2624,14 +2664,14 @@ private void removeNode(UUID nodeId) { @Override public void ownMoving(AffinityTopologyVersion rebFinishedTopVer) { lock.writeLock().lock(); - AffinityTopologyVersion lastAffChangeVer = ctx.exchange().lastAffinityChangedTopologyVersion(lastTopChangeVer); + try { + AffinityTopologyVersion lastAffChangeVer = ctx.exchange().lastAffinityChangedTopologyVersion(lastTopChangeVer); - if (lastAffChangeVer.compareTo(rebFinishedTopVer) > 0) - log.info("Affinity topology changed, no MOVING partitions will be owned " + - "[rebFinishedTopVer=" + rebFinishedTopVer + - ", lastAffChangeVer=" + lastAffChangeVer + "]"); + if (lastAffChangeVer.compareTo(rebFinishedTopVer) > 0 && log.isInfoEnabled()) + log.info("Affinity topology changed, no MOVING partitions will be owned " + + "[rebFinishedTopVer=" + rebFinishedTopVer + + ", lastAffChangeVer=" + lastAffChangeVer + "]"); - try { for (GridDhtLocalPartition locPart : grp.topology().currentLocalPartitions()) { if (locPart.state() == MOVING) { boolean reserved = locPart.reserve(); @@ 
-3113,13 +3153,16 @@ private void advance() { } } - /** */ + /** + * Partition factory used for (re-)creating partitions during their lifecycle. + * Currently used in tests for overriding default partition behavior. + */ public interface PartitionFactory { /** * @param ctx Context. * @param grp Group. * @param id Partition id. - * @return Partition instance. + * @return New partition instance. */ public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/PartitionsEvictManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/PartitionsEvictManager.java index 826902cee1a87..56bad0b14d5ab 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/PartitionsEvictManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/PartitionsEvictManager.java @@ -28,6 +28,7 @@ import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.atomic.AtomicInteger; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.failure.FailureContext; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.managers.communication.GridIoPolicy; import org.apache.ignite.internal.processors.cache.CacheGroupContext; @@ -39,6 +40,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_EVICTION_PERMITS; import static org.apache.ignite.IgniteSystemProperties.getInteger; import static org.apache.ignite.IgniteSystemProperties.getLong; +import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION; /** * Class that serves asynchronous part eviction process. @@ -82,7 +84,7 @@ public class PartitionsEvictManager extends GridCacheSharedManagerAdapter { * Is not thread-safe. 
* All method should be called under mux synchronization. */ - private volatile BucketQueue evictionQueue; + volatile BucketQueue evictionQueue; /** Lock object. */ private final Object mux = new Object(); @@ -245,7 +247,8 @@ private void showProgress() { if (threads == 0) threads = permits = 1; - log.info("Evict partition permits=" + permits); + if (log.isInfoEnabled()) + log.info("Evict partition permits=" + permits); evictionQueue = new BucketQueue(threads); } @@ -349,8 +352,9 @@ private void awaitFinishAll(){ private void awaitFinish(Integer part, IgniteInternalFuture fut) { // Wait for last offered partition eviction completion try { - log.info("Await partition evict, grpName=" + grp.cacheOrGroupName() + - ", grpId=" + grp.groupId() + ", partId=" + part); + if (log.isInfoEnabled()) + log.info("Await partition evict, grpName=" + grp.cacheOrGroupName() + + ", grpId=" + grp.groupId() + ", partId=" + part); fut.get(); } @@ -369,14 +373,14 @@ private void showProgress() { ", grpId=" + grp.groupId() + ", remainingPartsToEvict=" + (totalTasks.get() - taskInProgress) + ", partsEvictInProgress=" + taskInProgress + - ", totalParts= " + grp.topology().localPartitions().size() + "]"); + ", totalParts=" + grp.topology().localPartitions().size() + "]"); } } /** * Task for self-scheduled partition eviction / clearing. */ - private class PartitionEvictionTask implements Runnable { + class PartitionEvictionTask implements Runnable { /** Partition to evict. 
*/ private final GridDhtLocalPartition part; @@ -412,12 +416,8 @@ private PartitionEvictionTask( } try { - assert part.state() != GridDhtPartitionState.OWNING : part; - boolean success = part.tryClear(grpEvictionCtx); - assert part.state() != GridDhtPartitionState.OWNING : part; - if (success) { if (part.state() == GridDhtPartitionState.EVICTED && part.markForDestroy()) part.destroy(); @@ -439,8 +439,11 @@ private PartitionEvictionTask( false, true); } - else + else { LT.error(log, ex, "Partition eviction failed, this can cause grid hang."); + + cctx.kernalContext().failure().process(new FailureContext(SYSTEM_WORKER_TERMINATION, ex)); + } } } } @@ -448,13 +451,13 @@ private PartitionEvictionTask( /** * */ - private class BucketQueue { - /** Queues contains partitions scheduled for eviction. */ - private final Queue[] buckets; - + class BucketQueue { /** */ private final long[] bucketSizes; + /** Queues contains partitions scheduled for eviction. */ + final Queue[] buckets; + /** * @param buckets Number of buckets. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheEntry.java index c953bebbbfd13..4ebd5e071163c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheEntry.java @@ -458,8 +458,7 @@ public boolean loadedValue(@Nullable IgniteInternalTx tx, } /** {@inheritDoc} */ - @Override protected boolean storeValue(CacheObject val, long expireTime, GridCacheVersion ver) { - return false; + @Override protected void storeValue(CacheObject val, long expireTime, GridCacheVersion ver) { // No-op: queries are disabled for near cache. 
} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java index ad6339a860f9a..09d2b35da1ca0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java @@ -44,13 +44,16 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableRequest; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse.INVALID_TIMESTAMP; + /** * Get request. Responsible for obtaining entry from primary node. 'Near' means 'Initiating node' here, not 'Near Cache'. */ -public class GridNearGetRequest extends GridCacheIdMessage implements GridCacheDeployable, +public class GridNearGetRequest extends GridCacheIdMessage implements GridCacheDeployable, TimeLoggableRequest, GridCacheVersionable { /** */ private static final long serialVersionUID = 0L; @@ -110,6 +113,13 @@ public class GridNearGetRequest extends GridCacheIdMessage implements GridCacheD /** Transaction label. */ private @Nullable String txLbl; + /** @see TimeLoggableRequest#sendTimestamp(). */ + private long sendTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableRequest#receiveTimestamp(). */ + @GridDirectTransient + private long receiveTimestamp = INVALID_TIMESTAMP; + /** * Empty constructor required for {@link Externalizable}. 
*/ @@ -346,6 +356,26 @@ public long accessTtl() { return addDepInfo; } + /** {@inheritDoc} */ + @Override public long sendTimestamp() { + return sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public void sendTimestamp(long sendTimestamp) { + this.sendTimestamp = sendTimestamp; + } + + /** {@inheritDoc} */ + @Override public long receiveTimestamp() { + return receiveTimestamp; + } + + /** {@inheritDoc} */ + @Override public void receiveTimestamp(long receiveTimestamp) { + this.receiveTimestamp = receiveTimestamp; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -404,30 +434,36 @@ public long accessTtl() { writer.incrementState(); case 11: - if (!writer.writeUuid("subjId", subjId)) + if (!writer.writeLong("sendTimestamp", sendTimestamp)) return false; writer.incrementState(); case 12: - if (!writer.writeInt("taskNameHash", taskNameHash)) + if (!writer.writeUuid("subjId", subjId)) return false; writer.incrementState(); case 13: - if (!writer.writeAffinityTopologyVersion("topVer", topVer)) + if (!writer.writeInt("taskNameHash", taskNameHash)) return false; writer.incrementState(); case 14: - if (!writer.writeString("txLbl", txLbl)) + if (!writer.writeAffinityTopologyVersion("topVer", topVer)) return false; writer.incrementState(); case 15: + if (!writer.writeString("txLbl", txLbl)) + return false; + + writer.incrementState(); + + case 16: if (!writer.writeMessage("ver", ver)) return false; @@ -506,7 +542,7 @@ public long accessTtl() { reader.incrementState(); case 11: - subjId = reader.readUuid("subjId"); + sendTimestamp = reader.readLong("sendTimestamp"); if (!reader.isLastRead()) return false; @@ -514,7 +550,7 @@ public long accessTtl() { reader.incrementState(); case 12: - taskNameHash = reader.readInt("taskNameHash"); + subjId = reader.readUuid("subjId"); if (!reader.isLastRead()) return false; @@ -522,7 +558,7 @@ public long accessTtl() { reader.incrementState(); case 13: - 
topVer = reader.readAffinityTopologyVersion("topVer"); + taskNameHash = reader.readInt("taskNameHash"); if (!reader.isLastRead()) return false; @@ -530,7 +566,7 @@ public long accessTtl() { reader.incrementState(); case 14: - txLbl = reader.readString("txLbl"); + topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) return false; @@ -538,6 +574,14 @@ public long accessTtl() { reader.incrementState(); case 15: + txLbl = reader.readString("txLbl"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 16: ver = reader.readMessage("ver"); if (!reader.isLastRead()) @@ -557,7 +601,7 @@ public long accessTtl() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 16; + return 17; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetResponse.java index 578c46b6ac34a..8a45c1d52342a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetResponse.java @@ -40,12 +40,14 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; import org.jetbrains.annotations.NotNull; /** * Get response. 
*/ -public class GridNearGetResponse extends GridCacheIdMessage implements GridCacheDeployable, +public class GridNearGetResponse extends GridCacheIdMessage implements GridCacheDeployable, ProcessingTimeLoggableResponse, GridCacheVersionable { /** */ private static final long serialVersionUID = 0L; @@ -79,6 +81,17 @@ public class GridNearGetResponse extends GridCacheIdMessage implements GridCache /** Serialized error. */ private byte[] errBytes; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). */ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Empty constructor required for {@link Externalizable}. */ @@ -213,6 +226,37 @@ public void error(IgniteCheckedException err) { return addDepInfo; } + + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -259,12 +303,18 @@ public void error(IgniteCheckedException err) { writer.incrementState(); case 9: - if (!writer.writeAffinityTopologyVersion("topVer", topVer)) + 
if (!writer.writeLong("reqTimeData", reqTimeData)) return false; writer.incrementState(); case 10: + if (!writer.writeAffinityTopologyVersion("topVer", topVer)) + return false; + + writer.incrementState(); + + case 11: if (!writer.writeMessage("ver", ver)) return false; @@ -327,7 +377,7 @@ public void error(IgniteCheckedException err) { reader.incrementState(); case 9: - topVer = reader.readAffinityTopologyVersion("topVer"); + reqTimeData = reader.readLong("reqTimeData"); if (!reader.isLastRead()) return false; @@ -335,6 +385,14 @@ public void error(IgniteCheckedException err) { reader.incrementState(); case 10: + topVer = reader.readAffinityTopologyVersion("topVer"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 11: ver = reader.readMessage("ver"); if (!reader.isLastRead()) @@ -354,7 +412,7 @@ public void error(IgniteCheckedException err) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 11; + return 12; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java index c648bfcced439..9620433055b7b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java @@ -819,7 +819,17 @@ void map() { if (topVer != null) { for (GridDhtTopologyFuture fut : cctx.shared().exchange().exchangeFutures()) { if (fut.exchangeDone() && fut.topologyVersion().equals(topVer)){ - Throwable err = fut.validateCache(cctx, recovery, read, null, keys); + Throwable err = null; + + // Before cache validation, make sure that this topology future is already completed. 
+ try { + fut.get(); + } + catch (IgniteCheckedException e) { + err = fut.error(); + } + + err = (err == null)? fut.validateCache(cctx, recovery, read, null, keys): err; if (err != null) { onDone(err); @@ -881,7 +891,7 @@ synchronized void mapOnTopology(final boolean remap) { if (remap) { if (tx != null) - tx.onRemap(topVer); + tx.onRemap(topVer, true); this.topVer = topVer; } @@ -898,6 +908,7 @@ synchronized void mapOnTopology(final boolean remap) { markInitialized(); } else { + // TODO FIXME https://ggsystems.atlassian.net/browse/GG-23288 fut.listen(new CI1>() { @Override public void apply(IgniteInternalFuture fut) { try { @@ -1631,143 +1642,163 @@ void onResult(GridNearLockResponse res) { if (res.clientRemapVersion() != null) { assert cctx.kernalContext().clientNode(); - IgniteInternalFuture affFut = - cctx.shared().exchange().affinityReadyFuture(res.clientRemapVersion()); + if (res.compatibleRemapVersion()) { + if (tx != null) { + tx.onRemap(res.clientRemapVersion(), false); - if (affFut != null && !affFut.isDone()) { - affFut.listen(new CI1>() { - @Override public void apply(IgniteInternalFuture fut) { - try { - fut.get(); + // Use remapped version for all subsequent mappings. 
+ synchronized (GridNearLockFuture.this) { + for (GridNearLockMapping mapping : mappings) { + GridNearLockRequest req = mapping.request(); - remap(); - } - catch (IgniteCheckedException e) { - onDone(e); - } - finally { - cctx.shared().txContextReset(); + assert req != null : mapping; + + req.topologyVersion(res.clientRemapVersion()); } } - }); + } } - else - remap(); - } - else { - int i = 0; + else { + IgniteInternalFuture affFut = + cctx.shared().exchange().affinityReadyFuture(res.clientRemapVersion()); - AffinityTopologyVersion topVer = GridNearLockFuture.this.topVer; + if (!affFut.isDone()) { + // TODO FIXME https://ggsystems.atlassian.net/browse/GG-23288 + affFut.listen(new CI1>() { + @Override public void apply(IgniteInternalFuture fut) { + try { + fut.get(); - for (KeyCacheObject k : keys) { - while (true) { - GridNearCacheEntry entry = cctx.near().entryExx(k, topVer); + remap(); + } + catch (IgniteCheckedException e) { + onDone(e); + } + finally { + cctx.shared().txContextReset(); + } + } + }); + } + else + remap(); - try { - if (res.dhtVersion(i) == null) { - onDone(new IgniteCheckedException("Failed to receive DHT version from remote node " + - "(will fail the lock): " + res)); + return; + } + } - return; - } + int i = 0; - IgniteBiTuple oldValTup = valMap.get(entry.key()); + AffinityTopologyVersion topVer = GridNearLockFuture.this.topVer; - CacheObject oldVal = entry.rawGet(); - boolean hasOldVal = false; - CacheObject newVal = res.value(i); + for (KeyCacheObject k : keys) { + while (true) { + GridNearCacheEntry entry = cctx.near().entryExx(k, topVer); - boolean readRecordable = false; + try { + if (res.dhtVersion(i) == null) { + onDone(new IgniteCheckedException("Failed to receive DHT version from remote node " + + "(will fail the lock): " + res)); - if (retval) { - readRecordable = cctx.events().isRecordable(EVT_CACHE_OBJECT_READ); + return; + } - if (readRecordable) - hasOldVal = entry.hasValue(); - } + IgniteBiTuple oldValTup = 
valMap.get(entry.key()); - GridCacheVersion dhtVer = res.dhtVersion(i); - GridCacheVersion mappedVer = res.mappedVersion(i); + CacheObject oldVal = entry.rawGet(); + boolean hasOldVal = false; + CacheObject newVal = res.value(i); - if (newVal == null) { - if (oldValTup != null) { - if (oldValTup.get1().equals(dhtVer)) - newVal = oldValTup.get2(); + boolean readRecordable = false; - oldVal = oldValTup.get2(); - } - } + if (retval) { + readRecordable = cctx.events().isRecordable(EVT_CACHE_OBJECT_READ); - // Lock is held at this point, so we can set the - // returned value if any. - entry.resetFromPrimary(newVal, lockVer, dhtVer, node.id(), topVer); + if (readRecordable) + hasOldVal = entry.hasValue(); + } - if (inTx()) { - tx.hasRemoteLocks(true); + GridCacheVersion dhtVer = res.dhtVersion(i); + GridCacheVersion mappedVer = res.mappedVersion(i); - if (implicitTx() && tx.onePhaseCommit()) { - boolean pass = res.filterResult(i); + if (newVal == null) { + if (oldValTup != null) { + if (oldValTup.get1().equals(dhtVer)) + newVal = oldValTup.get2(); - tx.entry(cctx.txKey(k)).filters(pass ? CU.empty0() : CU.alwaysFalse0Arr()); - } + oldVal = oldValTup.get2(); } + } - entry.readyNearLock(lockVer, - mappedVer, - res.committedVersions(), - res.rolledbackVersions(), - res.pending()); - - if (retval) { - if (readRecordable) - cctx.events().addEvent( - entry.partition(), - entry.key(), - tx, - null, - EVT_CACHE_OBJECT_READ, - newVal, - newVal != null, - oldVal, - hasOldVal, - CU.subjectId(tx, cctx.shared()), - null, - inTx() ? tx.resolveTaskName() : null, - keepBinary); - - if (cctx.statisticsEnabled()) - cctx.cache().metrics0().onRead(false); - } + // Lock is held at this point, so we can set the + // returned value if any. + entry.resetFromPrimary(newVal, lockVer, dhtVer, node.id(), topVer); - if (log.isDebugEnabled()) - log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']'); + if (inTx()) { + tx.hasRemoteLocks(true); - break; // Inner while loop. 
- } - catch (GridCacheEntryRemovedException ignored) { - if (log.isDebugEnabled()) - log.debug("Failed to add candidates because entry was removed (will renew)."); + if (implicitTx() && tx.onePhaseCommit()) { + boolean pass = res.filterResult(i); - synchronized (GridNearLockFuture.this) { - // Replace old entry with new one. - entries.set(i, - (GridDistributedCacheEntry)cctx.cache().entryEx(entry.key())); + tx.entry(cctx.txKey(k)).filters(pass ? CU.empty0() : CU.alwaysFalse0Arr()); } } + + entry.readyNearLock(lockVer, + mappedVer, + res.committedVersions(), + res.rolledbackVersions(), + res.pending()); + + if (retval) { + if (readRecordable) + cctx.events().addEvent( + entry.partition(), + entry.key(), + tx, + null, + EVT_CACHE_OBJECT_READ, + newVal, + newVal != null, + oldVal, + hasOldVal, + CU.subjectId(tx, cctx.shared()), + null, + inTx() ? tx.resolveTaskName() : null, + keepBinary); + + if (cctx.statisticsEnabled()) + cctx.cache().metrics0().onRead(false); + } + + if (log.isDebugEnabled()) + log.debug("Processed response for entry [res=" + res + ", entry=" + entry + ']'); + + break; // Inner while loop. } + catch (GridCacheEntryRemovedException ignored) { + if (log.isDebugEnabled()) + log.debug("Failed to add candidates because entry was removed (will renew)."); - i++; + synchronized (GridNearLockFuture.this) { + // Replace old entry with new one. 
+ entries.set(i, + (GridDistributedCacheEntry)cctx.cache().entryEx(entry.key())); + } + } } - try { - proceedMapping(); - } - catch (IgniteCheckedException e) { - onDone(e); - } + i++; + } - onDone(true); + try { + proceedMapping(); } + catch (IgniteCheckedException e) { + onDone(e); + } + + onDone(true); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockRequest.java index 8c2d0e706d09c..f074613d3380e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockRequest.java @@ -224,6 +224,13 @@ public boolean firstClientRequest() { return topVer; } + /** + * @param topVer Topology version. + */ + public void topologyVersion(AffinityTopologyVersion topVer) { + this.topVer = topVer; + } + /** * @return Subject ID. 
*/ @@ -376,61 +383,61 @@ public long accessTtl() { } switch (writer.state()) { - case 21: + case 22: if (!writer.writeLong("accessTtl", accessTtl)) return false; writer.incrementState(); - case 22: + case 23: if (!writer.writeLong("createTtl", createTtl)) return false; writer.incrementState(); - case 23: + case 24: if (!writer.writeObjectArray("dhtVers", dhtVers, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 24: + case 25: if (!writer.writeObjectArray("filter", filter, MessageCollectionItemType.MSG)) return false; writer.incrementState(); - case 25: + case 26: if (!writer.writeByte("flags", flags)) return false; writer.incrementState(); - case 26: + case 27: if (!writer.writeInt("miniId", miniId)) return false; writer.incrementState(); - case 27: + case 28: if (!writer.writeUuid("subjId", subjId)) return false; writer.incrementState(); - case 28: + case 29: if (!writer.writeInt("taskNameHash", taskNameHash)) return false; writer.incrementState(); - case 29: + case 30: if (!writer.writeAffinityTopologyVersion("topVer", topVer)) return false; writer.incrementState(); - case 30: + case 31: if (!writer.writeString("txLbl", txLbl)) return false; @@ -452,7 +459,7 @@ public long accessTtl() { return false; switch (reader.state()) { - case 21: + case 22: accessTtl = reader.readLong("accessTtl"); if (!reader.isLastRead()) @@ -460,7 +467,7 @@ public long accessTtl() { reader.incrementState(); - case 22: + case 23: createTtl = reader.readLong("createTtl"); if (!reader.isLastRead()) @@ -468,7 +475,7 @@ public long accessTtl() { reader.incrementState(); - case 23: + case 24: dhtVers = reader.readObjectArray("dhtVers", MessageCollectionItemType.MSG, GridCacheVersion.class); if (!reader.isLastRead()) @@ -476,7 +483,7 @@ public long accessTtl() { reader.incrementState(); - case 24: + case 25: filter = reader.readObjectArray("filter", MessageCollectionItemType.MSG, CacheEntryPredicate.class); if (!reader.isLastRead()) @@ -484,7 +491,7 @@ public long 
accessTtl() { reader.incrementState(); - case 25: + case 26: flags = reader.readByte("flags"); if (!reader.isLastRead()) @@ -492,7 +499,7 @@ public long accessTtl() { reader.incrementState(); - case 26: + case 27: miniId = reader.readInt("miniId"); if (!reader.isLastRead()) @@ -500,7 +507,7 @@ public long accessTtl() { reader.incrementState(); - case 27: + case 28: subjId = reader.readUuid("subjId"); if (!reader.isLastRead()) @@ -508,7 +515,7 @@ public long accessTtl() { reader.incrementState(); - case 28: + case 29: taskNameHash = reader.readInt("taskNameHash"); if (!reader.isLastRead()) @@ -516,7 +523,7 @@ public long accessTtl() { reader.incrementState(); - case 29: + case 30: topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) @@ -524,7 +531,7 @@ public long accessTtl() { reader.incrementState(); - case 30: + case 31: txLbl = reader.readString("txLbl"); if (!reader.isLastRead()) @@ -544,7 +551,7 @@ public long accessTtl() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 31; + return 32; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockResponse.java index b6c6d8c903c49..367e724d2baea 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockResponse.java @@ -22,6 +22,7 @@ import java.util.Collection; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.GridDirectCollection; +import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.CacheObject; import 
org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockResponse; @@ -32,12 +33,14 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; import org.jetbrains.annotations.Nullable; /** * Near cache lock response. */ -public class GridNearLockResponse extends GridDistributedLockResponse { +public class GridNearLockResponse extends GridDistributedLockResponse implements ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -60,9 +63,23 @@ public class GridNearLockResponse extends GridDistributedLockResponse { /** Filter evaluation results for fast-commit transactions. */ private boolean[] filterRes; - /** {@code True} if client node should remap lock request. */ + /** Set if client node should remap lock request. */ private AffinityTopologyVersion clientRemapVer; + /** {@code True} if remap version is compatible with current version. Used together with clientRemapVer. */ + private boolean compatibleRemapVer; + + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). */ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Empty constructor (required by {@link Externalizable}). */ @@ -78,8 +95,10 @@ public GridNearLockResponse() { * @param filterRes {@code True} if need to allocate array for filter evaluation results. * @param cnt Count. * @param err Error. 
- * @param clientRemapVer {@code True} if client node should remap lock request. + * @param clientRemapVer {@code True} if client node should remap lock request. If {@code compatibleRemapVer} is + * {@code true} when first request is not remapped, but all subsequent will use remap version. * @param addDepInfo Deployment info. + * @param compatibleRemapVer {@code True} if remap version is compatible with lock version. */ public GridNearLockResponse( int cacheId, @@ -90,7 +109,8 @@ public GridNearLockResponse( int cnt, Throwable err, AffinityTopologyVersion clientRemapVer, - boolean addDepInfo + boolean addDepInfo, + boolean compatibleRemapVer ) { super(cacheId, lockVer, futId, cnt, err, addDepInfo); @@ -104,6 +124,8 @@ public GridNearLockResponse( if (filterRes) this.filterRes = new boolean[cnt]; + + this.compatibleRemapVer = compatibleRemapVer; } /** @@ -113,6 +135,13 @@ public GridNearLockResponse( return clientRemapVer; } + /** + * @return {@code True} is remap version is compatible with current topology version. + */ + public boolean compatibleRemapVersion() { + return compatibleRemapVer; + } + /** * Gets pending versions that are less than {@link #version()}. 
* @@ -193,6 +222,37 @@ public void addValueBytes( addValue(val); } + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -215,35 +275,47 @@ public void addValueBytes( writer.incrementState(); case 12: - if (!writer.writeObjectArray("dhtVers", dhtVers, MessageCollectionItemType.MSG)) + if (!writer.writeBoolean("compatibleRemapVer", compatibleRemapVer)) return false; writer.incrementState(); case 13: - if (!writer.writeBooleanArray("filterRes", filterRes)) + if (!writer.writeObjectArray("dhtVers", dhtVers, MessageCollectionItemType.MSG)) return false; writer.incrementState(); case 14: - if (!writer.writeObjectArray("mappedVers", mappedVers, MessageCollectionItemType.MSG)) + if (!writer.writeBooleanArray("filterRes", filterRes)) return false; writer.incrementState(); case 15: - if (!writer.writeInt("miniId", miniId)) + if (!writer.writeObjectArray("mappedVers", mappedVers, MessageCollectionItemType.MSG)) return false; writer.incrementState(); case 16: + if (!writer.writeInt("miniId", miniId)) + return false; + + writer.incrementState(); + + case 17: if (!writer.writeCollection("pending", pending, MessageCollectionItemType.MSG)) return false; writer.incrementState(); + case 18: + if 
(!writer.writeLong("reqTimeData", reqTimeData)) + return false; + + writer.incrementState(); + } return true; @@ -269,7 +341,7 @@ public void addValueBytes( reader.incrementState(); case 12: - dhtVers = reader.readObjectArray("dhtVers", MessageCollectionItemType.MSG, GridCacheVersion.class); + compatibleRemapVer = reader.readBoolean("compatibleRemapVer"); if (!reader.isLastRead()) return false; @@ -277,7 +349,7 @@ public void addValueBytes( reader.incrementState(); case 13: - filterRes = reader.readBooleanArray("filterRes"); + dhtVers = reader.readObjectArray("dhtVers", MessageCollectionItemType.MSG, GridCacheVersion.class); if (!reader.isLastRead()) return false; @@ -285,7 +357,7 @@ public void addValueBytes( reader.incrementState(); case 14: - mappedVers = reader.readObjectArray("mappedVers", MessageCollectionItemType.MSG, GridCacheVersion.class); + filterRes = reader.readBooleanArray("filterRes"); if (!reader.isLastRead()) return false; @@ -293,7 +365,7 @@ public void addValueBytes( reader.incrementState(); case 15: - miniId = reader.readInt("miniId"); + mappedVers = reader.readObjectArray("mappedVers", MessageCollectionItemType.MSG, GridCacheVersion.class); if (!reader.isLastRead()) return false; @@ -301,6 +373,14 @@ public void addValueBytes( reader.incrementState(); case 16: + miniId = reader.readInt("miniId"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 17: pending = reader.readCollection("pending", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -308,6 +388,14 @@ public void addValueBytes( reader.incrementState(); + case 18: + reqTimeData = reader.readLong("reqTimeData"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + } return reader.afterMessageRead(GridNearLockResponse.class); @@ -320,7 +408,7 @@ public void addValueBytes( /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 17; + return 19; } /** {@inheritDoc} */ diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java index 7e85e0592eaf8..ba76636815be1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java @@ -42,6 +42,7 @@ import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException; +import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -52,6 +53,7 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.P1; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiInClosure; @@ -963,6 +965,14 @@ void onResult(final GridNearTxPrepareResponse res, boolean updateMapping) { * @param res Response. 
*/ private void remap(final GridNearTxPrepareResponse res) { + if (parent.tx.isRollbackOnly()) { + onDone(new IgniteTxRollbackCheckedException( + "Failed to prepare the transaction, due to the transaction is marked as rolled back " + + "[tx=" + CU.txString(parent.tx) + ']')); + + return; + } + parent.prepareOnTopology(true, new Runnable() { @Override public void run() { onDone(res); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java index 8e10ad3ac5cd1..df375348bdfba 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java @@ -63,7 +63,6 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiClosure; -import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.transactions.TransactionDeadlockException; import org.apache.ignite.transactions.TransactionTimeoutException; import org.jetbrains.annotations.Nullable; @@ -161,30 +160,8 @@ void onError(Throwable e, boolean discoThread) { } } - if (ERR_UPD.compareAndSet(this, null, e)) { - boolean marked = tx.setRollbackOnly(); - - if (e instanceof IgniteTxRollbackCheckedException) { - if (marked) { - tx.rollbackAsync().listen(new IgniteInClosure>() { - @Override public void apply(IgniteInternalFuture fut) { - try { - fut.get(); - } - catch (IgniteCheckedException e) { - U.error(log, "Failed to automatically rollback transaction: " + tx, e); - } - - onComplete(); - } - }); - - return; - } - } - + if (ERR_UPD.compareAndSet(this, null, e)) onComplete(); - } } /** {@inheritDoc} */ @@ 
-543,7 +520,7 @@ private void proceedPrepare(GridDistributedTxMapping m, @Nullable final Queue TIME_FORMAT = + ThreadLocal.withInitial(() -> new SimpleDateFormat("HH:mm:ss.SSS")); + /** Prepare future updater. */ private static final AtomicReferenceFieldUpdater PREP_FUT_UPD = AtomicReferenceFieldUpdater.newUpdater(GridNearTxLocal.class, IgniteInternalFuture.class, "prepFut"); @@ -168,6 +182,40 @@ public class GridNearTxLocal extends GridDhtTxLocalAdapter implements GridTimeou /** */ private boolean trackTimeout; + /** + * Counts how much time this transaction has spent on system calls, in nanoseconds. + */ + private final AtomicLong systemTime = new AtomicLong(0); + + /** + * Stores the nano time value when current system time has started, or 0 if no system section + * is running currently. + */ + private final AtomicLong systemStartTime = new AtomicLong(0); + + /** + * Stores the nano time value when prepare step has started, or 0 if no prepare step + * has started yet. + */ + private final AtomicLong prepareStartTime = new AtomicLong(0); + + /** + * Stores prepare step duration, or 0 if it has not finished yet. + */ + private final AtomicLong prepareTime = new AtomicLong(0); + + /** + * Stores the nano time value when commit or rollback step has started, or 0 if it + * has not started yet. + */ + private final AtomicLong commitOrRollbackStartTime = new AtomicLong(0); + + /** Stores commit or rollback step duration, or 0 if it has not finished yet. */ + private final AtomicLong commitOrRollbackTime = new AtomicLong(0); + + /** */ + private IgniteTxManager.TxDumpsThrottling txDumpsThrottling; + /** */ @GridToStringExclude private TransactionProxyImpl proxy; @@ -200,6 +248,7 @@ public GridNearTxLocal() { * @param subjId Subject ID. * @param taskNameHash Task name hash code. * @param lb Label. + * @param txDumpsThrottling Log throttling information. 
*/ public GridNearTxLocal( GridCacheSharedContext ctx, @@ -214,7 +263,8 @@ public GridNearTxLocal( int txSize, @Nullable UUID subjId, int taskNameHash, - @Nullable String lb + @Nullable String lb, + IgniteTxManager.TxDumpsThrottling txDumpsThrottling ) { super( ctx, @@ -237,6 +287,8 @@ public GridNearTxLocal( mappings = implicitSingle ? new IgniteTxMappingsSingleImpl() : new IgniteTxMappingsImpl(); + this.txDumpsThrottling = txDumpsThrottling; + initResult(); trackTimeout = timeout() > 0 && !implicit() && cctx.time().addTimeoutObject(this); @@ -3235,10 +3287,131 @@ private void readyNearLock(IgniteTxEntry txEntry, return true; } + /** + * Returns current amount of time that transaction has spent on system activities (acquiring locks, commiting, + * rolling back, etc.) + * + * @return Amount of time in milliseconds. + */ + public long systemTimeCurrent() { + long systemTime0 = systemTime.get(); + + long systemStartTime0 = systemStartTime.get(); + + long t = systemStartTime0 == 0 ? 0 : (System.nanoTime() - systemStartTime0); + + return U.nanosToMillis(systemTime0 + t); + } + + /** {@inheritDoc} */ + @Override public boolean state(TransactionState state) { + boolean res = super.state(state); + + if (state == COMMITTED || state == ROLLED_BACK) { + leaveSystemSection(); + + // If commitOrRollbackTime != 0 it means that we already have written metrics and dumped it in log at least once. + if (!commitOrRollbackTime.compareAndSet(0, System.nanoTime() - commitOrRollbackStartTime.get())) + return res; + + long systemTimeMillis = U.nanosToMillis(this.systemTime.get()); + long totalTimeMillis = System.currentTimeMillis() - startTime(); + + // In some cases totalTimeMillis can be less than systemTimeMillis, as they are calculated with different precision. 
+ long userTimeMillis = Math.max(totalTimeMillis - systemTimeMillis, 0); + + writeTxMetrics(systemTimeMillis, userTimeMillis); + + boolean willBeSkipped = txDumpsThrottling == null || txDumpsThrottling.skipCurrent(); + + if (!willBeSkipped) { + long transactionTimeDumpThreshold = cctx.tm().longTransactionTimeDumpThreshold(); + + double transactionTimeDumpSamplesCoefficient = cctx.tm().transactionTimeDumpSamplesCoefficient(); + + boolean isLong = transactionTimeDumpThreshold > 0 && totalTimeMillis > transactionTimeDumpThreshold; + + boolean randomlyChosen = transactionTimeDumpSamplesCoefficient > 0.0 + && ThreadLocalRandom.current().nextDouble() <= transactionTimeDumpSamplesCoefficient; + + if (randomlyChosen || isLong) { + String txDump = completedTransactionDump(state, systemTimeMillis, userTimeMillis, isLong); + + if (isLong) + log.warning(txDump); + else + log.info(txDump); + + txDumpsThrottling.dump(); + } + } + else if (txDumpsThrottling != null) + txDumpsThrottling.skip(); + } + + return res; + } + + /** + * Builds dump string for completed transaction. + * + * @param state Transaction state. + * @param systemTimeMillis System time in milliseconds. + * @param userTimeMillis User time in milliseconds. + * @param isLong Whether the dumped transaction is long running or not. + * @return Dump string. + */ + private String completedTransactionDump( + TransactionState state, + long systemTimeMillis, + long userTimeMillis, + boolean isLong + ) { + long cacheOperationsTimeMillis = + U.nanosToMillis(systemTime.get() - prepareTime.get() - commitOrRollbackTime.get()); + + GridStringBuilder warning = new GridStringBuilder(isLong ? 
"Long transaction time dump " : "Transaction time dump ") + .a("[startTime=") + .a(TIME_FORMAT.get().format(new Date(startTime))) + .a(", totalTime=") + .a(systemTimeMillis + userTimeMillis) + .a(", systemTime=") + .a(systemTimeMillis) + .a(", userTime=") + .a(userTimeMillis) + .a(", cacheOperationsTime=") + .a(cacheOperationsTimeMillis); + + if (state == COMMITTED) { + warning + .a(", prepareTime=") + .a(timeMillis(prepareTime)) + .a(", commitTime=") + .a(timeMillis(commitOrRollbackTime)); + } + else { + warning + .a(", rollbackTime=") + .a(timeMillis(commitOrRollbackTime)); + } + + warning + .a(", tx=") + .a(this) + .a("]"); + + return warning.toString(); + } + /** * @return Tx prepare future. */ public IgniteInternalFuture prepareNearTxLocal() { + enterSystemSection(); + + // We assume that prepare start time should be set only once for the transaction. + prepareStartTime.compareAndSet(0, System.nanoTime()); + GridNearTxPrepareFutureAdapter fut = (GridNearTxPrepareFutureAdapter)prepFut; if (fut == null) { @@ -3352,6 +3525,11 @@ public IgniteInternalFuture commitNearTxLocalAsync() { prepareFut.listen(new CI1>() { @Override public void apply(IgniteInternalFuture f) { + // These values should not be changed after set once. + prepareTime.compareAndSet(0, System.nanoTime() - prepareStartTime.get()); + + commitOrRollbackStartTime.compareAndSet(0, System.nanoTime()); + try { // Make sure that here are no exceptions. f.get(); @@ -3410,6 +3588,11 @@ public IgniteInternalFuture rollbackNearTxLocalAsync(final boo if (log.isDebugEnabled()) log.debug("Rolling back near tx: " + this); + enterSystemSection(); + + // This value should not be changed after set once. + commitOrRollbackStartTime.compareAndSet(0, System.nanoTime()); + if (!onTimeout && trackTimeout) removeTimeoutHandler(); @@ -3974,18 +4157,21 @@ public void close(boolean clearThreadMap) throws IgniteCheckedException { /** * @param topVer New topology version. 
+ * @param reset {@code True} if need to reset tx state. */ - public void onRemap(AffinityTopologyVersion topVer) { + public void onRemap(AffinityTopologyVersion topVer, boolean reset) { assert cctx.kernalContext().clientNode(); - mapped = false; - nearLocallyMapped = false; - colocatedLocallyMapped = false; - txNodes = null; - onePhaseCommit = false; - nearMap.clear(); - dhtMap.clear(); - mappings.clear(); + if (reset) { + mapped = false; + nearLocallyMapped = false; + colocatedLocallyMapped = false; + txNodes = null; + onePhaseCommit = false; + nearMap.clear(); + dhtMap.clear(); + mappings.clear(); + } synchronized (this) { this.topVer = topVer; @@ -4331,6 +4517,44 @@ public boolean addTimeoutHandler() { } } + /** */ + private long timeMillis(AtomicLong atomicNanoTime) { + return U.nanosToMillis(atomicNanoTime.get()); + } + + /** + * Enters the section when system time for this transaction is counted. + */ + public void enterSystemSection() { + // Setting systemStartTime only if it equals 0, otherwise it means that we are already in system section + // and should do nothing. + systemStartTime.compareAndSet(0, System.nanoTime()); + } + + /** + * Leaves the section when system time for this transaction is counted. + */ + public void leaveSystemSection() { + long systemStartTime0 = systemStartTime.getAndSet(0); + + if (systemStartTime0 > 0) + systemTime.addAndGet(System.nanoTime() - systemStartTime0); + } + + /** + * Writes system and user time metrics. + * + * @param systemTime System time. + * @param userTime User time. + */ + private void writeTxMetrics(long systemTime, long userTime) { + if (systemTime >= 0) + cctx.txMetrics().writeTxSystemTime(systemTime); + + if (userTime >= 0) + cctx.txMetrics().writeTxUserTime(userTime); + } + /** * Post-lock closure. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java index 87546aa2174c6..6c3b95d679d2a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java @@ -315,43 +315,43 @@ private boolean isFlag(int mask) { } switch (writer.state()) { - case 21: + case 22: if (!writer.writeByte("flags", flags)) return false; writer.incrementState(); - case 22: + case 23: if (!writer.writeIgniteUuid("futId", futId)) return false; writer.incrementState(); - case 23: + case 24: if (!writer.writeInt("miniId", miniId)) return false; writer.incrementState(); - case 24: + case 25: if (!writer.writeUuid("subjId", subjId)) return false; writer.incrementState(); - case 25: + case 26: if (!writer.writeInt("taskNameHash", taskNameHash)) return false; writer.incrementState(); - case 26: + case 27: if (!writer.writeAffinityTopologyVersion("topVer", topVer)) return false; writer.incrementState(); - case 27: + case 28: if (!writer.writeString("txLbl", txLbl)) return false; @@ -373,7 +373,7 @@ private boolean isFlag(int mask) { return false; switch (reader.state()) { - case 21: + case 22: flags = reader.readByte("flags"); if (!reader.isLastRead()) @@ -381,7 +381,7 @@ private boolean isFlag(int mask) { reader.incrementState(); - case 22: + case 23: futId = reader.readIgniteUuid("futId"); if (!reader.isLastRead()) @@ -389,7 +389,7 @@ private boolean isFlag(int mask) { reader.incrementState(); - case 23: + case 24: miniId = reader.readInt("miniId"); if (!reader.isLastRead()) @@ -397,7 +397,7 @@ private boolean isFlag(int mask) { reader.incrementState(); - case 24: + case 25: subjId = reader.readUuid("subjId"); if 
(!reader.isLastRead()) @@ -405,7 +405,7 @@ private boolean isFlag(int mask) { reader.incrementState(); - case 25: + case 26: taskNameHash = reader.readInt("taskNameHash"); if (!reader.isLastRead()) @@ -413,7 +413,7 @@ private boolean isFlag(int mask) { reader.incrementState(); - case 26: + case 27: topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) @@ -421,7 +421,7 @@ private boolean isFlag(int mask) { reader.incrementState(); - case 27: + case 28: txLbl = reader.readString("txLbl"); if (!reader.isLastRead()) @@ -441,7 +441,7 @@ private boolean isFlag(int mask) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 28; + return 29; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareResponse.java index 5decd8613e37c..96b64230c0db4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareResponse.java @@ -43,12 +43,14 @@ import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.apache.ignite.plugin.extensions.communication.ProcessingTimeLoggableResponse; +import org.apache.ignite.plugin.extensions.communication.TimeLoggableResponse; import org.jetbrains.annotations.Nullable; /** * Near cache prepare response. 
*/ -public class GridNearTxPrepareResponse extends GridDistributedTxPrepareResponse { +public class GridNearTxPrepareResponse extends GridDistributedTxPrepareResponse implements ProcessingTimeLoggableResponse { /** */ private static final long serialVersionUID = 0L; @@ -97,6 +99,17 @@ public class GridNearTxPrepareResponse extends GridDistributedTxPrepareResponse /** Not {@code null} if client node should remap transaction. */ private AffinityTopologyVersion clientRemapVer; + /** @see ProcessingTimeLoggableResponse#reqSentTimestamp(). */ + @GridDirectTransient + private long reqSentTimestamp = INVALID_TIMESTAMP; + + /** @see ProcessingTimeLoggableResponse#reqReceivedTimestamp(). */ + @GridDirectTransient + private long reqReceivedTimestamp = INVALID_TIMESTAMP; + + /** @see TimeLoggableResponse#reqTimeData(). */ + private long reqTimeData = INVALID_TIMESTAMP; + /** * Empty constructor required by {@link Externalizable}. */ @@ -343,6 +356,37 @@ public boolean hasOwnedValue(IgniteTxKey key) { } } + /** {@inheritDoc} */ + @Override public void reqSentTimestamp(long reqSentTimestamp) { + this.reqSentTimestamp = reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqSentTimestamp() { + return reqSentTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqReceivedTimestamp(long reqReceivedTimestamp) { + this.reqReceivedTimestamp = reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public long reqReceivedTimestamp() { + return reqReceivedTimestamp; + } + + /** {@inheritDoc} */ + @Override public void reqTimeData(long reqTimeData) { + this.reqTimeData = reqTimeData; + } + + /** {@inheritDoc} */ + @Override public long reqTimeData() { + return reqTimeData; + } + + /** {@inheritDoc} */ @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { writer.setBuffer(buf); @@ -407,12 +451,18 @@ public boolean hasOwnedValue(IgniteTxKey key) { writer.incrementState(); case 19: - if (!writer.writeMessage("retVal", retVal)) + if 
(!writer.writeLong("reqTimeData", reqTimeData)) return false; writer.incrementState(); case 20: + if (!writer.writeMessage("retVal", retVal)) + return false; + + writer.incrementState(); + + case 21: if (!writer.writeMessage("writeVer", writeVer)) return false; @@ -499,7 +549,7 @@ public boolean hasOwnedValue(IgniteTxKey key) { reader.incrementState(); case 19: - retVal = reader.readMessage("retVal"); + reqTimeData = reader.readLong("reqTimeData"); if (!reader.isLastRead()) return false; @@ -507,6 +557,14 @@ public boolean hasOwnedValue(IgniteTxKey key) { reader.incrementState(); case 20: + retVal = reader.readMessage("retVal"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 21: writeVer = reader.readMessage("writeVer"); if (!reader.isLastRead()) @@ -526,7 +584,7 @@ public boolean hasOwnedValue(IgniteTxKey key) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 21; + return 22; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java index 33a52a11a03d1..f2a4b30c4af82 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java @@ -60,10 +60,9 @@ public void replicate(KeyCacheObject key, * * @param topVer Topology version. * @param left {@code True} if exchange has been caused by node leave. - * @param activate {@code True} if exchange has been caused by cluster activation. * @throws IgniteCheckedException If failed. */ - public void onExchange(AffinityTopologyVersion topVer, boolean left, boolean activate) throws IgniteCheckedException; + public void onExchange(AffinityTopologyVersion topVer, boolean left) throws IgniteCheckedException; /** * @return {@code True} is DR is enabled. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java index 425e79c536344..f3c1b23f7c7d6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java @@ -78,7 +78,7 @@ public class GridOsCacheDrManager implements GridCacheDrManager { } /** {@inheritDoc} */ - @Override public void onExchange(AffinityTopologyVersion topVer, boolean left, boolean activate) throws IgniteCheckedException { + @Override public void onExchange(AffinityTopologyVersion topVer, boolean left) throws IgniteCheckedException { // No-op. } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCacheEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCacheEntry.java index e26174a2adef5..cbcb4b8e54082 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCacheEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCacheEntry.java @@ -199,8 +199,12 @@ void readyLocal(GridCacheMvccCandidate cand) { /** * Rechecks if lock should be reassigned. + * + * @param ver Thread chain version. + * + * @return {@code True} if thread chain processing must be stopped. */ - public void recheck() { + public boolean recheck(GridCacheVersion ver) { CacheObject val; CacheLockCandidates prev = null; CacheLockCandidates owner = null; @@ -225,7 +229,9 @@ public void recheck() { unlockEntry(); } - checkOwnerChanged(prev, owner, val); + checkOwnerChanged(prev, owner, val, true); + + return owner == null || !owner.hasCandidate(ver); // Will return false if locked by thread chain version. 
} /** {@inheritDoc} */ @@ -246,12 +252,9 @@ public void recheck() { GridLocalCacheEntry e = (GridLocalCacheEntry)cctx0.cache().peekEx(cand.parent().key()); - // At this point candidate may have been removed and entry destroyed, - // so we check for null. - if (e != null) - e.recheck(); - - break; + // At this point candidate may have been removed and entry destroyed, so we check for null. + if (e == null || e.recheck(owner.version())) + break; } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java index 930f8f96d4db3..276e84608af50 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataStructure.java @@ -49,6 +49,9 @@ public abstract class DataStructure { /** */ protected final int grpId; + /** */ + protected final String grpName; + /** */ protected final PageMemory pageMem; @@ -62,19 +65,22 @@ public abstract class DataStructure { protected ReuseList reuseList; /** - * @param cacheId Cache group ID. + * @param cacheGrpId Cache group ID. + * @param grpName Cache group name. * @param pageMem Page memory. * @param wal Write ahead log manager. */ public DataStructure( - int cacheId, + int cacheGrpId, + String grpName, PageMemory pageMem, IgniteWriteAheadLogManager wal, PageLockListener lockLsnr ) { assert pageMem != null; - this.grpId = cacheId; + this.grpId = cacheGrpId; + this.grpName = grpName; this.pageMem = pageMem; this.wal = wal; this.lockLsnr = lockLsnr == null ? 
NOOP_LSNR : lockLsnr; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index a566ae90b53eb..6bd901dffcf6c 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.cache.persistence; +import javax.management.ObjectName; import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; @@ -65,7 +66,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; -import javax.management.ObjectName; import org.apache.ignite.DataRegionMetricsProvider; import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.IgniteCheckedException; @@ -94,7 +94,6 @@ import org.apache.ignite.internal.mem.DirectMemoryRegion; import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageIdAllocator; -import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; @@ -144,17 +143,19 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO; import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException; +import org.apache.ignite.internal.processors.port.GridPortProcessor; import org.apache.ignite.internal.processors.port.GridPortRecord; import 
org.apache.ignite.internal.processors.query.GridQueryProcessor; import org.apache.ignite.internal.stat.IoStatisticsHolderNoOp; import org.apache.ignite.internal.util.GridConcurrentHashSet; -import org.apache.ignite.internal.util.GridCountDownCallback; import org.apache.ignite.internal.util.GridMultiCollectionWrapper; import org.apache.ignite.internal.util.GridReadOnlyArrayView; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.StripedExecutor; +import org.apache.ignite.internal.util.TimeBag; import org.apache.ignite.internal.util.future.CountDownFuture; import org.apache.ignite.internal.util.future.GridCompoundFuture; +import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.lang.GridInClosure3X; import org.apache.ignite.internal.util.lang.GridTuple3; @@ -171,6 +172,7 @@ import org.apache.ignite.internal.util.worker.GridWorker; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteFuture; +import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.lang.IgniteOutClosure; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.mxbean.DataStorageMetricsMXBean; @@ -186,6 +188,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_PDS_SKIP_CRC; import static org.apache.ignite.IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD; import static org.apache.ignite.IgniteSystemProperties.IGNITE_RECOVERY_SEMAPHORE_PERMITS; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_RECOVERY_VERBOSE_LOGGING; import static org.apache.ignite.IgniteSystemProperties.getBoolean; import static org.apache.ignite.IgniteSystemProperties.getInteger; import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR; @@ -195,6 +198,14 @@ import static org.apache.ignite.internal.pagemem.PageIdUtils.partId; import static 
org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.CHECKPOINT_RECORD; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.fromOrdinal; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.CheckpointProgress.State.FINISHED; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.CheckpointProgress.State.LOCK_RELEASED; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.CheckpointProgress.State.LOCK_TAKEN; +import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.CheckpointProgress.State.MARKER_STORED_TO_DISK; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.TMP_FILE_MATCHER; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.getType; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.getVersion; +import static org.apache.ignite.internal.util.IgniteUtils.hexLong; /** * @@ -207,6 +218,9 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** */ public static final String IGNITE_PDS_SKIP_CHECKPOINT_ON_NODE_STOP = "IGNITE_PDS_SKIP_CHECKPOINT_ON_NODE_STOP"; + /** Log read lock holders. */ + public static final String IGNITE_PDS_LOG_CP_READ_LOCK_HOLDERS = "IGNITE_PDS_LOG_CP_READ_LOCK_HOLDERS"; + /** MemoryPolicyConfiguration name reserved for meta store. 
*/ private static final String METASTORE_DATA_REGION_NAME = "metastoreMemPlc"; @@ -235,6 +249,9 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** */ private final boolean skipCheckpointOnNodeStop = getBoolean(IGNITE_PDS_SKIP_CHECKPOINT_ON_NODE_STOP, false); + /** */ + private final boolean logReadLockHolders = getBoolean(IGNITE_PDS_LOG_CP_READ_LOCK_HOLDERS); + /** * Starting from this number of dirty pages in checkpoint, array will be sorted with * {@link Arrays#parallelSort(Comparable[])} in case of {@link CheckpointWriteOrder#SEQUENTIAL}. @@ -300,7 +317,7 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** For testing only. */ private volatile GridFutureAdapter enableChangeApplied; - /** */ + /** Checkpont lock. */ ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock(); /** */ @@ -404,6 +421,8 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** Timeout for checkpoint read lock acquisition in milliseconds. */ private volatile long checkpointReadLockTimeout; + /** Flag allows to log additional information about partitions during recovery phases. */ + private final boolean recoveryVerboseLogging = getBoolean(IGNITE_RECOVERY_VERBOSE_LOGGING, false); /** Pointer to a memory recovery record that should be included into the next checkpoint record. 
*/ private volatile WALPointer memoryRecoveryRecordPtr; @@ -538,6 +557,9 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi final GridKernalContext kernalCtx = cctx.kernalContext(); + if (logReadLockHolders) + checkpointLock = new U.ReentrantReadWriteLockTracer(checkpointLock, kernalCtx, 5_000); + if (!kernalCtx.clientNode()) { checkpointer = new Checkpointer(cctx.igniteInstanceName(), "db-checkpoint-thread", log); @@ -574,10 +596,7 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi */ public void cleanupTempCheckpointDirectory() throws IgniteCheckedException { try { - try (DirectoryStream files = Files.newDirectoryStream( - cpDir.toPath(), - path -> path.endsWith(FilePageStoreManager.TMP_SUFFIX)) - ) { + try (DirectoryStream files = Files.newDirectoryStream(cpDir.toPath(), TMP_FILE_MATCHER::matches)) { for (Path path : files) Files.delete(path); } @@ -976,7 +995,7 @@ private void finishRecovery() throws IgniteCheckedException { long time = System.currentTimeMillis(); - checkpointReadLock(); + CHECKPOINT_LOCK_HOLD_COUNT.set(CHECKPOINT_LOCK_HOLD_COUNT.get() + 1); try { for (DatabaseLifecycleListener lsnr : getDatabaseListeners(cctx.kernalContext())) @@ -1008,7 +1027,7 @@ private void finishRecovery() throws IgniteCheckedException { throw e; } finally { - checkpointReadUnlock(); + CHECKPOINT_LOCK_HOLD_COUNT.set(CHECKPOINT_LOCK_HOLD_COUNT.get() - 1); } } @@ -1457,8 +1476,7 @@ private void shutdownCheckpointer(boolean cancel) { checkpointer = null; - cp.scheduledCp.cpFinishFut.onDone( - new NodeStoppingException("Checkpointer is stopped during node stop.")); + cp.scheduledCp.fail(new NodeStoppingException("Checkpointer is stopped during node stop.")); break; } @@ -1579,8 +1597,8 @@ private void prepareIndexRebuildFuture(int cacheId) { CacheConfiguration ccfg = cacheCtx.config(); - if (ccfg != null) { - log().info("Finished indexes rebuilding for cache [name=" + ccfg.getName() + if (ccfg != null && 
log.isInfoEnabled()) { + log.info("Finished indexes rebuilding for cache [name=" + ccfg.getName() + ", grpName=" + ccfg.getGroupName() + ']'); } } @@ -1708,7 +1726,7 @@ private void prepareIndexRebuildFuture(int cacheId) { throw new IgniteException(new NodeStoppingException("Failed to perform cache update: node is stopping.")); } - if (checkpointLock.getReadHoldCount() > 1 || safeToUpdatePageMemories()) + if (checkpointLock.getReadHoldCount() > 1 || safeToUpdatePageMemories() || checkpointerThread == null) break; else { checkpointLock.readLock().unlock(); @@ -1717,7 +1735,8 @@ private void prepareIndexRebuildFuture(int cacheId) { failCheckpointReadLock(); try { - checkpointer.wakeupForCheckpoint(0, "too many dirty pages").cpBeginFut + checkpointer.wakeupForCheckpoint(0, "too many dirty pages") + .futureFor(LOCK_RELEASED) .getUninterruptibly(); } catch (IgniteFutureTimeoutCheckedException e) { @@ -1800,25 +1819,6 @@ private boolean safeToUpdatePageMemories() { checkpointLock.readLock().unlock(); - if (checkpointer != null) { - Collection dataRegs = context().database().dataRegions(); - - if (dataRegs != null) { - for (DataRegion dataReg : dataRegs) { - if (!dataReg.config().isPersistenceEnabled()) - continue; - - PageMemoryEx mem = (PageMemoryEx)dataReg.pageMemory(); - - if (mem != null && !mem.safeToUpdate()) { - checkpointer.wakeupForCheckpoint(0, "too many dirty pages"); - - break; - } - } - } - } - if (ASSERTION_ENABLED) CHECKPOINT_LOCK_HOLD_COUNT.set(CHECKPOINT_LOCK_HOLD_COUNT.get() - 1); } @@ -1963,36 +1963,24 @@ private Map> partitionsApplicableForWalRebalance() { Checkpointer cp = checkpointer; if (cp != null) - return cp.wakeupForCheckpoint(0, reason).cpBeginFut; + return cp.wakeupForCheckpoint(0, reason).futureFor(LOCK_RELEASED); return null; } /** {@inheritDoc} */ - @Override public void waitForCheckpoint(String reason) throws IgniteCheckedException { + @Override public void waitForCheckpoint(String reason, IgniteInClosure> lsnr) + throws 
IgniteCheckedException { Checkpointer cp = checkpointer; if (cp == null) return; - CheckpointProgressSnapshot progSnapshot = cp.wakeupForCheckpoint(0, reason); - - IgniteInternalFuture fut1 = progSnapshot.cpFinishFut; - - fut1.get(); - - if (!progSnapshot.started) - return; - - IgniteInternalFuture fut2 = cp.wakeupForCheckpoint(0, reason).cpFinishFut; - - assert fut1 != fut2; - - fut2.get(); + cp.wakeupForCheckpoint(0, reason, lsnr).futureFor(FINISHED).get(); } /** {@inheritDoc} */ - @Override public CheckpointFuture forceCheckpoint(String reason) { + @Override public CheckpointProgress forceCheckpoint(String reason) { Checkpointer cp = checkpointer; if (cp == null) @@ -2110,7 +2098,7 @@ private WALPointer readPointer(File cpMarkerFile, ByteBuffer buf) throws IgniteC } /** {@inheritDoc} */ - @Override public void startMemoryRestore(GridKernalContext kctx) throws IgniteCheckedException { + @Override public void startMemoryRestore(GridKernalContext kctx, TimeBag startTimer) throws IgniteCheckedException { if (kctx.clientNode()) return; @@ -2120,6 +2108,8 @@ private WALPointer readPointer(File cpMarkerFile, ByteBuffer buf) throws IgniteC // Preform early regions startup before restoring state. initAndStartRegions(kctx.config().getDataStorageConfiguration()); + startTimer.finishGlobalStage("Init and start regions"); + for (DatabaseLifecycleListener lsnr : getDatabaseListeners(kctx)) lsnr.beforeBinaryMemoryRestore(this); @@ -2129,18 +2119,20 @@ private WALPointer readPointer(File cpMarkerFile, ByteBuffer buf) throws IgniteC // Restore binary memory for all not WAL disabled cache groups. 
WALPointer restored = restoreBinaryMemory( - g -> !initiallyGlobalWalDisabledGrps.contains(g) && !initiallyLocalWalDisabledGrps.contains(g) + g -> !initiallyGlobalWalDisabledGrps.contains(g) && !initiallyLocalWalDisabledGrps.contains(g) ); if (restored != null) U.log(log, "Binary memory state restored at node startup [restoredPtr=" + restored + ']'); - if (log.isInfoEnabled()) { + if (recoveryVerboseLogging && log.isInfoEnabled()) { log.info("Partition states information after BINARY RECOVERY phase:"); dumpPartitionsInfo(cctx, log); } + startTimer.finishGlobalStage("Restore binary memory"); + for (DatabaseLifecycleListener lsnr : getDatabaseListeners(kctx)) lsnr.afterBinaryMemoryRestore(this); @@ -2157,18 +2149,20 @@ private WALPointer readPointer(File cpMarkerFile, ByteBuffer buf) throws IgniteC CheckpointStatus status = readCheckpointStatus(); RestoreLogicalState logicalState = applyLogicalUpdates( - status, - g -> !initiallyGlobalWalDisabledGrps.contains(g) && !initiallyLocalWalDisabledGrps.contains(g), - true + status, + g -> !initiallyGlobalWalDisabledGrps.contains(g) && !initiallyLocalWalDisabledGrps.contains(g), + true ); - if (log.isInfoEnabled()) { + if (recoveryVerboseLogging && log.isInfoEnabled()) { log.info("Partition states information after LOGICAL RECOVERY phase:"); dumpPartitionsInfo(cctx, log); } - walTail = tailPointer(logicalState.lastRead); + startTimer.finishGlobalStage("Restore logical state"); + + walTail = tailPointer(logicalState); cctx.wal().onDeActivate(kctx); } @@ -2185,25 +2179,33 @@ private WALPointer readPointer(File cpMarkerFile, ByteBuffer buf) throws IgniteC /** * Calculates tail pointer for WAL at the end of logical recovery. * - * @param from Start replay WAL from. + * @param logicalState State after logical recovery. * @return Tail pointer. * @throws IgniteCheckedException If failed. 
*/ - private WALPointer tailPointer(WALPointer from) throws IgniteCheckedException { - WALPointer lastRead = from; + private WALPointer tailPointer(RestoreLogicalState logicalState) throws IgniteCheckedException { + // Should flush all data in buffers before read last WAL pointer. + // Iterator read records only from files. + WALPointer lastFlushPtr = cctx.wal().flush(null, true); - try (WALIterator it = cctx.wal().replay(from)) { - while (it.hasNextX()) { - IgniteBiTuple rec = it.nextX(); + // We must return null for NULL_PTR record, because FileWriteAheadLogManager.resumeLogging + // can't write header without that condition. + WALPointer lastReadPtr = logicalState.lastReadRecordPointer(); - if (rec == null) - break; + if (lastFlushPtr != null && lastReadPtr == null) + return lastFlushPtr; - lastRead = rec.get1(); - } + if (lastFlushPtr == null && lastReadPtr != null) + return lastReadPtr; + + if (lastFlushPtr != null && lastReadPtr != null) { + FileWALPointer lastFlushPtr0 = (FileWALPointer)lastFlushPtr; + FileWALPointer lastReadPtr0 = (FileWALPointer)lastReadPtr; + + return lastReadPtr0.compareTo(lastFlushPtr0) >= 0 ? lastReadPtr : lastFlushPtr0; } - return lastRead != null ? 
lastRead.next() : null; + return null; } /** @@ -2220,10 +2222,10 @@ private WALPointer tailPointer(WALPointer from) throws IgniteCheckedException { checkpointerThread = cpThread; - CheckpointProgressSnapshot chp = checkpointer.wakeupForCheckpoint(0, "node started"); + CheckpointProgress chp = checkpointer.wakeupForCheckpoint(0, "node started"); if (chp != null) - chp.cpBeginFut.get(); + chp.futureFor(LOCK_RELEASED).get(); } /** @@ -2261,8 +2263,9 @@ private WALPointer performBinaryMemoryRestore( WALPointer cpMark = ((CheckpointRecord)rec).checkpointMark(); if (cpMark != null) { - log.info("Restoring checkpoint after logical recovery, will start physical recovery from " + - "back pointer: " + cpMark); + if (log.isInfoEnabled()) + log.info("Restoring checkpoint after logical recovery, will start physical recovery from " + + "back pointer: " + cpMark); recPtr = cpMark; } @@ -2379,7 +2382,7 @@ private WALPointer performBinaryMemoryRestore( stripedApplyPage((pageMem) -> { try { - applyPageDelta(pageMem, pageDelta); + applyPageDelta(pageMem, pageDelta, true); applied.incrementAndGet(); } @@ -2407,10 +2410,12 @@ private WALPointer performBinaryMemoryRestore( "on disk, but checkpoint record is missed in WAL) " + "[cpStatus=" + status + ", lastRead=" + lastReadPtr + "]"); - log.info("Finished applying memory changes [changesApplied=" + applied + - ", time=" + (U.currentTimeMillis() - start) + " ms]"); + if (log.isInfoEnabled()) + log.info("Finished applying memory changes [changesApplied=" + applied + + ", time=" + (U.currentTimeMillis() - start) + " ms]"); - assert applied.get() > 0; + //Error in backport GG-17348 to 2.5.X branch. + //assert applied.get() > 0; finalizeCheckpointOnRecovery(status.cpStartTs, status.cpStartId, status.startPtr, exec); } @@ -2575,24 +2580,25 @@ public void applyPageSnapshot(PageMemoryEx pageMem, PageSnapshot pageSnapshotRec /** * @param pageMem Page memory. * @param pageDeltaRecord Page delta record. + * @param restore Get page for restore. 
* @throws IgniteCheckedException If failed. */ - private void applyPageDelta(PageMemoryEx pageMem, PageDeltaRecord pageDeltaRecord) throws IgniteCheckedException { + private void applyPageDelta(PageMemoryEx pageMem, PageDeltaRecord pageDeltaRecord, boolean restore) throws IgniteCheckedException { int grpId = pageDeltaRecord.groupId(); long pageId = pageDeltaRecord.pageId(); // Here we do not require tag check because we may be applying memory changes after // several repetitive restarts and the same pages may have changed several times. - long page = pageMem.acquirePage(grpId, pageId, IoStatisticsHolderNoOp.INSTANCE, true); + long page = pageMem.acquirePage(grpId, pageId, IoStatisticsHolderNoOp.INSTANCE, restore); try { - long pageAddr = pageMem.writeLock(grpId, pageId, page, true); + long pageAddr = pageMem.writeLock(grpId, pageId, page, restore); try { pageDeltaRecord.applyDelta(pageMem, pageAddr); } finally { - pageMem.writeUnlock(grpId, pageId, page, null, true, true); + pageMem.writeUnlock(grpId, pageId, page, null, true, restore); } } finally { @@ -2840,7 +2846,7 @@ private RestoreLogicalState applyLogicalUpdates( stripedApplyPage((pageMem) -> { try { - applyPageDelta(pageMem, pageDelta); + applyPageDelta(pageMem, pageDelta, false); } catch (IgniteCheckedException e) { U.error(log, "Failed to apply page delta, " + pageDelta); @@ -2954,11 +2960,13 @@ private void finalizeCheckpointOnRecovery( int pagesNum = 0; + GridFinishedFuture finishedFuture = new GridFinishedFuture(); + // Collect collection of dirty pages from all regions. 
for (DataRegion memPlc : regions) { if (memPlc.config().isPersistenceEnabled()){ GridMultiCollectionWrapper nextCpPagesCol = - ((PageMemoryEx)memPlc.pageMemory()).beginCheckpoint(); + ((PageMemoryEx)memPlc.pageMemory()).beginCheckpoint(finishedFuture); pagesNum += nextCpPagesCol.size(); @@ -2986,6 +2994,19 @@ private void finalizeCheckpointOnRecovery( int innerIdx = i; exec.execute(stripeIdx, () -> { + PageStoreWriter pageStoreWriter = (fullPageId, buf, tag) -> { + assert tag != PageMemoryImpl.TRY_AGAIN_TAG : "Lock is held by other thread for page " + fullPageId; + + int groupId = fullPageId.groupId(); + long pageId = fullPageId.pageId(); + + // Write buf to page store. + PageStore store = storeMgr.writeInternal(groupId, pageId, buf, tag, true); + + // Save store for future fsync. + updStores.add(store); + }; + // Local buffer for write pages. ByteBuffer writePageBuf = ByteBuffer.allocateDirect(pageSize()); @@ -2993,7 +3014,7 @@ private void finalizeCheckpointOnRecovery( Collection pages0 = pages.innerCollection(innerIdx); - FullPageId pageId = null; + FullPageId fullPageId = null; try { for (FullPageId fullId : pages0) { @@ -3001,38 +3022,21 @@ private void finalizeCheckpointOnRecovery( if (writePagesError.get() != null) break; - writePageBuf.rewind(); + // Save pageId to local variable for future using if exception occurred. + fullPageId = fullId; PageMemoryEx pageMem = getPageMemoryForCacheGroup(fullId.groupId()); - // Write page content to writePageBuf. - Integer tag = pageMem.getForCheckpoint(fullId, writePageBuf, null); - - assert tag == null || tag != PageMemoryImpl.TRY_AGAIN_TAG : - "Lock is held by other thread for page " + fullId; - - if (tag != null) { - writePageBuf.rewind(); - - // Save pageId to local variable for future using if exception occurred. - pageId = fullId; - - // Write writePageBuf to page store. 
- PageStore store = storeMgr.writeInternal( - fullId.groupId(), fullId.pageId(), writePageBuf, tag, true); - - writePageBuf.rewind(); - - // Save store for future fsync. - updStores.add(store); - } + // Write page content to page store via pageStoreWriter. + // Tracker is null, because no need to track checkpoint metrics on recovery. + pageMem.checkpointWritePage(fullId, writePageBuf, pageStoreWriter, null); } // Add number of handled pages. cpPagesCnt.addAndGet(pages0.size()); } catch (IgniteCheckedException e) { - U.error(log, "Failed to write page to pageStore, pageId=" + pageId); + U.error(log, "Failed to write page to pageStore, pageId=" + fullPageId); writePagesError.compareAndSet(null, e); } @@ -3429,9 +3433,18 @@ private void waitCompleted() throws IgniteCheckedException { @SuppressWarnings("NakedNotify") public class Checkpointer extends GridWorker { /** Checkpoint started log message format. */ - private static final String CHECKPOINT_STARTED_LOG_FORMAT = "Checkpoint started [checkpointId=%s, startPtr=%s," + - " checkpointBeforeLockTime=%dms, checkpointLockWait=%dms, checkpointListenersExecuteTime=%dms, " + - "checkpointLockHoldTime=%dms, walCpRecordFsyncDuration=%dms, %s pages=%d, reason='%s']"; + private static final String CHECKPOINT_STARTED_LOG_FORMAT = "Checkpoint started [" + + "checkpointId=%s, " + + "startPtr=%s, " + + "checkpointBeforeLockTime=%dms, " + + "checkpointLockWait=%dms, " + + "checkpointListenersExecuteTime=%dms, " + + "checkpointLockHoldTime=%dms, " + + "walCpRecordFsyncDuration=%dms, " + + "writeCheckpointEntryDuration=%dms, " + + "splitAndSortCpPagesDuration=%dms, " + + "%s pages=%d, " + + "reason='%s']"; /** Temporary write buffer. 
*/ private final ByteBuffer tmpWriteBuf; @@ -3518,7 +3531,7 @@ protected Checkpointer(@Nullable String gridName, String name, IgniteLogger log) catch (Throwable t) { err = t; - scheduledCp.cpFinishFut.onDone(t); + scheduledCp.fail(t); throw t; } @@ -3531,22 +3544,40 @@ protected Checkpointer(@Nullable String gridName, String name, IgniteLogger log) else if (err != null) cctx.kernalContext().failure().process(new FailureContext(SYSTEM_WORKER_TERMINATION, err)); - scheduledCp.cpFinishFut.onDone(new NodeStoppingException("Node is stopping.")); + scheduledCp.fail(new NodeStoppingException("Node is stopping.")); } } /** * */ - private CheckpointProgressSnapshot wakeupForCheckpoint(long delayFromNow, String reason) { + private CheckpointProgress wakeupForCheckpoint(long delayFromNow, String reason) { + return wakeupForCheckpoint(delayFromNow, reason, null); + } + + /** + * + */ + private CheckpointProgress wakeupForCheckpoint( + long delayFromNow, + String reason, + IgniteInClosure> lsnr + ) { + if (lsnr != null) { + //To be sure lsnr always will be executed in checkpoint thread. 
+ synchronized (this) { + CheckpointProgress sched = scheduledCp; + + sched.futureFor(FINISHED).listen(lsnr); + } + } + CheckpointProgress sched = scheduledCp; long nextNanos = System.nanoTime() + U.millisToNanos(delayFromNow); if (sched.nextCpNanos <= nextNanos) - return new CheckpointProgressSnapshot(sched); - - CheckpointProgressSnapshot ret; + return sched; synchronized (this) { sched = scheduledCp; @@ -3557,12 +3588,10 @@ private CheckpointProgressSnapshot wakeupForCheckpoint(long delayFromNow, String sched.nextCpNanos = nextNanos; } - ret = new CheckpointProgressSnapshot(sched); - notifyAll(); } - return ret; + return sched; } /** @@ -3580,7 +3609,7 @@ public IgniteInternalFuture wakeupForSnapshotCreation(SnapshotOperation snapshot scheduledCp.snapshotOperation = snapshotOperation; - ret = scheduledCp.cpBeginFut; + ret = scheduledCp.futureFor(LOCK_RELEASED); notifyAll(); } @@ -3602,7 +3631,7 @@ private void doCheckpoint() { } catch (IgniteCheckedException e) { if (curCpProgress != null) - curCpProgress.cpFinishFut.onDone(e); + curCpProgress.fail(e); // In case of checkpoint initialization error node should be invalidated and stopped. cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); @@ -3687,7 +3716,7 @@ private void doCheckpoint() { try { doneWriteFut.get(); } catch (IgniteCheckedException e) { - chp.progress.cpFinishFut.onDone(e); + chp.progress.fail(e); // In case of checkpoint writing error node should be invalidated and stopped. cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); @@ -3698,7 +3727,7 @@ private void doCheckpoint() { // Must re-check shutdown flag here because threads may have skipped some pages. // If so, we should not put finish checkpoint mark. 
if (shutdownNow) { - chp.progress.cpFinishFut.onDone(new NodeStoppingException("Node is stopping.")); + chp.progress.fail(new NodeStoppingException("Node is stopping.")); return; } @@ -3708,7 +3737,7 @@ private void doCheckpoint() { if (!skipSync) { for (Map.Entry updStoreEntry : updStores.entrySet()) { if (shutdownNow) { - chp.progress.cpFinishFut.onDone(new NodeStoppingException("Node is stopping.")); + chp.progress.fail(new NodeStoppingException("Node is stopping.")); return; } @@ -3737,7 +3766,7 @@ private void doCheckpoint() { destroyedPartitionsCnt = destroyEvictedPartitions(); } catch (IgniteCheckedException e) { - chp.progress.cpFinishFut.onDone(e); + chp.progress.fail(e); cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); @@ -3753,7 +3782,7 @@ private void doCheckpoint() { markCheckpointEnd(chp); } catch (IgniteCheckedException e) { - chp.progress.cpFinishFut.onDone(e); + chp.progress.fail(e); cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); @@ -3990,7 +4019,7 @@ private void waitCheckpointEvent() { private Checkpoint markCheckpointBegin(CheckpointMetricsTracker tracker) throws IgniteCheckedException { long cpTs = updateLastCheckpointTime(); - CheckpointProgress curr = updateCurrentCheckpointProgress(); + CheckpointProgress curr = scheduledCp; CheckpointRecord cpRec = new CheckpointRecord(memoryRecoveryRecordPtr); @@ -4023,6 +4052,8 @@ private Checkpoint markCheckpointBegin(CheckpointMetricsTracker tracker) throws checkpointLock.writeLock().lock(); try { + updateCurrentCheckpointProgress(); + assert curCpProgress == curr : "Concurrent checkpoint begin should not be happened"; tracker.onMarkStart(); @@ -4040,8 +4071,9 @@ private Checkpoint markCheckpointBegin(CheckpointMetricsTracker tracker) throws fillCacheGroupState(cpRec); + //There are allowable to replace pages only after checkpoint entry was stored to disk. 
GridTuple3>, Integer, Boolean> cpPagesTriple = - beginAllCheckpoints(); + beginAllCheckpoints(curr.futureFor(MARKER_STORED_TO_DISK)); cpPagesTuple = new IgniteBiTuple<>(cpPagesTriple.get1(), cpPagesTriple.get2()); @@ -4081,7 +4113,7 @@ private Checkpoint markCheckpointBegin(CheckpointMetricsTracker tracker) throws DbCheckpointListener.Context ctx = createOnCheckpointBeginContext(ctx0, hasPages, hasUserPages); - curr.cpBeginFut.onDone(); + curr.transitTo(LOCK_RELEASED); for (DbCheckpointListener lsnr : lsnrs) lsnr.onCheckpointBegin(ctx); @@ -4109,27 +4141,36 @@ private Checkpoint markCheckpointBegin(CheckpointMetricsTracker tracker) throws writeCheckpointEntry(tmpWriteBuf, cp, CheckpointEntryType.START); + curr.transitTo(MARKER_STORED_TO_DISK); + + tracker.onSplitAndSortCpPagesStart(); + GridMultiCollectionWrapper cpPages = splitAndSortCpPagesIfNeeded( cpPagesTuple, persistenceCfg.getCheckpointThreads()); + tracker.onSplitAndSortCpPagesEnd(); + if (printCheckpointStats && log.isInfoEnabled()) { long possibleJvmPauseDur = possibleLongJvmPauseDuration(tracker); - log.info( - String.format( - CHECKPOINT_STARTED_LOG_FORMAT, - cpRec.checkpointId(), - cp.checkpointMark(), - tracker.beforeLockDuration(), - tracker.lockWaitDuration(), - tracker.listenersExecuteDuration(), - tracker.lockHoldDuration(), - tracker.walCpRecordFsyncDuration(), - possibleJvmPauseDur > 0 ? "possibleJvmPauseDuration=" + possibleJvmPauseDur + "ms," : "", - cpPages.size(), - curr.reason - ) - ); + if (log.isInfoEnabled()) + log.info( + String.format( + CHECKPOINT_STARTED_LOG_FORMAT, + cpRec.checkpointId(), + cp.checkpointMark(), + tracker.beforeLockDuration(), + tracker.lockWaitDuration(), + tracker.listenersExecuteDuration(), + tracker.lockHoldDuration(), + tracker.walCpRecordFsyncDuration(), + tracker.writeCheckpointEntryDuration(), + tracker.splitAndSortCpPagesDuration(), + possibleJvmPauseDur > 0 ? 
"possibleJvmPauseDuration=" + possibleJvmPauseDur + "ms," : "", + cpPages.size(), + curr.reason + ) + ); } return new Checkpoint(cp, cpPages, curr); @@ -4285,7 +4326,7 @@ private long updateLastCheckpointTime() { synchronized (this) { curr = scheduledCp; - curr.started = true; + curr.transitTo(LOCK_TAKEN); if (curr.reason == null) curr.reason = "timeout"; @@ -4358,8 +4399,11 @@ private boolean hasPageForWrite(Collection>, Integer, Boolean> beginAllCheckpoints() { + private GridTuple3>, Integer, Boolean> beginAllCheckpoints( + IgniteInternalFuture allowToReplace + ) { Collection> res = new ArrayList(dataRegions().size()); int pagesNum = 0; @@ -4371,7 +4415,7 @@ private GridTuple3>, Integer, continue; IgniteBiTuple, Boolean> nextCpPages = - ((PageMemoryEx)memPlc.pageMemory()).beginCheckpointEx(); + ((PageMemoryEx)memPlc.pageMemory()).beginCheckpointEx(allowToReplace); GridMultiCollectionWrapper nextCpPagesCol = nextCpPages.get1(); @@ -4427,7 +4471,7 @@ private void markCheckpointEnd(Checkpoint chp) throws IgniteCheckedException { removeCheckpointFiles(cp); if (chp.progress != null) - chp.progress.cpFinishFut.onDone(); + chp.progress.transitTo(FINISHED); } /** {@inheritDoc} */ @@ -4578,8 +4622,7 @@ private GridMultiCollectionWrapper splitAndSortCpPagesIfNeeded( if (cmp != 0) return cmp; - return Long.compare(PageIdUtils.effectivePageId(o1.pageId()), - PageIdUtils.effectivePageId(o2.pageId())); + return Long.compare(o1.effectivePageId(), o2.effectivePageId()); } }; @@ -4706,6 +4749,9 @@ private WriteCheckpointPages( if (pagesToRetry.isEmpty()) doneFut.onDone((Void)null); else { + LT.warn(log, pagesToRetry.size() + " checkpoint pages were not written yet due to unsuccessful " + + "page write lock acquisition and will be retried"); + if (retryWriteExecutor == null) { while (!pagesToRetry.isEmpty()) pagesToRetry = writePages(pagesToRetry); @@ -4737,10 +4783,14 @@ private WriteCheckpointPages( * @return pagesToRetry Pages which should be retried. 
*/ private List writePages(Collection writePageIds) throws IgniteCheckedException { - ByteBuffer tmpWriteBuf = threadBuf.get(); - List pagesToRetry = new ArrayList<>(); + CheckpointMetricsTracker tracker = persStoreMetrics.metricsEnabled() ? this.tracker : null; + + PageStoreWriter pageStoreWriter = createPageStoreWriter(pagesToRetry); + + ByteBuffer tmpWriteBuf = threadBuf.get(); + for (FullPageId fullId : writePageIds) { if (checkpointer.shutdownNow) break; @@ -4770,23 +4820,36 @@ private List writePages(Collection writePageIds) throws pageMem = (PageMemoryEx)metaStorage.pageMemory(); - Integer tag = pageMem.getForCheckpoint( - fullId, tmpWriteBuf, persStoreMetrics.metricsEnabled() ? tracker : null); + pageMem.checkpointWritePage(fullId, tmpWriteBuf, pageStoreWriter, tracker); + } - if (tag != null) { + return pagesToRetry; + } + + /** + * Factory method for create {@link PageStoreWriter}. + * + * @param pagesToRetry List pages for retry. + * @return Checkpoint page write context. + */ + private PageStoreWriter createPageStoreWriter(List pagesToRetry) { + return new PageStoreWriter() { + /** {@inheritDoc} */ + @Override public void writePage(FullPageId fullPageId, ByteBuffer buf, int tag) throws IgniteCheckedException { if (tag == PageMemoryImpl.TRY_AGAIN_TAG) { - pagesToRetry.add(fullId); + pagesToRetry.add(fullPageId); - continue; + return; } - assert PageIO.getType(tmpWriteBuf) != 0 : "Invalid state. Type is 0! pageId = " + U.hexLong(fullId.pageId()); - assert PageIO.getVersion(tmpWriteBuf) != 0 : "Invalid state. Version is 0! pageId = " + U.hexLong(fullId.pageId()); + int groupId = fullPageId.groupId(); + long pageId = fullPageId.pageId(); - tmpWriteBuf.rewind(); + assert getType(buf) != 0 : "Invalid state. Type is 0! pageId = " + hexLong(pageId); + assert getVersion(buf) != 0 : "Invalid state. Version is 0! 
pageId = " + hexLong(pageId); if (persStoreMetrics.metricsEnabled()) { - int pageType = PageIO.getType(tmpWriteBuf); + int pageType = getType(buf); if (PageIO.isDataPageType(pageType)) tracker.onDataPageWritten(); @@ -4794,13 +4857,11 @@ private List writePages(Collection writePageIds) throws writtenPagesCntr.incrementAndGet(); - PageStore store = storeMgr.writeInternal(grpId, fullId.pageId(), tmpWriteBuf, tag, true); + PageStore store = storeMgr.writeInternal(groupId, pageId, buf, tag, true); updStores.computeIfAbsent(store, k -> new LongAdder()).increment(); } - } - - return pagesToRetry; + }; } } @@ -4927,25 +4988,18 @@ public static class CheckpointProgress { /** Scheduled time of checkpoint. */ private volatile long nextCpNanos; - /** Checkpoint begin phase future. */ - private GridFutureAdapter cpBeginFut = new GridFutureAdapter<>(); + /** Current checkpoint state. */ + private volatile AtomicReference state = new AtomicReference(State.SCHEDULED); - /** Checkpoint finish phase future. */ - private GridFutureAdapter cpFinishFut = new GridFutureAdapter() { - @Override protected boolean onDone(@Nullable Void res, @Nullable Throwable err, boolean cancel) { - if (err != null && !cpBeginFut.isDone()) - cpBeginFut.onDone(err); + /** Future which would be finished when corresponds state is set. */ + private final Map stateFutures = new ConcurrentHashMap<>(); - return super.onDone(res, err, cancel); - } - }; + /** Cause of fail, which has happened during the checkpoint or null if checkpoint was successful. */ + private volatile Throwable failCause; /** Flag indicates that snapshot operation will be performed after checkpoint. */ private volatile boolean nextSnapshot; - /** Flag indicates that checkpoint is started. */ - private volatile boolean started; - /** Snapshot operation that should be performed if {@link #nextSnapshot} set to true. 
*/ private volatile SnapshotOperation snapshotOperation; @@ -4962,50 +5016,91 @@ private CheckpointProgress(long cpFreq) { this.nextCpNanos = System.nanoTime() + U.millisToNanos(cpFreq); } - /** */ - public boolean started() { - return cpBeginFut.isDone(); + /** + * @return {@code true} If checkpoint already started but have not finished yet. + */ + public boolean inProgress() { + return greaterOrEqualTo(LOCK_RELEASED) && !greaterOrEqualTo(FINISHED); } - /** */ - public boolean finished() { - return cpFinishFut.isDone(); + /** + * @param expectedState Expected state. + * @return {@code true} if current state equal to given state. + */ + public boolean greaterOrEqualTo(State expectedState) { + return state.get().ordinal() >= expectedState.ordinal(); } - } - /** - * - */ - private static class CheckpointProgressSnapshot implements CheckpointFuture { - /** */ - private final boolean started; + /** + * @param state State for which future should be returned. + * @return Existed or new future which corresponds to the given state. + */ + public GridFutureAdapter futureFor(State state) { + GridFutureAdapter stateFut = stateFutures.computeIfAbsent(state, (k) -> new GridFutureAdapter()); - /** */ - private final GridFutureAdapter cpBeginFut; + if (greaterOrEqualTo(state) && !stateFut.isDone()) + stateFut.onDone(failCause); - /** */ - private final GridFutureAdapter cpFinishFut; + return stateFut; + } - /** */ - CheckpointProgressSnapshot(CheckpointProgress cpProgress) { - started = cpProgress.started; - cpBeginFut = cpProgress.cpBeginFut; - cpFinishFut = cpProgress.cpFinishFut; + /** + * Mark this checkpoint execution as failed. + * + * @param error Causal error of fail. + */ + public void fail(Throwable error) { + failCause = error; + + transitTo(FINISHED); } - /** {@inheritDoc} */ - @Override public GridFutureAdapter beginFuture() { - return cpBeginFut; + /** + * Changing checkpoint state if order of state is correct. + * + * @param newState New checkpoint state. 
+ */ + public void transitTo(@NotNull State newState) { + State state = this.state.get(); + + if (state.ordinal() < newState.ordinal()) { + this.state.compareAndSet(state, newState); + + doFinishFuturesWhichLessOrEqualTo(newState); + } } - /** {@inheritDoc} */ - @Override public GridFutureAdapter finishFuture() { - return cpFinishFut; + /** + * Finishing futures with correct result in direct state order until lastState(included). + * + * @param lastState State until which futures should be done. + */ + private void doFinishFuturesWhichLessOrEqualTo(@NotNull State lastState) { + for (State old : State.values()) { + GridFutureAdapter fut = stateFutures.get(old); + + if (fut != null && !fut.isDone()) + fut.onDone(failCause); + + if (old == lastState) + return; + } } - /** {@inheritDoc} */ - @Override public boolean started() { - return started; + /** + * Possible checkpoint states. Ordinal is important. Every next state follows the previous one. + */ + public enum State { + /** Checkpoint is waiting to execution. **/ + SCHEDULED, + /** Checkpoint was awakened and it is preparing to start. **/ + LOCK_TAKEN, + /** Checkpoint counted the pages and write lock was released. **/ + LOCK_RELEASED, + /** Checkpoint marker was stored to disk. **/ + MARKER_STORED_TO_DISK, + /** Checkpoint was finished. 
**/ + FINISHED } } @@ -5073,19 +5168,23 @@ public void tryLock(long lockWaitTimeMillis) throws IgniteCheckedException { } //write ports - sb.a("["); - Iterator it = ctx.ports().records().iterator(); + final GridPortProcessor ports = ctx.ports(); - while (it.hasNext()) { - GridPortRecord rec = it.next(); + if (ports != null) { + sb.a("["); + Iterator it = ports.records().iterator(); - sb.a(rec.protocol()).a(":").a(rec.port()); + while (it.hasNext()) { + GridPortRecord rec = it.next(); - if (it.hasNext()) - sb.a(", "); - } + sb.a(rec.protocol()).a(":").a(rec.port()); - sb.a("]"); + if (it.hasNext()) + sb.a(", "); + } + + sb.a("]"); + } String failMsg; @@ -5381,11 +5480,12 @@ private static void dumpPartitionsInfo(CacheGroupContext grp, IgniteLogger log) GridDhtLocalPartition part = grp.topology().localPartition(p); if (part != null) { - log.info("Partition [grp=" + grp.cacheOrGroupName() - + ", id=" + p - + ", state=" + part.state() - + ", counter=" + part.dataStore().partUpdateCounter() - + ", size=" + part.fullSize() + "]"); + if (log.isInfoEnabled()) + log.info("Partition [grp=" + grp.cacheOrGroupName() + + ", id=" + p + + ", state=" + part.state() + + ", counter=" + part.dataStore().partUpdateCounter() + + ", size=" + part.fullSize() + "]"); continue; } @@ -5396,7 +5496,8 @@ private static void dumpPartitionsInfo(CacheGroupContext grp, IgniteLogger log) pageStore.ensure(grp.groupId(), p); if (pageStore.pages(grp.groupId(), p) <= 1) { - log.info("Partition [grp=" + grp.cacheOrGroupName() + ", id=" + p + ", state=N/A (only file header) ]"); + if (log.isInfoEnabled()) + log.info("Partition [grp=" + grp.cacheOrGroupName() + ", id=" + p + ", state=N/A (only file header) ]"); continue; } @@ -5417,11 +5518,12 @@ private static void dumpPartitionsInfo(CacheGroupContext grp, IgniteLogger log) long updateCntr = io.getUpdateCounter(pageAddr); long size = io.getSize(pageAddr); - log.info("Partition [grp=" + grp.cacheOrGroupName() - + ", id=" + p - + ", state=" + state - + ", 
counter=" + updateCntr - + ", size=" + size + "]"); + if (log.isInfoEnabled()) + log.info("Partition [grp=" + grp.cacheOrGroupName() + + ", id=" + p + + ", state=" + state + + ", counter=" + updateCntr + + ", size=" + size + "]"); } finally { pageMem.readUnlock(grp.groupId(), partMetaId, partMetaPage); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index 5d6b53ae813cf..21606fac520b8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -64,8 +64,8 @@ import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteHistoricalIterator; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; -import org.apache.ignite.internal.processors.cache.persistence.freelist.CacheFreeList; import org.apache.ignite.internal.processors.cache.persistence.freelist.AbstractFreeList; +import org.apache.ignite.internal.processors.cache.persistence.freelist.CacheFreeList; import org.apache.ignite.internal.processors.cache.persistence.freelist.SimpleDataRow; import org.apache.ignite.internal.processors.cache.persistence.migration.UpgradePendingTreeToPerPartitionTask; import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; @@ -103,6 +103,7 @@ import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; import static 
org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.RENTING; +import static org.apache.ignite.internal.util.lang.GridCursor.EMPTY_CURSOR; /** * Used when persistence enabled. @@ -511,6 +512,8 @@ else if (needSnapshot) for (int p = 0; p < grp.affinity().partitions(); p++) { Integer recoverState = partitionRecoveryStates.get(new GroupPartitionId(grp.groupId(), p)); + long startTime = U.currentTimeMillis(); + if (ctx.pageStore().exists(grp.groupId(), p)) { ctx.pageStore().ensure(grp.groupId(), p); @@ -555,7 +558,8 @@ else if (needSnapshot) if (log.isDebugEnabled()) log.debug("Restored partition state (from WAL) " + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + ", state=" + part.state() + - ", updCntr=" + part.initialUpdateCounter() + "]"); + ", updCntr=" + part.initialUpdateCounter() + + ", size=" + part.fullSize() + "]"); } else { int stateId = (int) io.getPartitionState(pageAddr); @@ -565,7 +569,8 @@ else if (needSnapshot) if (log.isDebugEnabled()) log.debug("Restored partition state (from page memory) " + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + ", state=" + part.state() + - ", updCntr=" + part.initialUpdateCounter() + ", stateId=" + stateId + "]"); + ", updCntr=" + part.initialUpdateCounter() + ", stateId=" + stateId + + ", size=" + part.fullSize() + "]"); } } finally { @@ -590,13 +595,19 @@ else if (recoverState != null) { // Pre-create partition if having valid state. 
if (log.isDebugEnabled()) log.debug("Restored partition state (from WAL) " + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + ", state=" + part.state() + - ", updCntr=" + part.initialUpdateCounter() + "]"); + ", updCntr=" + part.initialUpdateCounter() + + ", size=" + part.fullSize() + "]"); } else { if (log.isDebugEnabled()) log.debug("Skipping partition on recovery (no page store OR wal state) " + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + "]"); } + + if (log.isDebugEnabled()) + log.debug("Finished restoring partition state " + + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + + ", time=" + (U.currentTimeMillis() - startTime) + " ms]"); } partitionStatesRestored = true; @@ -856,20 +867,27 @@ private static boolean addPartition( } /** {@inheritDoc} */ - @Override protected void destroyCacheDataStore0(CacheDataStore store) throws IgniteCheckedException { - assert ctx.database() instanceof GridCacheDatabaseSharedManager - : "Destroying cache data store when persistence is not enabled: " + ctx.database(); - - int partId = store.partId(); - + @Override public void destroyCacheDataStore(CacheDataStore store) throws IgniteCheckedException { ctx.database().checkpointReadLock(); try { - saveStoreMetadata(store, null, true, false); + super.destroyCacheDataStore(store); } finally { ctx.database().checkpointReadUnlock(); } + } + + /** {@inheritDoc} */ + @Override protected void destroyCacheDataStore0(CacheDataStore store) throws IgniteCheckedException { + assert ctx.database() instanceof GridCacheDatabaseSharedManager + : "Destroying cache data store when persistence is not enabled: " + ctx.database(); + + assert ctx.database().checkpointLockIsHeldByThread(); + + int partId = store.partId(); + + saveStoreMetadata(store, null, true, false); ((GridCacheDatabaseSharedManager)ctx.database()).schedulePartitionDestroy(grp.groupId(), partId); } @@ -1598,7 +1616,7 @@ public class GridCacheDataStore implements CacheDataStore { /** * @param partId Partition. 
- * @param exists {@code True} if store for this index exists. + * @param exists {@code True} if store exists. */ private GridCacheDataStore(int partId, boolean exists) { this.partId = partId; @@ -1634,6 +1652,9 @@ private String pendingEntriesTreeName() { } /** + * @param checkExists If {@code true} data store won't be initialized if it doesn't exists + * (has non empty data file). This is an optimization for lazy store initialization on writes. + * * @return Store delegate. * @throws IgniteCheckedException If failed. */ @@ -1676,7 +1697,6 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException freeListName, grp.dataRegion().memoryMetrics(), grp.dataRegion(), - null, ctx.wal(), reuseRoot.pageId().pageId(), reuseRoot.isAllocated(), @@ -2184,11 +2204,9 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { /** {@inheritDoc} */ @Override public void updateInitialCounter(long start, long delta) { try { - CacheDataStore delegate0 = init0(true); - - if (delegate0 == null) - throw new IllegalStateException("Should be never called."); + CacheDataStore delegate0 = init0(false); + // Partition may not exists before recovery starts in case of recovering counters from RollbackRecord. 
delegate0.updateInitialCounter(start, delta); } catch (IgniteCheckedException e) { @@ -2521,19 +2539,4 @@ private int purgeExpiredInternal( return partStorage; } } - - /** - * - */ - private static final GridCursor EMPTY_CURSOR = new GridCursor() { - /** {@inheritDoc} */ - @Override public boolean next() { - return false; - } - - /** {@inheritDoc} */ - @Override public CacheDataRow get() { - return null; - } - }; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index bd07673221c14..5cb85fdb09444 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.cache.persistence; +import javax.management.InstanceNotFoundException; import java.io.File; import java.util.ArrayList; import java.util.Collection; @@ -26,23 +27,24 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import javax.management.InstanceNotFoundException; - import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.DataRegionMetricsProvider; import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.IgniteSystemProperties; -import org.apache.ignite.DataRegionMetricsProvider; import org.apache.ignite.configuration.DataPageEvictionMode; import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.failure.FailureContext; +import 
org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.mem.DirectMemoryRegion; +import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; import org.apache.ignite.internal.mem.file.MappedFileMemoryProvider; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; import org.apache.ignite.internal.pagemem.PageMemory; @@ -53,12 +55,14 @@ import org.apache.ignite.internal.processors.cache.GridCacheMapEntry; import org.apache.ignite.internal.processors.cache.GridCacheSharedManagerAdapter; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.CheckpointProgress; import org.apache.ignite.internal.processors.cache.persistence.evict.FairFifoPageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.evict.NoOpPageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.evict.PageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.evict.Random2LruPageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.evict.RandomLruPageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; +import org.apache.ignite.internal.processors.cache.persistence.freelist.AbstractFreeList; import org.apache.ignite.internal.processors.cache.persistence.freelist.CacheFreeList; import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; @@ -66,11 +70,13 @@ import 
org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; import org.apache.ignite.internal.processors.cluster.IgniteChangeGlobalStateSupport; +import org.apache.ignite.internal.util.TimeBag; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; +import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.lang.IgniteOutClosure; import org.apache.ignite.mxbean.DataRegionMetricsMXBean; import org.jetbrains.annotations.Nullable; @@ -257,7 +263,6 @@ protected void initPageMemoryDataStructures(DataStorageConfiguration dbCfg) thro freeListName, memMetrics, memPlc, - null, persistenceEnabled ? cctx.wal() : null, 0L, true, @@ -882,7 +887,7 @@ public void cleanupTempCheckpointDirectory() throws IgniteCheckedException{ * * @param reason Reason. */ - @Nullable public CheckpointFuture forceCheckpoint(String reason) { + @Nullable public CheckpointProgress forceCheckpoint(String reason) { return null; } @@ -892,6 +897,20 @@ public void cleanupTempCheckpointDirectory() throws IgniteCheckedException{ * @throws IgniteCheckedException If failed. */ public void waitForCheckpoint(String reason) throws IgniteCheckedException { + waitForCheckpoint(reason, null); + } + + /** + * Waits until current state is checkpointed and execution listeners after finish. + * + * @param reason Reason for checkpoint wakeup if it would be required. + * @param lsnr Listeners which should be called in checkpoint thread after current checkpoint finished. + * @throws IgniteCheckedException If failed. 
+ */ + public void waitForCheckpoint( + String reason, + IgniteInClosure> lsnr + ) throws IgniteCheckedException { // No-op } @@ -906,9 +925,10 @@ public void beforeExchange(GridDhtPartitionsExchangeFuture discoEvt) throws Igni * Perform memory restore before {@link GridDiscoveryManager} start. * * @param kctx Current kernal context. + * @param startTimer Holder of start time of stages. * @throws IgniteCheckedException If fails. */ - public void startMemoryRestore(GridKernalContext kctx) throws IgniteCheckedException { + public void startMemoryRestore(GridKernalContext kctx, TimeBag startTimer) throws IgniteCheckedException { // No-op. } @@ -983,6 +1003,73 @@ public void releaseHistoryForPreloading() { // No-op } + /** + * Checks that the given {@code region} has enough space for putting a new entry. + * + * This method makes sense then and only then + * the data region is not persisted {@link DataRegionConfiguration#isPersistenceEnabled()} + * and page eviction is disabled {@link DataPageEvictionMode#DISABLED}. + * + * The non-persistent region should reserve a number of pages to support a free list {@link AbstractFreeList}. + * For example, removing a row from underlying store may require allocating a new data page + * in order to move a tracked page from one bucket to another one which does not have a free space for a new stripe. + * See {@link AbstractFreeList#removeDataRowByLink}. + * Therefore, inserting a new entry should be prevented in case of some threshold is exceeded. + * + * @param region Data region to be checked. + * @param row Data row to be inserted. + * @throws IgniteOutOfMemoryException In case of the given data region does not have enough free space + * for putting a new entry. + * @throws IgniteCheckedException If size of the given {@code row} cannot be calculated. 
+ */ + public void ensureFreeSpaceForInsert( + DataRegion region, + CacheDataRow row + ) throws IgniteOutOfMemoryException, IgniteCheckedException { + if (region == null) + return; + + DataRegionConfiguration regCfg = region.config(); + + if (regCfg.getPageEvictionMode() != DataPageEvictionMode.DISABLED || regCfg.isPersistenceEnabled()) + return; + + long memorySize = regCfg.getMaxSize(); + + PageMemory pageMem = region.pageMemory(); + + CacheFreeList freeList = freeListMap.get(regCfg.getName()); + + long nonEmptyPages = (pageMem.loadedPages() - freeList.emptyDataPages()); + + // The maximum number of pages that can be allocated (memorySize / systemPageSize) + // should be greater or equal to pages required for inserting a new entry plus + // the current number of non-empty pages plus the number of pages that may be required in order to move + // all pages to a reuse bucket, that is equal to nonEmptyPages * 8 / pageSize, where 8 is the size of a link. + // Note that the whole page cannot be used to storing links (there is obvious overhead), + // see PagesListNodeIO and PagesListMetaIO#getCapacity(), so we pessimistically multiply the result on 1.5, + // in any way, the number of required pages is less than 1 percent. 
+ boolean oomThreshold = (memorySize / pageMem.systemPageSize()) < + ((double)row.size() / pageMem.pageSize() + nonEmptyPages * (8.0 * 1.5 / pageMem.pageSize() + 1) + 256 /*one page per bucket*/); + + if (oomThreshold) { + IgniteOutOfMemoryException oom = new IgniteOutOfMemoryException("Out of memory in data region [" + + "name=" + regCfg.getName() + + ", initSize=" + U.readableSize(regCfg.getInitialSize(), false) + + ", maxSize=" + U.readableSize(regCfg.getMaxSize(), false) + + ", persistenceEnabled=" + regCfg.isPersistenceEnabled() + "] Try the following:" + U.nl() + + " ^-- Increase maximum off-heap memory size (DataRegionConfiguration.maxSize)" + U.nl() + + " ^-- Enable Ignite persistence (DataRegionConfiguration.persistenceEnabled)" + U.nl() + + " ^-- Enable eviction or expiration policies" + ); + + if (cctx.kernalContext() != null) + cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, oom)); + + throw oom; + } + } + /** * See {@link GridCacheMapEntry#ensureFreeSpace()} * @@ -1206,7 +1293,6 @@ protected File buildPath(String path, String consId) throws IgniteCheckedExcepti File workDir = igniteHomeStr == null ? 
new File(path) : U.resolveWorkDirectory(igniteHomeStr, path, false); - return new File(workDir, consId); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java index d4fd363a0b287..3dbad3f644779 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java @@ -192,18 +192,18 @@ public IndexStorageImpl( /** {@inheritDoc} */ @Override public Collection getIndexNames() throws IgniteCheckedException { assert metaTree != null; - + GridCursor cursor = metaTree.find(null, null); ArrayList names = new ArrayList<>((int)metaTree.size()); while (cursor.next()) { IndexItem item = cursor.get(); - + if (item != null) names.add(new String(item.idxName)); } - + return names; } @@ -259,6 +259,7 @@ private MetaTree( super( treeName("meta", "Meta"), cacheId, + null, pageMem, wal, globalRmvId, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PageStoreWriter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PageStoreWriter.java new file mode 100644 index 0000000000000..9de472805b98d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PageStoreWriter.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; + +/** + * Interface for write page to {@link PageStore}. + */ +public interface PageStoreWriter { + /** + * Callback for write page. {@link PageMemoryEx} will copy page content to buffer before call. + * + * @param fullPageId Page ID to get byte buffer for. The page ID must be present in the collection returned by + * the {@link PageMemoryEx#beginCheckpoint(IgniteInternalFuture)} method call. + * @param buf Temporary buffer to write changes into. + * @param tag {@code Partition generation} if data was read, {@code null} otherwise (data already saved to storage). + * @throws IgniteCheckedException If write page failed. 
+ */ + void writePage(FullPageId fullPageId, ByteBuffer buf, int tag) throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java index 4ca6f7f34934a..dfc4f9f7e8ced 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java @@ -25,8 +25,8 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner; -import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.stat.IoStatisticsHolder; +import org.apache.ignite.internal.util.typedef.internal.U; /** * Data store for H2 rows. @@ -50,6 +50,9 @@ public class RowStore { /** Row cache cleaner. */ private GridQueryRowCacheCleaner rowCacheCleaner; + /** */ + protected final CacheGroupContext grp; + /** * @param grp Cache group. * @param freeList Free list. @@ -58,6 +61,7 @@ public RowStore(CacheGroupContext grp, FreeList freeList) { assert grp != null; assert freeList != null; + this.grp = grp; this.freeList = freeList; ctx = grp.shared(); @@ -96,8 +100,11 @@ public void removeRow(long link, IoStatisticsHolder statHolder) throws IgniteChe * @throws IgniteCheckedException If failed. 
*/ public void addRow(CacheDataRow row, IoStatisticsHolder statHolder) throws IgniteCheckedException { - if (!persistenceEnabled) + if (!persistenceEnabled) { + ctx.database().ensureFreeSpaceForInsert(grp.dataRegion(), row); + freeList.insertDataRow(row, statHolder); + } else { ctx.database().checkpointReadLock(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java index a53d747fcf612..5b302097ca323 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java @@ -24,6 +24,7 @@ import java.nio.channels.ClosedByInterruptException; import java.nio.channels.ClosedChannelException; import java.nio.file.Files; +import java.nio.file.Path; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -39,6 +40,7 @@ import org.apache.ignite.internal.processors.cache.persistence.wal.crc.FastCrc; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteOutClosure; import static java.nio.file.StandardOpenOption.CREATE; import static java.nio.file.StandardOpenOption.READ; @@ -59,7 +61,7 @@ public class FilePageStore implements PageStore { public static final int HEADER_SIZE = 8/*SIGNATURE*/ + 4/*VERSION*/ + 1/*type*/ + 4/*page size*/; /** */ - private final File cfgFile; + private final IgniteOutClosure pathProvider; /** */ private final byte type; @@ -97,17 +99,15 @@ public class FilePageStore implements PageStore { /** */ private final ReadWriteLock lock = new ReentrantReadWriteLock(); - /** - 
* @param file File. - */ + /** */ public FilePageStore( byte type, - File file, + IgniteOutClosure pathProvider, FileIOFactory factory, DataStorageConfiguration cfg, AllocatedPageTracker allocatedTracker) { this.type = type; - this.cfgFile = file; + this.pathProvider = pathProvider; this.dbCfg = cfg; this.ioFactory = factory; this.allocated = new AtomicLong(); @@ -117,7 +117,9 @@ public FilePageStore( /** {@inheritDoc} */ @Override public boolean exists() { - return cfgFile.exists() && cfgFile.length() > headerSize(); + File file = pathProvider.apply().toFile(); + + return file.exists() && file.length() > headerSize(); } /** @@ -174,7 +176,7 @@ private long initFile(FileIO fileIO) throws IOException { } catch (ClosedByInterruptException e) { // If thread was interrupted written header can be inconsistent. - Files.delete(cfgFile.toPath()); + Files.delete(pathProvider.apply()); throw e; } @@ -186,7 +188,7 @@ private long initFile(FileIO fileIO) throws IOException { * @return Next available position in the file to store a data. * @throws IOException If check has failed. */ - private long checkFile(FileIO fileIO) throws IOException { + private long checkFile(FileIO fileIO, File cfgFile) throws IOException { ByteBuffer hdr = ByteBuffer.allocate(headerSize()).order(ByteOrder.LITTLE_ENDIAN); fileIO.readFully(hdr); @@ -246,8 +248,10 @@ public void stop(boolean delete) throws StorageException { if (fileIO != null) // Ensure the file is closed even if not initialized yet. 
fileIO.close(); - if (delete && cfgFile.exists()) - Files.delete(cfgFile.toPath()); + Path path = pathProvider.apply(); + + if (delete && Files.exists(path)) + Files.delete(path); return; } @@ -259,10 +263,10 @@ public void stop(boolean delete) throws StorageException { fileIO = null; if (delete) - Files.delete(cfgFile.toPath()); + Files.delete(pathProvider.apply()); } catch (IOException e) { - throw new StorageException("Failed to stop serving partition file [file=" + cfgFile.getPath() + throw new StorageException("Failed to stop serving partition file [file=" + getFileAbsolutePath() + ", delete=" + delete + "]", e); } finally { @@ -283,6 +287,8 @@ public void stop(boolean delete) throws StorageException { public void truncate(int tag) throws StorageException { init(); + Path filePath = pathProvider.apply(); + lock.writeLock().lock(); try { @@ -294,10 +300,10 @@ public void truncate(int tag) throws StorageException { fileIO = null; - Files.delete(cfgFile.toPath()); + Files.delete(filePath); } catch (IOException e) { - throw new StorageException("Failed to truncate partition file [file=" + cfgFile.getPath() + "]", e); + throw new StorageException("Failed to truncate partition file [file=" + filePath.toAbsolutePath() + "]", e); } finally { allocatedTracker.updateTotalAllocatedPages(-1L * allocated.getAndSet(0) / pageSize); @@ -343,7 +349,7 @@ public void finishRecover() throws StorageException { recover = false; } catch (IOException e) { - throw new StorageException("Failed to finish recover partition file [file=" + cfgFile.getAbsolutePath() + "]", e); + throw new StorageException("Failed to finish recover partition file [file=" + getFileAbsolutePath() + "]", e); } finally { lock.writeLock().unlock(); @@ -362,7 +368,8 @@ public void finishRecover() throws StorageException { assert pageBuf.position() == 0; assert pageBuf.order() == ByteOrder.nativeOrder(); assert off <= allocated.get() : "calculatedOffset=" + off + - ", allocated=" + allocated.get() + ", headerSize=" 
+ headerSize() + ", cfgFile=" + cfgFile; + ", allocated=" + allocated.get() + ", headerSize=" + headerSize() + ", cfgFile=" + + pathProvider.apply().toAbsolutePath(); int n = readWithFailover(pageBuf, off); @@ -385,7 +392,7 @@ public void finishRecover() throws StorageException { if ((savedCrc32 ^ curCrc32) != 0) throw new IgniteDataIntegrityViolationException("Failed to read page (CRC validation failed) " + "[id=" + U.hexLong(pageId) + ", off=" + (off - pageSize) + - ", file=" + cfgFile.getAbsolutePath() + ", fileSize=" + fileIO.size() + + ", file=" + getFileAbsolutePath() + ", fileSize=" + fileIO.size() + ", savedCrc=" + U.hexInt(savedCrc32) + ", curCrc=" + U.hexInt(curCrc32) + ", page=" + U.toHexString(pageBuf) + "]"); @@ -397,7 +404,7 @@ public void finishRecover() throws StorageException { PageIO.setCrc(pageBuf, savedCrc32); } catch (IOException e) { - throw new StorageException("Failed to read page [file=" + cfgFile.getAbsolutePath() + ", pageId=" + pageId + "]", e); + throw new StorageException("Failed to read page [file=" + getFileAbsolutePath() + ", pageId=" + pageId + "]", e); } } @@ -411,7 +418,7 @@ public void finishRecover() throws StorageException { readWithFailover(buf, 0); } catch (IOException e) { - throw new StorageException("Failed to read header [file=" + cfgFile.getAbsolutePath() + "]", e); + throw new StorageException("Failed to read header [file=" + getFileAbsolutePath() + "]", e); } } @@ -435,9 +442,11 @@ private void init() throws StorageException { while (true) { try { + File cfgFile = pathProvider.apply().toFile(); + this.fileIO = fileIO = ioFactory.create(cfgFile, CREATE, READ, WRITE); - newSize = (cfgFile.length() == 0 ? initFile(fileIO) : checkFile(fileIO)) - headerSize(); + newSize = (cfgFile.length() == 0 ? 
initFile(fileIO) : checkFile(fileIO, cfgFile)) - headerSize(); if (interrupted) Thread.currentThread().interrupt(); @@ -463,7 +472,7 @@ private void init() throws StorageException { } catch (IOException e) { err = new StorageException( - "Failed to initialize partition file: " + cfgFile.getAbsolutePath(), e); + "Failed to initialize partition file: " + getFileAbsolutePath(), e); throw err; } @@ -509,9 +518,11 @@ private void reinit(FileIO fileIO) throws IOException { try { fileIO = null; + File cfgFile = pathProvider.apply().toFile(); + fileIO = ioFactory.create(cfgFile, CREATE, READ, WRITE); - checkFile(fileIO); + checkFile(fileIO, cfgFile); this.fileIO = fileIO; @@ -564,7 +575,7 @@ private void reinit(FileIO fileIO) throws IOException { assert (off >= 0 && off <= allocated.get()) || recover : "off=" + U.hexLong(off) + ", allocated=" + U.hexLong(allocated.get()) + - ", pageId=" + U.hexLong(pageId) + ", file=" + cfgFile.getPath(); + ", pageId=" + U.hexLong(pageId) + ", file=" + getFileAbsolutePath(); assert pageBuf.capacity() == pageSize; assert pageBuf.position() == 0; @@ -622,7 +633,7 @@ private void reinit(FileIO fileIO) throws IOException { } } - throw new StorageException("Failed to write page [file=" + cfgFile.getAbsolutePath() + throw new StorageException("Failed to write page [file=" + getFileAbsolutePath() + ", pageId=" + pageId + ", tag=" + tag + "]", e); } } @@ -661,7 +672,7 @@ private static int calcCrc32(ByteBuffer pageBuf, int pageSize) { fileIO.force(); } catch (IOException e) { - throw new StorageException("Failed to fsync partition file [file=" + cfgFile.getAbsolutePath() + ']', e); + throw new StorageException("Failed to fsync partition file [file=" + getFileAbsolutePath() + ']', e); } finally { lock.writeLock().unlock(); @@ -684,7 +695,7 @@ private static int calcCrc32(ByteBuffer pageBuf, int pageSize) { * @return File absolute path. 
*/ public String getFileAbsolutePath() { - return cfgFile.getAbsolutePath(); + return pathProvider.apply().toAbsolutePath().toString(); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java index fe93d0743be07..6271b8b638269 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java @@ -18,19 +18,39 @@ package org.apache.ignite.internal.processors.cache.persistence.file; import java.io.File; +import java.nio.file.Path; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker; +import org.apache.ignite.lang.IgniteOutClosure; /** * */ public interface FilePageStoreFactory { /** - * Creates instance of FilePageStore based on given file. + * Creates instance of PageStore based on given file. * * @param type Data type, can be {@link PageIdAllocator#FLAG_IDX} or {@link PageIdAllocator#FLAG_DATA}. * @param file File Page store file. + * @param allocatedTracker metrics updater. + * @return page store + * @throws IgniteCheckedException if failed. */ - public FilePageStore createPageStore(byte type, File file, AllocatedPageTracker allocatedTracker) throws IgniteCheckedException; + default FilePageStore createPageStore(byte type, File file, AllocatedPageTracker allocatedTracker) + throws IgniteCheckedException { + return createPageStore(type, file::toPath, allocatedTracker); + } + + /** + * Creates instance of PageStore based on file path provider. 
+ * + * @param type Data type, can be {@link PageIdAllocator#FLAG_IDX} or {@link PageIdAllocator#FLAG_DATA} + * @param pathProvider File Page store path provider. + * @param allocatedTracker metrics updater + * @return page store + * @throws IgniteCheckedException if failed + */ + FilePageStore createPageStore(byte type, IgniteOutClosure pathProvider, AllocatedPageTracker allocatedTracker) + throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java index dee98da16666e..47cbf355e05dd 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java @@ -27,8 +27,10 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.file.DirectoryStream; +import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.PathMatcher; import java.nio.file.StandardCopyOption; import java.util.Arrays; import java.util.Collection; @@ -126,6 +128,10 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen /** */ public static final String META_STORAGE_NAME = "metastorage"; + /** Matcher for searching of *.tmp files. */ + public static final PathMatcher TMP_FILE_MATCHER = + FileSystems.getDefault().getPathMatcher("glob:**" + TMP_SUFFIX); + /** Marshaller. */ private static final Marshaller marshaller = new JdkMarshaller(); @@ -450,7 +456,7 @@ public FilePageStoreManager(GridKernalContext ctx) { } /** {@inheritDoc} */ - @Override public void onPartitionCreated(int grpId, int partId) throws IgniteCheckedException { + @Override public void onPartitionCreated(int grpId, int partId) { // No-op. 
} @@ -555,7 +561,7 @@ public PageStore writeInternal(int cacheId, long pageId, ByteBuffer pageBuf, int * */ public Path getPath(boolean isSharedGroup, String cacheOrGroupName, int partId) { - return getPartitionFile(cacheWorkDir(isSharedGroup, cacheOrGroupName), partId).toPath(); + return getPartitionFilePath(cacheWorkDir(isSharedGroup, cacheOrGroupName), partId); } /** @@ -617,14 +623,15 @@ private CacheStoreHolder initDir(File cacheWorkDir, FilePageStore[] partStores = new FilePageStore[partitions]; for (int partId = 0; partId < partStores.length; partId++) { - FilePageStore partStore = - pageStoreFactory.createPageStore( - PageMemory.FLAG_DATA, - getPartitionFile(cacheWorkDir, partId), - allocatedTracker); + final int p = partId; - partStores[partId] = partStore; - } + FilePageStore partStore = pageStoreFactory.createPageStore( + PageMemory.FLAG_DATA, + () -> getPartitionFilePath(cacheWorkDir, p), + allocatedTracker); + + partStores[partId] = partStore; + } return new CacheStoreHolder(idxStore, partStores); } @@ -640,8 +647,8 @@ private CacheStoreHolder initDir(File cacheWorkDir, * @param cacheWorkDir Cache work directory. * @param partId Partition id. 
*/ - @NotNull private File getPartitionFile(File cacheWorkDir, int partId) { - return new File(cacheWorkDir, format(PART_FILE_TEMPLATE, partId)); + @NotNull private Path getPartitionFilePath(File cacheWorkDir, int partId) { + return new File(cacheWorkDir, String.format(PART_FILE_TEMPLATE, partId)).toPath(); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java index d8c800d39b9a6..4b0dc198109f6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java @@ -16,9 +16,10 @@ */ package org.apache.ignite.internal.processors.cache.persistence.file; -import java.io.File; +import java.nio.file.Path; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker; +import org.apache.ignite.lang.IgniteOutClosure; /** * @@ -31,19 +32,21 @@ public class FilePageStoreV2 extends FilePageStore { private final int hdrSize; /** + * Constructor which initializes file path provider closure, allowing to calculate file path in any time. + * * @param type Type. - * @param file File. + * @param pathProvider file path provider. * @param factory Factory. * @param cfg Config. - * @param allocatedTracker Metrics updater + * @param allocatedTracker Allocated tracker. 
*/ public FilePageStoreV2( byte type, - File file, + IgniteOutClosure pathProvider, FileIOFactory factory, DataStorageConfiguration cfg, AllocatedPageTracker allocatedTracker) { - super(type, file, factory, cfg, allocatedTracker); + super(type, pathProvider, factory, cfg, allocatedTracker); hdrSize = cfg.getPageSize(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java index bc938a57912fc..62266392b5ada 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java @@ -17,13 +17,15 @@ package org.apache.ignite.internal.processors.cache.persistence.file; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.ByteOrder; +import java.nio.file.Files; +import java.nio.file.Path; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker; +import org.apache.ignite.lang.IgniteOutClosure; /** * Checks version in files if it's present on the disk, creates store with latest version otherwise. 
@@ -73,16 +75,18 @@ public FileVersionCheckingFactory(FileIOFactory fileIOFactory, DataStorageConfig /** {@inheritDoc} */ @Override public FilePageStore createPageStore( byte type, - File file, + IgniteOutClosure pathProvider, AllocatedPageTracker allocatedTracker) throws IgniteCheckedException { - if (!file.exists()) - return createPageStore(type, file, latestVersion(), allocatedTracker); + Path filePath = pathProvider.apply(); - try (FileIO fileIO = fileIOFactoryStoreV1.create(file)) { + if (!Files.exists(filePath)) + return createPageStore(type, pathProvider, latestVersion(), allocatedTracker); + + try (FileIO fileIO = fileIOFactoryStoreV1.create(filePath.toFile())) { int minHdr = FilePageStore.HEADER_SIZE; if (fileIO.size() < minHdr) - return createPageStore(type, file, latestVersion(), allocatedTracker); + return createPageStore(type, pathProvider, latestVersion(), allocatedTracker); ByteBuffer hdr = ByteBuffer.allocate(minHdr).order(ByteOrder.LITTLE_ENDIAN); @@ -94,10 +98,10 @@ public FileVersionCheckingFactory(FileIOFactory fileIOFactory, DataStorageConfig int ver = hdr.getInt(); - return createPageStore(type, file, ver, allocatedTracker); + return createPageStore(type, pathProvider, ver, allocatedTracker); } catch (IOException e) { - throw new IgniteCheckedException("Error while creating file page store [file=" + file + "]:", e); + throw new IgniteCheckedException("Error while creating file page store [file=" + filePath.toAbsolutePath() + "]:", e); } } @@ -120,24 +124,24 @@ public int latestVersion() { * Instantiates specific version of FilePageStore. * * @param type Type. - * @param file File. * @param ver Version. 
* @param allocatedTracker Metrics updater */ public FilePageStore createPageStore( byte type, - File file, + IgniteOutClosure pathProvider, int ver, AllocatedPageTracker allocatedTracker) { + switch (ver) { case FilePageStore.VERSION: - return new FilePageStore(type, file, fileIOFactoryStoreV1, memCfg, allocatedTracker); + return new FilePageStore(type, pathProvider, fileIOFactoryStoreV1, memCfg, allocatedTracker); case FilePageStoreV2.VERSION: - return new FilePageStoreV2(type, file, fileIOFactory, memCfg, allocatedTracker); + return new FilePageStoreV2(type, pathProvider, fileIOFactory, memCfg, allocatedTracker); default: - throw new IllegalArgumentException("Unknown version of file page store: " + ver + " for file [" + file.getAbsolutePath() + "]"); + throw new IllegalArgumentException("Unknown version of file page store: " + ver + " for file [" + pathProvider.apply().toAbsolutePath() + "]"); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java index ffef9af7f350a..3eee3e12ca32e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java @@ -136,13 +136,12 @@ private PdsFolderSettings compatibleResolve( if (settings == null) { settings = prepareNewSettings(); - if (!settings.isCompatible()) { - if (log.isInfoEnabled()) - log.info("Consistent ID used for local node is [" + settings.consistentId() + "] " + - "according to persistence data storage folders"); - + if (!settings.isCompatible()) ctx.discovery().consistentId(settings.consistentId()); - } + + if (log.isInfoEnabled()) + log.info("Consistent ID used for local node is [" + settings.consistentId() + 
"] " + + "according to persistence data storage folders"); } return settings; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java index f450e19d658aa..7fa5be0abe3d0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java @@ -388,7 +388,7 @@ public long freeSpace() { for (int b = BUCKETS - 2; b > 0; b--) { long perPageFreeSpace = b << shift; - long pages = bucketsSize[b].longValue(); + long pages = bucketsSize.get(b); freeSpace += pages * perPageFreeSpace; } @@ -403,7 +403,7 @@ public long freeSpace() { final boolean dumpBucketsInfo = false; for (int b = 0; b < BUCKETS; b++) { - long size = bucketsSize[b].longValue(); + long size = bucketsSize.get(b); if (!isReuseBucket(b)) dataPages += size; @@ -436,7 +436,7 @@ public long freeSpace() { log.info("FreeList [name=" + name + ", buckets=" + BUCKETS + ", dataPages=" + dataPages + - ", reusePages=" + bucketsSize[REUSE_BUCKET].longValue() + "]"); + ", reusePages=" + bucketsSize.get(REUSE_BUCKET) + "]"); } } @@ -540,7 +540,7 @@ private long allocateDataPage(int part) throws IgniteCheckedException { Boolean updated = write(pageId, updateRow, row, itemId, null, statHolder); assert updated != null; // Can't fail here. - + return updated; } catch (IgniteCheckedException | Error e) { @@ -601,7 +601,7 @@ private long allocateDataPage(int part) throws IgniteCheckedException { * @return Number of empty data pages in free list. 
*/ public int emptyDataPages() { - return bucketsSize[emptyDataPagesBucket].intValue(); + return (int)bucketsSize.get(emptyDataPagesBucket); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java index 755610cff59f6..f6f549563619e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java @@ -23,7 +23,6 @@ import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; -import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; import org.apache.ignite.internal.stat.IoStatisticsHolder; import org.apache.ignite.internal.util.typedef.internal.U; @@ -37,7 +36,6 @@ public class CacheFreeList extends AbstractFreeList { * @param name Name. * @param regionMetrics Region metrics. * @param dataRegion Data region. - * @param reuseList Reuse list. * @param wal Wal. * @param metaPageId Meta page id. * @param initNew Initialize new. 
@@ -47,7 +45,6 @@ public CacheFreeList( String name, DataRegionMetricsImpl regionMetrics, DataRegion dataRegion, - ReuseList reuseList, IgniteWriteAheadLogManager wal, long metaPageId, boolean initNew, @@ -58,7 +55,7 @@ public CacheFreeList( name, regionMetrics, dataRegion, - reuseList, + null, wal, metaPageId, initNew, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java index 51139aed10ef7..ff8d39542e679 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/PagesList.java @@ -21,7 +21,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; -import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicLongArray; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.internal.managers.communication.GridIoPolicy; @@ -75,7 +75,7 @@ public abstract class PagesList extends DataStructure { Math.max(8, Runtime.getRuntime().availableProcessors())); /** */ - protected final AtomicLong[] bucketsSize; + protected final AtomicLongArray bucketsSize; /** */ protected volatile boolean changed; @@ -142,16 +142,13 @@ protected PagesList( long metaPageId, PageLockListener lockLsnr ) { - super(cacheId, pageMem, wal, lockLsnr); + super(cacheId, null, pageMem, wal, lockLsnr); this.name = name; this.buckets = buckets; this.metaPageId = metaPageId; - bucketsSize = new AtomicLong[buckets]; - - for (int i = 0; i < buckets; i++) - bucketsSize[i] = new AtomicLong(); + bucketsSize = new AtomicLongArray(buckets); } /** @@ -249,7 +246,7 @@ protected final void init(long metaPageId, boolean initNew) throws IgniteChecked assert ok; - 
bucketsSize[bucket].set(bucketSize); + bucketsSize.set(bucket, bucketSize); } } } @@ -643,7 +640,7 @@ protected final long storedPagesCount(int bucket) throws IgniteCheckedException } } - assert res == bucketsSize[bucket].get() : "Wrong bucket size counter [exp=" + res + ", cntr=" + bucketsSize[bucket].get() + ']'; + assert res == bucketsSize.get(bucket) : "Wrong bucket size counter [exp=" + res + ", cntr=" + bucketsSize.get(bucket) + ']'; return res; } @@ -1014,7 +1011,7 @@ private boolean putReuseBag( private Stripe getPageForTake(int bucket) { Stripe[] tails = getBucket(bucket); - if (tails == null || bucketsSize[bucket].get() == 0) + if (tails == null || bucketsSize.get(bucket) == 0) return null; int len = tails.length; @@ -1120,7 +1117,7 @@ protected final long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, // Another thread took the last page. writeUnlock(tailId, tailPage, tailAddr, false); - if (bucketsSize[bucket].get() > 0) { + if (bucketsSize.get(bucket) > 0) { lockAttempt--; // Ignore current attempt. continue; @@ -1535,7 +1532,7 @@ private void fairMerge( * @param bucket Bucket number. */ private void incrementBucketSize(int bucket) { - bucketsSize[bucket].incrementAndGet(); + bucketsSize.incrementAndGet(bucket); } /** @@ -1544,7 +1541,7 @@ private void incrementBucketSize(int bucket) { * @param bucket Bucket number. 
*/ private void decrementBucketSize(int bucket) { - bucketsSize[bucket].decrementAndGet(); + bucketsSize.decrementAndGet(bucket); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java index 23940baa9f053..9c662c13e592c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java @@ -42,12 +42,12 @@ import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.store.PageStore; import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager; import org.apache.ignite.internal.pagemem.wal.WALPointer; import org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord; import org.apache.ignite.internal.processors.cache.CacheDiagnosticManager; -import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.GridCacheProcessor; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.DataRegion; @@ -57,12 +57,13 @@ import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager; import org.apache.ignite.internal.processors.cache.persistence.RootPage; import org.apache.ignite.internal.processors.cache.persistence.StorageException; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStore; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager; 
import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; import org.apache.ignite.internal.processors.cache.persistence.partstorage.PartitionMetaStorageImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler; -import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.util.lang.GridCursor; import org.apache.ignite.internal.util.typedef.internal.CU; @@ -176,14 +177,29 @@ else if (gcProcessor.getTmpStorage() != null) { gcProcessor.setTmpStorage(null); - // remove old partitions - CacheGroupContext cgc = cctx.cache().cacheGroup(METASTORAGE_CACHE_ID); + db.addCheckpointListener(new DbCheckpointListener() { + @Override public void onMarkCheckpointBegin(Context ctx) { - if (cgc != null) { - db.schedulePartitionDestroy(METASTORAGE_CACHE_ID, OLD_METASTORE_PARTITION); + } + + @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException { + assert cctx.pageStore() != null; + + int partTag = ((PageMemoryEx)dataRegion.pageMemory()).invalidate(METASTORAGE_CACHE_ID, OLD_METASTORE_PARTITION); + cctx.pageStore().onPartitionDestroyed(METASTORAGE_CACHE_ID, OLD_METASTORE_PARTITION, partTag); + + int idxTag = ((PageMemoryEx)dataRegion.pageMemory()).invalidate(METASTORAGE_CACHE_ID, PageIdAllocator.INDEX_PARTITION); + PageStore store = ((FilePageStoreManager)cctx.pageStore()).getStore(METASTORAGE_CACHE_ID, PageIdAllocator.INDEX_PARTITION); + ((FilePageStore)store).truncate(idxTag); + + db.removeCheckpointListener(this); + } + + @Override public void beforeCheckpointBegin(Context ctx) { + + } + }); - db.schedulePartitionDestroy(METASTORAGE_CACHE_ID, PageIdAllocator.INDEX_PARTITION); - } } } } @@ -341,7 
+357,7 @@ public Collection> readAll() throws IgniteCheckedE while (cur.next()) { MetastorageDataRow row = cur.get(); - res.add(new IgniteBiTuple<>(row.key(), row.value())); + res.add(new IgniteBiTuple<>(row.key(), marshaller.unmarshal(row.value(), getClass().getClassLoader()))); } return res; @@ -353,7 +369,15 @@ public Collection> readAll() throws IgniteCheckedE byte[] data = marshaller.marshal(val); - putData(key, data); + final WALPointer ptr; + + synchronized (this) { + ptr = wal.log(new MetastoreDataRecord(key, data)); + + putData(key, data); + } + + wal.flush(ptr, false); } /** {@inheritDoc} */ @@ -364,10 +388,6 @@ public Collection> readAll() throws IgniteCheckedE /** */ public void putData(String key, byte[] data) throws IgniteCheckedException { if (!readOnly) { - WALPointer ptr = wal.log(new MetastoreDataRecord(key, data)); - - wal.flush(ptr, false); - synchronized (this) { MetastorageDataRow oldRow = tree.findOne(new MetastorageDataRow(key, null)); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java index e286668c4f43a..8bbfeef75ff94 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java @@ -76,6 +76,7 @@ public MetastorageTree( super( name, cacheId, + null, pageMem, wal, globalRmvId, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/CheckpointMetricsTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/CheckpointMetricsTracker.java index d0ffefdb54875..381e5ae4c1e0d 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/CheckpointMetricsTracker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/CheckpointMetricsTracker.java @@ -17,8 +17,12 @@ package org.apache.ignite.internal.processors.cache.persistence.pagemem; +import java.nio.ByteBuffer; import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord; +import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointEntry; +import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointEntryType; /** * Tracks various checkpoint phases and stats. @@ -76,6 +80,12 @@ public class CheckpointMetricsTracker { /** */ private long walCpRecordFsyncEnd; + /** */ + private long splitAndSortCpPagesStart; + + /** */ + private long splitAndSortCpPagesEnd; + /** */ private long listenersExecEnd; @@ -163,6 +173,20 @@ public void onWalCpRecordFsyncStart() { walCpRecordFsyncStart = System.currentTimeMillis(); } + /** + * + */ + public void onSplitAndSortCpPagesStart() { + splitAndSortCpPagesStart = System.currentTimeMillis(); + } + + /** + * + */ + public void onSplitAndSortCpPagesEnd() { + splitAndSortCpPagesEnd = System.currentTimeMillis(); + } + /** * */ @@ -233,6 +257,22 @@ public long walCpRecordFsyncDuration() { return walCpRecordFsyncEnd - walCpRecordFsyncStart; } + /** + * @return Duration of checkpoint entry buffer writing to file. + * + * @see GridCacheDatabaseSharedManager#writeCheckpointEntry(ByteBuffer, CheckpointEntry, CheckpointEntryType) + */ + public long writeCheckpointEntryDuration() { + return splitAndSortCpPagesStart - walCpRecordFsyncEnd; + } + + /** + * @return Duration of splitting and sorting checkpoint pages. 
+ */ + public long splitAndSortCpPagesDuration() { + return splitAndSortCpPagesEnd - splitAndSortCpPagesStart; + } + /** * @return Checkpoint start time. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/CheckpointPages.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/CheckpointPages.java new file mode 100644 index 0000000000000..46540f92c0224 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/CheckpointPages.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence.pagemem; + +import java.util.Collection; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.pagemem.FullPageId; + +/** + * View of pages which should be stored during current checkpoint. + */ +class CheckpointPages { + /** */ + private final Collection segCheckpointPages; + + /** The sign which allows to replace pages from a checkpoint by page replacer. 
*/ + private final IgniteInternalFuture allowToReplace; + + /** + * @param pages Pages which would be stored to disk in current checkpoint. + * @param replaceFuture The sign which allows to replace pages from a checkpoint by page replacer. + */ + CheckpointPages(Collection pages, IgniteInternalFuture replaceFuture) { + segCheckpointPages = pages; + allowToReplace = replaceFuture; + } + + /** + * @param fullPageId Page id for checking. + * @return {@code true} If fullPageId is allowable to store to disk. + */ + public boolean allowToSave(FullPageId fullPageId) throws IgniteCheckedException { + Collection checkpointPages = segCheckpointPages; + + if (checkpointPages == null || allowToReplace == null) + return false; + + //Uninterruptibly is important because otherwise in case of interrupt of client thread node would be stopped. + allowToReplace.getUninterruptibly(); + + return checkpointPages.contains(fullPageId); + } + + /** + * @param fullPageId Page id for checking. + * @return {@code true} If fullPageId is a candidate to be stored to disk by current checkpoint. + */ + public boolean contains(FullPageId fullPageId) { + Collection checkpointPages = segCheckpointPages; + + return checkpointPages != null && checkpointPages.contains(fullPageId); + } + + /** + * @param fullPageId Page id which should be marked as saved to disk. + * @return {@code true} if the marking was successful. + */ + public boolean markAsSaved(FullPageId fullPageId) { + Collection checkpointPages = segCheckpointPages; + + return checkpointPages != null && checkpointPages.remove(fullPageId); + } + + /** + * @return Size of all pages in current checkpoint. + */ + public int size() { + Collection checkpointPages = segCheckpointPages; + + return checkpointPages == null ? 
0 : checkpointPages.size(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageWrite.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageStoreWrite.java similarity index 90% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageWrite.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageStoreWrite.java index b08ddc2f89146..2061b4ad4160a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageWrite.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageStoreWrite.java @@ -20,6 +20,7 @@ import java.nio.ByteBuffer; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.processors.cache.persistence.PageStoreWriter; import org.apache.ignite.internal.util.GridUnsafe; import org.jetbrains.annotations.Nullable; @@ -28,9 +29,9 @@ * content without holding segment lock. Page data is copied into temp buffer during {@link #writePage(FullPageId, * ByteBuffer, int)} and then sent to real implementation by {@link #finishReplacement()}. */ -public class DelayedDirtyPageWrite implements ReplacedPageWriter { +public class DelayedDirtyPageStoreWrite implements PageStoreWriter { /** Real flush dirty page implementation. */ - private final ReplacedPageWriter flushDirtyPage; + private final PageStoreWriter flushDirtyPage; /** Page size. */ private final int pageSize; @@ -56,9 +57,12 @@ public class DelayedDirtyPageWrite implements ReplacedPageWriter { * @param pageSize page size. * @param tracker tracker to lock/unlock page reads. 
*/ - public DelayedDirtyPageWrite(ReplacedPageWriter flushDirtyPage, - ThreadLocal byteBufThreadLoc, int pageSize, - DelayedPageReplacementTracker tracker) { + public DelayedDirtyPageStoreWrite( + PageStoreWriter flushDirtyPage, + ThreadLocal byteBufThreadLoc, + int pageSize, + DelayedPageReplacementTracker tracker + ) { this.flushDirtyPage = flushDirtyPage; this.pageSize = pageSize; this.byteBufThreadLoc = byteBufThreadLoc; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedPageReplacementTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedPageReplacementTracker.java index aa1b06161c042..83033b2422ab8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedPageReplacementTracker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedPageReplacementTracker.java @@ -26,6 +26,7 @@ import org.apache.ignite.IgniteInterruptedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.processors.cache.persistence.PageStoreWriter; /** * Delayed page writes tracker. Provides delayed write implementations and allows to check if page is actually being @@ -36,7 +37,7 @@ public class DelayedPageReplacementTracker { private final int pageSize; /** Flush dirty page real implementation. */ - private final ReplacedPageWriter flushDirtyPage; + private final PageStoreWriter flushDirtyPage; /** Logger. */ private final IgniteLogger log; @@ -57,11 +58,11 @@ public class DelayedPageReplacementTracker { }; /** - * Dirty page write for replacement operations thread local. Because page write {@link DelayedDirtyPageWrite} is + * Dirty page write for replacement operations thread local. 
Because page write {@link DelayedDirtyPageStoreWrite} is * stateful and not thread safe, this thread local protects from GC pressure on pages replacement.
Map is used * instead of build-in thread local to allow GC to remove delayed writers for alive threads after node stop. */ - private final Map delayedPageWriteThreadLocMap = new ConcurrentHashMap<>(); + private final Map delayedPageWriteThreadLocMap = new ConcurrentHashMap<>(); /** * @param pageSize Page size. @@ -69,8 +70,12 @@ public class DelayedPageReplacementTracker { * @param log Logger. * @param segmentCnt Segments count. */ - public DelayedPageReplacementTracker(int pageSize, ReplacedPageWriter flushDirtyPage, - IgniteLogger log, int segmentCnt) { + public DelayedPageReplacementTracker( + int pageSize, + PageStoreWriter flushDirtyPage, + IgniteLogger log, + int segmentCnt + ) { this.pageSize = pageSize; this.flushDirtyPage = flushDirtyPage; this.log = log; @@ -83,9 +88,9 @@ public DelayedPageReplacementTracker(int pageSize, ReplacedPageWriter flushDirty /** * @return delayed page write implementation, finish method to be called to actually write page. */ - public DelayedDirtyPageWrite delayedPageWrite() { + public DelayedDirtyPageStoreWrite delayedPageWrite() { return delayedPageWriteThreadLocMap.computeIfAbsent(Thread.currentThread().getId(), - id -> new DelayedDirtyPageWrite(flushDirtyPage, byteBufThreadLoc, pageSize, this)); + id -> new DelayedDirtyPageStoreWrite(flushDirtyPage, byteBufThreadLoc, pageSize, this)); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java index 9b0cdb2eb023c..58f7d7fe66ccb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java @@ -23,11 +23,11 @@ import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.pagemem.FullPageId; import 
org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.PageStoreWriter; import org.apache.ignite.internal.processors.cache.persistence.StorageException; import org.apache.ignite.internal.stat.IoStatisticsHolder; import org.apache.ignite.internal.util.GridMultiCollectionWrapper; import org.apache.ignite.lang.IgniteBiTuple; -import org.jetbrains.annotations.Nullable; /** * @@ -91,7 +91,7 @@ public long acquirePage(int grpId, long pageId, IoStatisticsHolder statHldr, /** * Heuristic method which allows a thread to check if it safe to start memory struture modifications - * in regard with checkpointing. + * in regard with checkpointing. May return false-negative result during or after partition eviction. * * @return {@code False} if there are too many dirty pages and a thread should wait for a * checkpoint to begin. @@ -106,8 +106,9 @@ public long acquirePage(int grpId, long pageId, IoStatisticsHolder statHldr, * * @return Collection of dirty page IDs. * @throws IgniteException If checkpoint has been already started and was not finished. + * @param allowToReplace The sign which allows to replace pages from a checkpoint by page replacer. */ - public GridMultiCollectionWrapper beginCheckpoint() throws IgniteException; + public GridMultiCollectionWrapper beginCheckpoint(IgniteInternalFuture allowToReplace) throws IgniteException; /** * Gets a collection of dirty page IDs since the last checkpoint and dirty pages with user data are presented. If a @@ -118,8 +119,10 @@ public long acquirePage(int grpId, long pageId, IoStatisticsHolder statHldr, * @return Couple of collection of dirty page IDs and flag. The flag is {@code true}, if since last checkpoint at * least one page with user data (not relates with system cache) became a dirty, and {@code false} otherwise. * @throws IgniteException If checkpoint has been already started and was not finished. 
+ * @param allowToReplace The sign which allows to replace pages from a checkpoint by page replacer. */ - public IgniteBiTuple, Boolean> beginCheckpointEx() throws IgniteException; + public IgniteBiTuple, Boolean> beginCheckpointEx( + IgniteInternalFuture allowToReplace) throws IgniteException; /** * Finishes checkpoint operation. @@ -127,16 +130,22 @@ public long acquirePage(int grpId, long pageId, IoStatisticsHolder statHldr, public void finishCheckpoint(); /** - * Gets page byte buffer for the checkpoint procedure. + * Prepare page for write during checkpoint. + * {@link PageStoreWriter} will be called when the page will be ready to write. * * @param pageId Page ID to get byte buffer for. The page ID must be present in the collection returned by - * the {@link #beginCheckpoint()} method call. - * @param outBuf Temporary buffer to write changes into. + * the {@link #beginCheckpoint(IgniteInternalFuture)} method call. + * @param buf Temporary buffer to write changes into. + * @param pageWriter Checkpoint page write context. * @param tracker Checkpoint metrics tracker. - * @return {@code Partition generation} if data was read, {@code null} otherwise (data already saved to storage). - * @throws IgniteException If failed to obtain page data. + * @throws IgniteCheckedException If failed to obtain page data. */ - @Nullable public Integer getForCheckpoint(FullPageId pageId, ByteBuffer outBuf, CheckpointMetricsTracker tracker); + public void checkpointWritePage( + FullPageId pageId, + ByteBuffer buf, + PageStoreWriter pageWriter, + CheckpointMetricsTracker tracker + ) throws IgniteCheckedException; /** * Marks partition as invalid / outdated.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java index 1f8df6fc14859..ffb21d336e368 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java @@ -34,6 +34,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; @@ -64,6 +65,7 @@ import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; import org.apache.ignite.internal.processors.cache.persistence.CheckpointWriteProgressSupplier; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.PageStoreWriter; import org.apache.ignite.internal.processors.cache.persistence.StorageException; import org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListMetaIO; import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; @@ -89,6 +91,7 @@ import org.apache.ignite.lang.IgniteBiTuple; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import org.jetbrains.annotations.TestOnly; import static java.lang.Boolean.FALSE; import static java.lang.Boolean.TRUE; @@ -230,6 +233,9 @@ public class PageMemoryImpl implements PageMemoryEx { /** Segments array. 
*/ private Segment[] segments; + /** @see #safeToUpdate() */ + private final AtomicBoolean safeToUpdate = new AtomicBoolean(true); + /** */ private PagePool checkpointPool; @@ -237,7 +243,7 @@ public class PageMemoryImpl implements PageMemoryEx { private OffheapReadWriteLock rwLock; /** Flush dirty page closure. When possible, will be called by evictPage(). */ - private final ReplacedPageWriter flushDirtyPage; + private final PageStoreWriter flushDirtyPage; /** */ private final AtomicBoolean dirtyUserPagesPresent = new AtomicBoolean(); @@ -290,7 +296,7 @@ public PageMemoryImpl( long[] sizes, GridCacheSharedContext ctx, int pageSize, - ReplacedPageWriter flushDirtyPage, + PageStoreWriter flushDirtyPage, @Nullable GridInClosure3X changeTracker, CheckpointLockStateChecker stateChecker, DataRegionMetricsImpl memMetrics, @@ -484,7 +490,7 @@ else if (throttlingPlc == ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY) // because there is no crc inside them. Segment seg = segment(grpId, pageId); - DelayedDirtyPageWrite delayedWriter = delayedPageReplacementTracker != null + DelayedDirtyPageStoreWrite delayedWriter = delayedPageReplacementTracker != null ? delayedPageReplacementTracker.delayedPageWrite() : null; FullPageId fullId = new FullPageId(pageId, grpId); @@ -573,11 +579,12 @@ else if (throttlingPlc == ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY) } finally { seg.writeLock().unlock(); - - if (delayedWriter != null) - delayedWriter.finishReplacement(); } + // Finish replacement only when an exception wasn't thrown, otherwise it is possible to corrupt B+Tree. + if (delayedWriter != null) + delayedWriter.finishReplacement(); + //we have allocated 'tracking' page, we need to allocate regular one return isTrackingPage ?
allocatePage(grpId, partId, flags) : pageId; } @@ -675,7 +682,7 @@ private DataRegionConfiguration getDataRegionConfiguration() { seg.readLock().unlock(); } - DelayedDirtyPageWrite delayedWriter = delayedPageReplacementTracker != null + DelayedDirtyPageStoreWrite delayedWriter = delayedPageReplacementTracker != null ? delayedPageReplacementTracker.delayedPageWrite() : null; seg.writeLock().lock(); @@ -858,25 +865,35 @@ private long refreshOutdatedPage(Segment seg, int grpId, long pageId, boolean rm // We pinned the page when allocated the temp buffer, release it now. PageHeader.releasePage(absPtr); - checkpointPool.releaseFreePage(tmpBufPtr); + releaseCheckpointBufferPage(tmpBufPtr); } if (rmv) seg.loadedPages.remove(grpId, PageIdUtils.effectivePageId(pageId)); - Collection cpPages = seg.segCheckpointPages; + CheckpointPages cpPages = seg.checkpointPages; if (cpPages != null) - cpPages.remove(new FullPageId(pageId, grpId)); + cpPages.markAsSaved(new FullPageId(pageId, grpId)); Collection dirtyPages = seg.dirtyPages; - if (dirtyPages != null) - dirtyPages.remove(new FullPageId(pageId, grpId)); + if (dirtyPages != null) { + if (dirtyPages.remove(new FullPageId(pageId, grpId))) + seg.dirtyPagesCntr.decrementAndGet(); + } return relPtr; } + /** */ + private void releaseCheckpointBufferPage(long tmpBufPtr) { + int resCntr = checkpointPool.releaseFreePage(tmpBufPtr); + + if (resCntr == checkpointBufferPagesSize() / 2 && writeThrottle != null) + writeThrottle.tryWakeupThrottledThreads(); + } + /** * Restores page from WAL page snapshot & delta records. 
* @@ -975,11 +992,8 @@ private void tryToRestorePage(FullPageId fullId, ByteBuffer buf) throws IgniteCh /** {@inheritDoc} */ @Override public boolean safeToUpdate() { - if (segments != null) { - for (Segment segment : segments) - if (!segment.safeToUpdate()) - return false; - } + if (segments != null) + return safeToUpdate.get(); return true; } @@ -1022,12 +1036,15 @@ public long totalPages() { } /** {@inheritDoc} */ - @Override public GridMultiCollectionWrapper beginCheckpoint() throws IgniteException { - return beginCheckpointEx().get1(); + @Override public GridMultiCollectionWrapper beginCheckpoint( + IgniteInternalFuture allowToReplace + ) throws IgniteException { + return beginCheckpointEx(allowToReplace).get1(); } /** {@inheritDoc} */ @Override public IgniteBiTuple, Boolean> beginCheckpointEx( + IgniteInternalFuture allowToReplace ) throws IgniteException { if (segments == null) return new IgniteBiTuple<>(new GridMultiCollectionWrapper<>(Collections.emptyList()), false); @@ -1037,14 +1054,20 @@ public long totalPages() { for (int i = 0; i < segments.length; i++) { Segment seg = segments[i]; - if (seg.segCheckpointPages != null) + if (seg.checkpointPages != null) throw new IgniteException("Failed to begin checkpoint (it is already in progress)."); - collections[i] = seg.segCheckpointPages = seg.dirtyPages; + Collection dirtyPages = seg.dirtyPages; + collections[i] = dirtyPages; + + seg.checkpointPages = new CheckpointPages(dirtyPages, allowToReplace); seg.dirtyPages = new GridConcurrentHashSet<>(); + seg.dirtyPagesCntr.set(0); } + safeToUpdate.set(true); + memMetrics.resetDirtyPages(); boolean hasUserDirtyPages = dirtyUserPagesPresent.getAndSet(false); @@ -1069,15 +1092,20 @@ private boolean isThrottlingEnabled() { return; for (Segment seg : segments) - seg.segCheckpointPages = null; + seg.checkpointPages = null; if (throttlingPlc != ThrottlingPolicy.DISABLED) writeThrottle.onFinishCheckpoint(); } /** {@inheritDoc} */ - @Override public Integer 
getForCheckpoint(FullPageId fullId, ByteBuffer outBuf, CheckpointMetricsTracker tracker) { - assert outBuf.remaining() == pageSize(); + @Override public void checkpointWritePage( + FullPageId fullId, + ByteBuffer buf, + PageStoreWriter pageStoreWriter, + CheckpointMetricsTracker metricsTracker + ) throws IgniteCheckedException { + assert buf.remaining() == pageSize(); Segment seg = segment(fullId.groupId(), fullId.pageId()); @@ -1093,26 +1121,18 @@ private boolean isThrottlingEnabled() { try { if (!isInCheckpoint(fullId)) - return null; - - tag = seg.partGeneration(fullId.groupId(), PageIdUtils.partId(fullId.pageId())); + return; - relPtr = seg.loadedPages.get( - fullId.groupId(), - PageIdUtils.effectivePageId(fullId.pageId()), - tag, - INVALID_REL_PTR, - OUTDATED_REL_PTR - ); + relPtr = resolveRelativePointer(seg, fullId, tag = generationTag(seg, fullId)); // Page may have been cleared during eviction. We have nothing to do in this case. if (relPtr == INVALID_REL_PTR) - return null; + return; if (relPtr != OUTDATED_REL_PTR) { absPtr = seg.absolute(relPtr); - // Pin the page until page will not be copied. + // Pin the page until page will not be copied. This is helpful to prevent page replacement of this page. if (PageHeader.tempBufferPointer(absPtr) == INVALID_REL_PTR) PageHeader.acquirePage(absPtr); else @@ -1128,61 +1148,56 @@ private boolean isThrottlingEnabled() { try { // Double-check.
- relPtr = seg.loadedPages.get( - fullId.groupId(), - PageIdUtils.effectivePageId(fullId.pageId()), - seg.partGeneration( - fullId.groupId(), - PageIdUtils.partId(fullId.pageId()) - ), - INVALID_REL_PTR, - OUTDATED_REL_PTR - ); + relPtr = resolveRelativePointer(seg, fullId, generationTag(seg, fullId)); if (relPtr == INVALID_REL_PTR) - return null; + return; if (relPtr == OUTDATED_REL_PTR) { relPtr = refreshOutdatedPage( seg, fullId.groupId(), - PageIdUtils.effectivePageId(fullId.pageId()), + fullId.effectivePageId(), true ); seg.pool.releaseFreePage(relPtr); } - return null; + return; } finally { seg.writeLock().unlock(); } } - else - return copyPageForCheckpoint(absPtr, fullId, outBuf, pageSingleAcquire, tracker) ? tag : TRY_AGAIN_TAG; + + copyPageForCheckpoint(absPtr, fullId, buf, tag, pageSingleAcquire, pageStoreWriter, metricsTracker); } /** * @param absPtr Absolute ptr. * @param fullId Full id. - * @param outBuf Output buffer to write page content into. + * @param buf Buffer for copy page content for future write via {@link PageStoreWriter}. * @param pageSingleAcquire Page is acquired only once. We don't pin the page second time (until page will not be * copied) in case checkpoint temporary buffer is used. - * @param tracker Checkpoint statistics tracker. - * - * @return False if someone else holds lock on page. + * @param pageStoreWriter Checkpoint page write context. */ - private boolean copyPageForCheckpoint( + private void copyPageForCheckpoint( long absPtr, FullPageId fullId, - ByteBuffer outBuf, + ByteBuffer buf, + Integer tag, boolean pageSingleAcquire, + PageStoreWriter pageStoreWriter, CheckpointMetricsTracker tracker - ) { + ) throws IgniteCheckedException { assert absPtr != 0; assert PageHeader.isAcquired(absPtr); + // Exception protection flag. + // No need to write if exception occurred. 
+ boolean canWrite = false; + boolean locked = rwLock.tryWriteLock(absPtr + PAGE_LOCK_OFFSET, OffheapReadWriteLock.TAG_LOCK_ALWAYS); if (!locked) { @@ -1191,7 +1206,11 @@ private boolean copyPageForCheckpoint( if (!pageSingleAcquire) PageHeader.releasePage(absPtr); - return false; + buf.clear(); + + pageStoreWriter.writePage(fullId, buf, TRY_AGAIN_TAG); + + return; } try { @@ -1206,37 +1225,44 @@ private boolean copyPageForCheckpoint( long tmpAbsPtr = checkpointPool.absolute(tmpRelPtr); - copyInBuffer(tmpAbsPtr, outBuf); + copyInBuffer(tmpAbsPtr, buf); GridUnsafe.setMemory(tmpAbsPtr + PAGE_OVERHEAD, pageSize(), (byte)0); if (tracker != null) tracker.onCowPageWritten(); - checkpointPool.releaseFreePage(tmpRelPtr); + releaseCheckpointBufferPage(tmpRelPtr); // Need release again because we pin page when resolve abs pointer, // and page did not have tmp buffer page. if (!pageSingleAcquire) PageHeader.releasePage(absPtr); - } else { - copyInBuffer(absPtr, outBuf); + copyInBuffer(absPtr, buf); PageHeader.dirty(absPtr, false); } - assert PageIO.getType(outBuf) != 0 : "Invalid state. Type is 0! pageId = " + U.hexLong(fullId.pageId()); - assert PageIO.getVersion(outBuf) != 0 : "Invalid state. Version is 0! pageId = " + U.hexLong(fullId.pageId()); - - memMetrics.onPageWritten(); + assert PageIO.getType(buf) != 0 : "Invalid state. Type is 0! pageId = " + U.hexLong(fullId.pageId()); + assert PageIO.getVersion(buf) != 0 : "Invalid state. Version is 0! pageId = " + U.hexLong(fullId.pageId()); - return true; + canWrite = true; } finally { rwLock.writeUnlock(absPtr + PAGE_LOCK_OFFSET, OffheapReadWriteLock.TAG_LOCK_ALWAYS); + if (canWrite) { + buf.rewind(); + + pageStoreWriter.writePage(fullId, buf, tag); + + memMetrics.onPageWritten(); + + buf.rewind(); + } + // We pinned the page either when allocated the temp buffer, or when resolved abs pointer. // Must release the page only after write unlock.
PageHeader.releasePage(absPtr); @@ -1266,6 +1292,38 @@ private void copyInBuffer(long absPtr, ByteBuffer buf) { } } + /** + * Get current partition generation tag. + * + * @param seg Segment. + * @param fullId Full page id. + * @return Current partition generation tag. + */ + private int generationTag(Segment seg, FullPageId fullId) { + return seg.partGeneration( + fullId.groupId(), + PageIdUtils.partId(fullId.pageId()) + ); + } + + /** + * Resolves relative pointer via {@link LoadedPagesMap}. + * + * @param seg Segment. + * @param fullId Full page id. + * @param reqVer Required version. + * @return Relative pointer. + */ + private long resolveRelativePointer(Segment seg, FullPageId fullId, int reqVer) { + return seg.loadedPages.get( + fullId.groupId(), + fullId.effectivePageId(), + reqVer, + INVALID_REL_PTR, + OUTDATED_REL_PTR + ); + } + /** {@inheritDoc} */ @Override public int invalidate(int grpId, int partId) { int tag = 0; @@ -1375,6 +1433,30 @@ public long acquiredPages() { return total; } + /** + * @param fullPageId Full page ID to check. + * @return {@code true} if the page is contained in the loaded pages table, {@code false} otherwise. + */ + public boolean hasLoadedPage(FullPageId fullPageId) { + int grpId = fullPageId.groupId(); + long pageId = fullPageId.effectivePageId(); + int partId = PageIdUtils.partId(pageId); + + Segment seg = segment(grpId, pageId); + + seg.readLock().lock(); + + try { + long res = + seg.loadedPages.get(grpId, pageId, seg.partGeneration(grpId, partId), INVALID_REL_PTR, INVALID_REL_PTR); + + return res != INVALID_REL_PTR; + } + finally { + seg.readLock().unlock(); + } + } + /** * @param absPtr Absolute pointer to read lock. * @param fullId Full page ID.
@@ -1517,38 +1599,58 @@ private void writeUnlockPage( ) { boolean wasDirty = isDirty(page); - //if page is for restore, we shouldn't mark it as changed - if (!restore && markDirty && !wasDirty && changeTracker != null) - changeTracker.apply(page, fullId, this); + try { + //if page is for restore, we shouldn't mark it as changed + if (!restore && markDirty && !wasDirty && changeTracker != null) + changeTracker.apply(page, fullId, this); - boolean pageWalRec = markDirty && walPlc != FALSE && (walPlc == TRUE || !wasDirty); + boolean pageWalRec = markDirty && walPlc != FALSE && (walPlc == TRUE || !wasDirty); - assert GridUnsafe.getInt(page + PAGE_OVERHEAD + 4) == 0; //TODO GG-11480 + assert GridUnsafe.getInt(page + PAGE_OVERHEAD + 4) == 0; //TODO GG-11480 - if (markDirty) - setDirty(fullId, page, markDirty, false); + if (markDirty) + setDirty(fullId, page, true, false); - beforeReleaseWrite(fullId, page + PAGE_OVERHEAD, pageWalRec); + beforeReleaseWrite(fullId, page + PAGE_OVERHEAD, pageWalRec); + } + // Always release the lock. 
+ finally { + long pageId = PageIO.getPageId(page + PAGE_OVERHEAD); - long pageId = PageIO.getPageId(page + PAGE_OVERHEAD); + try { + assert pageId != 0 : U.hexLong(PageHeader.readPageId(page)); - assert pageId != 0 : U.hexLong(PageHeader.readPageId(page)); - assert PageIO.getVersion(page + PAGE_OVERHEAD) != 0 : U.hexLong(pageId); - assert PageIO.getType(page + PAGE_OVERHEAD) != 0 : U.hexLong(pageId); + rwLock.writeUnlock(page + PAGE_LOCK_OFFSET, PageIdUtils.tag(pageId)); - try { - rwLock.writeUnlock(page + PAGE_LOCK_OFFSET, PageIdUtils.tag(pageId)); + assert PageIO.getVersion(page + PAGE_OVERHEAD) != 0 : dumpPage(pageId, fullId.groupId()); + assert PageIO.getType(page + PAGE_OVERHEAD) != 0 : U.hexLong(pageId); - if (throttlingPlc != ThrottlingPolicy.DISABLED && !restore && markDirty && !wasDirty) - writeThrottle.onMarkDirty(isInCheckpoint(fullId)); - } - catch (AssertionError ex) { - U.error(log, "Failed to unlock page [fullPageId=" + fullId + ", binPage=" + U.toHexString(page, systemPageSize()) + ']'); + if (throttlingPlc != ThrottlingPolicy.DISABLED && !restore && markDirty && !wasDirty) + writeThrottle.onMarkDirty(isInCheckpoint(fullId)); + } + catch (AssertionError ex) { + U.error(log, "Failed to unlock page [fullPageId=" + fullId + + ", binPage=" + U.toHexString(page, systemPageSize()) + ']'); - throw ex; + throw ex; + } } } + /** + * Prepares page details for assertion. + * @param pageId Page id. + * @param grpId Group id. + */ + @NotNull private String dumpPage(long pageId, int grpId) { + int pageIdx = PageIdUtils.pageIndex(pageId); + int partId = PageIdUtils.partId(pageId); + long off = (long)(pageIdx + 1) * pageSize(); + + return U.hexLong(pageId) + " (grpId=" + grpId + ", pageIdx=" + pageIdx + ", partId=" + partId + ", offH=" + + Long.toHexString(off) + ")"; + } + /** * @param absPtr Absolute pointer to the page. * @return {@code True} if write lock acquired for the page. 
@@ -1572,7 +1674,7 @@ boolean isPageReadLocked(long absPtr) { boolean isInCheckpoint(FullPageId pageId) { Segment seg = segment(pageId.groupId(), pageId.pageId()); - Collection pages0 = seg.segCheckpointPages; + CheckpointPages pages0 = seg.checkpointPages; return pages0 != null && pages0.contains(pageId); } @@ -1584,11 +1686,11 @@ boolean isInCheckpoint(FullPageId pageId) { boolean clearCheckpoint(FullPageId fullPageId) { Segment seg = segment(fullPageId.groupId(), fullPageId.pageId()); - Collection pages0 = seg.segCheckpointPages; + CheckpointPages pages0 = seg.checkpointPages; assert pages0 != null; - return pages0.remove(fullPageId); + return pages0.markAsSaved(fullPageId); } /** @@ -1641,20 +1743,29 @@ private void setDirty(FullPageId pageId, long absPtr, boolean dirty, boolean for assert stateChecker.checkpointLockIsHeldByThread(); if (!wasDirty || forceAdd) { - boolean added = segment(pageId.groupId(), pageId.pageId()).dirtyPages.add(pageId); + Segment seg = segment(pageId.groupId(), pageId.pageId()); + + if (seg.dirtyPages.add(pageId)) { + long dirtyPagesCnt = seg.dirtyPagesCntr.incrementAndGet(); + + if (dirtyPagesCnt >= seg.maxDirtyPages) + safeToUpdate.set(false); - if (added) memMetrics.incrementDirtyPages(); + } } if (pageId.groupId() != CU.UTILITY_CACHE_GROUP_ID && !dirtyUserPagesPresent.get()) dirtyUserPagesPresent.set(true); } else { - boolean rmv = segment(pageId.groupId(), pageId.pageId()).dirtyPages.remove(pageId); + Segment seg = segment(pageId.groupId(), pageId.pageId()); + + if (seg.dirtyPages.remove(pageId)) { + seg.dirtyPagesCntr.decrementAndGet(); - if (rmv) memMetrics.decrementDirtyPages(); + } } } @@ -1824,14 +1935,17 @@ private long allocateFreePage(long pageId) throws GridOffHeapOutOfMemoryExceptio /** * @param relPtr Relative pointer to free. + * @return Resulting number of pages in pool if pages counter is enabled, 0 otherwise. 
*/ - private void releaseFreePage(long relPtr) { + private int releaseFreePage(long relPtr) { long absPtr = absolute(relPtr); assert !PageHeader.isAcquired(absPtr) : "Release pinned page: " + PageHeader.fullPageId(absPtr); + int resCntr = 0; + if (pagesCntr != null) - pagesCntr.getAndDecrement(); + resCntr = pagesCntr.decrementAndGet(); while (true) { long freePageRelPtrMasked = GridUnsafe.getLong(freePageListPtr); @@ -1841,7 +1955,7 @@ private void releaseFreePage(long relPtr) { GridUnsafe.putLong(absPtr, freePageRelPtr); if (GridUnsafe.compareAndSwapLong(null, freePageListPtr, freePageRelPtrMasked, relPtr)) - return; + return resCntr; } } @@ -1882,7 +1996,11 @@ private int pages() { * * @return Collection of all page IDs marked as dirty. */ + @TestOnly public Collection dirtyPages() { + if (segments == null) + return Collections.emptySet(); + Collection res = new HashSet<>((int)loadedPages()); for (Segment seg : segments) @@ -1922,11 +2040,14 @@ private class Segment extends ReentrantReadWriteLock { /** Pages marked as dirty since the last checkpoint. */ private volatile Collection dirtyPages = new GridConcurrentHashSet<>(); - /** */ - private volatile Collection segCheckpointPages; + /** Atomic size counter for {@link #dirtyPages}. Used for {@link PageMemoryImpl#safeToUpdate()} calculation. */ + private final AtomicLong dirtyPagesCntr = new AtomicLong(); + + /** Wrapper of pages of current checkpoint. */ + private volatile CheckpointPages checkpointPages; /** */ - private final int maxDirtyPages; + private final long maxDirtyPages; /** Initial partition generation. */ private static final int INIT_PART_GENERATION = 1; @@ -1967,8 +2088,8 @@ private Segment(int idx, DirectMemoryRegion region, int cpPoolPages, ThrottlingP pool = new PagePool(idx, poolRegion, null); maxDirtyPages = throttlingPlc != ThrottlingPolicy.DISABLED - ? pool.pages() * 3 / 4 - : Math.min(pool.pages() * 2 / 3, cpPoolPages); + ? 
pool.pages() * 3L / 4 + : Math.min(pool.pages() * 2L / 3, cpPoolPages); } /** @@ -1985,13 +2106,6 @@ private void close() { } } - /** - * - */ - private boolean safeToUpdate() { - return dirtyPages.size() < maxDirtyPages; - } - /** * @param dirtyRatioThreshold Throttle threshold. */ @@ -2003,7 +2117,7 @@ private boolean shouldThrottle(double dirtyRatioThreshold) { * @return dirtyRatio to be compared with Throttle threshold. */ private double getDirtyPagesRatio() { - return ((double)dirtyPages.size()) / pages(); + return dirtyPagesCntr.doubleValue() / pages(); } /** @@ -2062,7 +2176,7 @@ private long borrowOrAllocateFreePage(long pageId) { * @return {@code True} if it is ok to replace this page, {@code false} if another page should be selected. * @throws IgniteCheckedException If failed to write page to the underlying store during eviction. */ - private boolean preparePageRemoval(FullPageId fullPageId, long absPtr, ReplacedPageWriter saveDirtyPage) throws IgniteCheckedException { + private boolean preparePageRemoval(FullPageId fullPageId, long absPtr, PageStoreWriter saveDirtyPage) throws IgniteCheckedException { assert writeLock().isHeldByCurrentThread(); // Do not evict cache meta pages. @@ -2072,14 +2186,13 @@ private boolean preparePageRemoval(FullPageId fullPageId, long absPtr, ReplacedP if (PageHeader.isAcquired(absPtr)) return false; - Collection cpPages = segCheckpointPages; - clearRowCache(fullPageId, absPtr); if (isDirty(absPtr)) { + CheckpointPages checkpointPages = this.checkpointPages; // Can evict a dirty page only if should be written by a checkpoint. // These pages does not have tmp buffer. 
- if (cpPages != null && cpPages.contains(fullPageId)) { + if (checkpointPages != null && checkpointPages.allowToSave(fullPageId)) { assert storeMgr != null; memMetrics.updatePageReplaceRate(U.currentTimeMillis() - PageHeader.readTimestamp(absPtr)); @@ -2095,7 +2208,7 @@ private boolean preparePageRemoval(FullPageId fullPageId, long absPtr, ReplacedP setDirty(fullPageId, absPtr, false, true); - cpPages.remove(fullPageId); + checkpointPages.markAsSaved(fullPageId); return true; } @@ -2155,7 +2268,7 @@ private void clearRowCache(FullPageId fullPageId, long absPtr) throws IgniteChec * @throws IgniteCheckedException If failed to evict page. * @param saveDirtyPage Replaced page writer, implementation to save dirty page to persistent storage. */ - private long removePageForReplacement(ReplacedPageWriter saveDirtyPage) throws IgniteCheckedException { + private long removePageForReplacement(PageStoreWriter saveDirtyPage) throws IgniteCheckedException { assert getWriteHoldCount() > 0; if (!pageReplacementWarned) { @@ -2291,7 +2404,7 @@ else if (dirtyAddr != INVALID_REL_PTR) loadedPages.remove( fullPageId.groupId(), - PageIdUtils.effectivePageId(fullPageId.pageId()) + fullPageId.effectivePageId() ); return relRmvAddr; @@ -2326,7 +2439,7 @@ private boolean isStoreMetadataPage(long absPageAddr) { * @param cap Capacity. * @param saveDirtyPage Evicted page writer. 
*/ - private long tryToFindSequentially(int cap, ReplacedPageWriter saveDirtyPage) throws IgniteCheckedException { + private long tryToFindSequentially(int cap, PageStoreWriter saveDirtyPage) throws IgniteCheckedException { assert getWriteHoldCount() > 0; long prevAddr = INVALID_REL_PTR; @@ -2364,7 +2477,7 @@ private long tryToFindSequentially(int cap, ReplacedPageWriter saveDirtyPage) th if (preparePageRemoval(fullPageId, absEvictAddr, saveDirtyPage)) { loadedPages.remove( fullPageId.groupId(), - PageIdUtils.effectivePageId(fullPageId.pageId()) + fullPageId.effectivePageId() ); return addr; @@ -2380,8 +2493,8 @@ private long tryToFindSequentially(int cap, ReplacedPageWriter saveDirtyPage) th throw new IgniteOutOfMemoryException("Failed to find a page for eviction [segmentCapacity=" + cap + ", loaded=" + loadedPages.size() + ", maxDirtyPages=" + maxDirtyPages + - ", dirtyPages=" + dirtyPages.size() + - ", cpPages=" + (segCheckpointPages == null ? 0 : segCheckpointPages.size()) + + ", dirtyPages=" + dirtyPagesCntr + + ", cpPages=" + (checkpointPages == null ? 
0 : checkpointPages.size()) + ", pinnedInSegment=" + pinnedCnt + ", failedToPrepare=" + failToPrepare + ']' + U.nl() + "Out of memory in data region [" + @@ -2797,7 +2910,8 @@ private ClearSegmentRunnable( if (rmvDirty) { FullPageId fullId = PageHeader.fullPageId(absPtr); - seg.dirtyPages.remove(fullId); + if (seg.dirtyPages.remove(fullId)) + seg.dirtyPagesCntr.decrementAndGet(); } GridUnsafe.setMemory(absPtr + PAGE_OVERHEAD, pageSize, (byte)0); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteSpeedBasedThrottle.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteSpeedBasedThrottle.java index 2dd81275d108e..d497bdafa5e4b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteSpeedBasedThrottle.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteSpeedBasedThrottle.java @@ -288,7 +288,7 @@ private void recurrentLogIfNeed() { if (weight <= WARN_THRESHOLD) return; - if (prevWarnTime.compareAndSet(prevWarningNs, curNs)) { + if (prevWarnTime.compareAndSet(prevWarningNs, curNs) && log.isInfoEnabled()) { String msg = String.format("Throttling is applied to page modifications " + "[percentOfPartTime=%.2f, markDirty=%d pages/sec, checkpointWrite=%d pages/sec, " + "estIdealMarkDirty=%d pages/sec, curDirty=%.2f, maxDirty=%.2f, avgParkTime=%d ns, " + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java index 2828c4348af8d..548eb55706c63 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java @@ -16,8 +16,7 @@ */ package org.apache.ignite.internal.processors.cache.persistence.pagemem; -import java.util.Collection; -import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.LockSupport; import org.apache.ignite.IgniteLogger; @@ -63,8 +62,8 @@ public class PagesWriteThrottle implements PagesWriteThrottlePolicy { /** Logger. */ private IgniteLogger log; - /** Currently parking threads. */ - private final Collection parkThrds = new ConcurrentLinkedQueue<>(); + /** Threads that are throttled due to checkpoint buffer overflow. */ + private final ConcurrentHashMap cpBufThrottledThreads = new ConcurrentHashMap<>(); /** * @param pageMemory Page memory. @@ -95,11 +94,8 @@ public PagesWriteThrottle(PageMemoryImpl pageMemory, boolean shouldThrottle = false; - if (isPageInCheckpoint) { - int checkpointBufLimit = (int)(pageMemory.checkpointBufferPagesSize() * CP_BUF_FILL_THRESHOLD); - - shouldThrottle = pageMemory.checkpointBufferPagesCount() > checkpointBufLimit; - } + if (isPageInCheckpoint) + shouldThrottle = shouldThrottle(); if (!shouldThrottle && !throttleOnlyPagesInCheckpoint) { AtomicInteger writtenPagesCntr = cpProgress.writtenPagesCounter(); @@ -132,23 +128,45 @@ public PagesWriteThrottle(PageMemoryImpl pageMemory, long throttleParkTimeNs = (long) (STARTING_THROTTLE_NANOS * Math.pow(BACKOFF_RATIO, throttleLevel)); + Thread curThread = Thread.currentThread(); + if (throttleParkTimeNs > LOGGING_THRESHOLD) { - U.warn(log, "Parking thread=" + Thread.currentThread().getName() + U.warn(log, "Parking thread=" + curThread.getName() + " for timeout(ms)=" + (throttleParkTimeNs / 1_000_000)); } - if (isPageInCheckpoint) - parkThrds.add(Thread.currentThread()); + if (isPageInCheckpoint) { + 
cpBufThrottledThreads.put(curThread.getId(), curThread); + + try { + LockSupport.parkNanos(throttleParkTimeNs); + } + finally { + cpBufThrottledThreads.remove(curThread.getId()); - LockSupport.parkNanos(throttleParkTimeNs); + if (throttleParkTimeNs > LOGGING_THRESHOLD) { + U.warn(log, "Unparking thread=" + curThread.getName() + + " with park timeout(ms)=" + (throttleParkTimeNs / 1_000_000)); + } + } + } + else + LockSupport.parkNanos(throttleParkTimeNs); } else { int oldCntr = cntr.getAndSet(0); - if (isPageInCheckpoint && oldCntr != 0) { - parkThrds.forEach(LockSupport::unpark); - parkThrds.clear(); - } + if (isPageInCheckpoint && oldCntr != 0) + cpBufThrottledThreads.values().forEach(LockSupport::unpark); + } + } + + /** {@inheritDoc} */ + @Override public void tryWakeupThrottledThreads() { + if (!shouldThrottle()) { + inCheckpointBackoffCntr.set(0); + + cpBufThrottledThreads.values().forEach(LockSupport::unpark); } } @@ -162,4 +180,13 @@ public PagesWriteThrottle(PageMemoryImpl pageMemory, notInCheckpointBackoffCntr.set(0); } + + /** + * @return {@code True} if throttling should be enabled, and {@code False} otherwise. 
+ */ + private boolean shouldThrottle() { + int checkpointBufLimit = (int)(pageMemory.checkpointBufferPagesSize() * CP_BUF_FILL_THRESHOLD); + + return pageMemory.checkpointBufferPagesCount() > checkpointBufLimit; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java index e6aab794761eb..a271ed973ae9d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java @@ -37,6 +37,13 @@ public interface PagesWriteThrottlePolicy { */ void onMarkDirty(boolean isPageInCheckpoint); + /** + * Callback to try wakeup throttled threads. + */ + default void tryWakeupThrottledThreads() { + // No-op. + } + /** * Callback to notify throttling policy checkpoint was started. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java index bf322bd485ceb..b8f966a948357 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.tree; import java.io.Externalizable; +import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -31,6 +32,7 @@ import org.apache.ignite.failure.FailureContext; import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.IgniteVersionUtils; import org.apache.ignite.internal.UnregisteredBinaryTypeException; import org.apache.ignite.internal.UnregisteredClassException; import org.apache.ignite.internal.pagemem.PageIdUtils; @@ -42,7 +44,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.InsertRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageAddRootRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageCutRootRecord; -import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineFlagsCreatedVersionRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.NewRootInitRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.RemoveRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.ReplaceRecord; @@ -97,6 +99,12 @@ public abstract class BPlusTree extends DataStructure implements /** Wrapper for tree pages operations. Noop by default. Override for test purposes. 
*/ public static volatile PageHandlerWrapper pageHndWrapper = (tree, hnd) -> hnd; + /** */ + public static final ThreadLocal suspendFailureDiagnostic = ThreadLocal.withInitial(() -> false); + + /** Destroy msg. */ + public static final String CONC_DESTROY_MSG = "Tree is being concurrently destroyed: "; + /** */ private static volatile boolean interrupted; @@ -719,9 +727,10 @@ private class InitRoot extends PageHandler { io.initRoot(pageAddr, rootId, pageSize()); io.setInlineSize(pageAddr, inlineSize); + io.initFlagsAndVersion(pageAddr, BPlusMetaIO.FLAGS_DEFAULT, IgniteVersionUtils.VER); if (needWalDeltaRecord(metaId, metaPage, walPlc)) - wal.log(new MetaPageInitRootInlineRecord(cacheId, metaId, rootId, inlineSize)); + wal.log(new MetaPageInitRootInlineFlagsCreatedVersionRecord(cacheId, metaId, rootId, inlineSize)); assert io.getRootLevel(pageAddr) == 0; assert io.getFirstPageId(pageAddr, 0) == rootId; @@ -734,7 +743,8 @@ private class InitRoot extends PageHandler { /** * @param name Tree name. - * @param cacheId Cache ID. + * @param cacheGrpId Cache group ID. + * @param cacheGrpName Cache group name. * @param pageMem Page memory. * @param wal Write ahead log manager. * @param globalRmvId Remove ID. @@ -747,7 +757,8 @@ private class InitRoot extends PageHandler { */ protected BPlusTree( String name, - int cacheId, + int cacheGrpId, + String cacheGrpName, PageMemory pageMem, IgniteWriteAheadLogManager wal, AtomicLong globalRmvId, @@ -760,7 +771,8 @@ protected BPlusTree( ) throws IgniteCheckedException { this( name, - cacheId, + cacheGrpId, + cacheGrpName, pageMem, wal, globalRmvId, @@ -775,7 +787,8 @@ protected BPlusTree( /** * @param name Tree name. - * @param cacheId Cache ID. + * @param cacheGrpId Cache ID. + * @param grpName Cache group name. * @param pageMem Page memory. * @param wal Write ahead log manager. * @param globalRmvId Remove ID. 
@@ -786,7 +799,8 @@ protected BPlusTree( */ protected BPlusTree( String name, - int cacheId, + int cacheGrpId, + String grpName, PageMemory pageMem, IgniteWriteAheadLogManager wal, AtomicLong globalRmvId, @@ -795,7 +809,7 @@ protected BPlusTree( @Nullable FailureProcessor failureProcessor, @Nullable PageLockListener lsnr ) throws IgniteCheckedException { - super(cacheId, pageMem, wal, lsnr); + super(cacheGrpId, grpName, pageMem, wal, lsnr); assert !F.isEmpty(name); @@ -999,7 +1013,7 @@ private GridCursor findLowerUnbounded(L upper, Object x) throws IgniteChecked */ private void checkDestroyed() { if (destroyed.get()) - throw new IllegalStateException("Tree is being concurrently destroyed: " + getName()); + throw new IllegalStateException(CONC_DESTROY_MSG + getName()); } /** {@inheritDoc} */ @@ -1025,6 +1039,9 @@ public final GridCursor find(L lower, L upper, Object x) throws IgniteChecked throw new IgniteCheckedException("Runtime failure on bounds: [lower=" + lower + ", upper=" + upper + "]", e); } catch (RuntimeException | AssertionError e) { + if (e.getCause() instanceof SQLException) + throw e; + long[] pageIds = pages( lower == null || cursor == null || cursor.getCursor == null, () -> new long[]{cursor.getCursor.pageId} @@ -1042,44 +1059,105 @@ public final GridCursor find(L lower, L upper, Object x) throws IgniteChecked /** {@inheritDoc} */ @Override public T findFirst() throws IgniteCheckedException { + return findFirst(null); + } + + /** + * Returns a value mapped to the lowest key, or {@code null} if tree is empty or no entry matches the passed filter. + * @param filter Filter closure. + * @return Value. + * @throws IgniteCheckedException If failed. 
+ */ + public T findFirst(TreeRowClosure filter) throws IgniteCheckedException { checkDestroyed(); long curPageId = 0L; long nextPageId = 0L; try { - long firstPageId; - - long metaPage = acquirePage(metaPageId); - try { - firstPageId = getFirstPageId(metaPageId, metaPage, 0); - } - finally { - releasePage(metaPageId, metaPage); - } + for (;;) { - long page = acquirePage(firstPageId); + long metaPage = acquirePage(metaPageId); - try { - long pageAddr = readLock(firstPageId, page); + try { + curPageId = getFirstPageId(metaPageId, metaPage, 0); // Level 0 is always at the bottom. + } + finally { + releasePage(metaPageId, metaPage); + } + long curPage = acquirePage(curPageId); try { - BPlusIO io = io(pageAddr); + long curPageAddr = readLock(curPageId, curPage); - int cnt = io.getCount(pageAddr); + if (curPageAddr == 0) + continue; // The first page has gone: restart scan. - if (cnt == 0) - return null; + try { + BPlusIO io = io(curPageAddr); + + assert io.isLeaf(); + + for (;;) { + int cnt = io.getCount(curPageAddr); + + for (int i = 0; i < cnt; ++i) { + if (filter == null || filter.apply(this, io, curPageAddr, i)) + return getRow(io, curPageAddr, i); + } + + nextPageId = io.getForward(curPageAddr); - return getRow(io, pageAddr, 0); + if (nextPageId == 0) + return null; + + long nextPage = acquirePage(nextPageId); + + try { + long nextPageAddr = readLock(nextPageId, nextPage); + + // In the current implementation the next page can't change when the current page is locked. + assert nextPageAddr != 0 : nextPageAddr; + + try { + long pa = curPageAddr; + curPageAddr = 0; // Set to zero to avoid double unlocking in finalizer. + + readUnlock(curPageId, curPage, pa); + + long p = curPage; + curPage = 0; // Set to zero to avoid double release in finalizer. 
+ + releasePage(curPageId, p); + + curPageId = nextPageId; + curPage = nextPage; + curPageAddr = nextPageAddr; + + nextPage = 0; + nextPageAddr = 0; + } + finally { + if (nextPageAddr != 0) + readUnlock(nextPageId, nextPage, nextPageAddr); + } + } + finally { + if (nextPage != 0) + releasePage(nextPageId, nextPage); + } + } + } + finally { + if (curPageAddr != 0) + readUnlock(curPageId, curPage, curPageAddr); + } } finally { - readUnlock(firstPageId, page, pageAddr); + if (curPage != 0) + releasePage(curPageId, curPage); } } - finally { - releasePage(firstPageId, page); - } } catch (IgniteCheckedException e) { throw new IgniteCheckedException("Runtime failure on first row lookup", e); @@ -1092,6 +1170,7 @@ public final GridCursor find(L lower, L upper, Object x) throws IgniteChecked } } + /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public T findLast() throws IgniteCheckedException { @@ -1409,8 +1488,12 @@ private void validateFirstPages(long metaId, long metaPage, int rootLvl) throws /** * @param msg Message. */ - private static void fail(Object msg) { - throw new AssertionError(msg); + private void fail(Object msg) { + AssertionError err = new AssertionError(msg); + + processFailure(FailureType.CRITICAL_ERROR, err); + + throw err; } /** @@ -2659,8 +2742,7 @@ void checkLockRetry() throws IgniteCheckedException { "(the tree may be corrupted). Increase " + IGNITE_BPLUS_TREE_LOCK_RETRIES + " system property " + "if you regularly see this message (current value is " + getLockRetries() + ")."); - if (failureProcessor != null) - failureProcessor.process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + processFailure(FailureType.CRITICAL_ERROR, e); throw e; } @@ -4602,7 +4684,7 @@ protected int compare(int lvl, BPlusIO io, long pageAddr, int idx, L row) thr * @return Full detached data row. * @throws IgniteCheckedException If failed. 
*/ - protected final T getRow(BPlusIO io, long pageAddr, int idx) throws IgniteCheckedException { + public final T getRow(BPlusIO io, long pageAddr, int idx) throws IgniteCheckedException { return getRow(io, pageAddr, idx, null); } @@ -4923,6 +5005,11 @@ private void updateLowerBound(T lower) { return r; } + + /** {@inheritDoc} */ + @Override public void close() { + rows = null; + } } /** @@ -5101,8 +5188,8 @@ private long[] pages(boolean empty, Supplier pages) { * @param pageIds Pages ids. * @return New CorruptedTreeException instance. */ - private CorruptedTreeException corruptedTreeException(String msg, Throwable cause, int grpId, long... pageIds) { - CorruptedTreeException e = new CorruptedTreeException(msg, cause, grpId, pageIds); + protected CorruptedTreeException corruptedTreeException(String msg, Throwable cause, int grpId, long... pageIds) { + CorruptedTreeException e = new CorruptedTreeException(msg, cause, grpId, grpName, pageIds); if (failureProcessor != null) failureProcessor.process(new FailureContext(FailureType.CRITICAL_ERROR, e)); @@ -5180,4 +5267,15 @@ protected final R read( protected IoStatisticsHolder statisticsHolder() { return IoStatisticsHolderNoOp.INSTANCE; } + + /** + * Processes failure with failure processor. + * + * @param failureType Failure type. + * @param e Exception. 
+ */ + protected void processFailure(FailureType failureType, Throwable e) { + if (failureProcessor != null && !suspendFailureDiagnostic.get()) + failureProcessor.process(new FailureContext(failureType, e)); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/CorruptedTreeException.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/CorruptedTreeException.java index 56dc7db1f41c6..215584a350b47 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/CorruptedTreeException.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/CorruptedTreeException.java @@ -25,9 +25,10 @@ import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.processors.cache.persistence.CorruptedPersistenceException; +import org.apache.ignite.internal.util.GridStringBuilder; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.X; -import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.jetbrains.annotations.Nullable; import static java.util.Arrays.asList; @@ -46,19 +47,51 @@ public class CorruptedTreeException extends IgniteCheckedException implements Co * @param msg Message. * @param cause Cause. * @param grpId Group id of potentially corrupted pages. + * @param grpName Group name of potentially corrupted pages. * @param pageIds Potentially corrupted pages. */ - public CorruptedTreeException(String msg, @Nullable Throwable cause, int grpId, long... pageIds) { - this(msg, cause, toPagesArray(grpId, pageIds)); + public CorruptedTreeException(String msg, @Nullable Throwable cause, int grpId, String grpName, long... 
pageIds) { + this(msg, null, null, grpName, cause, toPagesArray(grpId, pageIds)); } /** * @param msg Message. * @param cause Cause. + * @param grpId Group id of potentially corrupted pages. + * @param grpName Group name of potentially corrupted pages. + * @param cacheName Cache name. + * @param indexName Index name. + * @param pageIds Potentially corrupted pages. + */ + public CorruptedTreeException( + String msg, + @Nullable Throwable cause, + int grpId, + String grpName, + String cacheName, + String indexName, + long... pageIds + ) { + this(msg, cacheName, indexName, grpName, cause, toPagesArray(grpId, pageIds)); + } + + /** + * @param msg Message. + * @param cacheName Cache name. + * @param indexName Index name. + * @param grpName Cache group name. + * @param cause Cause. * @param pages (groupId, pageId) pairs for pages that might be corrupted. */ - public CorruptedTreeException(String msg, @Nullable Throwable cause, T2... pages) { - super(getMsg(msg, pages), cause); + public CorruptedTreeException( + String msg, + String cacheName, + String indexName, + String grpName, + @Nullable Throwable cause, + T2... pages + ) { + super(getMsg(msg, cacheName, indexName, grpName, pages), cause); this.pages = expandPagesArray(pages, cause); } @@ -99,11 +132,25 @@ private static T2[] expandPagesArray(T2[] pages, T } /** */ - private static String getMsg(String msg, T2... pages) { - return S.toString("B+Tree is corrupted", - "pages(groupId, pageId)", Arrays.toString(pages), false, - "msg", msg, false - ); + private static String getMsg(String msg, String cacheName, String indexName, String grpName, T2... 
pages) { + GridStringBuilder stringBuilder = new GridStringBuilder("B+Tree is corrupted [") + .a("pages(groupId, pageId)=").a(Arrays.toString(pages)); + + if (cacheName != null) { + stringBuilder + .a(", cacheId=").a(CU.cacheId(cacheName)) + .a(", cacheName=").a(cacheName); + } + + if (indexName != null) + stringBuilder.a(", indexName=").a(indexName); + + if (grpName != null) + stringBuilder.a(", groupName=").a(grpName); + + stringBuilder.a(", msg=").a(msg).a("]"); + + return stringBuilder.toString(); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java index 623951bafc58d..56630e6bb25f2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java @@ -19,8 +19,10 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.IgniteVersionUtils; import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.util.GridStringBuilder; +import org.apache.ignite.lang.IgniteProductVersion; /** * IO routines for B+Tree meta pages. 
@@ -28,17 +30,38 @@ public class BPlusMetaIO extends PageIO { /** */ public static final IOVersions VERSIONS = new IOVersions<>( - new BPlusMetaIO(1), new BPlusMetaIO(2) + new BPlusMetaIO(1), + new BPlusMetaIO(2), + new BPlusMetaIO(3), + new BPlusMetaIO(4) ); /** */ - private static final int LVLS_OFF = COMMON_HEADER_END; + private static final int LVLS_OFFSET = COMMON_HEADER_END; /** */ - private final int refsOff; + private static final int INLINE_SIZE_OFFSET = LVLS_OFFSET + 1; + + /** */ + private static final int FLAGS_OFFSET = INLINE_SIZE_OFFSET + 2; + + /** */ + private static final int CREATED_VER_OFFSET = FLAGS_OFFSET + 8; + + /** */ + private static final int REFS_OFFSET = CREATED_VER_OFFSET + IgniteProductVersion.SIZE_IN_BYTES; + + /** */ + private static final long FLAG_UNWRAPPED_PK = 1L; + + /** */ + private static final long FLAG_INLINE_OBJECT_SUPPORTED = 2L; + + /** FLAG_UNWRAPPED_PK - not set because unwrap PK not supported by 8.5.x versions. */ + public static final long FLAGS_DEFAULT = FLAG_INLINE_OBJECT_SUPPORTED; /** */ - private final int inlineSizeOff; + private final int refsOff; /** * @param ver Page format version. @@ -48,13 +71,19 @@ private BPlusMetaIO(int ver) { switch (ver) { case 1: - inlineSizeOff = -1; - refsOff = LVLS_OFF + 1; + refsOff = LVLS_OFFSET + 1; break; case 2: - inlineSizeOff = LVLS_OFF + 1; - refsOff = inlineSizeOff + 2; + refsOff = INLINE_SIZE_OFFSET + 2; + break; + + case 3: + refsOff = INLINE_SIZE_OFFSET + 2; + break; + + case 4: + refsOff = REFS_OFFSET; break; default: @@ -77,7 +106,7 @@ public void initRoot(long pageAdrr, long rootId, int pageSize) { * @return Number of levels in this tree. 
*/ public int getLevelsCount(long pageAddr) { - return Byte.toUnsignedInt(PageUtils.getByte(pageAddr, LVLS_OFF)); + return Byte.toUnsignedInt(PageUtils.getByte(pageAddr, LVLS_OFFSET)); } /** @@ -97,7 +126,7 @@ private int getMaxLevels(long pageAddr, int pageSize) { private void setLevelsCount(long pageAddr, int lvls, int pageSize) { assert lvls >= 0 && lvls <= getMaxLevels(pageAddr, pageSize) : lvls; - PageUtils.putByte(pageAddr, LVLS_OFF, (byte)lvls); + PageUtils.putByte(pageAddr, LVLS_OFFSET, (byte)lvls); assert getLevelsCount(pageAddr) == lvls; } @@ -172,14 +201,105 @@ public void cutRoot(long pageAddr, int pageSize) { */ public void setInlineSize(long pageAddr, int size) { if (getVersion() > 1) - PageUtils.putShort(pageAddr, inlineSizeOff, (short)size); + PageUtils.putShort(pageAddr, INLINE_SIZE_OFFSET, (short)size); } /** * @param pageAddr Page address. + * @return Inline size. */ public int getInlineSize(long pageAddr) { - return getVersion() > 1 ? PageUtils.getShort(pageAddr, inlineSizeOff) : 0; + return getVersion() > 1 ? PageUtils.getShort(pageAddr, INLINE_SIZE_OFFSET) : 0; + } + + /** + * @param pageAddr Page address. + * @return {@code true} In case use unwrapped PK. + */ + public boolean unwrappedPk(long pageAddr) { + return supportFlags() && (flags(pageAddr) & FLAG_UNWRAPPED_PK) != 0L || getVersion() == 3; + } + + /** + * @param pageAddr Page address. + * @return {@code true} In case inline object is supported by the tree. + */ + public boolean inlineObjectSupported(long pageAddr) { + assert supportFlags(); + + return (flags(pageAddr) & FLAG_INLINE_OBJECT_SUPPORTED) != 0L; + } + + /** + * @return {@code true} If flags are supported. + */ + public boolean supportFlags() { + return getVersion() > 3; + } + + /** + * @param pageAddr Page address. + * @param flags Flags. + * @param createdVer The version of the product that creates the page (b+tree). 
+ */ + public void initFlagsAndVersion(long pageAddr, long flags, IgniteProductVersion createdVer) { + PageUtils.putLong(pageAddr, FLAGS_OFFSET, flags); + + setCreatedVersion(pageAddr, createdVer); + } + + /** + * @param pageAddr Page address. + * @param curVer Ignite current version. + */ + public void setCreatedVersion(long pageAddr, IgniteProductVersion curVer) { + assert curVer != null; + + PageUtils.putByte(pageAddr, CREATED_VER_OFFSET, curVer.major()); + PageUtils.putByte(pageAddr, CREATED_VER_OFFSET + 1, curVer.minor()); + PageUtils.putByte(pageAddr, CREATED_VER_OFFSET + 2, curVer.maintenance()); + PageUtils.putLong(pageAddr, CREATED_VER_OFFSET + 3, curVer.revisionTimestamp()); + PageUtils.putBytes(pageAddr, CREATED_VER_OFFSET + 11, curVer.revisionHash()); + } + + /** + * @param pageAddr Page address. + * @return The version of product that creates the page. + */ + public IgniteProductVersion createdVersion(long pageAddr) { + if (getVersion() < 4) + return null; + + return new IgniteProductVersion( + PageUtils.getByte(pageAddr, CREATED_VER_OFFSET), + PageUtils.getByte(pageAddr, CREATED_VER_OFFSET + 1), + PageUtils.getByte(pageAddr, CREATED_VER_OFFSET + 2), + PageUtils.getLong(pageAddr, CREATED_VER_OFFSET + 3), + PageUtils.getBytes(pageAddr, CREATED_VER_OFFSET + 11, IgniteProductVersion.REV_HASH_SIZE)); + } + + /** + * @param pageAddr Page address. + * @return Long with flags. + */ + private long flags(long pageAddr) { + assert supportFlags(); + + return PageUtils.getLong(pageAddr, FLAGS_OFFSET); + } + + /** + * @param pageAddr Page address. + * @param unwrappedPk unwrapped primary key of this tree flag. + * @param inlineObjSupported inline POJO by created tree flag. + */ + public void setFlags(long pageAddr, boolean unwrappedPk, boolean inlineObjSupported) { + assert supportFlags(); + + long flags = unwrappedPk ? FLAG_UNWRAPPED_PK : 0; + flags |= inlineObjSupported ? 
FLAG_INLINE_OBJECT_SUPPORTED : 0; + + PageUtils.putLong(pageAddr, FLAGS_OFFSET, flags); } /** {@inheritDoc} */ @@ -191,4 +311,34 @@ public int getInlineSize(long pageAddr) { ; //TODO print firstPageIds by level } + + /** + * @param pageAddr Page address. + * @param inlineObjSupported Supports inline object flag. + * @param unwrappedPk Unwrap PK flag. + * @param pageSize Page size. + */ + public static void upgradePageVersion(long pageAddr, boolean inlineObjSupported, boolean unwrappedPk, int pageSize) { + BPlusMetaIO ioPrev = VERSIONS.forPage(pageAddr); + + long[] lvls = new long[ioPrev.getLevelsCount(pageAddr)]; + + for (int i = 0; i < lvls.length; ++i) + lvls[i] = ioPrev.getFirstPageId(pageAddr, i); + + int inlineSize = ioPrev.getInlineSize(pageAddr); + + BPlusMetaIO ioNew = VERSIONS.latest(); + + setVersion(pageAddr, VERSIONS.latest().getVersion()); + + ioNew.setLevelsCount(pageAddr, lvls.length, pageSize); + + for (int i = 0; i < lvls.length; ++i) + ioNew.setFirstPageId(pageAddr, i, lvls[i]); + + ioNew.setInlineSize(pageAddr, inlineSize); + ioNew.setCreatedVersion(pageAddr, IgniteVersionUtils.VER); + ioNew.setFlags(pageAddr, unwrappedPk, inlineObjSupported); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java index e5ada663217cf..d67feace42eee 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java @@ -645,17 +645,22 @@ public static boolean isDataPageType(int type) { /** * @param addr Address. 
*/ - public static String printPage(long addr, int pageSize) throws IgniteCheckedException { - PageIO io = getPageIO(addr); - + public static String printPage(long addr, int pageSize) { GridStringBuilder sb = new GridStringBuilder("Header [\n\ttype="); - sb.a(getType(addr)).a(" (").a(io.getClass().getSimpleName()) - .a("),\n\tver=").a(getVersion(addr)).a(",\n\tcrc=").a(getCrc(addr)) - .a(",\n\t").a(PageIdUtils.toDetailString(getPageId(addr))) - .a("\n],\n"); + try { + PageIO io = getPageIO(addr); + + sb.a(getType(addr)).a(" (").a(io.getClass().getSimpleName()) + .a("),\n\tver=").a(getVersion(addr)).a(",\n\tcrc=").a(getCrc(addr)) + .a(",\n\t").a(PageIdUtils.toDetailString(getPageId(addr))) + .a("\n],\n"); - io.printPage(addr, pageSize, sb); + io.printPage(addr, pageSize, sb); + } + catch (IgniteCheckedException e) { + sb.a("Failed to print page: ").a(e.getMessage()); + } return sb.toString(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java index 8a38f28e607ed..298a5b59e09a3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java @@ -60,6 +60,11 @@ public abstract class AbstractWalRecordsIterator */ protected IgniteBiTuple curRec; + /** + * The exception which can be thrown during reading next record. It holds until the next calling of next record. + */ + private IgniteCheckedException curException; + /** * Current WAL segment absolute index.
Determined as lowest number of file at start, is changed during advance * segment @@ -118,9 +123,17 @@ protected AbstractWalRecordsIterator( /** {@inheritDoc} */ @Override protected IgniteBiTuple onNext() throws IgniteCheckedException { + if (curException != null) + throw curException; + IgniteBiTuple ret = curRec; - advance(); + try { + advance(); + } + catch (IgniteCheckedException e) { + curException = e; + } return ret; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index 8b166f3e9a0c6..f01dbf23614d4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -131,9 +131,13 @@ import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import static java.lang.Long.MAX_VALUE; import static java.nio.file.StandardOpenOption.CREATE; import static java.nio.file.StandardOpenOption.READ; import static java.nio.file.StandardOpenOption.WRITE; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_COMPRESSOR_WORKER_THREAD_CNT; import static org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_MMAP; import static org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_SEGMENT_SYNC_TIMEOUT; @@ -256,6 +260,12 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl private final int WAL_COMPRESSOR_WORKER_THREAD_CNT = 
IgniteSystemProperties.getInteger(IGNITE_WAL_COMPRESSOR_WORKER_THREAD_CNT, 4); + /** + * Threshold time to print warning to log if awaiting for next wal segment took too long (exceeded this threshold). + */ + private static final long THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT = + IgniteSystemProperties.getLong(IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT, 1000L); + /** */ private final boolean alwaysWriteFullPages; @@ -324,9 +334,6 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl /** Decompressor. */ private volatile FileDecompressor decompressor; - /** */ - private final ThreadLocal lastWALPtr = new ThreadLocal<>(); - /** Current log segment handle. */ private volatile FileWriteHandle currHnd; @@ -831,8 +838,6 @@ private void checkWalRolloverRequiredDuringInactivityPeriod() { if (ptr != null) { metrics.onWalRecordLogged(); - lastWALPtr.set(ptr); - if (walAutoArchiveAfterInactivity > 0) lastRecordLoggedMs.set(U.currentTimeMillis()); @@ -849,29 +854,39 @@ private void checkWalRolloverRequiredDuringInactivityPeriod() { } /** {@inheritDoc} */ - @Override public void flush(WALPointer ptr, boolean explicitFsync) throws IgniteCheckedException, StorageException { + @Override public WALPointer flush(WALPointer ptr, boolean explicitFsync) throws IgniteCheckedException, StorageException { if (serializer == null || mode == WALMode.NONE) - return; + return null; FileWriteHandle cur = currentHandle(); // WAL manager was not started (client node). if (cur == null) - return; + return null; + + FileWALPointer filePtr; - FileWALPointer filePtr = (FileWALPointer)(ptr == null ? lastWALPtr.get() : ptr); + if (ptr == null) { + long pos = cur.buf.tail(); + + filePtr = new FileWALPointer(cur.getSegmentId(), (int)pos, 0); + } + else + filePtr = (FileWALPointer)ptr; if (mode == LOG_ONLY) cur.flushOrWait(filePtr); if (!explicitFsync && mode != WALMode.FSYNC) - return; // No need to sync in LOG_ONLY or BACKGROUND unless explicit fsync is required. 
+ return filePtr; // No need to sync in LOG_ONLY or BACKGROUND unless explicit fsync is required. // No need to sync if was rolled over. - if (filePtr != null && !cur.needFsync(filePtr)) - return; + if (!cur.needFsync(filePtr)) + return filePtr; cur.fsync(filePtr); + + return filePtr; } /** {@inheritDoc} */ @@ -1439,7 +1454,7 @@ private void checkOrPrepareFiles() throws StorageException { createFile(first); } - else + else if (isArchiverEnabled()) checkFiles(0, false, null, null); } @@ -1528,9 +1543,24 @@ private File pollNextFile(long curIdx) throws StorageException, IgniteInterrupte return new File(walWorkDir, FileDescriptor.fileName(curIdx + 1)); } + long absNextIdxStartTime = System.nanoTime(); + // Signal to archiver that we are done with the segment and it can be archived. long absNextIdx = archiver0.nextAbsoluteSegmentIndex(); + long absNextIdxWaitTime = U.nanosToMillis(System.nanoTime() - absNextIdxStartTime); + + if (absNextIdxWaitTime > THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT) { + log.warning( + String.format("Waiting for next wal segment was too long " + + "[waitingTime=%s, curIdx=%s, absNextIdx=%s, walSegments=%s]", + absNextIdxWaitTime, + curIdx, + absNextIdx, + dsCfg.getWalSegments()) + ); + } + long segmentIdx = absNextIdx % dsCfg.getWalSegments(); return new File(walWorkDir, FileDescriptor.fileName(segmentIdx)); @@ -1817,31 +1847,31 @@ else if (err != null) * @throws StorageException If exception occurred in the archiver thread. */ private long nextAbsoluteSegmentIndex() throws StorageException, IgniteInterruptedCheckedException { - synchronized (this) { - if (cleanErr != null) - throw cleanErr; + if (cleanErr != null) + throw cleanErr; - try { - long nextIdx = segmentAware.nextAbsoluteSegmentIndex(); + try { + long nextIdx = segmentAware.nextAbsoluteSegmentIndex(); + synchronized (this) { // Wait for formatter so that we do not open an empty file in DEFAULT mode. 
while (nextIdx % dsCfg.getWalSegments() > formatted && cleanErr == null) wait(); + } - if (cleanErr != null) - throw cleanErr; + if (cleanErr != null) + throw cleanErr; - return nextIdx; - } - catch (IgniteInterruptedCheckedException e) { - if (cleanErr != null) - throw cleanErr; + return nextIdx; + } + catch (IgniteInterruptedCheckedException e) { + if (cleanErr != null) + throw cleanErr; - throw e; - } - catch (InterruptedException e) { - throw new IgniteInterruptedCheckedException(e); - } + throw e; + } + catch (InterruptedException e) { + throw new IgniteInterruptedCheckedException(e); } } @@ -2413,7 +2443,7 @@ else if (checkFile.length() != dsCfg.getWalSegmentSize() && mode == WALMode.FSYN "(WAL segment size change is not supported in 'DEFAULT' WAL mode) " + "[filePath=" + checkFile.getAbsolutePath() + ", fileSize=" + checkFile.length() + - ", configSize=" + dsCfg.getWalSegments() + ']'); + ", configSize=" + dsCfg.getWalSegmentSize() + ']'); } else if (create) createFile(checkFile); @@ -2708,7 +2738,7 @@ public void writeHeader() { * * @param ptr Pointer. */ - private void flushOrWait(FileWALPointer ptr) { + private void flushOrWait(FileWALPointer ptr) throws IgniteCheckedException { if (ptr != null) { // If requested obsolete file index, it must be already flushed by close. if (ptr.index() != getSegmentId()) @@ -2721,16 +2751,17 @@ private void flushOrWait(FileWALPointer ptr) { /** * @param ptr Pointer. */ - private void flush(FileWALPointer ptr) { + private void flush(FileWALPointer ptr) throws IgniteCheckedException { if (ptr == null) { // Unconditional flush. walWriter.flushAll(); return; } - assert ptr.index() == getSegmentId(); + assert ptr.index() == getSegmentId() : "Pointer segment idx is not equals to current write segment idx. 
" + + "ptr=" + ptr + " segmetntId=" + getSegmentId(); - walWriter.flushBuffer(ptr.fileOffset()); + walWriter.flushBuffer(ptr.fileOffset() + ptr.length()); } /** @@ -3390,7 +3421,7 @@ private class WALWriter extends GridWorker { } } else { - unparkWaiters(Long.MAX_VALUE); + unparkWaiters(MAX_VALUE); return; } @@ -3421,7 +3452,7 @@ else if (pos == FILE_FORCE) err = e; - unparkWaiters(Long.MAX_VALUE); + unparkWaiters(MAX_VALUE); return; } @@ -3455,7 +3486,9 @@ else if (pos == FILE_FORCE) finally { seg.release(); - long p = pos <= UNCONDITIONAL_FLUSH || err != null ? Long.MAX_VALUE : currentHandle().written; + boolean unparkAll = (pos == UNCONDITIONAL_FLUSH || pos == FILE_CLOSE) || err != null; + + long p = unparkAll ? MAX_VALUE : currentHandle().written; unparkWaiters(p); } @@ -3468,7 +3501,7 @@ else if (pos == FILE_FORCE) finally { this.err = err; - unparkWaiters(Long.MAX_VALUE); + unparkWaiters(MAX_VALUE); if (err == null && !isCancelled) err = new IllegalStateException("Worker " + name() + " is terminated unexpectedly"); @@ -3531,21 +3564,21 @@ private void unparkWaiters(long pos) { /** * Forces all made changes to the file. */ - void force() { + void force() throws IgniteCheckedException { flushBuffer(FILE_FORCE); } /** * Closes file. */ - void close() { + void close() throws IgniteCheckedException { flushBuffer(FILE_CLOSE); } /** * Flushes all data from the buffer. */ - void flushAll() { + void flushAll() throws IgniteCheckedException { flushBuffer(UNCONDITIONAL_FLUSH); } @@ -3553,7 +3586,7 @@ void flushAll() { * @param expPos Expected position. 
*/ @SuppressWarnings("ForLoopReplaceableByForEach") - void flushBuffer(long expPos) { + void flushBuffer(long expPos) throws IgniteCheckedException { if (mmap) return; @@ -3579,6 +3612,12 @@ void flushBuffer(long expPos) { if (val == Long.MIN_VALUE) { waiters.remove(t); + Throwable walWriterError = walWriter.err; + + if (walWriterError != null) + throw new IgniteCheckedException("Flush buffer failed.", walWriterError); + + return; } else diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FsyncModeFileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FsyncModeFileWriteAheadLogManager.java index 6bc12d4935629..a090c55e689bf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FsyncModeFileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FsyncModeFileWriteAheadLogManager.java @@ -44,7 +44,6 @@ import java.util.NavigableMap; import java.util.Set; import java.util.TreeMap; -import java.util.TreeSet; import java.util.concurrent.PriorityBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -281,9 +280,6 @@ public class FsyncModeFileWriteAheadLogManager extends GridCacheSharedManagerAda /** Decompressor. 
*/ private volatile FileDecompressor decompressor; - /** */ - private final ThreadLocal lastWALPtr = new ThreadLocal<>(); - /** Current log segment handle */ private volatile FileWriteHandle currentHnd; @@ -726,8 +722,6 @@ private void checkWalRolloverRequiredDuringInactivityPeriod() { if (ptr != null) { metrics.onWalRecordLogged(); - lastWALPtr.set(ptr); - if (walAutoArchiveAfterInactivity > 0) lastRecordLoggedMs.set(U.currentTimeMillis()); @@ -744,23 +738,36 @@ private void checkWalRolloverRequiredDuringInactivityPeriod() { } /** {@inheritDoc} */ - @Override public void flush(WALPointer ptr, boolean explicitFsync) throws IgniteCheckedException, StorageException { + @Override public WALPointer flush(WALPointer ptr, boolean explicitFsync) throws IgniteCheckedException, StorageException { if (serializer == null || mode == WALMode.NONE) - return; + return null; FileWriteHandle cur = currentHandle(); // WAL manager was not started (client node). if (cur == null) - return; + return null; + + FileWALPointer filePtr; - FileWALPointer filePtr = (FileWALPointer)(ptr == null ? lastWALPtr.get() : ptr); + if (ptr == null) { + WALRecord rec = cur.head.get(); + + if (rec instanceof FakeRecord) + return null; + + filePtr = (FileWALPointer)rec.position(); + } + else + filePtr = (FileWALPointer)ptr; // No need to sync if was rolled over. 
- if (filePtr != null && !cur.needFsync(filePtr)) - return; + if (!cur.needFsync(filePtr)) + return filePtr; cur.fsync(filePtr, false); + + return filePtr; } /** {@inheritDoc} */ @@ -1346,7 +1353,7 @@ private void checkOrPrepareFiles() throws StorageException { createFile(first); } - else + else if (isArchiverEnabled()) checkFiles(0, false, null, null); } @@ -2653,8 +2660,14 @@ else if (stop) { lock.lock(); try { - while (written < expWritten && !cctx.kernalContext().invalid()) - U.awaitQuiet(writeComplete); + while (written < expWritten && !cctx.kernalContext().invalid()) { + try { + writeComplete.await(100, TimeUnit.MILLISECONDS); + } + catch (InterruptedException ignore) { + Thread.currentThread().interrupt(); + } + } } finally { lock.unlock(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java index d4253f3c2a108..19fbd80bb9f65 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java @@ -77,7 +77,7 @@ import org.apache.ignite.internal.processors.resource.GridResourceProcessor; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import org.apache.ignite.internal.processors.schedule.IgniteScheduleProcessorAdapter; -import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; @@ -445,7 +445,7 @@ 
protected IgniteConfiguration prepareIgniteConfiguration() { } /** {@inheritDoc} */ - @Override public GridSecurityProcessor security() { + @Override public IgniteSecurity security() { return null; } @@ -583,6 +583,11 @@ protected IgniteConfiguration prepareIgniteConfiguration() { return null; } + /** {@inheritDoc} */ + @Override public ExecutorService getRebalanceExecutorService() { + return null; + } + /** {@inheritDoc} */ @Override public IgniteExceptionRegistry exceptionRegistry() { return null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java index 996df70fee1fc..daaa470f35193 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java @@ -66,5 +66,6 @@ public final class RecordTypes { DELTA_TYPE_SET.add(WALRecord.RecordType.PAGE_LIST_META_RESET_COUNT_RECORD); DELTA_TYPE_SET.add(WALRecord.RecordType.DATA_PAGE_UPDATE_RECORD); DELTA_TYPE_SET.add(WALRecord.RecordType.BTREE_META_PAGE_INIT_ROOT2); + DELTA_TYPE_SET.add(WALRecord.RecordType.BTREE_META_PAGE_INIT_ROOT_V3); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/scanner/PrintToLogHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/scanner/PrintToLogHandler.java index 848acb913d2f1..551e4df97aade 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/scanner/PrintToLogHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/scanner/PrintToLogHandler.java @@ -63,7 +63,8 @@ public PrintToLogHandler(IgniteLogger log) { resultString = null; - log.info(msg); 
+ if (log.isInfoEnabled()) + log.info(msg); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java index 34bc50545cbc8..026b717163de3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java @@ -54,6 +54,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageAddRootRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageCutRootRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineFlagsCreatedVersionRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastAllocatedIndex; @@ -93,6 +94,7 @@ import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteProductVersion; /** * Record data V1 serializer. 
@@ -207,6 +209,9 @@ assert record instanceof PageSnapshot; case BTREE_META_PAGE_INIT_ROOT2: return 4 + 8 + 8 + 2; + case BTREE_META_PAGE_INIT_ROOT_V3: + return 4 + 8 + 8 + 2 + 8 + IgniteProductVersion.SIZE_IN_BYTES; + case BTREE_META_PAGE_ADD_ROOT: return 4 + 8 + 8; @@ -533,6 +538,34 @@ assert record instanceof PageSnapshot; break; + case BTREE_META_PAGE_INIT_ROOT_V3: + cacheId = in.readInt(); + pageId = in.readLong(); + + long rootId3 = in.readLong(); + int inlineSize3 = in.readShort(); + + long flags = in.readLong(); + + byte[] revHash = new byte[IgniteProductVersion.REV_HASH_SIZE]; + byte maj = in.readByte(); + byte min = in.readByte(); + byte maint = in.readByte(); + long verTs = in.readLong(); + in.readFully(revHash); + + IgniteProductVersion createdVer = new IgniteProductVersion( + maj, + min, + maint, + verTs, + revHash); + + res = new MetaPageInitRootInlineFlagsCreatedVersionRecord(cacheId, pageId, rootId3, + inlineSize3, flags, createdVer); + + break; + case BTREE_META_PAGE_ADD_ROOT: cacheId = in.readInt(); pageId = in.readLong(); @@ -1045,6 +1078,29 @@ assert record instanceof PageSnapshot; buf.putShort((short)imRec2.inlineSize()); break; + case BTREE_META_PAGE_INIT_ROOT_V3: + MetaPageInitRootInlineFlagsCreatedVersionRecord imRec3 = + (MetaPageInitRootInlineFlagsCreatedVersionRecord)rec; + + buf.putInt(imRec3.groupId()); + buf.putLong(imRec3.pageId()); + + buf.putLong(imRec3.rootId()); + + buf.putShort((short)imRec3.inlineSize()); + + buf.putLong(imRec3.flags()); + + // Write created version. 
+ IgniteProductVersion createdVer = imRec3.createdVersion(); + buf.put(createdVer.major()); + buf.put(createdVer.minor()); + buf.put(createdVer.maintenance()); + buf.putLong(createdVer.revisionTimestamp()); + buf.put(createdVer.revisionHash()); + + break; + case BTREE_META_PAGE_ADD_ROOT: MetaPageAddRootRecord arRec = (MetaPageAddRootRecord)rec; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV1Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV1Serializer.java index e27faa5f02025..193e492b06d3b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV1Serializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV1Serializer.java @@ -56,7 +56,7 @@ * Record V1 serializer. * Stores records in following format: *