From aa48860fac246799f779f26e3b20e4dee0512bf7 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 10 Nov 2023 08:02:53 +0100 Subject: [PATCH 01/15] Make YamlRestTest and ElasticDistributionPlugin cc compatible (#101923) * Make YamlRestTest and ElasticDistributionPlugin cc compatible These changes are addressing configuration cache incompatibilities of running rest tests with the new ClassRule based Elasticsearch test cluster setup. This allows running those tests with configuration cache enabled: `./gradlew :modules:ingest-user-info:yamlRestTest --configuration-cache` --- .../InternalDistributionDownloadPlugin.java | 24 ++-- .../test/LegacyRestTestBasePlugin.java | 29 +++++ .../test/rest/RestTestBasePlugin.java | 28 +++-- .../gradle/TestClustersPluginFuncTest.groovy | 2 +- .../gradle/DistributionDownloadPlugin.java | 112 ++++++++++-------- .../gradle/DistributionResolution.java | 7 +- .../gradle/ElasticsearchDistribution.java | 13 +- .../testclusters/DefaultTestClustersTask.java | 2 +- .../StandaloneRestIntegTestTask.java | 27 ++--- .../testclusters/TestClustersAware.java | 7 ++ .../testclusters/TestClustersPlugin.java | 13 +- .../gradle/util/GradleUtils.java | 12 -- .../tools/java-version-checker/build.gradle | 4 +- 13 files changed, 165 insertions(+), 115 deletions(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java index 16c7bf6d32862..f92789f701049 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPlugin.java @@ -23,16 +23,17 @@ import org.elasticsearch.gradle.internal.info.GlobalBuildInfoPlugin; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.GradleException; -import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.artifacts.Dependency; +import org.gradle.api.artifacts.dsl.DependencyHandler; import org.gradle.api.provider.Provider; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.function.Function; -import static org.elasticsearch.gradle.util.GradleUtils.projectDependency; - /** * An internal elasticsearch build plugin that registers additional * distribution resolution strategies to the 'elasticsearch.download-distribution' plugin @@ -64,18 +65,18 @@ public void apply(Project project) { *
 * <p>
* BWC versions are resolved as project to projects under `:distribution:bwc`. */ - private void registerInternalDistributionResolutions(NamedDomainObjectContainer resolutions) { - resolutions.register("localBuild", distributionResolution -> distributionResolution.setResolver((project, distribution) -> { + private void registerInternalDistributionResolutions(List resolutions) { + resolutions.add(new DistributionResolution("local-build", (project, distribution) -> { if (isCurrentVersion(distribution)) { // non-external project, so depend on local build return new ProjectBasedDistributionDependency( - config -> projectDependency(project, distributionProjectPath(distribution), config) + config -> projectDependency(project.getDependencies(), distributionProjectPath(distribution), config) ); } return null; })); - resolutions.register("bwc", distributionResolution -> distributionResolution.setResolver((project, distribution) -> { + resolutions.add(new DistributionResolution("bwc", (project, distribution) -> { BwcVersions.UnreleasedVersionInfo unreleasedInfo = BuildParams.getBwcVersions() .unreleasedInfo(Version.fromString(distribution.getVersion())); if (unreleasedInfo != null) { @@ -89,7 +90,7 @@ private void registerInternalDistributionResolutions(NamedDomainObjectContainer< } String projectConfig = getProjectConfig(distribution, unreleasedInfo); return new ProjectBasedDistributionDependency( - (config) -> projectDependency(project, unreleasedInfo.gradleProjectPath(), projectConfig) + (config) -> projectDependency(project.getDependencies(), unreleasedInfo.gradleProjectPath(), projectConfig) ); } return null; @@ -116,6 +117,13 @@ private static String getProjectConfig(ElasticsearchDistribution distribution, B } } + private static Dependency projectDependency(DependencyHandler dependencyHandler, String projectPath, String projectConfig) { + Map depConfig = new HashMap<>(); + depConfig.put("path", projectPath); + depConfig.put("configuration", projectConfig); + return dependencyHandler.project(depConfig); + } + private static String distributionProjectPath(ElasticsearchDistribution distribution) { String projectPath = ":distribution"; if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java index eacc5da6220ab..cf68d2928a793 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/LegacyRestTestBasePlugin.java @@ -22,12 +22,18 @@ import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.Task; import org.gradle.api.plugins.JavaBasePlugin; import org.gradle.api.provider.Provider; import org.gradle.api.provider.ProviderFactory; +import org.gradle.api.specs.NotSpec; +import org.gradle.api.specs.Spec; import org.gradle.api.tasks.Sync; +import org.gradle.api.tasks.TaskContainer; import org.gradle.api.tasks.bundling.Zip; +import java.util.Collections; + import javax.inject.Inject; import static org.elasticsearch.gradle.internal.RestrictedBuildApiService.BUILD_API_RESTRICTIONS_SYS_PROPERTY; @@ -47,6 +53,7 @@ public class LegacyRestTestBasePlugin implements Plugin { private static final String TESTS_CLUSTER_REMOTE_ACCESS = "tests.cluster.remote_access"; 
private ProviderFactory providerFactory; + private Project project; @Inject public LegacyRestTestBasePlugin(ProviderFactory providerFactory) { @@ -55,6 +62,7 @@ public LegacyRestTestBasePlugin(ProviderFactory providerFactory) { @Override public void apply(Project project) { + this.project = project; Provider serviceProvider = project.getGradle() .getSharedServices() .registerIfAbsent("restrictedBuildAPI", RestrictedBuildApiService.class, spec -> { @@ -97,6 +105,7 @@ public void apply(Project project) { ); } } + configureCacheability(restIntegTestTask); }); project.getTasks() @@ -121,6 +130,26 @@ public void apply(Project project) { }); } + private void configureCacheability(RestIntegTestTask restIntegTestTask) { + TaskContainer tasks = project.getTasks(); + Spec taskSpec = t -> tasks.withType(StandaloneRestIntegTestTask.class) + .stream() + .filter(task -> task != restIntegTestTask) + .anyMatch(task -> Collections.disjoint(task.getClusters(), restIntegTestTask.getClusters()) == false); + restIntegTestTask.getOutputs() + .doNotCacheIf( + "Caching disabled for this task since it uses a cluster shared by other tasks", + /* + * Look for any other tasks which use the same cluster as this task. Since tests often have side effects for the cluster + * they execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To + * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between + * multiple tasks. + */ + taskSpec + ); + restIntegTestTask.getOutputs().upToDateWhen(new NotSpec(taskSpec)); + } + private String systemProperty(String propName) { return providerFactory.systemProperty(propName).getOrNull(); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java index c602a50c2adb8..32e7f10d14355 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/RestTestBasePlugin.java @@ -38,6 +38,8 @@ import org.gradle.api.artifacts.ProjectDependency; import org.gradle.api.artifacts.type.ArtifactTypeDefinition; import org.gradle.api.attributes.Attribute; +import org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileTree; import org.gradle.api.provider.ProviderFactory; import org.gradle.api.tasks.ClasspathNormalizer; @@ -134,16 +136,20 @@ public void apply(Project project) { task.systemProperty("tests.system_call_filter", "false"); // Register plugins and modules as task inputs and pass paths as system properties to tests - nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulesConfiguration::getAsPath); - registerConfigurationInputs(task, modulesConfiguration); - nonInputSystemProperties.systemProperty(TESTS_CLUSTER_PLUGINS_PATH_SYSPROP, pluginsConfiguration::getAsPath); - registerConfigurationInputs(task, extractedPluginsConfiguration); + var modulePath = project.getObjects().fileCollection().from(modulesConfiguration); + nonInputSystemProperties.systemProperty(TESTS_CLUSTER_MODULES_PATH_SYSPROP, modulePath::getAsPath); + registerConfigurationInputs(task, modulesConfiguration.getName(), modulePath); + var pluginPath = project.getObjects().fileCollection().from(pluginsConfiguration); + 
nonInputSystemProperties.systemProperty(TESTS_CLUSTER_PLUGINS_PATH_SYSPROP, pluginPath::getAsPath); + registerConfigurationInputs( + task, + extractedPluginsConfiguration.getName(), + project.getObjects().fileCollection().from(extractedPluginsConfiguration) + ); // Wire up integ-test distribution by default for all test tasks - nonInputSystemProperties.systemProperty( - INTEG_TEST_DISTRIBUTION_SYSPROP, - () -> integTestDistro.getExtracted().getSingleFile().getPath() - ); + FileCollection extracted = integTestDistro.getExtracted(); + nonInputSystemProperties.systemProperty(INTEG_TEST_DISTRIBUTION_SYSPROP, () -> extracted.getSingleFile().getPath()); nonInputSystemProperties.systemProperty(TESTS_RUNTIME_JAVA_SYSPROP, BuildParams.getRuntimeJavaHome()); // Add `usesDefaultDistribution()` extension method to test tasks to indicate they require the default distro @@ -216,15 +222,15 @@ private FileTree getDistributionFiles(ElasticsearchDistribution distribution, Ac return distribution.getExtracted().getAsFileTree().matching(patternFilter); } - private void registerConfigurationInputs(Task task, Configuration configuration) { + private void registerConfigurationInputs(Task task, String configurationName, ConfigurableFileCollection configuration) { task.getInputs() .files(providerFactory.provider(() -> configuration.getAsFileTree().filter(f -> f.getName().endsWith(".jar") == false))) - .withPropertyName(configuration.getName() + "-files") + .withPropertyName(configurationName + "-files") .withPathSensitivity(PathSensitivity.RELATIVE); task.getInputs() .files(providerFactory.provider(() -> configuration.getAsFileTree().filter(f -> f.getName().endsWith(".jar")))) - .withPropertyName(configuration.getName() + "-classpath") + .withPropertyName(configurationName + "-classpath") .withNormalizer(ClasspathNormalizer.class); } diff --git a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy index 6b662b8165034..719fae2b463c0 100644 --- a/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy +++ b/build-tools/src/integTest/groovy/org/elasticsearch/gradle/TestClustersPluginFuncTest.groovy @@ -34,7 +34,7 @@ class TestClustersPluginFuncTest extends AbstractGradleFuncTest { id 'elasticsearch.testclusters' } - class SomeClusterAwareTask extends DefaultTask implements TestClustersAware { + abstract class SomeClusterAwareTask extends DefaultTask implements TestClustersAware { private Collection clusters = new HashSet<>(); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java index d08dc469e5ba5..e12523870b15b 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionDownloadPlugin.java @@ -11,6 +11,7 @@ import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.elasticsearch.gradle.transform.SymbolicLinkPreservingUntarTransform; import org.elasticsearch.gradle.transform.UnzipTransform; +import org.gradle.api.Action; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -22,7 +23,8 @@ import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; -import java.util.Comparator; +import java.util.ArrayList; +import 
java.util.List; import javax.inject.Inject; @@ -42,9 +44,10 @@ public class DistributionDownloadPlugin implements Plugin { private static final String DOWNLOAD_REPO_NAME = "elasticsearch-downloads"; private static final String SNAPSHOT_REPO_NAME = "elasticsearch-snapshots"; public static final String DISTRO_EXTRACTED_CONFIG_PREFIX = "es_distro_extracted_"; + public static final String DISTRO_CONFIG_PREFIX = "es_distro_file_"; private NamedDomainObjectContainer distributionsContainer; - private NamedDomainObjectContainer distributionsResolutionStrategiesContainer; + private List distributionsResolutionStrategies; private Property dockerAvailability; @@ -77,7 +80,7 @@ public void apply(Project project) { private void setupDistributionContainer(Project project, Property dockerAvailable) { distributionsContainer = project.container(ElasticsearchDistribution.class, name -> { - Configuration fileConfiguration = project.getConfigurations().create("es_distro_file_" + name); + Configuration fileConfiguration = project.getConfigurations().create(DISTRO_CONFIG_PREFIX + name); Configuration extractedConfiguration = project.getConfigurations().create(DISTRO_EXTRACTED_CONFIG_PREFIX + name); extractedConfiguration.getAttributes() .attribute(ArtifactTypeDefinition.ARTIFACT_TYPE_ATTRIBUTE, ArtifactTypeDefinition.DIRECTORY_TYPE); @@ -85,21 +88,17 @@ private void setupDistributionContainer(Project project, Property docke name, project.getObjects(), dockerAvailability, - fileConfiguration, - extractedConfiguration, - (dist) -> finalizeDistributionDependencies(project, dist) + project.getObjects().fileCollection().from(fileConfiguration), + project.getObjects().fileCollection().from(extractedConfiguration), + new FinalizeDistributionAction(distributionsResolutionStrategies, project) ); }); project.getExtensions().add(CONTAINER_NAME, distributionsContainer); } private void setupResolutionsContainer(Project project) { - distributionsResolutionStrategiesContainer = project.container(DistributionResolution.class); - // We want this ordered in the same resolution strategies are added - distributionsResolutionStrategiesContainer.whenObjectAdded( - resolveDependencyNotation -> resolveDependencyNotation.setPriority(distributionsResolutionStrategiesContainer.size()) - ); - project.getExtensions().add(RESOLUTION_CONTAINER_NAME, distributionsResolutionStrategiesContainer); + distributionsResolutionStrategies = new ArrayList<>(); + project.getExtensions().add(RESOLUTION_CONTAINER_NAME, distributionsResolutionStrategies); } @SuppressWarnings("unchecked") @@ -108,30 +107,8 @@ public static NamedDomainObjectContainer getContainer } @SuppressWarnings("unchecked") - public static NamedDomainObjectContainer getRegistrationsContainer(Project project) { - return (NamedDomainObjectContainer) project.getExtensions().getByName(RESOLUTION_CONTAINER_NAME); - } - - private void finalizeDistributionDependencies(Project project, ElasticsearchDistribution distribution) { - DependencyHandler dependencies = project.getDependencies(); - // for the distribution as a file, just depend on the artifact directly - DistributionDependency distributionDependency = resolveDependencyNotation(project, distribution); - dependencies.add(distribution.configuration.getName(), distributionDependency.getDefaultNotation()); - // no extraction needed for rpm, deb or docker - if (distribution.getType().shouldExtract()) { - // The extracted configuration depends on the artifact directly but has - // an artifact transform registered to resolve it as an unpacked 
folder. - dependencies.add(distribution.getExtracted().getName(), distributionDependency.getExtractedNotation()); - } - } - - private DistributionDependency resolveDependencyNotation(Project p, ElasticsearchDistribution distribution) { - return distributionsResolutionStrategiesContainer.stream() - .sorted(Comparator.comparingInt(DistributionResolution::getPriority)) - .map(r -> r.getResolver().resolve(p, distribution)) - .filter(d -> d != null) - .findFirst() - .orElseGet(() -> DistributionDependency.of(dependencyNotation(distribution))); + public static List getRegistrationsContainer(Project project) { + return (List) project.getExtensions().getByName(RESOLUTION_CONTAINER_NAME); } private static void addIvyRepo(Project project, String name, String url, String group) { @@ -155,22 +132,53 @@ private static void setupDownloadServiceRepo(Project project) { addIvyRepo(project, SNAPSHOT_REPO_NAME, "https://snapshots-no-kpi.elastic.co", FAKE_SNAPSHOT_IVY_GROUP); } - /** - * Returns a dependency object representing the given distribution. - *
- * <p>
- * The returned object is suitable to be passed to {@link DependencyHandler}. - * The concrete type of the object will be a set of maven coordinates as a {@link String}. - * Maven coordinates point to either the integ-test-zip coordinates on maven central, or a set of artificial - * coordinates that resolve to the Elastic download service through an ivy repository. - */ - private String dependencyNotation(ElasticsearchDistribution distribution) { - if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) { - return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip"; + private record FinalizeDistributionAction(List resolutionList, Project project) + implements + Action { + @Override + + public void execute(ElasticsearchDistribution distro) { + finalizeDistributionDependencies(project, distro); + } + + private void finalizeDistributionDependencies(Project project, ElasticsearchDistribution distribution) { + // for the distribution as a file, just depend on the artifact directly + DistributionDependency distributionDependency = resolveDependencyNotation(project, distribution); + project.getDependencies().add(DISTRO_CONFIG_PREFIX + distribution.getName(), distributionDependency.getDefaultNotation()); + // no extraction needed for rpm, deb or docker + if (distribution.getType().shouldExtract()) { + // The extracted configuration depends on the artifact directly but has + // an artifact transform registered to resolve it as an unpacked folder. + project.getDependencies() + .add(DISTRO_EXTRACTED_CONFIG_PREFIX + distribution.getName(), distributionDependency.getExtractedNotation()); + } + } + + private DistributionDependency resolveDependencyNotation(Project project, ElasticsearchDistribution distro) { + return resolutionList.stream() + .map(r -> r.getResolver().resolve(project, distro)) + .filter(d -> d != null) + .findFirst() + .orElseGet(() -> DistributionDependency.of(dependencyNotation(distro))); + } + + /** + * Returns a dependency object representing the given distribution. + *
+ * <p>
+ * The returned object is suitable to be passed to {@link DependencyHandler}. + * The concrete type of the object will be a set of maven coordinates as a {@link String}. + * Maven coordinates point to either the integ-test-zip coordinates on maven central, or a set of artificial + * coordinates that resolve to the Elastic download service through an ivy repository. + */ + private String dependencyNotation(ElasticsearchDistribution distribution) { + if (distribution.getType() == ElasticsearchDistributionTypes.INTEG_TEST_ZIP) { + return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip"; + } + Version distroVersion = Version.fromString(distribution.getVersion()); + String extension = distribution.getType().getExtension(distribution.getPlatform()); + String classifier = distribution.getType().getClassifier(distribution.getPlatform(), distroVersion); + String group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP; + return group + ":elasticsearch" + ":" + distribution.getVersion() + classifier + "@" + extension; } - Version distroVersion = Version.fromString(distribution.getVersion()); - String extension = distribution.getType().getExtension(distribution.getPlatform()); - String classifier = distribution.getType().getClassifier(distribution.getPlatform(), distroVersion); - String group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP; - return group + ":elasticsearch" + ":" + distribution.getVersion() + classifier + "@" + extension; } } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java index 3b82c9f6975a0..0d8177dea5cb6 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/DistributionResolution.java @@ -12,9 +12,14 @@ public class DistributionResolution { private Resolver resolver; - private String name; + private final String name; private int priority; + public DistributionResolution(String name, Resolver resolver) { + this(name); + this.resolver = resolver; + } + public DistributionResolution(String name) { this.name = name; } diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java b/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java index 5350b6698cb30..f9805680ce8d4 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/ElasticsearchDistribution.java @@ -11,7 +11,8 @@ import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes; import org.gradle.api.Action; import org.gradle.api.Buildable; -import org.gradle.api.artifacts.Configuration; +import org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.FileCollection; import org.gradle.api.model.ObjectFactory; import org.gradle.api.provider.Property; import org.gradle.api.tasks.TaskDependency; @@ -44,7 +45,7 @@ public String toString() { private final String name; private final Property dockerAvailability; // pkg private so plugin can configure - final Configuration configuration; + final FileCollection configuration; private final Property architecture; private final Property version; @@ -52,7 +53,7 @@ public String toString() { private final Property platform; private final Property 
bundledJdk; private final Property failIfUnavailable; - private final Configuration extracted; + private final ConfigurableFileCollection extracted; private Action distributionFinalizer; private boolean frozen = false; @@ -60,8 +61,8 @@ public String toString() { String name, ObjectFactory objectFactory, Property dockerAvailability, - Configuration fileConfiguration, - Configuration extractedConfiguration, + ConfigurableFileCollection fileConfiguration, + ConfigurableFileCollection extractedConfiguration, Action distributionFinalizer ) { this.name = name; @@ -172,7 +173,7 @@ public String getFilepath() { return configuration.getSingleFile().toString(); } - public Configuration getExtracted() { + public ConfigurableFileCollection getExtracted() { if (getType().shouldExtract() == false) { throw new UnsupportedOperationException( "distribution type [" + getType().getName() + "] for " + "elasticsearch distribution [" + name + "] cannot be extracted" diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java index 5c98ab3bf4364..e80d2ed64cabd 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/DefaultTestClustersTask.java @@ -12,7 +12,7 @@ import java.util.Collection; import java.util.HashSet; -public class DefaultTestClustersTask extends DefaultTask implements TestClustersAware { +public abstract class DefaultTestClustersTask extends DefaultTask implements TestClustersAware { private Collection clusters = new HashSet<>(); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java index 2bd8219dc48e5..ba2a5a20c4fbb 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -8,11 +8,9 @@ package org.elasticsearch.gradle.testclusters; import org.elasticsearch.gradle.FileSystemOperationsAware; -import org.gradle.api.Task; +import org.gradle.api.provider.ProviderFactory; import org.gradle.api.services.internal.BuildServiceProvider; import org.gradle.api.services.internal.BuildServiceRegistryInternal; -import org.gradle.api.specs.NotSpec; -import org.gradle.api.specs.Spec; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.Nested; @@ -28,6 +26,8 @@ import java.util.HashSet; import java.util.List; +import javax.inject.Inject; + import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.THROTTLE_SERVICE_NAME; /** @@ -42,23 +42,6 @@ public abstract class StandaloneRestIntegTestTask extends Test implements TestCl private boolean debugServer = false; public StandaloneRestIntegTestTask() { - Spec taskSpec = t -> getProject().getTasks() - .withType(StandaloneRestIntegTestTask.class) - .stream() - .filter(task -> task != this) - .anyMatch(task -> Collections.disjoint(task.getClusters(), getClusters()) == false); - this.getOutputs() - .doNotCacheIf( - "Caching disabled for this task since it uses a cluster shared by other tasks", - /* - * Look for any other tasks which use the same cluster as this task. 
Since tests often have side effects for the cluster - * they execute against, this state can cause issues when trying to cache tests results of tasks that share a cluster. To - * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between - * multiple tasks. - */ - taskSpec - ); - this.getOutputs().upToDateWhen(new NotSpec(taskSpec)); this.getOutputs() .doNotCacheIf( "Caching disabled for this task since it is configured to preserve data directory", @@ -79,6 +62,10 @@ public Collection getClusters() { return clusters; } + @Override + @Inject + public abstract ProviderFactory getProviderFactory(); + @Override @Internal public List getSharedResources() { diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java index 9537162b5d109..09066d4b26e88 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersAware.java @@ -9,17 +9,24 @@ import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; +import org.gradle.api.provider.Property; import org.gradle.api.provider.Provider; +import org.gradle.api.services.ServiceReference; import org.gradle.api.tasks.Nested; import java.util.Collection; import java.util.concurrent.Callable; +import static org.elasticsearch.gradle.testclusters.TestClustersPlugin.REGISTRY_SERVICE_NAME; + public interface TestClustersAware extends Task { @Nested Collection getClusters(); + @ServiceReference(REGISTRY_SERVICE_NAME) + Property getRegistery(); + default void useCluster(ElasticsearchCluster cluster) { if (cluster.getPath().equals(getProject().getPath()) == false) { throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index 72a462c3cd8c9..d2ccda1c1f8c7 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -37,6 +37,7 @@ import java.io.File; import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.function.Function; import javax.inject.Inject; @@ -49,7 +50,7 @@ public class TestClustersPlugin implements Plugin { public static final String THROTTLE_SERVICE_NAME = "testClustersThrottle"; private static final String LIST_TASK_NAME = "listTestClusters"; - private static final String REGISTRY_SERVICE_NAME = "testClustersRegistry"; + public static final String REGISTRY_SERVICE_NAME = "testClustersRegistry"; private static final Logger logger = Logging.getLogger(TestClustersPlugin.class); private final ProviderFactory providerFactory; private Provider runtimeJavaProvider; @@ -222,13 +223,21 @@ private void configureStartClustersHook( testClusterTasksService.get().register(awareTask.getPath(), awareTask); awareTask.doFirst(task -> { awareTask.beforeStart(); - awareTask.getClusters().forEach(registry::maybeStartCluster); + awareTask.getClusters().forEach(awareTask.getRegistery().get()::maybeStartCluster); }); }); }); } } + public static void maybeStartCluster(ElasticsearchCluster cluster, Set runningClusters) { + if 
(runningClusters.contains(cluster)) { + return; + } + runningClusters.add(cluster); + cluster.start(); + } + static public abstract class TaskEventsService implements BuildService, OperationCompletionListener { Map tasksMap = new HashMap<>(); diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java index ce69c4ec476f9..00e5834b0f826 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/util/GradleUtils.java @@ -13,7 +13,6 @@ import org.gradle.api.Task; import org.gradle.api.UnknownTaskException; import org.gradle.api.artifacts.Configuration; -import org.gradle.api.artifacts.Dependency; import org.gradle.api.artifacts.ModuleDependency; import org.gradle.api.artifacts.ProjectDependency; import org.gradle.api.plugins.JavaBasePlugin; @@ -34,7 +33,6 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Function; @@ -183,16 +181,6 @@ public static void extendSourceSet(Project project, String parentSourceSetName, } } - public static Dependency projectDependency(Project project, String projectPath, String projectConfig) { - if (project.findProject(projectPath) == null) { - throw new GradleException("no project [" + projectPath + "], project names: " + project.getRootProject().getAllprojects()); - } - Map depConfig = new HashMap<>(); - depConfig.put("path", projectPath); - depConfig.put("configuration", projectConfig); - return project.getDependencies().project(depConfig); - } - /** * To calculate the project path from a task path without relying on Task#getProject() which is discouraged during * task execution time. 
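
That caveat about Task#getProject() is the configuration-cache rule this whole patch is built
around: task actions must not reach back into live Project state at execution time. A minimal
sketch of the idea (not code from this patch; the plugin and task names are made up, assuming
standard Gradle 8 Plugin/TaskContainer APIs):

    import org.gradle.api.Plugin;
    import org.gradle.api.Project;

    import java.io.File;

    public class ConfigCacheFriendlyPlugin implements Plugin<Project> {
        @Override
        public void apply(Project project) {
            // Incompatible: the doLast action dereferences Task#getProject() at execution time,
            // which the configuration cache rejects.
            project.getTasks().register("bad", t -> t.doLast(task -> System.out.println(task.getProject().getRootDir())));

            // Compatible: capture the plain value while configuring; the action then closes over
            // a serializable File instead of a live Project reference.
            File rootDir = project.getRootDir();
            project.getTasks().register("good", t -> t.doLast(task -> System.out.println(rootDir)));
        }
    }

The buildBwc fix later in this series applies exactly this capture-before-doLast move.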
diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle index 39f9bbf536dda..0a47d0652e465 100644 --- a/distribution/tools/java-version-checker/build.gradle +++ b/distribution/tools/java-version-checker/build.gradle @@ -8,15 +8,17 @@ tasks.named(sourceSets.unsupportedJdkVersionEntrypoint.compileJavaTaskName).conf targetCompatibility = JavaVersion.VERSION_1_8 } + tasks.named("jar") { manifest { attributes("Multi-Release": "true") } + FileCollection mainOutput = sourceSets.main.output; from(sourceSets.unsupportedJdkVersionEntrypoint.output) eachFile { details -> if (details.path.equals("org/elasticsearch/tools/java_version_checker/JavaVersionChecker.class") && - sourceSets.main.output.asFileTree.contains(details.file)) { + mainOutput.asFileTree.contains(details.file)) { details.relativePath = details.relativePath.prepend("META-INF/versions/17") } } From 841f711a0a31512786526bd238b368cb945df32c Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 10 Nov 2023 08:05:13 +0100 Subject: [PATCH 02/15] Make gradle build finished logic CC compatible (#96475) * Make gradle build finished logic CC compatible * Make ElasticsearchBuildFinishedPlugin configuration cache aware * Add gradle enterprise plugin to buildlibs version catalogue --- build-tools-internal/build.gradle | 7 + .../elasticsearch.build-complete.gradle | 93 -------- .../groovy/elasticsearch.build-scan.gradle | 1 + .../ElasticsearchBuildFinishedPlugin.java | 219 ++++++++++++++++++ build.gradle | 2 +- gradle/build.versions.toml | 1 + 6 files changed, 229 insertions(+), 94 deletions(-) delete mode 100644 build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle create mode 100644 build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildFinishedPlugin.java diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index c134638bcd6b6..8a7bc488cf51b 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -35,6 +35,10 @@ gradlePlugin { id = 'elasticsearch.build' implementationClass = 'org.elasticsearch.gradle.internal.BuildPlugin' } + buildFinished { + id = 'elasticsearch.build-finished' + implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchBuildFinishedPlugin' + } distro { id = 'elasticsearch.distro' implementationClass = 'org.elasticsearch.gradle.internal.distribution.ElasticsearchDistributionPlugin' @@ -266,6 +270,8 @@ dependencies { api buildLibs.apache.rat api buildLibs.jna api buildLibs.shadow.plugin + api buildLibs.gradle.enterprise + // for our ide tweaking api buildLibs.idea.ext // When upgrading forbidden apis, ensure dependency version is bumped in ThirdPartyPrecommitPlugin as well @@ -280,6 +286,7 @@ dependencies { api buildLibs.asm.tree api buildLibs.httpclient api buildLibs.httpcore + compileOnly buildLibs.checkstyle runtimeOnly "org.elasticsearch.gradle:reaper:$version" testImplementation buildLibs.checkstyle diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle deleted file mode 100644 index 1a0afe6d7d344..0000000000000 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-complete.gradle +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -import org.elasticsearch.gradle.util.GradleUtils - -import java.nio.file.Files - -String buildNumber = System.getenv('BUILD_NUMBER') ?: System.getenv('BUILDKITE_BUILD_NUMBER') -String performanceTest = System.getenv('BUILD_PERFORMANCE_TEST') -Boolean isNested = System.getProperty("scan.tag.NESTED") != null - -if (buildNumber && performanceTest == null && GradleUtils.isIncludedBuild(project) == false && isNested == false) { - def uploadFilePath = "build/${buildNumber}.tar.bz2" - File uploadFile = file(uploadFilePath) - project.gradle.buildFinished { result -> - println "build complete, generating: $uploadFile" - if (uploadFile.exists()) { - project.delete(uploadFile) - } - - try { - ant.tar(destfile: uploadFile, compression: "bzip2", longfile: "gnu") { - fileset(dir: projectDir) { - Set fileSet = fileTree(projectDir) { - include("**/*.hprof") - include("**/build/test-results/**/*.xml") - include("**/build/testclusters/**") - include("**/build/testrun/*/temp/**") - include("**/build/**/hs_err_pid*.log") - exclude("**/build/testclusters/**/data/**") - exclude("**/build/testclusters/**/distro/**") - exclude("**/build/testclusters/**/repo/**") - exclude("**/build/testclusters/**/extract/**") - exclude("**/build/testclusters/**/tmp/**") - exclude("**/build/testrun/*/temp/**/data/**") - exclude("**/build/testrun/*/temp/**/distro/**") - exclude("**/build/testrun/*/temp/**/repo/**") - exclude("**/build/testrun/*/temp/**/extract/**") - exclude("**/build/testrun/*/temp/**/tmp/**") - } - .files - .findAll { Files.isRegularFile(it.toPath()) } - - if (fileSet.empty) { - // In cases where we don't match any workspace files, exclude everything - ant.exclude(name: "**/*") - } else { - fileSet.each { - ant.include(name: projectDir.toPath().relativize(it.toPath())) - } - } - } - - fileset(dir: "${gradle.gradleUserHomeDir}/daemon/${gradle.gradleVersion}", followsymlinks: false) { - include(name: "**/daemon-${ProcessHandle.current().pid()}*.log") - } - - fileset(dir: "${gradle.gradleUserHomeDir}/workers", followsymlinks: false) - - fileset(dir: "${project.projectDir}/.gradle/reaper", followsymlinks: false, erroronmissingdir: false) - } - } catch (Exception e) { - logger.lifecycle("Failed to archive additional logs", e) - } - - if (uploadFile.exists() && System.getenv("BUILDKITE") == "true") { - try { - println "Uploading buildkite artifact: ${uploadFilePath}..." - new ProcessBuilder("buildkite-agent", "artifact", "upload", uploadFilePath) - .start() - .waitFor() - - println "Generating buildscan link for artifact..." 
- - def process = new ProcessBuilder("buildkite-agent", "artifact", "search", uploadFilePath, "--step", System.getenv('BUILDKITE_JOB_ID'), "--format", "%i").start() - process.waitFor() - def artifactUuid = (process.text ?: "").trim() - - println "Artifact UUID: ${artifactUuid}" - if (artifactUuid) { - buildScan.link 'Artifact Upload', "https://buildkite.com/organizations/elastic/pipelines/${System.getenv('BUILDKITE_PIPELINE_SLUG')}/builds/${buildNumber}/jobs/${System.getenv('BUILDKITE_JOB_ID')}/artifacts/${artifactUuid}" - } - } catch (Exception e) { - logger.lifecycle("Failed to upload buildkite artifact", e) - } - } - } -} diff --git a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle index 0f56dd2ef8992..acecec5b607ed 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.build-scan.gradle @@ -132,6 +132,7 @@ buildScan { } buildFinished { result -> + buildScanPublished { scan -> // Attach build scan link as build metadata // See: https://buildkite.com/docs/pipelines/build-meta-data diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildFinishedPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildFinishedPlugin.java new file mode 100644 index 0000000000000..639ceeaf041e3 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildFinishedPlugin.java @@ -0,0 +1,219 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal; + +import com.gradle.scan.plugin.BuildScanExtension; + +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream; +import org.apache.commons.io.IOUtils; +import org.elasticsearch.gradle.util.GradleUtils; +import org.gradle.api.Plugin; +import org.gradle.api.Project; +import org.gradle.api.file.FileSystemOperations; +import org.gradle.api.flow.FlowAction; +import org.gradle.api.flow.FlowParameters; +import org.gradle.api.flow.FlowProviders; +import org.gradle.api.flow.FlowScope; +import org.gradle.api.internal.file.FileOperations; +import org.gradle.api.provider.ListProperty; +import org.gradle.api.provider.Property; +import org.gradle.api.tasks.Input; +import org.jetbrains.annotations.NotNull; + +import java.io.*; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +import javax.inject.Inject; + +public abstract class ElasticsearchBuildFinishedPlugin implements Plugin { + + @Inject + protected abstract FlowScope getFlowScope(); + + @Inject + protected abstract FlowProviders getFlowProviders(); + + @Inject + protected abstract FileOperations getFileOperations(); + + @Override + public void apply(Project target) { + String buildNumber = System.getenv("BUILD_NUMBER") != null + ? 
System.getenv("BUILD_NUMBER") + : System.getenv("BUILDKITE_BUILD_NUMBER"); + String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST"); + if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) { + File targetFile = target.file("build/" + buildNumber + ".tar.bz2"); + File projectDir = target.getProjectDir(); + File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/"); + BuildScanExtension extension = target.getExtensions().getByType(BuildScanExtension.class); + File daemonsLogDir = new File(target.getGradle().getGradleUserHomeDir(), "daemon/" + target.getGradle().getGradleVersion()); + + getFlowScope().always(BuildFinishedFlowAction.class, spec -> { + spec.getParameters().getBuildScan().set(extension); + spec.getParameters().getUploadFile().set(targetFile); + spec.getParameters().getProjectDir().set(projectDir); + spec.getParameters().getFilteredFiles().addAll(getFlowProviders().getBuildWorkResult().map((result) -> { + List files = new ArrayList<>(); + files.addAll(resolveProjectLogs(projectDir)); + if (files.isEmpty() == false) { + files.addAll(resolveDaemonLogs(daemonsLogDir)); + files.addAll(getFileOperations().fileTree(gradleWorkersDir).getFiles()); + files.addAll(getFileOperations().fileTree(new File(projectDir, ".gradle/reaper/")).getFiles()); + } + return files; + })); + }); + } + } + + private List resolveProjectLogs(File projectDir) { + var projectDirFiles = getFileOperations().fileTree(projectDir); + projectDirFiles.include("**/*.hprof"); + projectDirFiles.include("**/build/test-results/**/*.xml"); + projectDirFiles.include("**/build/testclusters/**"); + projectDirFiles.include("**/build/testrun/*/temp/**"); + projectDirFiles.include("**/build/**/hs_err_pid*.log"); + projectDirFiles.exclude("**/build/testclusters/**/data/**"); + projectDirFiles.exclude("**/build/testclusters/**/distro/**"); + projectDirFiles.exclude("**/build/testclusters/**/repo/**"); + projectDirFiles.exclude("**/build/testclusters/**/extract/**"); + projectDirFiles.exclude("**/build/testclusters/**/tmp/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/data/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/distro/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/repo/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/extract/**"); + projectDirFiles.exclude("**/build/testrun/*/temp/**/tmp/**"); + return projectDirFiles.getFiles().stream().filter(f -> Files.isRegularFile(f.toPath())).toList(); + } + + private List resolveDaemonLogs(File daemonsLogDir) { + var gradleDaemonFileSet = getFileOperations().fileTree(daemonsLogDir); + gradleDaemonFileSet.include("**/daemon-" + ProcessHandle.current().pid() + "*.log"); + return gradleDaemonFileSet.getFiles().stream().filter(f -> Files.isRegularFile(f.toPath())).toList(); + } + + public abstract static class BuildFinishedFlowAction implements FlowAction { + interface Parameters extends FlowParameters { + @Input + Property getUploadFile(); + + @Input + Property getProjectDir(); + + @Input + ListProperty getFilteredFiles(); + + @Input + Property getBuildScan(); + + } + + @Inject + protected abstract FileSystemOperations getFileSystemOperations(); + + @SuppressWarnings("checkstyle:DescendantToken") + @Override + public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNotFoundException { + File uploadFile = parameters.getUploadFile().get(); + if (uploadFile.exists()) { + getFileSystemOperations().delete(spec -> spec.delete(uploadFile)); 
+ } + uploadFile.getParentFile().mkdirs(); + createBuildArchiveTar(parameters.getFilteredFiles().get(), parameters.getProjectDir().get(), uploadFile); + if (uploadFile.exists() && System.getenv("BUILDKITE").equals("true")) { + String uploadFilePath = "build/" + uploadFile.getName(); + try { + System.out.println("Uploading buildkite artifact: " + uploadFilePath + "..."); + new ProcessBuilder("buildkite-agent", "artifact", "upload", uploadFilePath).start().waitFor(); + + System.out.println("Generating buildscan link for artifact..."); + + Process process = new ProcessBuilder( + "buildkite-agent", + "artifact", + "search", + uploadFilePath, + "--step", + System.getenv("BUILDKITE_JOB_ID"), + "--format", + "%i" + ).start(); + process.waitFor(); + String processOutput; + try { + processOutput = IOUtils.toString(process.getInputStream()); + } catch (IOException e) { + processOutput = ""; + } + String artifactUuid = processOutput.trim(); + + System.out.println("Artifact UUID: " + artifactUuid); + if (artifactUuid.isEmpty() == false) { + String buildkitePipelineSlug = System.getenv("BUILDKITE_PIPELINE_SLUG"); + String targetLink = "https://buildkite.com/organizations/elastic/pipelines/" + + buildkitePipelineSlug + + "/builds/" + + System.getenv("BUILD_NUMBER") + + "/jobs/" + + System.getenv("BUILDKITE_JOB_ID") + + "/artifacts/" + + artifactUuid; + parameters.getBuildScan().get().link("Artifact Upload", targetLink); + } + } catch (Exception e) { + System.out.println("Failed to upload buildkite artifact " + e.getMessage()); + } + } + + } + + private static void createBuildArchiveTar(List files, File projectDir, File uploadFile) { + try ( + OutputStream fOut = Files.newOutputStream(uploadFile.toPath()); + BufferedOutputStream buffOut = new BufferedOutputStream(fOut); + BZip2CompressorOutputStream bzOut = new BZip2CompressorOutputStream(buffOut); + TarArchiveOutputStream tOut = new TarArchiveOutputStream(bzOut) + ) { + Path projectPath = projectDir.toPath(); + tOut.setLongFileMode(TarArchiveOutputStream.LONGFILE_POSIX); + for (Path path : files.stream().map(File::toPath).collect(Collectors.toList())) { + if (!Files.isRegularFile(path)) { + throw new IOException("Support only file!"); + } + + TarArchiveEntry tarEntry = new TarArchiveEntry(path.toFile(), calculateArchivePath(path, projectPath)); + + tOut.putArchiveEntry(tarEntry); + + // copy file to TarArchiveOutputStream + Files.copy(path, tOut); + tOut.closeArchiveEntry(); + + } + tOut.flush(); + tOut.finish(); + + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @NotNull + private static String calculateArchivePath(Path path, Path projectPath) { + return path.startsWith(projectPath) ? 
projectPath.relativize(path).toString() : path.getFileName().toString(); + } + } +} diff --git a/build.gradle b/build.gradle index d05c2bf53f660..981c1598ac515 100644 --- a/build.gradle +++ b/build.gradle @@ -29,8 +29,8 @@ plugins { id 'lifecycle-base' id 'elasticsearch.docker-support' id 'elasticsearch.global-build-info' + id 'elasticsearch.build-finished' id 'elasticsearch.build-scan' - id 'elasticsearch.build-complete' id 'elasticsearch.jdk-download' id 'elasticsearch.internal-distribution-download' id 'elasticsearch.runtime-jdk-provision' diff --git a/gradle/build.versions.toml b/gradle/build.versions.toml index 94ed94df43818..e8d94ce624dbb 100644 --- a/gradle/build.versions.toml +++ b/gradle/build.versions.toml @@ -17,6 +17,7 @@ commons-codec = "commons-codec:commons-codec:1.11" commmons-io = "commons-io:commons-io:2.2" docker-compose = "com.avast.gradle:gradle-docker-compose-plugin:0.17.5" forbiddenApis = "de.thetaphi:forbiddenapis:3.6" +gradle-enterprise = "com.gradle:gradle-enterprise-gradle-plugin:3.14.1" hamcrest = "org.hamcrest:hamcrest:2.1" httpcore = "org.apache.httpcomponents:httpcore:4.4.12" httpclient = "org.apache.httpcomponents:httpclient:4.5.10" From 85b1fb3fe5647d92e19252bcc6d281b6ab1099f5 Mon Sep 17 00:00:00 2001 From: Panagiotis Bailis Date: Fri, 10 Nov 2023 09:24:14 +0200 Subject: [PATCH 03/15] Fix for SearchWithRandomIOExceptionsIT#testRandomDirectoryIOExceptions (#101722) * Updating exception handling to make sure that the DirectoryReader in InternalEngine is properly closed --- .../search/basic/SearchWithRandomIOExceptionsIT.java | 1 - .../org/elasticsearch/index/engine/InternalEngine.java | 10 +++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 54ad0cd7e0cff..0ccde7a62a09e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -41,7 +41,6 @@ protected Collection> nodePlugins() { return Arrays.asList(MockFSIndexStore.TestPlugin.class); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99174") public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException { String mapping = Strings.toString( XContentFactory.jsonBuilder() diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 141a06eff0ec6..d217f6b844fe8 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -759,13 +759,11 @@ private static String loadHistoryUUID(Map commitData) { private ExternalReaderManager createReaderManager(RefreshWarmerListener externalRefreshListener) throws EngineException { boolean success = false; + ElasticsearchDirectoryReader directoryReader = null; ElasticsearchReaderManager internalReaderManager = null; try { try { - final ElasticsearchDirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap( - DirectoryReader.open(indexWriter), - shardId - ); + directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId); lastCommittedSegmentInfos = 
store.readLastCommittedSegmentsInfo(); internalReaderManager = createInternalReaderManager(directoryReader); ExternalReaderManager externalReaderManager = new ExternalReaderManager(internalReaderManager, externalRefreshListener); @@ -782,7 +780,9 @@ private ExternalReaderManager createReaderManager(RefreshWarmerListener external } } finally { if (success == false) { // release everything we created on a failure - IOUtils.closeWhileHandlingException(internalReaderManager, indexWriter); + // make sure that we close the directory reader even if the internal reader manager has failed to initialize + var reader = internalReaderManager == null ? directoryReader : internalReaderManager; + IOUtils.closeWhileHandlingException(reader, indexWriter); } } } From e111a8ef2ede8ec563ac54984cc1072e1b3b1913 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Fri, 10 Nov 2023 08:35:17 +0100 Subject: [PATCH 04/15] Make buildBwc task configuration cache compatible (#101927) --- .../gradle/internal/InternalDistributionBwcSetupPlugin.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java index 2468711561ae4..f727dc165a8a9 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPlugin.java @@ -286,11 +286,12 @@ static void createBuildBwcTask( if (project.getGradle().getStartParameter().isBuildCacheEnabled()) { c.getArgs().add("--build-cache"); } + File rootDir = project.getRootDir(); c.doLast(new Action() { @Override public void execute(Task task) { if (expectedOutputFile.exists() == false) { - Path relativeOutputPath = project.getRootDir().toPath().relativize(expectedOutputFile.toPath()); + Path relativeOutputPath = rootDir.toPath().relativize(expectedOutputFile.toPath()); final String message = "Building %s didn't generate expected artifact [%s]. 
The working branch may be " + "out-of-date - try merging in the latest upstream changes to the branch.";
                        throw new InvalidUserDataException(message.formatted(bwcVersion.get(), relativeOutputPath));
                    }
                }

From 46f545cf458c4a304046d2bced66f61f878f683e Mon Sep 17 00:00:00 2001
From: Rene Groeschke 
Date: Fri, 10 Nov 2023 09:11:58 +0100
Subject: [PATCH 05/15] Keep old plugin name for build complete plugin
 (#101995)

Fixes incompatibility with serverless builds
---
 build-tools-internal/build.gradle                           | 6 +++---
 ...hedPlugin.java => ElasticsearchBuildCompletePlugin.java} | 2 +-
 build.gradle                                                | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
 rename build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/{ElasticsearchBuildFinishedPlugin.java => ElasticsearchBuildCompletePlugin.java} (99%)

diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle
index 8a7bc488cf51b..1717c40333cb0 100644
--- a/build-tools-internal/build.gradle
+++ b/build-tools-internal/build.gradle
@@ -35,9 +35,9 @@ gradlePlugin {
       id = 'elasticsearch.build'
       implementationClass = 'org.elasticsearch.gradle.internal.BuildPlugin'
     }
-    buildFinished {
-      id = 'elasticsearch.build-finished'
-      implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchBuildFinishedPlugin'
+    buildComplete {
+      id = 'elasticsearch.build-complete'
+      implementationClass = 'org.elasticsearch.gradle.internal.ElasticsearchBuildCompletePlugin'
     }
     distro {
       id = 'elasticsearch.distro'
diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildFinishedPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java
similarity index 99%
rename from build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildFinishedPlugin.java
rename to build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java
index 639ceeaf041e3..8c0045e1dcff2 100644
--- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildFinishedPlugin.java
+++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java
@@ -37,7 +37,7 @@

 import javax.inject.Inject;

-public abstract class ElasticsearchBuildFinishedPlugin implements Plugin<Project> {
+public abstract class ElasticsearchBuildCompletePlugin implements Plugin<Project> {

     @Inject
     protected abstract FlowScope getFlowScope();
diff --git a/build.gradle b/build.gradle
index 981c1598ac515..0f11854dc57bf 100644
--- a/build.gradle
+++ b/build.gradle
@@ -29,7 +29,7 @@ plugins {
   id 'lifecycle-base'
   id 'elasticsearch.docker-support'
   id 'elasticsearch.global-build-info'
-  id 'elasticsearch.build-finished'
+  id 'elasticsearch.build-complete'
   id 'elasticsearch.build-scan'
   id 'elasticsearch.jdk-download'
   id 'elasticsearch.internal-distribution-download'

From 0cf140319f1d1b5dc8f18565fd2874ba462b8e34 Mon Sep 17 00:00:00 2001
From: Armin Braun 
Date: Fri, 10 Nov 2023 10:03:37 +0100
Subject: [PATCH 06/15] Remove some more explicit SearchResponse use in tests
 (#101992)

It's in the title, a couple examples of removing explicit responses in the
tests.
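
The shape of the change, sketched on a simplified query rather than an exact
hunk from this commit (assuming the usual ESIntegTestCase context and a static
import of ElasticsearchAssertions.assertResponse):

    // Before: the SearchResponse escapes the call site, and nothing guarantees it is released.
    SearchResponse rsp = prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).get();
    assertEquals(1, rsp.getHits().getTotalHits().value);

    // After: assertResponse owns the response lifecycle and only lends it to the consumer.
    assertResponse(
        prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()),
        rsp -> assertEquals(1, rsp.getHits().getTotalHits().value)
    );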
--- .../script/expression/MoreExpressionIT.java | 488 +++++++++--------- .../upgrades/SearchStatesIT.java | 8 +- .../action/search/SearchResponseTests.java | 50 +- ...ecurityLicensingAndFeatureUsageRestIT.java | 6 +- .../RemoteClusterSecurityMutualTlsIT.java | 14 +- 5 files changed, 307 insertions(+), 259 deletions(-) diff --git a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java index f71a55f4f6be0..23e5fcd312dcc 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.common.lucene.search.function.CombineFunction; @@ -37,6 +36,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; @@ -44,6 +44,8 @@ import static org.elasticsearch.search.aggregations.PipelineAggregatorBuilders.bucketScript; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -77,28 +79,30 @@ public void testBasic() throws Exception { createIndex("test"); ensureGreen("test"); client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse rsp = buildRequest("doc['foo'] + 1").get(); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + assertResponse(buildRequest("doc['foo'] + 1"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } public void testFunction() throws Exception { createIndex("test"); ensureGreen("test"); client().prepareIndex("test").setId("1").setSource("foo", 4).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse rsp = buildRequest("doc['foo'] + abs(1)").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['foo'] + abs(1)"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } public void testBasicUsingDotValue() throws Exception { createIndex("test"); ensureGreen("test"); client().prepareIndex("test").setId("1").setSource("foo", 
4).setRefreshPolicy(IMMEDIATE).get(); - SearchResponse rsp = buildRequest("doc['foo'].value + 1").get(); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + assertResponse(buildRequest("doc['foo'].value + 1"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } public void testScore() throws Exception { @@ -116,13 +120,14 @@ public void testScore() throws Exception { SearchRequestBuilder req = prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE)); req.setSearchType(SearchType.DFS_QUERY_THEN_FETCH); // make sure DF is consistent - SearchResponse rsp = req.get(); - assertNoFailures(rsp); - SearchHits hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals("1", hits.getAt(0).getId()); - assertEquals("3", hits.getAt(1).getId()); - assertEquals("2", hits.getAt(2).getId()); + assertResponse(req, rsp -> { + assertNoFailures(rsp); + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals("1", hits.getAt(0).getId()); + assertEquals("3", hits.getAt(1).getId()); + assertEquals("2", hits.getAt(2).getId()); + }); req = prepareSearch().setIndices("test"); req.setQuery(QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("text", "hello"), score).boostMode(CombineFunction.REPLACE)); @@ -140,26 +145,30 @@ public void testDateMethods() throws Exception { client().prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), client().prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") ); - SearchResponse rsp = buildRequest("doc['date0'].getSeconds() - doc['date0'].getMinutes()").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - SearchHits hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date0'].getHourOfDay() + doc['date1'].getDayOfMonth()").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].getMonth() + 1").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(9.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(10.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].getYear()").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertResponse(buildRequest("doc['date0'].getSeconds() - doc['date0'].getMinutes()"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date0'].getHourOfDay() + doc['date1'].getDayOfMonth()"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = 
rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].getMonth() + 1"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(9.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(10.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].getYear()"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); } public void testDateObjectMethods() throws Exception { @@ -170,26 +179,30 @@ public void testDateObjectMethods() throws Exception { client().prepareIndex("test").setId("1").setSource("id", 1, "date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), client().prepareIndex("test").setId("2").setSource("id", 2, "date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z") ); - SearchResponse rsp = buildRequest("doc['date0'].date.secondOfMinute - doc['date0'].date.minuteOfHour").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - SearchHits hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date0'].date.getHourOfDay() + doc['date1'].date.dayOfMonth").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].date.monthOfYear + 1").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(10.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(11.0, hits.getAt(1).field("foo").getValue(), 0.0D); - rsp = buildRequest("doc['date1'].date.year").get(); - assertEquals(2, rsp.getHits().getTotalHits().value); - hits = rsp.getHits(); - assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertResponse(buildRequest("doc['date0'].date.secondOfMinute - doc['date0'].date.minuteOfHour"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(-11.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date0'].date.getHourOfDay() + doc['date1'].date.dayOfMonth"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(24.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].date.monthOfYear + 1"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(10.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(11.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); + assertResponse(buildRequest("doc['date1'].date.year"), rsp -> { + assertEquals(2, rsp.getHits().getTotalHits().value); + SearchHits hits = rsp.getHits(); + assertEquals(1985.0, hits.getAt(0).field("foo").getValue(), 0.0D); + 
assertEquals(1983.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); } public void testMultiValueMethods() throws Exception { @@ -221,79 +234,79 @@ public void testMultiValueMethods() throws Exception { client().prepareIndex("test").setId("3").setSource(doc3) ); - SearchResponse rsp = buildRequest("doc['double0'].count() + doc['double1'].count()").get(); - assertNoFailures(rsp); - SearchHits hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(2.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].sum()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(7.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(6.0, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].avg() + doc['double1'].avg()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(4.3, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(8.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.5, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].median()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(1.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(1.25, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].min()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(-1.5, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].max()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); - - rsp = buildRequest("doc['double0'].sum()/doc['double0'].count()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(2.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(1.5, hits.getAt(2).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['double0'].count() + doc['double1'].count()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(2.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].sum()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(7.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(6.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + 
assertNoFailuresAndResponse(buildRequest("doc['double0'].avg() + doc['double1'].avg()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(4.3, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(8.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].median()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(1.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(1.25, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].min()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(-1.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].max()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); + + assertNoFailuresAndResponse(buildRequest("doc['double0'].sum()/doc['double0'].count()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(2.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(1.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); // make sure count() works for missing - rsp = buildRequest("doc['double2'].count()").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(0.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(0.0, hits.getAt(2).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['double2'].count()"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(1.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(0.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(0.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); // make sure .empty works in the same way - rsp = buildRequest("doc['double2'].empty ? 5.0 : 2.0").get(); - assertNoFailures(rsp); - hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(2.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['double2'].empty ? 
5.0 : 2.0"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(2.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(5.0, hits.getAt(2).field("foo").getValue(), 0.0D); + }); } public void testInvalidDateMethodCall() throws Exception { @@ -325,12 +338,12 @@ public void testSparseField() throws Exception { client().prepareIndex("test").setId("1").setSource("id", 1, "x", 4), client().prepareIndex("test").setId("2").setSource("id", 2, "y", 2) ); - SearchResponse rsp = buildRequest("doc['x'] + 1").get(); - assertNoFailures(rsp); - SearchHits hits = rsp.getHits(); - assertEquals(2, rsp.getHits().getTotalHits().value); - assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(1.0, hits.getAt(1).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(buildRequest("doc['x'] + 1"), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(2, rsp.getHits().getTotalHits().value); + assertEquals(5.0, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(1.0, hits.getAt(1).field("foo").getValue(), 0.0D); + }); } public void testMissingField() throws Exception { @@ -361,12 +374,13 @@ public void testParams() throws Exception { ); // a = int, b = double, c = long String script = "doc['x'] * a + b + ((c + doc['x']) > 5000000009 ? 1 : 0)"; - SearchResponse rsp = buildRequest(script, "a", 2, "b", 3.5, "c", 5000000000L).get(); - SearchHits hits = rsp.getHits(); - assertEquals(3, hits.getTotalHits().value); - assertEquals(24.5, hits.getAt(0).field("foo").getValue(), 0.0D); - assertEquals(9.5, hits.getAt(1).field("foo").getValue(), 0.0D); - assertEquals(13.5, hits.getAt(2).field("foo").getValue(), 0.0D); + assertResponse(buildRequest(script, "a", 2, "b", 3.5, "c", 5000000000L), rsp -> { + SearchHits hits = rsp.getHits(); + assertEquals(3, hits.getTotalHits().value); + assertEquals(24.5, hits.getAt(0).field("foo").getValue(), 0.0D); + assertEquals(9.5, hits.getAt(1).field("foo").getValue(), 0.0D); + assertEquals(13.5, hits.getAt(2).field("foo").getValue(), 0.0D); + }); } public void testCompileFailure() { @@ -484,21 +498,22 @@ public void testSpecialValueVariable() throws Exception { .script(new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "3.0", Collections.emptyMap())) ); - SearchResponse rsp = req.get(); - assertEquals(3, rsp.getHits().getTotalHits().value); + assertResponse(req, rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); - Stats stats = rsp.getAggregations().get("int_agg"); - assertEquals(39.0, stats.getMax(), 0.0001); - assertEquals(15.0, stats.getMin(), 0.0001); + Stats stats = rsp.getAggregations().get("int_agg"); + assertEquals(39.0, stats.getMax(), 0.0001); + assertEquals(15.0, stats.getMin(), 0.0001); - stats = rsp.getAggregations().get("double_agg"); - assertEquals(0.7, stats.getMax(), 0.0001); - assertEquals(0.1, stats.getMin(), 0.0001); + stats = rsp.getAggregations().get("double_agg"); + assertEquals(0.7, stats.getMax(), 0.0001); + assertEquals(0.1, stats.getMin(), 0.0001); - stats = rsp.getAggregations().get("const_agg"); - assertThat(stats.getMax(), equalTo(3.0)); - assertThat(stats.getMin(), equalTo(3.0)); - assertThat(stats.getAvg(), equalTo(3.0)); + stats = rsp.getAggregations().get("const_agg"); + assertThat(stats.getMax(), equalTo(3.0)); + assertThat(stats.getMin(), equalTo(3.0)); + assertThat(stats.getAvg(), equalTo(3.0)); + }); } public void testStringSpecialValueVariable() throws Exception { 
@@ -520,18 +535,19 @@ public void testStringSpecialValueVariable() throws Exception { .script(new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "_value", Collections.emptyMap())) ); - String message; + AtomicReference message = new AtomicReference<>(); try { // shards that don't have docs with the "text" field will not fail, // so we may or may not get a total failure - SearchResponse rsp = req.get(); - assertThat(rsp.getShardFailures().length, greaterThan(0)); // at least the shards containing the docs should have failed - message = rsp.getShardFailures()[0].reason(); + assertResponse(req, rsp -> { + assertThat(rsp.getShardFailures().length, greaterThan(0)); // at least the shards containing the docs should have failed + message.set(rsp.getShardFailures()[0].reason()); + }); } catch (SearchPhaseExecutionException e) { - message = e.toString(); + message.set(e.toString()); } - assertThat(message + "should have contained ScriptException", message.contains("ScriptException"), equalTo(true)); - assertThat(message + "should have contained text variable error", message.contains("text variable"), equalTo(true)); + assertThat(message + "should have contained ScriptException", message.get().contains("ScriptException"), equalTo(true)); + assertThat(message + "should have contained text variable error", message.get().contains("text variable"), equalTo(true)); } // test to make sure expressions are not allowed to be used as update scripts @@ -565,44 +581,52 @@ public void testPipelineAggregationScript() throws Exception { client().prepareIndex("agg_index").setId("4").setSource("one", 4.0, "two", 2.0, "three", 3.0, "four", 4.0), client().prepareIndex("agg_index").setId("5").setSource("one", 5.0, "two", 2.0, "three", 3.0, "four", 4.0) ); - SearchResponse response = prepareSearch("agg_index").addAggregation( - histogram("histogram").field("one") - .interval(2) - .subAggregation(sum("twoSum").field("two")) - .subAggregation(sum("threeSum").field("three")) - .subAggregation(sum("fourSum").field("four")) - .subAggregation( - bucketScript( - "totalSum", - new Script(ScriptType.INLINE, ExpressionScriptEngine.NAME, "_value0 + _value1 + _value2", Collections.emptyMap()), - "twoSum", - "threeSum", - "fourSum" + assertResponse( + prepareSearch("agg_index").addAggregation( + histogram("histogram").field("one") + .interval(2) + .subAggregation(sum("twoSum").field("two")) + .subAggregation(sum("threeSum").field("three")) + .subAggregation(sum("fourSum").field("four")) + .subAggregation( + bucketScript( + "totalSum", + new Script( + ScriptType.INLINE, + ExpressionScriptEngine.NAME, + "_value0 + _value1 + _value2", + Collections.emptyMap() + ), + "twoSum", + "threeSum", + "fourSum" + ) ) - ) - ).execute().actionGet(); - - Histogram histogram = response.getAggregations().get("histogram"); - assertThat(histogram, notNullValue()); - assertThat(histogram.getName(), equalTo("histogram")); - List buckets = histogram.getBuckets(); - - for (int bucketCount = 0; bucketCount < buckets.size(); ++bucketCount) { - Histogram.Bucket bucket = buckets.get(bucketCount); - if (bucket.getDocCount() == 1) { - SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); - assertThat(seriesArithmetic, notNullValue()); - double seriesArithmeticValue = seriesArithmetic.value(); - assertEquals(9.0, seriesArithmeticValue, 0.001); - } else if (bucket.getDocCount() == 2) { - SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); - assertThat(seriesArithmetic, notNullValue()); - double seriesArithmeticValue 
= seriesArithmetic.value(); - assertEquals(18.0, seriesArithmeticValue, 0.001); - } else { - fail("Incorrect number of documents in a bucket in the histogram."); + ), + response -> { + Histogram histogram = response.getAggregations().get("histogram"); + assertThat(histogram, notNullValue()); + assertThat(histogram.getName(), equalTo("histogram")); + List buckets = histogram.getBuckets(); + + for (int bucketCount = 0; bucketCount < buckets.size(); ++bucketCount) { + Histogram.Bucket bucket = buckets.get(bucketCount); + if (bucket.getDocCount() == 1) { + SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); + assertThat(seriesArithmetic, notNullValue()); + double seriesArithmeticValue = seriesArithmetic.value(); + assertEquals(9.0, seriesArithmeticValue, 0.001); + } else if (bucket.getDocCount() == 2) { + SimpleValue seriesArithmetic = bucket.getAggregations().get("totalSum"); + assertThat(seriesArithmetic, notNullValue()); + double seriesArithmeticValue = seriesArithmetic.value(); + assertEquals(18.0, seriesArithmeticValue, 0.001); + } else { + fail("Incorrect number of documents in a bucket in the histogram."); + } + } } - } + ); } public void testGeo() throws Exception { @@ -630,25 +654,25 @@ public void testGeo() throws Exception { .actionGet(); refresh(); // access .lat - SearchResponse rsp = buildRequest("doc['location'].lat").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(61.5240, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['location'].lat"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(61.5240, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + }); // access .lon - rsp = buildRequest("doc['location'].lon").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(105.3188, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['location'].lon"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(105.3188, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + }); // access .empty - rsp = buildRequest("doc['location'].empty ? 1 : 0").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(0, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['location'].empty ? 
1 : 0"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(0, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + }); // call haversin - rsp = buildRequest("haversin(38.9072, 77.0369, doc['location'].lat, doc['location'].lon)").get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(3170D, rsp.getHits().getAt(0).field("foo").getValue(), 50D); + assertNoFailuresAndResponse(buildRequest("haversin(38.9072, 77.0369, doc['location'].lat, doc['location'].lon)"), rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(3170D, rsp.getHits().getAt(0).field("foo").getValue(), 50D); + }); } public void testBoolean() throws Exception { @@ -668,27 +692,27 @@ public void testBoolean() throws Exception { client().prepareIndex("test").setId("3").setSource("id", 3, "price", 2.0, "vip", false) ); // access .value - SearchResponse rsp = buildRequest("doc['vip'].value").get(); - assertNoFailures(rsp); - assertEquals(3, rsp.getHits().getTotalHits().value); - assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); - assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); - assertEquals(0.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['vip'].value"), rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); + assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); + assertEquals(0.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + }); // access .empty - rsp = buildRequest("doc['vip'].empty ? 1 : 0").get(); - assertNoFailures(rsp); - assertEquals(3, rsp.getHits().getTotalHits().value); - assertEquals(0.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); - assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); - assertEquals(1.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['vip'].empty ? 1 : 0"), rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); + assertEquals(0.0D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertEquals(0.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); + assertEquals(1.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + }); // ternary operator // vip's have a 50% discount - rsp = buildRequest("doc['vip'] ? doc['price']/2 : doc['price']").get(); - assertNoFailures(rsp); - assertEquals(3, rsp.getHits().getTotalHits().value); - assertEquals(0.5D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); - assertEquals(2.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); - assertEquals(2.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + assertNoFailuresAndResponse(buildRequest("doc['vip'] ? 
doc['price']/2 : doc['price']"), rsp -> { + assertEquals(3, rsp.getHits().getTotalHits().value); + assertEquals(0.5D, rsp.getHits().getAt(0).field("foo").getValue(), 1.0D); + assertEquals(2.0D, rsp.getHits().getAt(1).field("foo").getValue(), 1.0D); + assertEquals(2.0D, rsp.getHits().getAt(2).field("foo").getValue(), 1.0D); + }); } public void testFilterScript() throws Exception { @@ -702,9 +726,9 @@ public void testFilterScript() throws Exception { SearchRequestBuilder builder = buildRequest("doc['foo'].value"); Script script = new Script(ScriptType.INLINE, "expression", "doc['foo'].value", Collections.emptyMap()); builder.setQuery(QueryBuilders.boolQuery().filter(QueryBuilders.scriptQuery(script))); - SearchResponse rsp = builder.get(); - assertNoFailures(rsp); - assertEquals(1, rsp.getHits().getTotalHits().value); - assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + assertNoFailuresAndResponse(builder, rsp -> { + assertEquals(1, rsp.getHits().getTotalHits().value); + assertEquals(1.0D, rsp.getHits().getAt(0).field("foo").getValue(), 0.0D); + }); } } diff --git a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java index 1bb2116cc680a..63860c6355630 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java +++ b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java @@ -175,8 +175,12 @@ void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int r ) ) { SearchResponse searchResponse = SearchResponse.fromXContent(parser); - ElasticsearchAssertions.assertNoFailures(searchResponse); - ElasticsearchAssertions.assertHitCount(searchResponse, expectedDocs); + try { + ElasticsearchAssertions.assertNoFailures(searchResponse); + ElasticsearchAssertions.assertHitCount(searchResponse, expectedDocs); + } finally { + searchResponse.decRef(); + } } } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index 9a76aaf6f4b79..b02dea53bc8b9 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -302,7 +302,15 @@ private void doFromXContentTestWithRandomFields(SearchResponse response, boolean } try (XContentParser parser = createParser(xcontentType.xContent(), mutated)) { SearchResponse parsed = SearchResponse.fromXContent(parser); - assertToXContentEquivalent(originalBytes, XContentHelper.toXContent(parsed, xcontentType, params, humanReadable), xcontentType); + try { + assertToXContentEquivalent( + originalBytes, + XContentHelper.toXContent(parsed, xcontentType, params, humanReadable), + xcontentType + ); + } finally { + parsed.decRef(); + } assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertNull(parser.nextToken()); } @@ -331,25 +339,29 @@ public void testFromXContentWithFailures() throws IOException { ); try (XContentParser parser = createParser(xcontentType.xContent(), originalBytes)) { SearchResponse parsed = SearchResponse.fromXContent(parser); - for (int i = 0; i < parsed.getShardFailures().length; i++) { - ShardSearchFailure parsedFailure = parsed.getShardFailures()[i]; - ShardSearchFailure 
originalFailure = failures[i]; - assertEquals(originalFailure.index(), parsedFailure.index()); - assertEquals(originalFailure.shard(), parsedFailure.shard()); - assertEquals(originalFailure.shardId(), parsedFailure.shardId()); - String originalMsg = originalFailure.getCause().getMessage(); - assertEquals( - parsedFailure.getCause().getMessage(), - "Elasticsearch exception [type=parsing_exception, reason=" + originalMsg + "]" - ); - String nestedMsg = originalFailure.getCause().getCause().getMessage(); - assertEquals( - parsedFailure.getCause().getCause().getMessage(), - "Elasticsearch exception [type=illegal_argument_exception, reason=" + nestedMsg + "]" - ); + try { + for (int i = 0; i < parsed.getShardFailures().length; i++) { + ShardSearchFailure parsedFailure = parsed.getShardFailures()[i]; + ShardSearchFailure originalFailure = failures[i]; + assertEquals(originalFailure.index(), parsedFailure.index()); + assertEquals(originalFailure.shard(), parsedFailure.shard()); + assertEquals(originalFailure.shardId(), parsedFailure.shardId()); + String originalMsg = originalFailure.getCause().getMessage(); + assertEquals( + parsedFailure.getCause().getMessage(), + "Elasticsearch exception [type=parsing_exception, reason=" + originalMsg + "]" + ); + String nestedMsg = originalFailure.getCause().getCause().getMessage(); + assertEquals( + parsedFailure.getCause().getCause().getMessage(), + "Elasticsearch exception [type=illegal_argument_exception, reason=" + nestedMsg + "]" + ); + } + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + assertNull(parser.nextToken()); + } finally { + parsed.decRef(); } - assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); - assertNull(parser.nextToken()); } } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java index 5b777a59d1069..bc7eaffff2e22 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityLicensingAndFeatureUsageRestIT.java @@ -176,7 +176,11 @@ public void testCrossClusterAccessFeatureTrackingAndLicensing() throws Exception final Response response = performRequestWithRemoteSearchUser(searchRequest); assertOK(response); final SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); - assertSearchResultContainsIndices(searchResponse, REMOTE_INDEX_NAME); + try { + assertSearchResultContainsIndices(searchResponse, REMOTE_INDEX_NAME); + } finally { + searchResponse.decRef(); + } // Check that the feature is tracked on both QC and FC. 
assertFeatureTracked(client()); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityMutualTlsIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityMutualTlsIT.java index ed4136799870e..518c88b5ecb24 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityMutualTlsIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityMutualTlsIT.java @@ -117,11 +117,15 @@ public void testCrossClusterSearch() throws Exception { final SearchResponse metricSearchResponse = SearchResponse.fromXContent( responseAsParser(performRequestWithRemoteMetricUser(metricSearchRequest)) ); - assertThat(metricSearchResponse.getHits().getTotalHits().value, equalTo(4L)); - assertThat( - Arrays.stream(metricSearchResponse.getHits().getHits()).map(SearchHit::getIndex).collect(Collectors.toSet()), - containsInAnyOrder("shared-metrics") - ); + try { + assertThat(metricSearchResponse.getHits().getTotalHits().value, equalTo(4L)); + assertThat( + Arrays.stream(metricSearchResponse.getHits().getHits()).map(SearchHit::getIndex).collect(Collectors.toSet()), + containsInAnyOrder("shared-metrics") + ); + } finally { + metricSearchResponse.decRef(); + } } } From 344335bafbe1e303a1de221b423e835c845498a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Fri, 10 Nov 2023 10:21:37 +0100 Subject: [PATCH 07/15] Converting a couple of version checks to modules detection in ESRestTestCase (#101901) --- .../test/rest/ESRestTestCase.java | 30 ++++++++++++++----- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 1e1e1f084016a..9566456a041bc 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -197,6 +197,7 @@ public enum ProductFeature { CCR, SHUTDOWN, LEGACY_TEMPLATES, + SEARCHABLE_SNAPSHOTS } private static EnumSet availableFeatures; @@ -241,6 +242,9 @@ public void initClient() throws IOException { if (moduleName.equals("x-pack-shutdown")) { availableFeatures.add(ProductFeature.SHUTDOWN); } + if (moduleName.equals("searchable-snapshots")) { + availableFeatures.add(ProductFeature.SEARCHABLE_SNAPSHOTS); + } if (moduleName.startsWith("serverless-")) { serverless = true; } @@ -718,10 +722,11 @@ private void wipeCluster() throws Exception { } // Clean up searchable snapshots indices before deleting snapshots and repositories - if (has(ProductFeature.XPACK) - && nodeVersions.first().onOrAfter(Version.V_7_8_0) - && preserveSearchableSnapshotsIndicesUponCompletion() == false) { - wipeSearchableSnapshotsIndices(); + if (has(ProductFeature.SEARCHABLE_SNAPSHOTS)) { + assert nodeVersions.first().onOrAfter(Version.V_7_8_0); + if (preserveSearchableSnapshotsIndicesUponCompletion() == false) { + wipeSearchableSnapshotsIndices(); + } } wipeSnapshots(); @@ -962,14 +967,23 @@ private Set getAllUnexpectedTemplates() throws IOException { */ @SuppressWarnings("unchecked") protected void deleteAllNodeShutdownMetadata() throws IOException { - if (has(ProductFeature.SHUTDOWN) == false || minimumNodeVersion().before(Version.V_7_15_0)) { - // Node 
shutdown APIs are only present in xpack + if (has(ProductFeature.SHUTDOWN) == false) { return; } + Request getShutdownStatus = new Request("GET", "_nodes/shutdown"); Map statusResponse = responseAsMap(adminClient().performRequest(getShutdownStatus)); - List> nodesArray = (List>) statusResponse.get("nodes"); - List nodeIds = nodesArray.stream().map(nodeShutdownMetadata -> (String) nodeShutdownMetadata.get("node_id")).toList(); + + Object nodesResponse = statusResponse.get("nodes"); + final List nodeIds; + if (nodesResponse instanceof List) { // `nodes` is parsed as a List<> only if it's populated (not empty) + assert minimumNodeVersion().onOrAfter(Version.V_7_15_0); + List> nodesArray = (List>) nodesResponse; + nodeIds = nodesArray.stream().map(nodeShutdownMetadata -> (String) nodeShutdownMetadata.get("node_id")).toList(); + } else { + nodeIds = List.of(); + } + for (String nodeId : nodeIds) { Request deleteRequest = new Request("DELETE", "_nodes/" + nodeId + "/shutdown"); assertOK(adminClient().performRequest(deleteRequest)); From c5560bcfdb9cbf7863910b753a7606dce3a5032a Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 10 Nov 2023 10:41:04 +0000 Subject: [PATCH 08/15] Avoid negative DesiredBalanceStats#lastConvergedIndex (#101998) The initial state of the desired-balance allocator has `lastConvergedIndex` set to `-1`. This is not important to represent in the stats, so with this commit we map it to zero. --- docs/changelog/101998.yaml | 5 +++++ .../allocator/DesiredBalanceShardsAllocator.java | 2 +- .../allocation/allocator/DesiredBalanceStats.java | 7 +++++++ .../DesiredBalanceShardsAllocatorTests.java | 12 ++++++++++++ 4 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/101998.yaml diff --git a/docs/changelog/101998.yaml b/docs/changelog/101998.yaml new file mode 100644 index 0000000000000..be0e2d8c61ba3 --- /dev/null +++ b/docs/changelog/101998.yaml @@ -0,0 +1,5 @@ +pr: 101998 +summary: Avoid negative `DesiredBalanceStats#lastConvergedIndex` +area: Allocation +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java index ee95074b8a730..11d2317f5bcea 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocator.java @@ -264,7 +264,7 @@ public void resetDesiredBalance() { public DesiredBalanceStats getStats() { return new DesiredBalanceStats( - currentDesiredBalance.lastConvergedIndex(), + Math.max(currentDesiredBalance.lastConvergedIndex(), 0L), desiredBalanceComputation.isActive(), computationsSubmitted.count(), computationsExecuted.count(), diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java index c017d77362427..b8a1d3e1b899d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java @@ -33,6 +33,13 @@ public record DesiredBalanceStats( private static final TransportVersion COMPUTED_SHARD_MOVEMENTS_VERSION = TransportVersions.V_8_8_0; + public 
DesiredBalanceStats { + if (lastConvergedIndex < 0) { + assert false : lastConvergedIndex; + throw new IllegalStateException("lastConvergedIndex must be nonnegative, but got [" + lastConvergedIndex + ']'); + } + } + public static DesiredBalanceStats readFrom(StreamInput in) throws IOException { return new DesiredBalanceStats( in.readVLong(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index 31cdb082feabc..add94e3b9344b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -68,6 +68,7 @@ import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.elasticsearch.common.settings.ClusterSettings.createBuiltInClusterSettings; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.not; @@ -158,6 +159,7 @@ public ClusterState apply(ClusterState clusterState, RerouteStrategy routingAllo clusterService, reconcileAction ); + assertValidStats(desiredBalanceShardsAllocator.getStats()); var allocationService = createAllocationService(desiredBalanceShardsAllocator, createGatewayAllocator(allocateUnassigned)); allocationServiceRef.set(allocationService); @@ -200,11 +202,21 @@ public void onFailure(Exception e) { } } } + assertValidStats(desiredBalanceShardsAllocator.getStats()); } finally { clusterService.close(); } } + private void assertValidStats(DesiredBalanceStats stats) { + assertThat(stats.lastConvergedIndex(), greaterThanOrEqualTo(0L)); + try { + assertEquals(stats, copyWriteable(stats, writableRegistry(), DesiredBalanceStats::readFrom)); + } catch (Exception e) { + fail(e); + } + } + public void testShouldNotRemoveAllocationDelayMarkersOnReconcile() { var localNode = newNode(LOCAL_NODE_ID); From ae2626f07aa72cf239abedd9556bfa34b0fa4696 Mon Sep 17 00:00:00 2001 From: Abdon Pijpelink Date: Fri, 10 Nov 2023 12:40:23 +0100 Subject: [PATCH 09/15] [DOCS] DISSECT does not support reference keys (#102002) --- docs/reference/esql/esql-limitations.asciidoc | 12 ++++++++++++ .../esql-process-data-with-dissect-grok.asciidoc | 13 +++++++++---- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index f2b5c13aaa6f5..3abe6a6df7e01 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -136,6 +136,18 @@ now() - 2023-10-26 include::esql-enrich-data.asciidoc[tag=limitations] +[discrete] +[[esql-limitations-dissect]] +=== Dissect limitations + +include::esql-process-data-with-dissect-grok.asciidoc[tag=dissect-limitations] + +[discrete] +[[esql-limitations-grok]] +=== Grok limitations + +include::esql-process-data-with-dissect-grok.asciidoc[tag=grok-limitations] + [discrete] [[esql-limitations-mv]] === Multivalue limitations diff --git a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc index a37989b2b2da8..294ce52e18856 100644 --- 
a/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc +++ b/docs/reference/esql/esql-process-data-with-dissect-grok.asciidoc @@ -120,7 +120,6 @@ include::../ingest/processors/dissect.asciidoc[tag=dissect-key-modifiers] | `+` | Append | left | `%{+keyname} %{+keyname}` | Appends two or more fields together | <> | `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <> | `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output. Same behavior as `%{}`| <> -| `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <> |====== [[esql-dissect-modifier-skip-right-padding]] @@ -139,9 +138,13 @@ include::../ingest/processors/dissect.asciidoc[tag=append-order-modifier] ====== Named skip key (`?`) include::../ingest/processors/dissect.asciidoc[tag=named-skip-key] -[[esql-reference-keys]] -====== Reference keys (`*` and `&`) -include::../ingest/processors/dissect.asciidoc[tag=reference-keys] +[[esql-dissect-limitations]] +===== Limitations + +// tag::dissect-limitations[] +The `DISSECT` command does not support +<>. +// end::dissect-limitations[] [[esql-process-data-with-grok]] ==== Process data with `GROK` @@ -253,6 +256,8 @@ as the `GROK` command. [[esql-grok-limitations]] ===== Limitations +// tag::grok-limitations[] The `GROK` command does not support configuring <>, or <>. The `GROK` command is not subject to <>. +// end::grok-limitations[] \ No newline at end of file From bb5a86112c5f23ce4d14e63c318bd2e7c2432afd Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 10 Nov 2023 12:24:51 +0000 Subject: [PATCH 10/15] Corrected the expansion of overlapping terms in the unified highlighter (#101912) This commit addresses an issue in the passage formatter of the unified highlighter, where overlapping terms were not correctly expanded to be highlighted as a single object. The fix in this commit involves adjusting the expansion logic to consider the maximum end offset during the process, as matches are initially sorted by ascending start offset and then by ascending end offset. 
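As an illustration (the concrete offsets here are hypothetical, but follow the sort order described above): for "new york city" with an overlapping "nyc" synonym, the formatter may see match offsets (0, 3), (0, 13), (4, 8) and (9, 13). The old look-ahead first grew the end offset to 13 and then shrank it back to 8 on the (4, 8) match, so only "new york" would be wrapped in a single highlight tag. The one-line fix below (quoted from the CustomPassageFormatter diff) keeps the running maximum instead, so the expansion stays at 13 and the whole "new york city" region is highlighted as one object:

    // before: 'end' could move backwards when a shorter overlapping match followed
    end = passage.getMatchEnds()[++i];
    // after: 'end' only ever grows while consuming overlapping matches
    end = Math.max(passage.getMatchEnds()[++i], end);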
--- .../uhighlight/CustomPassageFormatter.java | 2 +- .../CustomUnifiedHighlighterTests.java | 48 ++++++++++++++++++- 2 files changed, 47 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java index eb87dc982543f..6ae2f53a94ad8 100644 --- a/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java +++ b/server/src/main/java/org/elasticsearch/lucene/search/uhighlight/CustomPassageFormatter.java @@ -48,7 +48,7 @@ public Snippet[] format(Passage[] passages, String content) { assert end > start; // Look ahead to expand 'end' past all overlapping: while (i + 1 < passage.getNumMatches() && passage.getMatchStarts()[i + 1] < end) { - end = passage.getMatchEnds()[++i]; + end = Math.max(passage.getMatchEnds()[++i], end); } end = Math.min(end, passage.getEndOffset()); // in case match straddles past passage diff --git a/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java b/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java index 5df6840640264..bf249ba4409ab 100644 --- a/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/search/uhighlight/CustomUnifiedHighlighterTests.java @@ -13,6 +13,10 @@ import org.apache.lucene.analysis.custom.CustomAnalyzer; import org.apache.lucene.analysis.ngram.EdgeNGramTokenizerFactory; import org.apache.lucene.analysis.standard.StandardAnalyzer; +import org.apache.lucene.analysis.standard.StandardTokenizerFactory; +import org.apache.lucene.analysis.synonym.SolrSynonymParser; +import org.apache.lucene.analysis.synonym.SynonymFilterFactory; +import org.apache.lucene.analysis.synonym.SynonymMap; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -35,11 +39,15 @@ import org.apache.lucene.search.uhighlight.UnifiedHighlighter; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.util.ResourceLoader; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.test.ESTestCase; +import java.io.IOException; +import java.io.StringReader; import java.text.BreakIterator; +import java.text.ParseException; import java.util.Locale; import java.util.Map; import java.util.TreeMap; @@ -153,9 +161,9 @@ private void assertHighlightOneDoc( true ); final Snippet[] snippets = highlighter.highlightField(getOnlyLeafReader(reader), topDocs.scoreDocs[0].doc, () -> rawValue); - assertEquals(snippets.length, expectedPassages.length); + assertEquals(expectedPassages.length, snippets.length); for (int i = 0; i < snippets.length; i++) { - assertEquals(snippets[i].getText(), expectedPassages[i]); + assertEquals(expectedPassages[i], snippets[i].getText()); } } } @@ -356,6 +364,42 @@ public void testOverlappingTerms() throws Exception { assertHighlightOneDoc("text", inputs, analyzer, query, Locale.ROOT, BreakIterator.getSentenceInstance(Locale.ROOT), 0, outputs); } + public static class NYCFilterFactory extends SynonymFilterFactory { + public NYCFilterFactory(Map args) { + super(args); + } + + @Override + protected SynonymMap loadSynonyms(ResourceLoader loader, String cname, boolean dedup, 
Analyzer analyzer) throws IOException, + ParseException { + SynonymMap.Parser parser = new SolrSynonymParser(false, false, analyzer); + parser.parse(new StringReader("new york city => nyc, new york city")); + return parser.build(); + } + } + + public void testOverlappingPositions() throws Exception { + final String[] inputs = { "new york city" }; + final String[] outputs = { "new york city" }; + BooleanQuery query = new BooleanQuery.Builder().add( + new BooleanQuery.Builder().add(new TermQuery(new Term("text", "nyc")), BooleanClause.Occur.SHOULD) + .add( + new BooleanQuery.Builder().add(new TermQuery(new Term("text", "new")), BooleanClause.Occur.MUST) + .add(new TermQuery(new Term("text", "york")), BooleanClause.Occur.MUST) + .add(new TermQuery(new Term("text", "city")), BooleanClause.Occur.MUST) + .build(), + BooleanClause.Occur.SHOULD + ) + .build(), + BooleanClause.Occur.MUST + ).build(); + Analyzer analyzer = CustomAnalyzer.builder() + .withTokenizer(StandardTokenizerFactory.class) + .addTokenFilter(NYCFilterFactory.class, "synonyms", "N/A") + .build(); + assertHighlightOneDoc("text", inputs, analyzer, query, Locale.ROOT, BreakIterator.getSentenceInstance(Locale.ROOT), 0, outputs); + } + public void testExceedMaxAnalyzedOffset() throws Exception { TermQuery query = new TermQuery(new Term("text", "max")); Analyzer analyzer = CustomAnalyzer.builder() From c2146e003c5c4b69dfa7597d130b093057dad3ca Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 10 Nov 2023 13:29:40 +0100 Subject: [PATCH 11/15] Mute tests for #102000 (#102006) It's in the title: these two tests are failing nearly 100% of the time. For #102000. --- .../java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java index 5f15590c1a1d4..3b48f01d78dea 100644 --- a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java @@ -26,6 +26,7 @@ public class CcrRollingUpgradeIT extends AbstractMultiClusterUpgradeTestCase { + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102000") public void testUniDirectionalIndexFollowing() throws Exception { logger.info("clusterName={}, upgradeState={}", clusterName, upgradeState); @@ -87,6 +88,7 @@ public void testUniDirectionalIndexFollowing() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102000") public void testAutoFollowing() throws Exception { String leaderIndex1 = "logs-20200101"; String leaderIndex2 = "logs-20200102"; From f0d78886b4ccff8550e2b834ec34814f2cf132cb Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 10 Nov 2023 14:39:01 +0100 Subject: [PATCH 12/15] Mute test for #102010 (#102011) Muting this test, which keeps failing, for #102010 --- .../java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java index 3b48f01d78dea..b1e1888aba75d 100644 ---
a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java @@ -240,6 +240,7 @@ public void testCannotFollowLeaderInUpgradedCluster() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102010") public void testBiDirectionalIndexFollowing() throws Exception { logger.info("clusterName={}, upgradeState={}", clusterName, upgradeState); From 9f27d3c935c8ca990da7fb7e40aedebe4bb1f075 Mon Sep 17 00:00:00 2001 From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com> Date: Fri, 10 Nov 2023 14:49:19 +0100 Subject: [PATCH 13/15] Remove some more explicit SearchResponse use in tests (#102003) The title says the whole story --- .../history/HistoryActionConditionTests.java | 141 ++++++++++-------- .../HistoryTemplateEmailMappingsTests.java | 105 ++++++------- .../HistoryTemplateHttpMappingsTests.java | 49 +++--- ...storyTemplateIndexActionMappingsTests.java | 31 ++-- ...storyTemplateSearchInputMappingsTests.java | 51 ++++--- .../input/chain/ChainIntegrationTests.java | 9 +- .../AbstractWatcherIntegrationTestCase.java | 116 ++++++++------ .../test/integration/BasicWatcherTests.java | 10 +- .../test/integration/BootStrapTests.java | 48 +++--- .../ExecutionVarsIntegrationTests.java | 59 ++++---- .../integration/HistoryIntegrationTests.java | 43 +++--- .../integration/RejectedExecutionTests.java | 8 +- .../test/integration/SingleNodeTests.java | 8 +- .../test/integration/WatchMetadataTests.java | 8 +- .../transform/TransformIntegrationTests.java | 63 ++++---- .../action/delete/DeleteWatchTests.java | 19 +-- .../condition/ScriptConditionTests.java | 64 +++++--- .../bench/WatcherScheduleEngineBenchmark.java | 49 +++--- .../oldrepos/OldRepositoryAccessIT.java | 102 ++++++++----- 19 files changed, 555 insertions(+), 428 deletions(-) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java index 815f07bf64751..0d914def4831e 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java @@ -119,33 +119,41 @@ public void testActionConditionWithHardFailures() throws Exception { // Watcher history is now written asynchronously, so we check this in an assertBusy ensureGreen(HistoryStoreField.DATA_STREAM); final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); - assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + try { + assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + } finally { + response.decRef(); + } }); final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); - final SearchHit hit = response.getHits().getAt(0); - final List actions = getActionsFromHit(hit.getSourceAsMap()); - - for (int i = 0; i < actionConditionsWithFailure.size(); ++i) { - final Map action = (Map) actions.get(i); - final Map condition = (Map) action.get("condition"); - final Map logging = (Map) action.get("logging"); - - assertThat(action.get("id"), 
is("action" + i)); - - if (i == failedIndex) { - assertThat(action.get("status"), is("condition_failed")); - assertThat(action.get("reason"), is("condition failed. skipping: [expected] failed hard")); - assertThat(condition, nullValue()); - assertThat(logging, nullValue()); - } else { - assertThat(condition.get("type"), is(actionConditionsWithFailure.get(i).type())); - - assertThat(action.get("status"), is("success")); - assertThat(condition.get("met"), is(true)); - assertThat(action.get("reason"), nullValue()); - assertThat(logging.get("logged_text"), is(Integer.toString(i))); + try { + final SearchHit hit = response.getHits().getAt(0); + final List actions = getActionsFromHit(hit.getSourceAsMap()); + + for (int i = 0; i < actionConditionsWithFailure.size(); ++i) { + final Map action = (Map) actions.get(i); + final Map condition = (Map) action.get("condition"); + final Map logging = (Map) action.get("logging"); + + assertThat(action.get("id"), is("action" + i)); + + if (i == failedIndex) { + assertThat(action.get("status"), is("condition_failed")); + assertThat(action.get("reason"), is("condition failed. skipping: [expected] failed hard")); + assertThat(condition, nullValue()); + assertThat(logging, nullValue()); + } else { + assertThat(condition.get("type"), is(actionConditionsWithFailure.get(i).type())); + + assertThat(action.get("status"), is("success")); + assertThat(condition.get("met"), is(true)); + assertThat(action.get("reason"), nullValue()); + assertThat(logging.get("logged_text"), is(Integer.toString(i))); + } } + } finally { + response.decRef(); } } @@ -168,32 +176,40 @@ public void testActionConditionWithFailures() throws Exception { // Watcher history is now written asynchronously, so we check this in an assertBusy ensureGreen(HistoryStoreField.DATA_STREAM); final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); - assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + try { + assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + } finally { + response.decRef(); + } }); final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); - final SearchHit hit = response.getHits().getAt(0); - final List actions = getActionsFromHit(hit.getSourceAsMap()); - - for (int i = 0; i < actionConditionsWithFailure.length; ++i) { - final Map action = (Map) actions.get(i); - final Map condition = (Map) action.get("condition"); - final Map logging = (Map) action.get("logging"); - - assertThat(action.get("id"), is("action" + i)); - assertThat(condition.get("type"), is(actionConditionsWithFailure[i].type())); - - if (i == failedIndex) { - assertThat(action.get("status"), is("condition_failed")); - assertThat(condition.get("met"), is(false)); - assertThat(action.get("reason"), is("condition not met. 
skipping")); - assertThat(logging, nullValue()); - } else { - assertThat(action.get("status"), is("success")); - assertThat(condition.get("met"), is(true)); - assertThat(action.get("reason"), nullValue()); - assertThat(logging.get("logged_text"), is(Integer.toString(i))); + try { + final SearchHit hit = response.getHits().getAt(0); + final List actions = getActionsFromHit(hit.getSourceAsMap()); + + for (int i = 0; i < actionConditionsWithFailure.length; ++i) { + final Map action = (Map) actions.get(i); + final Map condition = (Map) action.get("condition"); + final Map logging = (Map) action.get("logging"); + + assertThat(action.get("id"), is("action" + i)); + assertThat(condition.get("type"), is(actionConditionsWithFailure[i].type())); + + if (i == failedIndex) { + assertThat(action.get("status"), is("condition_failed")); + assertThat(condition.get("met"), is(false)); + assertThat(action.get("reason"), is("condition not met. skipping")); + assertThat(logging, nullValue()); + } else { + assertThat(action.get("status"), is("success")); + assertThat(condition.get("met"), is(true)); + assertThat(action.get("reason"), nullValue()); + assertThat(logging.get("logged_text"), is(Integer.toString(i))); + } } + } finally { + response.decRef(); } } @@ -223,25 +239,32 @@ public void testActionCondition() throws Exception { // Watcher history is now written asynchronously, so we check this in an assertBusy ensureGreen(HistoryStoreField.DATA_STREAM); final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); - assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + try { + assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + } finally { + response.decRef(); + } }); final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); + try { + final SearchHit hit = response.getHits().getAt(0); + final List actions = getActionsFromHit(hit.getSourceAsMap()); - final SearchHit hit = response.getHits().getAt(0); - final List actions = getActionsFromHit(hit.getSourceAsMap()); - - for (int i = 0; i < actionConditions.size(); ++i) { - final Map action = (Map) actions.get(i); - final Map condition = (Map) action.get("condition"); - final Map logging = (Map) action.get("logging"); + for (int i = 0; i < actionConditions.size(); ++i) { + final Map action = (Map) actions.get(i); + final Map condition = (Map) action.get("condition"); + final Map logging = (Map) action.get("logging"); - assertThat(action.get("id"), is("action" + i)); - assertThat(action.get("status"), is("success")); - assertThat(condition.get("type"), is(actionConditions.get(i).type())); - assertThat(condition.get("met"), is(true)); - assertThat(action.get("reason"), nullValue()); - assertThat(logging.get("logged_text"), is(Integer.toString(i))); + assertThat(action.get("id"), is("action" + i)); + assertThat(action.get("status"), is("success")); + assertThat(condition.get("type"), is(actionConditions.get(i).type())); + assertThat(condition.get("met"), is(true)); + assertThat(action.get("reason"), nullValue()); + assertThat(logging.get("logged_text"), is(Integer.toString(i))); + } + } finally { + response.decRef(); } } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java index 
b0e71ecfa3189..edee4fb515a81 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateEmailMappingsTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.watcher.history; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; @@ -23,6 +22,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.emailAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; @@ -95,55 +95,58 @@ public void testEmailFields() throws Exception { // the action should fail as no email server is available assertWatchWithMinimumActionsCount("_id", ExecutionState.EXECUTED, 1); - SearchResponse response = prepareSearch(HistoryStoreField.DATA_STREAM + "*").setSource( - searchSource().aggregation(terms("from").field("result.actions.email.message.from")) - .aggregation(terms("to").field("result.actions.email.message.to")) - .aggregation(terms("cc").field("result.actions.email.message.cc")) - .aggregation(terms("bcc").field("result.actions.email.message.bcc")) - .aggregation(terms("reply_to").field("result.actions.email.message.reply_to")) - ).get(); - - assertThat(response, notNullValue()); - assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); - Aggregations aggs = response.getAggregations(); - assertThat(aggs, notNullValue()); - - Terms terms = aggs.get("from"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(1)); - assertThat(terms.getBucketByKey("from@example.com"), notNullValue()); - assertThat(terms.getBucketByKey("from@example.com").getDocCount(), greaterThanOrEqualTo(1L)); - - terms = aggs.get("to"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(2)); - assertThat(terms.getBucketByKey("to1@example.com"), notNullValue()); - assertThat(terms.getBucketByKey("to1@example.com").getDocCount(), greaterThanOrEqualTo(1L)); - assertThat(terms.getBucketByKey("to2@example.com"), notNullValue()); - assertThat(terms.getBucketByKey("to2@example.com").getDocCount(), greaterThanOrEqualTo(1L)); - - terms = aggs.get("cc"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(2)); - assertThat(terms.getBucketByKey("cc1@example.com"), notNullValue()); - assertThat(terms.getBucketByKey("cc1@example.com").getDocCount(), greaterThanOrEqualTo(1L)); - assertThat(terms.getBucketByKey("cc2@example.com"), notNullValue()); - assertThat(terms.getBucketByKey("cc2@example.com").getDocCount(), greaterThanOrEqualTo(1L)); - - terms = aggs.get("bcc"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(2)); - assertThat(terms.getBucketByKey("bcc1@example.com"), notNullValue()); - assertThat(terms.getBucketByKey("bcc1@example.com").getDocCount(), greaterThanOrEqualTo(1L)); - assertThat(terms.getBucketByKey("bcc2@example.com"), notNullValue()); - 
assertThat(terms.getBucketByKey("bcc2@example.com").getDocCount(), greaterThanOrEqualTo(1L)); - - terms = aggs.get("reply_to"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(2)); - assertThat(terms.getBucketByKey("rt1@example.com"), notNullValue()); - assertThat(terms.getBucketByKey("rt1@example.com").getDocCount(), greaterThanOrEqualTo(1L)); - assertThat(terms.getBucketByKey("rt2@example.com"), notNullValue()); - assertThat(terms.getBucketByKey("rt2@example.com").getDocCount(), greaterThanOrEqualTo(1L)); + assertResponse( + prepareSearch(HistoryStoreField.DATA_STREAM + "*").setSource( + searchSource().aggregation(terms("from").field("result.actions.email.message.from")) + .aggregation(terms("to").field("result.actions.email.message.to")) + .aggregation(terms("cc").field("result.actions.email.message.cc")) + .aggregation(terms("bcc").field("result.actions.email.message.bcc")) + .aggregation(terms("reply_to").field("result.actions.email.message.reply_to")) + ), + response -> { + assertThat(response, notNullValue()); + assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); + Aggregations aggs = response.getAggregations(); + assertThat(aggs, notNullValue()); + + Terms terms = aggs.get("from"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey("from@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("from@example.com").getDocCount(), greaterThanOrEqualTo(1L)); + + terms = aggs.get("to"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(2)); + assertThat(terms.getBucketByKey("to1@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("to1@example.com").getDocCount(), greaterThanOrEqualTo(1L)); + assertThat(terms.getBucketByKey("to2@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("to2@example.com").getDocCount(), greaterThanOrEqualTo(1L)); + + terms = aggs.get("cc"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(2)); + assertThat(terms.getBucketByKey("cc1@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("cc1@example.com").getDocCount(), greaterThanOrEqualTo(1L)); + assertThat(terms.getBucketByKey("cc2@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("cc2@example.com").getDocCount(), greaterThanOrEqualTo(1L)); + + terms = aggs.get("bcc"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(2)); + assertThat(terms.getBucketByKey("bcc1@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("bcc1@example.com").getDocCount(), greaterThanOrEqualTo(1L)); + assertThat(terms.getBucketByKey("bcc2@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("bcc2@example.com").getDocCount(), greaterThanOrEqualTo(1L)); + + terms = aggs.get("reply_to"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(2)); + assertThat(terms.getBucketByKey("rt1@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("rt1@example.com").getDocCount(), greaterThanOrEqualTo(1L)); + assertThat(terms.getBucketByKey("rt2@example.com"), notNullValue()); + assertThat(terms.getBucketByKey("rt2@example.com").getDocCount(), greaterThanOrEqualTo(1L)); + } + ); } } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java 
b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java index 36d2cf0239bdc..01400c3192289 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateHttpMappingsTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.watcher.history; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; @@ -36,6 +35,7 @@ import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.webhookAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; import static org.elasticsearch.xpack.watcher.input.InputBuilders.httpInput; @@ -94,28 +94,31 @@ public void testHttpFields() throws Exception { // the action should fail as no email server is available assertWatchWithMinimumActionsCount("_id", ExecutionState.EXECUTED, 1); - SearchResponse response = prepareSearch(HistoryStoreField.DATA_STREAM + "*").setSource( - searchSource().aggregation(terms("input_result_path").field("result.input.http.request.path")) - .aggregation(terms("input_result_host").field("result.input.http.request.host")) - .aggregation(terms("webhook_path").field("result.actions.webhook.request.path")) - ).get(); - - assertThat(response, notNullValue()); - assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); - Aggregations aggs = response.getAggregations(); - assertThat(aggs, notNullValue()); - - Terms terms = aggs.get("input_result_path"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(1)); - assertThat(terms.getBucketByKey("/input/path"), notNullValue()); - assertThat(terms.getBucketByKey("/input/path").getDocCount(), is(1L)); - - terms = aggs.get("webhook_path"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(1)); - assertThat(terms.getBucketByKey("/webhook/path"), notNullValue()); - assertThat(terms.getBucketByKey("/webhook/path").getDocCount(), is(1L)); + assertResponse( + prepareSearch(HistoryStoreField.DATA_STREAM + "*").setSource( + searchSource().aggregation(terms("input_result_path").field("result.input.http.request.path")) + .aggregation(terms("input_result_host").field("result.input.http.request.host")) + .aggregation(terms("webhook_path").field("result.actions.webhook.request.path")) + ), + response -> { + assertThat(response, notNullValue()); + assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + Aggregations aggs = response.getAggregations(); + assertThat(aggs, notNullValue()); + + Terms terms = aggs.get("input_result_path"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey("/input/path"), notNullValue()); + assertThat(terms.getBucketByKey("/input/path").getDocCount(), is(1L)); + + terms = 
aggs.get("webhook_path"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey("/webhook/path"), notNullValue()); + assertThat(terms.getBucketByKey("/webhook/path").getDocCount(), is(1L)); + } + ); assertThat(webServer.requests(), hasSize(2)); assertThat(webServer.requests().get(0).getUri().getPath(), is("/input/path")); diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java index ecd3424f88139..1f2810c4d82f3 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateIndexActionMappingsTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.watcher.history; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.terms.Terms; @@ -17,6 +16,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; @@ -48,19 +48,22 @@ public void testIndexActionFields() throws Exception { flush(); refresh(); - SearchResponse response = prepareSearch(HistoryStoreField.DATA_STREAM + "*").setSource( - searchSource().aggregation(terms("index_action_indices").field("result.actions.index.response.index")) - ).get(); - - assertThat(response, notNullValue()); - assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); - Aggregations aggs = response.getAggregations(); - assertThat(aggs, notNullValue()); + assertResponse( + prepareSearch(HistoryStoreField.DATA_STREAM + "*").setSource( + searchSource().aggregation(terms("index_action_indices").field("result.actions.index.response.index")) + ), + response -> { + assertThat(response, notNullValue()); + assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + Aggregations aggs = response.getAggregations(); + assertThat(aggs, notNullValue()); - Terms terms = aggs.get("index_action_indices"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(1)); - assertThat(terms.getBucketByKey(index), notNullValue()); - assertThat(terms.getBucketByKey(index).getDocCount(), is(1L)); + Terms terms = aggs.get("index_action_indices"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey(index), notNullValue()); + assertThat(terms.getBucketByKey(index).getDocCount(), is(1L)); + } + ); } } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateSearchInputMappingsTests.java 
b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateSearchInputMappingsTests.java index 4fba54f7e0208..2c86df184dc22 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateSearchInputMappingsTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryTemplateSearchInputMappingsTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.watcher.history; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; @@ -21,6 +20,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; @@ -64,31 +64,34 @@ public void testHttpFields() throws Exception { // the action should fail as no email server is available assertWatchWithMinimumActionsCount("_id", ExecutionState.EXECUTED, 1); - SearchResponse response = prepareSearch(HistoryStoreField.DATA_STREAM + "*").setSource( - searchSource().aggregation(terms("input_search_type").field("result.input.search.request.search_type")) - .aggregation(terms("input_indices").field("result.input.search.request.indices")) - .aggregation(terms("input_body").field("result.input.search.request.body")) - ).get(); - - assertThat(response, notNullValue()); - assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); - Aggregations aggs = response.getAggregations(); - assertThat(aggs, notNullValue()); + assertResponse( + prepareSearch(HistoryStoreField.DATA_STREAM + "*").setSource( + searchSource().aggregation(terms("input_search_type").field("result.input.search.request.search_type")) + .aggregation(terms("input_indices").field("result.input.search.request.indices")) + .aggregation(terms("input_body").field("result.input.search.request.body")) + ), + response -> { + assertThat(response, notNullValue()); + assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); + Aggregations aggs = response.getAggregations(); + assertThat(aggs, notNullValue()); - Terms terms = aggs.get("input_search_type"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(1)); - assertThat(terms.getBucketByKey("query_then_fetch"), notNullValue()); - assertThat(terms.getBucketByKey("query_then_fetch").getDocCount(), is(oneOf(1L, 2L))); + Terms terms = aggs.get("input_search_type"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey("query_then_fetch"), notNullValue()); + assertThat(terms.getBucketByKey("query_then_fetch").getDocCount(), is(oneOf(1L, 2L))); - terms = aggs.get("input_indices"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(1)); - assertThat(terms.getBucketByKey(index), notNullValue()); - assertThat(terms.getBucketByKey(index).getDocCount(), is(oneOf(1L, 2L))); + terms = aggs.get("input_indices"); + assertThat(terms, notNullValue()); + 
assertThat(terms.getBuckets().size(), is(1)); + assertThat(terms.getBucketByKey(index), notNullValue()); + assertThat(terms.getBucketByKey(index).getDocCount(), is(oneOf(1L, 2L))); - terms = aggs.get("input_body"); - assertThat(terms, notNullValue()); - assertThat(terms.getBuckets().size(), is(0)); + terms = aggs.get("input_body"); + assertThat(terms, notNullValue()); + assertThat(terms.getBuckets().size(), is(0)); + } + ); } } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java index cec68468acf0d..041c03af0af3c 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/input/chain/ChainIntegrationTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.watcher.input.chain; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -24,6 +23,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; @@ -82,9 +82,10 @@ public void testChainedInputsAreWorking() throws Exception { public void assertWatchExecuted() { try { refresh(); - SearchResponse searchResponse = prepareSearch("my-index").get(); - assertHitCount(searchResponse, 1); - assertThat(searchResponse.getHits().getAt(0).getSourceAsString(), containsString("the-most-awesome-index-ever")); + assertResponse(prepareSearch("my-index"), searchResponse -> { + assertHitCount(searchResponse, 1); + assertThat(searchResponse.getHits().getAt(0).getSourceAsString(), containsString("the-most-awesome-index-ever")); + }); } catch (IndexNotFoundException e) { fail("Index not found: [" + e.getIndex() + "]"); } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 93741f8e48ea5..84f608b91dc95 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -85,6 +85,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.core.watcher.support.WatcherIndexTemplateRegistryField.HISTORY_TEMPLATE_NAME; import static org.hamcrest.Matchers.emptyArray; import static 
org.hamcrest.Matchers.equalTo; @@ -367,33 +368,41 @@ protected void assertWatchWithMinimumPerformedActionsCount( } refresh(); - SearchResponse searchResponse = prepareSearch(HistoryStoreField.DATA_STREAM + "*").setIndicesOptions( - IndicesOptions.lenientExpandOpen() - ) - .setQuery(boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", ExecutionState.EXECUTED.id()))) - .get(); - lastResponse.set(searchResponse); - assertThat( - "could not find executed watch record for watch " + watchName, - searchResponse.getHits().getTotalHits().value, - greaterThanOrEqualTo(minimumExpectedWatchActionsWithActionPerformed) - ); - if (assertConditionMet) { - assertThat( - (Integer) XContentMapValues.extractValue( - "result.input.payload.hits.total", - searchResponse.getHits().getAt(0).getSourceAsMap() + assertResponse( + prepareSearch(HistoryStoreField.DATA_STREAM + "*").setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setQuery( + boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", ExecutionState.EXECUTED.id())) ), - greaterThanOrEqualTo(1) - ); - } + searchResponse -> { + lastResponse.set(searchResponse); + assertThat( + "could not find executed watch record for watch " + watchName, + searchResponse.getHits().getTotalHits().value, + greaterThanOrEqualTo(minimumExpectedWatchActionsWithActionPerformed) + ); + if (assertConditionMet) { + assertThat( + (Integer) XContentMapValues.extractValue( + "result.input.payload.hits.total", + searchResponse.getHits().getAt(0).getSourceAsMap() + ), + greaterThanOrEqualTo(1) + ); + } + } + ); + }); } catch (AssertionError error) { SearchResponse searchResponse = lastResponse.get(); - logger.info("Found [{}] records for watch [{}]", searchResponse.getHits().getTotalHits().value, watchName); - int counter = 1; - for (SearchHit hit : searchResponse.getHits().getHits()) { - logger.info("hit [{}]=\n {}", counter++, XContentHelper.convertToJson(hit.getSourceRef(), true, true)); + try { + logger.info("Found [{}] records for watch [{}]", searchResponse.getHits().getTotalHits().value, watchName); + int counter = 1; + for (SearchHit hit : searchResponse.getHits().getHits()) { + logger.info("hit [{}]=\n {}", counter++, XContentHelper.convertToJson(hit.getSourceRef(), true, true)); + } + } finally { + searchResponse.decRef(); } throw error; } @@ -410,7 +419,13 @@ protected long findNumberOfPerformedActions(String watchName) { SearchResponse searchResponse = prepareSearch(HistoryStoreField.DATA_STREAM + "*").setIndicesOptions( IndicesOptions.lenientExpandOpen() ).setQuery(boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", ExecutionState.EXECUTED.id()))).get(); - return searchResponse.getHits().getTotalHits().value; + long totalHitsValue; + try { + totalHitsValue = searchResponse.getHits().getTotalHits().value; + } finally { + searchResponse.decRef(); + } + return totalHitsValue; } protected void assertWatchWithNoActionNeeded(final String watchName, final long expectedWatchActionsWithNoActionNeeded) @@ -434,23 +449,31 @@ protected void assertWatchWithNoActionNeeded(final String watchName, final long assertThat(routingTable.allPrimaryShardsActive(), is(true)); } refresh(); - SearchResponse searchResponse = prepareSearch(HistoryStoreField.DATA_STREAM + "*").setIndicesOptions( - IndicesOptions.lenientExpandOpen() - ) - .setQuery( - boolQuery().must(matchQuery("watch_id", watchName)) - .must(matchQuery("state", ExecutionState.EXECUTION_NOT_NEEDED.id())) - ) - .get(); - lastResponse.set(searchResponse); -
assertThat(searchResponse.getHits().getTotalHits().value, greaterThanOrEqualTo(expectedWatchActionsWithNoActionNeeded)); + assertResponse( + prepareSearch(HistoryStoreField.DATA_STREAM + "*").setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setQuery( + boolQuery().must(matchQuery("watch_id", watchName)) + .must(matchQuery("state", ExecutionState.EXECUTION_NOT_NEEDED.id())) + ), + searchResponse -> { + lastResponse.set(searchResponse); + assertThat( + searchResponse.getHits().getTotalHits().value, + greaterThanOrEqualTo(expectedWatchActionsWithNoActionNeeded) + ); + } + ); }); } catch (AssertionError error) { SearchResponse searchResponse = lastResponse.get(); - logger.info("Found [{}] records for watch [{}]", searchResponse.getHits().getTotalHits().value, watchName); - int counter = 1; - for (SearchHit hit : searchResponse.getHits().getHits()) { - logger.info("hit [{}]=\n {}", counter++, XContentHelper.convertToJson(hit.getSourceRef(), true, true)); + try { + logger.info("Found [{}] records for watch [{}]", searchResponse.getHits().getTotalHits().value, watchName); + int counter = 1; + for (SearchHit hit : searchResponse.getHits().getHits()) { + logger.info("hit [{}]=\n {}", counter++, XContentHelper.convertToJson(hit.getSourceRef(), true, true)); + } + } finally { + searchResponse.decRef(); } throw error; } @@ -474,13 +497,16 @@ protected void assertWatchWithMinimumActionsCount(final String watchName, final } refresh(); - SearchResponse searchResponse = prepareSearch(HistoryStoreField.DATA_STREAM + "*").setIndicesOptions( - IndicesOptions.lenientExpandOpen() - ).setQuery(boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", recordState.id()))).get(); - assertThat( - "could not find executed watch record", - searchResponse.getHits().getTotalHits().value, - greaterThanOrEqualTo(recordCount) + assertResponse( + prepareSearch(HistoryStoreField.DATA_STREAM + "*").setIndicesOptions(IndicesOptions.lenientExpandOpen()) + .setQuery(boolQuery().must(matchQuery("watch_id", watchName)).must(matchQuery("state", recordState.id()))), + searchResponse -> { + assertThat( + "could not find executed watch record", + searchResponse.getHits().getTotalHits().value, + greaterThanOrEqualTo(recordCount) + ); + } ); }); } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java index 5f572b3646365..c6d9d9eab4525 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java @@ -278,9 +278,13 @@ public void testInputFiltering() throws Exception { // Check that the input result payload has been filtered refresh(); SearchResponse searchResponse = searchWatchRecords(builder -> builder.setQuery(matchQuery("watch_id", "_name1"))); - assertHitCount(searchResponse, 1); - XContentSource source = xContentSource(searchResponse.getHits().getAt(0).getSourceRef()); - assertThat(source.getValue("result.input.payload.hits.total"), equalTo((Object) 1)); + try { + assertHitCount(searchResponse, 1); + XContentSource source = xContentSource(searchResponse.getHits().getAt(0).getSourceRef()); + assertThat(source.getValue("result.input.payload.hits.total"), equalTo((Object) 1)); + } finally { + 
searchResponse.decRef(); + } } public void testPutWatchWithNegativeSchedule() throws Exception { diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java index c32246e33c571..a200fccfa928d 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/BootStrapTests.java @@ -8,7 +8,6 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.cluster.service.ClusterService; @@ -39,6 +38,7 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.index.query.QueryBuilders.termQuery; @@ -46,6 +46,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; @@ -316,25 +317,27 @@ private void assertSingleExecutionAndCompleteWatchHistory(final long numberOfWat WatcherStatsResponse response = new WatcherStatsRequestBuilder(client()).setIncludeCurrentWatches(true).get(); long maxSize = response.getNodes().stream().map(WatcherStatsResponse.Node::getSnapshots).mapToLong(List::size).sum(); assertThat(maxSize, equalTo(0L)); - + AtomicLong successfulWatchExecutions = new AtomicLong(); refresh(); - SearchResponse searchResponse = prepareSearch("output").get(); - assertThat(searchResponse.getHits().getTotalHits().value, is(greaterThanOrEqualTo(numberOfWatches))); - long successfulWatchExecutions = searchResponse.getHits().getTotalHits().value; + assertResponse(prepareSearch("output"), searchResponse -> { + assertThat(searchResponse.getHits().getTotalHits().value, is(greaterThanOrEqualTo(numberOfWatches))); + successfulWatchExecutions.set(searchResponse.getHits().getTotalHits().value); + }); // the watch history should contain entries for each triggered watch, which a few have been marked as not executed - SearchResponse historySearchResponse = prepareSearch(HistoryStoreField.INDEX_PREFIX + "*").setSize(10000).get(); - assertHitCount(historySearchResponse, expectedWatchHistoryCount); - long notExecutedCount = Arrays.stream(historySearchResponse.getHits().getHits()) - .filter(hit -> hit.getSourceAsMap().get("state").equals(ExecutionState.NOT_EXECUTED_ALREADY_QUEUED.id())) - .count(); - logger.info( - "Watches not executed: [{}]: expected watch history count [{}] - [{}] successful watch exections", - notExecutedCount, - expectedWatchHistoryCount, - successfulWatchExecutions - ); - 
assertThat(notExecutedCount, is(expectedWatchHistoryCount - successfulWatchExecutions)); + assertResponse(prepareSearch(HistoryStoreField.INDEX_PREFIX + "*").setSize(10000), historySearchResponse -> { + assertHitCount(historySearchResponse, expectedWatchHistoryCount); + long notExecutedCount = Arrays.stream(historySearchResponse.getHits().getHits()) + .filter(hit -> hit.getSourceAsMap().get("state").equals(ExecutionState.NOT_EXECUTED_ALREADY_QUEUED.id())) + .count(); + logger.info( + "Watches not executed: [{}]: expected watch history count [{}] - [{}] successful watch executions", + notExecutedCount, + expectedWatchHistoryCount, + successfulWatchExecutions + ); + assertThat(notExecutedCount, is(expectedWatchHistoryCount - successfulWatchExecutions.get())); + }); }, 20, TimeUnit.SECONDS); } @@ -402,11 +405,12 @@ public void testWatchRecordSavedTwice() throws Exception { // but even then since the execution of the watch record is async it may take a little bit before // the actual documents are in the output index refresh(); - SearchResponse searchResponse = prepareSearch(HistoryStoreField.DATA_STREAM).setSize(numRecords).get(); - assertThat(searchResponse.getHits().getTotalHits().value, Matchers.equalTo((long) numRecords)); - for (int i = 0; i < numRecords; i++) { - assertThat(searchResponse.getHits().getAt(i).getSourceAsMap().get("state"), is(ExecutionState.EXECUTED.id())); - } + assertResponse(prepareSearch(HistoryStoreField.DATA_STREAM).setSize(numRecords), searchResponse -> { + assertThat(searchResponse.getHits().getTotalHits().value, Matchers.equalTo((long) numRecords)); + for (int i = 0; i < numRecords; i++) { + assertThat(searchResponse.getHits().getAt(i).getSourceAsMap().get("state"), is(ExecutionState.EXECUTED.id())); + } + }); }); } } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java index 67fa185da8f23..14de0f115f14a 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/ExecutionVarsIntegrationTests.java @@ -140,36 +140,39 @@ public void testVars() throws Exception { SearchResponse searchResponse = searchWatchRecords(builder -> { // defaults to match all; }); - - assertHitCount(searchResponse, 1L); - - Map source = searchResponse.getHits().getAt(0).getSourceAsMap(); - - assertValue(source, "watch_id", is(watchId)); - assertValue(source, "state", is("executed")); - - // we don't store the computed vars in history - assertValue(source, "vars", nullValue()); - - assertValue(source, "result.condition.status", is("success")); - assertValue(source, "result.transform.status", is("success")); - - List> actions = ObjectPath.eval("result.actions", source); - for (Map action : actions) { - String id = (String) action.get("id"); - switch (id) { - case "a1" -> { - assertValue(action, "status", is("success")); - assertValue(action, "transform.status", is("success")); - assertValue(action, "transform.payload.a1_transformed_value", equalTo(25)); - } - case "a2" -> { - assertValue(action, "status", is("success")); - assertValue(action, "transform.status", is("success")); - assertValue(action, "transform.payload.a2_transformed_value", equalTo(35)); + try { +
assertHitCount(searchResponse, 1L); + + Map source = searchResponse.getHits().getAt(0).getSourceAsMap(); + + assertValue(source, "watch_id", is(watchId)); + assertValue(source, "state", is("executed")); + + // we don't store the computed vars in history + assertValue(source, "vars", nullValue()); + + assertValue(source, "result.condition.status", is("success")); + assertValue(source, "result.transform.status", is("success")); + + List> actions = ObjectPath.eval("result.actions", source); + for (Map action : actions) { + String id = (String) action.get("id"); + switch (id) { + case "a1" -> { + assertValue(action, "status", is("success")); + assertValue(action, "transform.status", is("success")); + assertValue(action, "transform.payload.a1_transformed_value", equalTo(25)); + } + case "a2" -> { + assertValue(action, "status", is("success")); + assertValue(action, "transform.status", is("success")); + assertValue(action, "transform.payload.a2_transformed_value", equalTo(35)); + } + default -> fail("there should not be an action result for action with an id other than a1 or a2"); } - default -> fail("there should not be an action result for action with an id other than a1 or a2"); } + } finally { + searchResponse.decRef(); } }); } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java index 2332ef24ff5ef..e8bda244271c0 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/HistoryIntegrationTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.watcher.test.integration; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortBuilders; @@ -33,6 +32,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; @@ -187,33 +187,34 @@ public void testThatHistoryContainsStatus() throws Exception { assertBusy(() -> { refresh(".watcher-history*"); - SearchResponse searchResponse = prepareSearch(".watcher-history*").setSize(1).get(); - assertHitCount(searchResponse, 1); - SearchHit hit = searchResponse.getHits().getAt(0); + assertResponse(prepareSearch(".watcher-history*").setSize(1), searchResponse -> { + assertHitCount(searchResponse, 1); + SearchHit hit = searchResponse.getHits().getAt(0); - XContentSource source = new XContentSource(hit.getSourceRef(), XContentType.JSON); + XContentSource source = new XContentSource(hit.getSourceRef(), XContentType.JSON); - Boolean active = source.getValue("status.state.active"); - assertThat(active, is(status.state().isActive())); + 
Boolean active = source.getValue("status.state.active"); + assertThat(active, is(status.state().isActive())); - String timestamp = source.getValue("status.state.timestamp"); - assertThat(timestamp, WatcherTestUtils.isSameDate(status.state().getTimestamp())); + String timestamp = source.getValue("status.state.timestamp"); + assertThat(timestamp, WatcherTestUtils.isSameDate(status.state().getTimestamp())); - String lastChecked = source.getValue("status.last_checked"); - assertThat(lastChecked, WatcherTestUtils.isSameDate(status.lastChecked())); - String lastMetCondition = source.getValue("status.last_met_condition"); - assertThat(lastMetCondition, WatcherTestUtils.isSameDate(status.lastMetCondition())); + String lastChecked = source.getValue("status.last_checked"); + assertThat(lastChecked, WatcherTestUtils.isSameDate(status.lastChecked())); + String lastMetCondition = source.getValue("status.last_met_condition"); + assertThat(lastMetCondition, WatcherTestUtils.isSameDate(status.lastMetCondition())); - Integer version = source.getValue("status.version"); - int expectedVersion = (int) (status.version() - 1); - assertThat(version, is(expectedVersion)); + Integer version = source.getValue("status.version"); + int expectedVersion = (int) (status.version() - 1); + assertThat(version, is(expectedVersion)); - ActionStatus actionStatus = status.actionStatus("_logger"); - String ackStatusState = source.getValue("status.actions._logger.ack.state").toString().toUpperCase(Locale.ROOT); - assertThat(ackStatusState, is(actionStatus.ackStatus().state().toString())); + ActionStatus actionStatus = status.actionStatus("_logger"); + String ackStatusState = source.getValue("status.actions._logger.ack.state").toString().toUpperCase(Locale.ROOT); + assertThat(ackStatusState, is(actionStatus.ackStatus().state().toString())); - Boolean lastExecutionSuccesful = source.getValue("status.actions._logger.last_execution.successful"); - assertThat(lastExecutionSuccesful, is(actionStatus.lastExecution().successful())); + Boolean lastExecutionSuccessful = source.getValue("status.actions._logger.last_execution.successful"); + assertThat(lastExecutionSuccessful, is(actionStatus.lastExecution().successful())); + }); }); assertBusy(() -> { diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java index 5c4039566661a..a3dc49411cc86 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/RejectedExecutionTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.watcher.test.integration; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.xpack.core.XPackSettings; @@ -18,6 +17,7 @@ import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; import static 
org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; @@ -50,8 +50,10 @@ public void testHistoryOnRejection() throws Exception { assertBusy(() -> { flushAndRefresh(".watcher-history-*"); - SearchResponse searchResponse = prepareSearch(".watcher-history-*").get(); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThanOrEqualTo(2L)); + assertResponse( + prepareSearch(".watcher-history-*"), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, greaterThanOrEqualTo(2L)) + ); }); } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java index be9b2da6e739c..d1153b6eca3e6 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java @@ -8,7 +8,6 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; import org.elasticsearch.rest.RestStatus; @@ -24,6 +23,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; import static org.elasticsearch.xpack.watcher.input.InputBuilders.simpleInput; @@ -67,8 +67,10 @@ public void testThatLoadingWithNonExistingIndexWorks() throws Exception { assertBusy(() -> { RefreshResponse refreshResponse = indicesAdmin().prepareRefresh(".watcher-history*").get(); assertThat(refreshResponse.getStatus(), equalTo(RestStatus.OK)); - SearchResponse searchResponse = prepareSearch(".watcher-history*").setSize(0).get(); - assertThat(searchResponse.getHits().getTotalHits().value, is(greaterThanOrEqualTo(1L))); + assertResponse( + prepareSearch(".watcher-history*").setSize(0), + searchResponse -> assertThat(searchResponse.getHits().getTotalHits().value, is(greaterThanOrEqualTo(1L))) + ); }, 30, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java index bb4fa3b12bab4..e0c9c0098df5f 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/WatchMetadataTests.java @@ -77,8 +77,12 @@ public void testWatchMetadata() throws Exception { throw e; } } - assertNotNull(searchResponse); - assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + try { + assertNotNull(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, greaterThan(0L)); + } finally { + searchResponse.decRef(); + } }); 
} diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java index 5ea85eb813982..bdc040ff2eca7 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transform/TransformIntegrationTests.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.watcher.transform; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -37,7 +36,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.indexAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; @@ -141,17 +140,17 @@ public void testScriptTransform() throws Exception { assertWatchWithMinimumPerformedActionsCount("_id2", 1, false); refresh(); - SearchResponse response = prepareSearch("output1").get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("key3").toString(), equalTo("20")); - - response = prepareSearch("output2").get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("key3").toString(), equalTo("20")); + assertNoFailuresAndResponse(prepareSearch("output1"), response -> { + assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("key3").toString(), equalTo("20")); + }); + + assertNoFailuresAndResponse(prepareSearch("output2"), response -> { + assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("key3").toString(), equalTo("20")); + }); } public void testSearchTransform() throws Exception { @@ -186,15 +185,15 @@ public void testSearchTransform() throws Exception { assertWatchWithMinimumPerformedActionsCount("_id2", 1, false); refresh(); - SearchResponse response = prepareSearch("output1").get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); - assertThat(response.getHits().getAt(0).getSourceAsString(), containsString("mytestresult")); + 
assertNoFailuresAndResponse(prepareSearch("output1"), response -> { + assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsString(), containsString("mytestresult")); + }); - response = prepareSearch("output2").get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); - assertThat(response.getHits().getAt(0).getSourceAsString(), containsString("mytestresult")); + assertNoFailuresAndResponse(prepareSearch("output2"), response -> { + assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsString(), containsString("mytestresult")); + }); } public void testChainTransform() throws Exception { @@ -225,17 +224,17 @@ public void testChainTransform() throws Exception { assertWatchWithMinimumPerformedActionsCount("_id2", 1, false); refresh(); - SearchResponse response = prepareSearch("output1").get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("key4").toString(), equalTo("30")); - - response = prepareSearch("output2").get(); - assertNoFailures(response); - assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); - assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); - assertThat(response.getHits().getAt(0).getSourceAsMap().get("key4").toString(), equalTo("30")); + assertNoFailuresAndResponse(prepareSearch("output1"), response -> { + assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("key4").toString(), equalTo("30")); + }); + + assertNoFailuresAndResponse(prepareSearch("output2"), response -> { + assertThat(response.getHits().getTotalHits().value, greaterThanOrEqualTo(1L)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("key4").toString(), equalTo("30")); + }); } private void executeWatch(String watchId) { diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/delete/DeleteWatchTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/delete/DeleteWatchTests.java index e48c4efd32b0d..ab8f44f2976e9 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/delete/DeleteWatchTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/transport/action/delete/DeleteWatchTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.watcher.transport.action.delete; import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.core.TimeValue; import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; @@ -29,6 +28,7 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.sleep; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import 
static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; import static org.elasticsearch.xpack.watcher.input.InputBuilders.httpInput; @@ -81,15 +81,16 @@ public void testWatchDeletionDuringExecutionWorks() throws Exception { // during execution refresh(HistoryStoreField.INDEX_PREFIX + "*"); - SearchResponse searchResponse = prepareSearch(HistoryStoreField.INDEX_PREFIX + "*").setQuery(matchAllQuery()).get(); - assertHitCount(searchResponse, 1); + assertResponse(prepareSearch(HistoryStoreField.INDEX_PREFIX + "*").setQuery(matchAllQuery()), searchResponse -> { + assertHitCount(searchResponse, 1); - Map source = searchResponse.getHits().getAt(0).getSourceAsMap(); - // watch has been executed successfully - String state = ObjectPath.eval("state", source); - assertThat(state, is("executed")); - // no exception occurred - assertThat(source, not(hasKey("exception"))); + Map source = searchResponse.getHits().getAt(0).getSourceAsMap(); + // watch has been executed successfully + String state = ObjectPath.eval("state", source); + assertThat(state, is("executed")); + // no exception occurred + assertThat(source, not(hasKey("exception"))); + }); }); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java index 3b80fcdab159c..89ddb2c0011bb 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java @@ -105,8 +105,12 @@ public void testExecute() throws Exception { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response, Settings.EMPTY_PARAMS)); - assertFalse(condition.execute(ctx).met()); + try { + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response, Settings.EMPTY_PARAMS)); + assertFalse(condition.execute(ctx).met()); + } finally { + response.decRef(); + } } public void testExecuteMergedParams() throws Exception { @@ -127,8 +131,12 @@ public void testExecuteMergedParams() throws Exception { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response, Settings.EMPTY_PARAMS)); - assertFalse(executable.execute(ctx).met()); + try { + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response, Settings.EMPTY_PARAMS)); + assertFalse(executable.execute(ctx).met()); + } finally { + response.decRef(); + } } public void testParserValid() throws Exception { @@ -149,18 +157,22 @@ public void testParserValid() throws Exception { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response, Settings.EMPTY_PARAMS)); + try { + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response, Settings.EMPTY_PARAMS)); - assertFalse(executable.execute(ctx).met()); + assertFalse(executable.execute(ctx).met()); - builder = createConditionContent("return true", "mockscript", ScriptType.INLINE); - parser = createParser(builder); - 
parser.nextToken(); - executable = ScriptCondition.parse(scriptService, "_watch", parser); + builder = createConditionContent("return true", "mockscript", ScriptType.INLINE); + parser = createParser(builder); + parser.nextToken(); + executable = ScriptCondition.parse(scriptService, "_watch", parser); - ctx = mockExecutionContext("_name", new Payload.XContent(response, Settings.EMPTY_PARAMS)); + ctx = mockExecutionContext("_name", new Payload.XContent(response, Settings.EMPTY_PARAMS)); - assertTrue(executable.execute(ctx).met()); + assertTrue(executable.execute(ctx).met()); + } finally { + response.decRef(); + } } public void testParserInvalid() throws Exception { @@ -221,9 +233,13 @@ public void testScriptConditionThrowException() throws Exception { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response, ToXContent.EMPTY_PARAMS)); - ScriptException exception = expectThrows(ScriptException.class, () -> condition.execute(ctx)); - assertThat(exception.getMessage(), containsString("Error evaluating null.foo")); + try { + WatchExecutionContext ctx = mockExecutionContext("_name", new Payload.XContent(response, ToXContent.EMPTY_PARAMS)); + ScriptException exception = expectThrows(ScriptException.class, () -> condition.execute(ctx)); + assertThat(exception.getMessage(), containsString("Error evaluating null.foo")); + } finally { + response.decRef(); + } } public void testScriptConditionAccessCtx() throws Exception { @@ -241,13 +257,17 @@ public void testScriptConditionAccessCtx() throws Exception { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - WatchExecutionContext ctx = mockExecutionContext( - "_name", - ZonedDateTime.now(ZoneOffset.UTC), - new Payload.XContent(response, ToXContent.EMPTY_PARAMS) - ); - Thread.sleep(10); - assertThat(condition.execute(ctx).met(), is(true)); + try { + WatchExecutionContext ctx = mockExecutionContext( + "_name", + ZonedDateTime.now(ZoneOffset.UTC), + new Payload.XContent(response, ToXContent.EMPTY_PARAMS) + ); + Thread.sleep(10); + assertThat(condition.execute(ctx).met(), is(true)); + } finally { + response.decRef(); + } } private static XContentBuilder createConditionContent(String script, String scriptLang, ScriptType scriptType) throws IOException { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java index f0fc8686840e1..670da0b8f788d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.metrics.MeanMetric; @@ -52,6 +51,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.percentiles; import static 
org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xpack.watcher.input.InputBuilders.searchInput; import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.templateRequest; import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule; @@ -220,28 +220,31 @@ public void run() { "doc['trigger_event.schedule.triggered_time'].value - doc['trigger_event.schedule.scheduled_time'].value", emptyMap() ); - SearchResponse searchResponse = client.prepareSearch(HistoryStoreField.DATA_STREAM + "*") - .setQuery(QueryBuilders.rangeQuery("trigger_event.schedule.scheduled_time").gte(startTime).lte(endTime)) - .addAggregation(terms("state").field("state")) - .addAggregation(histogram("delay").script(script).interval(10)) - .addAggregation(percentiles("percentile_delay").script(script).percentiles(1.0, 20.0, 50.0, 80.0, 99.0)) - .get(); - Terms terms = searchResponse.getAggregations().get("state"); - stats.setStateStats(terms); - Histogram histogram = searchResponse.getAggregations().get("delay"); - stats.setDelayStats(histogram); - System.out.println("===> State"); - for (Terms.Bucket bucket : terms.getBuckets()) { - System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); - } - System.out.println("===> Delay"); - for (Histogram.Bucket bucket : histogram.getBuckets()) { - System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); - } - Percentiles percentiles = searchResponse.getAggregations().get("percentile_delay"); - stats.setDelayPercentiles(percentiles); - stats.setAvgJvmUsed(jvmUsedHeapSpace); - new WatcherServiceRequestBuilder(client).stop().get(); + assertResponse( + client.prepareSearch(HistoryStoreField.DATA_STREAM + "*") + .setQuery(QueryBuilders.rangeQuery("trigger_event.schedule.scheduled_time").gte(startTime).lte(endTime)) + .addAggregation(terms("state").field("state")) + .addAggregation(histogram("delay").script(script).interval(10)) + .addAggregation(percentiles("percentile_delay").script(script).percentiles(1.0, 20.0, 50.0, 80.0, 99.0)), + searchResponse -> { + Terms terms = searchResponse.getAggregations().get("state"); + stats.setStateStats(terms); + Histogram histogram = searchResponse.getAggregations().get("delay"); + stats.setDelayStats(histogram); + System.out.println("===> State"); + for (Terms.Bucket bucket : terms.getBuckets()) { + System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); + } + System.out.println("===> Delay"); + for (Histogram.Bucket bucket : histogram.getBuckets()) { + System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); + } + Percentiles percentiles = searchResponse.getAggregations().get("percentile_delay"); + stats.setDelayPercentiles(percentiles); + stats.setAvgJvmUsed(jvmUsedHeapSpace); + new WatcherServiceRequestBuilder(client).stop().get(); + } + ); } } diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java index 2a9d761b7c3c1..270628be2ea8e 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java @@ -374,22 +374,24 @@ private void assertDocs( .build(); RequestOptions randomRequestOptions = randomBoolean() ? 
RequestOptions.DEFAULT : v7RequestOptions; - SearchResponse searchResponse; - // run a search against the index - searchResponse = search(index, null, randomRequestOptions); - logger.info(searchResponse); - // check hit count - assertEquals(numDocs, searchResponse.getHits().getTotalHits().value); - // check that _index is properly set - assertTrue(Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getIndex).allMatch(index::equals)); - // check that all _ids are there - assertEquals(expectedIds, Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getId).collect(Collectors.toSet())); - // check that _source is present - assertTrue(Arrays.stream(searchResponse.getHits().getHits()).allMatch(SearchHit::hasSource)); - // check that correct _source present for each document - for (SearchHit h : searchResponse.getHits().getHits()) { - assertEquals(sourceForDoc(getIdAsNumeric(h.getId())), h.getSourceAsString()); + SearchResponse searchResponse = search(index, null, randomRequestOptions); + try { + logger.info(searchResponse); + // check hit count + assertEquals(numDocs, searchResponse.getHits().getTotalHits().value); + // check that _index is properly set + assertTrue(Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getIndex).allMatch(index::equals)); + // check that all _ids are there + assertEquals(expectedIds, Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getId).collect(Collectors.toSet())); + // check that _source is present + assertTrue(Arrays.stream(searchResponse.getHits().getHits()).allMatch(SearchHit::hasSource)); + // check that correct _source present for each document + for (SearchHit h : searchResponse.getHits().getHits()) { + assertEquals(sourceForDoc(getIdAsNumeric(h.getId())), h.getSourceAsString()); + } + } finally { + searchResponse.decRef(); } String id = randomFrom(expectedIds); @@ -402,10 +404,14 @@ private void assertDocs( .runtimeMappings(Map.of("val", Map.of("type", "long"))), randomRequestOptions ); - logger.info(searchResponse); - assertEquals(1, searchResponse.getHits().getTotalHits().value); - assertEquals(id, searchResponse.getHits().getHits()[0].getId()); - assertEquals(sourceForDoc(num), searchResponse.getHits().getHits()[0].getSourceAsString()); + try { + logger.info(searchResponse); + assertEquals(1, searchResponse.getHits().getTotalHits().value); + assertEquals(id, searchResponse.getHits().getHits()[0].getId()); + assertEquals(sourceForDoc(num), searchResponse.getHits().getHits()[0].getSourceAsString()); + } finally { + searchResponse.decRef(); + } if (sourceOnlyRepository == false) { // search using reverse sort on val @@ -416,12 +422,16 @@ private void assertDocs( .sort(SortBuilders.fieldSort("val").order(SortOrder.DESC)), randomRequestOptions ); - logger.info(searchResponse); - // check sort order - assertEquals( - expectedIds.stream().sorted(Comparator.comparingInt(this::getIdAsNumeric).reversed()).toList(), - Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getId).toList() - ); + try { + logger.info(searchResponse); + // check sort order + assertEquals( + expectedIds.stream().sorted(Comparator.comparingInt(this::getIdAsNumeric).reversed()).toList(), + Arrays.stream(searchResponse.getHits().getHits()).map(SearchHit::getId).toList() + ); + } finally { + searchResponse.decRef(); + } // look up postings searchResponse = search( @@ -429,9 +439,13 @@ private void assertDocs( SearchSourceBuilder.searchSource().query(QueryBuilders.matchQuery("test", "test" + num)), randomRequestOptions ); - 
logger.info(searchResponse); - // check match - ElasticsearchAssertions.assertSearchHits(searchResponse, id); + try { + logger.info(searchResponse); + // check match + ElasticsearchAssertions.assertSearchHits(searchResponse, id); + } finally { + searchResponse.decRef(); + } if (oldVersion.before(Version.fromString("6.0.0"))) { // search on _type and check that results contain _type information @@ -442,13 +456,17 @@ private void assertDocs( SearchSourceBuilder.searchSource().query(QueryBuilders.termQuery("_type", randomType)), randomRequestOptions ); - logger.info(searchResponse); - assertEquals(typeCount, searchResponse.getHits().getTotalHits().value); - for (SearchHit hit : searchResponse.getHits().getHits()) { - DocumentField typeField = hit.field("_type"); - assertNotNull(typeField); - assertThat(typeField.getValue(), instanceOf(String.class)); - assertEquals(randomType, typeField.getValue()); + try { + logger.info(searchResponse); + assertEquals(typeCount, searchResponse.getHits().getTotalHits().value); + for (SearchHit hit : searchResponse.getHits().getHits()) { + DocumentField typeField = hit.field("_type"); + assertNotNull(typeField); + assertThat(typeField.getValue(), instanceOf(String.class)); + assertEquals(randomType, typeField.getValue()); + } + } finally { + searchResponse.decRef(); } } @@ -464,11 +482,15 @@ private void assertDocs( SearchSourceBuilder.searchSource().query(QueryBuilders.rangeQuery("create_date").from("2020-02-01")), randomRequestOptions ); - logger.info(searchResponse); - assertEquals(0, searchResponse.getHits().getTotalHits().value); - assertEquals(numberOfShards, searchResponse.getSuccessfulShards()); - // When all shards are skipped, at least one of them is queried in order to provide a proper search response. - assertEquals(numberOfShards - 1, searchResponse.getSkippedShards()); + try { + logger.info(searchResponse); + assertEquals(0, searchResponse.getHits().getTotalHits().value); + assertEquals(numberOfShards, searchResponse.getSuccessfulShards()); + // When all shards are skipped, at least one of them is queried in order to provide a proper search response. 
+ assertEquals(numberOfShards - 1, searchResponse.getSkippedShards()); + } finally { + searchResponse.decRef(); + } } } From 65d26b2d49ea7cf04286e2c3fa62b36c468ec156 Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Fri, 10 Nov 2023 08:21:51 -0600 Subject: [PATCH 14/15] Allowing non-dynamic index settings to be updated by automatically unassigning shards (#101723) --- docs/changelog/101723.yaml | 6 + docs/reference/index-modules.asciidoc | 5 +- .../indices/update-settings.asciidoc | 10 + .../api/indices.put_settings.json | 4 + .../20_update_non_dynamic_settings.yml | 58 +++++ .../MetadataUpdateSettingsServiceIT.java | 198 ++++++++++++++++++ .../org/elasticsearch/TransportVersions.java | 1 + .../put/TransportUpdateSettingsAction.java | 1 + ...dateSettingsClusterStateUpdateRequest.java | 16 ++ .../settings/put/UpdateSettingsRequest.java | 23 +- .../MetadataUpdateSettingsService.java | 70 ++++++- .../indices/RestUpdateSettingsAction.java | 1 + ...dateSettingsRequestSerializationTests.java | 2 + .../put/UpdateSettingsRequestTests.java | 4 + 14 files changed, 390 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/101723.yaml create mode 100644 rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/20_update_non_dynamic_settings.yml create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceIT.java diff --git a/docs/changelog/101723.yaml b/docs/changelog/101723.yaml new file mode 100644 index 0000000000000..146d164805f00 --- /dev/null +++ b/docs/changelog/101723.yaml @@ -0,0 +1,6 @@ +pr: 101723 +summary: Allowing non-dynamic index settings to be updated by automatically unassigning + shards +area: Indices APIs +type: enhancement +issues: [] diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 4f15bb1c1d694..31fe747feb63b 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -16,7 +16,10 @@ Index level settings can be set per-index. Settings may be: _static_:: They can only be set at index creation time or on a -<<indices-open-close,closed index>>. +<<indices-open-close,closed index>>, or by using the +<<indices-update-settings,update index settings>> API with the +`reopen` query parameter set to `true` (which automatically +closes and reopens impacted indices). _dynamic_:: diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index 45531dd58ccfc..1ac9ecbb6a6a3 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -60,6 +60,16 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailab (Optional, Boolean) If `true`, existing index settings remain unchanged. Defaults to `false`. +`reopen`:: +(Optional, Boolean) If `true`, then any static settings that would ordinarily only +be updated on closed indices will be updated by automatically closing and reopening +the affected indices. If `false`, attempts to update static settings on open indices +will fail. Defaults to `false`. + +NOTE: Changing index settings on an automatically closed index using the `reopen` +parameter will result in the index becoming unavailable momentarily while the index +is in the process of reopening.
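+
+For example, the following request updates the static `index.codec` setting on an
+open index, letting Elasticsearch close and reopen the index automatically (a
+minimal illustration; the index name `my-index-000001` is a placeholder):
+
+[source,console]
+----
+PUT /my-index-000001/_settings?reopen=true
+{
+  "index" : {
+    "codec" : "best_compression"
+  }
+}
+----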
+ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json index c1f3079995de9..08134e211a312 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json @@ -45,6 +45,10 @@ "type":"boolean", "description":"Whether to update existing settings. If set to `true` existing settings on an index remain unchanged, the default is `false`" }, + "reopen":{ + "type":"boolean", + "description":"Whether to close and reopen the index to apply non-dynamic settings. If set to `true` the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes. The default is `false`" + }, "ignore_unavailable":{ "type":"boolean", "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/20_update_non_dynamic_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/20_update_non_dynamic_settings.yml new file mode 100644 index 0000000000000..07c0e8b7a8b2a --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.put_settings/20_update_non_dynamic_settings.yml @@ -0,0 +1,58 @@ +setup: + - skip: + version: ' - 8.11.99' + reason: 'ability to update non-dynamic settings added in 8.12' + + - do: + indices.create: + index: test-index + body: + settings: + index: + number_of_replicas: 0 + +--- +"Test update non dynamic settings": + - do: + indices.put_settings: + index: test-index + body: + number_of_replicas: 1 + + - do: + catch: bad_request + indices.put_settings: + index: test-index + body: + index.codec: best_compression + + - do: + catch: bad_request + indices.put_settings: + index: test-index + reopen: false + body: + index.codec: best_compression + + - do: + indices.get_settings: + index: test-index + flat_settings: false + - match: + test-index.settings.index.codec: null + + - do: + indices.put_settings: + index: test-index + reopen: true + body: + index.codec: best_compression + - match: { acknowledged: true } + + - do: + indices.get_settings: + index: test-index + flat_settings: false + - match: + test-index.settings.index.codec: "best_compression" + diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceIT.java new file mode 100644 index 0000000000000..59f4905d5924b --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceIT.java @@ -0,0 +1,198 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.equalTo; + +public class MetadataUpdateSettingsServiceIT extends ESIntegTestCase { + + public void testThatNonDynamicSettingChangesTakeEffect() throws Exception { + /* + * This test makes sure that when non-dynamic settings are updated they actually take effect (as opposed to just being set + * in the cluster state). + */ + createIndex("test", Settings.EMPTY); + MetadataUpdateSettingsService metadataUpdateSettingsService = internalCluster().getCurrentMasterNodeInstance( + MetadataUpdateSettingsService.class + ); + UpdateSettingsClusterStateUpdateRequest request = new UpdateSettingsClusterStateUpdateRequest(); + List<Index> indices = new ArrayList<>(); + for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + for (IndexService indexService : indicesService) { + indices.add(indexService.index()); + } + } + request.indices(indices.toArray(Index.EMPTY_ARRAY)); + request.settings(Settings.builder().put("index.codec", "FastDecompressionCompressingStoredFieldsData").build()); + + // First make sure it fails if reopenShards is not set on the request: + AtomicBoolean expectedFailureOccurred = new AtomicBoolean(false); + metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + fail("Should have failed updating a non-dynamic setting without reopenShards set to true"); + } + + @Override + public void onFailure(Exception e) { + expectedFailureOccurred.set(true); + } + }); + assertBusy(() -> assertThat(expectedFailureOccurred.get(), equalTo(true))); + + // Now we set reopenShards and expect it to work: + request.reopenShards(true); + AtomicBoolean success = new AtomicBoolean(false); + metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + success.set(true); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + assertBusy(() -> assertThat(success.get(), equalTo(true))); + + // Now we look into the IndexShard objects to make sure that the codec was actually updated (vs just the setting): + for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + for (IndexService indexService : indicesService) { + assertBusy(() -> { + for (IndexShard indexShard : indexService) { + final Engine engine = indexShard.getEngineOrNull(); + assertNotNull("engine is null for " + indexService.index().getName(), engine); +
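// Reading the codec from the live engine config, rather than from the cluster state, proves the shards were actually reopened with the new setting +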
assertThat(engine.getEngineConfig().getCodec().getName(), equalTo("FastDecompressionCompressingStoredFieldsData")); + } + }); + } + } + } + + public void testThatNonDynamicSettingChangesDoNotUnnecessarilyCauseReopens() throws Exception { + /* + * This test makes sure that if a setting change request for a non-dynamic setting is made on an index that already has that + * value, we don't unassign the shards to apply the change -- there is no need. First we set a non-dynamic setting for the + * first time, and see that the shards for the index are unassigned. Then we set a different dynamic setting, and also set + * the original non-dynamic setting to the same value as in the previous request. We make sure that the new setting comes + * through but that the shards are not unassigned. + */ + final String indexName = "test"; + createIndex(indexName, Settings.EMPTY); + MetadataUpdateSettingsService metadataUpdateSettingsService = internalCluster().getCurrentMasterNodeInstance( + MetadataUpdateSettingsService.class + ); + UpdateSettingsClusterStateUpdateRequest request = new UpdateSettingsClusterStateUpdateRequest(); + List<Index> indices = new ArrayList<>(); + for (IndicesService indicesService : internalCluster().getInstances(IndicesService.class)) { + for (IndexService indexService : indicesService) { + indices.add(indexService.index()); + } + } + request.indices(indices.toArray(Index.EMPTY_ARRAY)); + request.settings(Settings.builder().put("index.codec", "FastDecompressionCompressingStoredFieldsData").build()); + request.reopenShards(true); + + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + AtomicBoolean shardsUnassigned = new AtomicBoolean(false); + AtomicBoolean expectedSettingsChangeInClusterState = new AtomicBoolean(false); + AtomicReference<String> expectedSetting = new AtomicReference<>("index.codec"); + AtomicReference<String> expectedSettingValue = new AtomicReference<>("FastDecompressionCompressingStoredFieldsData"); + clusterService.addListener(event -> { + // We want the cluster change event where the setting is applied.
This will be the same one where shards are unassigned + if (event.metadataChanged() + && event.state().metadata().index(indexName) != null + && expectedSettingValue.get().equals(event.state().metadata().index(indexName).getSettings().get(expectedSetting.get()))) { + expectedSettingsChangeInClusterState.set(true); + if (event.routingTableChanged() && event.state().routingTable().indicesRouting().containsKey(indexName)) { + if (hasUnassignedShards(event.state(), indexName)) { + shardsUnassigned.set(true); + } + } + } + }); + + AtomicBoolean success = new AtomicBoolean(false); + // Make the first request, just to set things up: + metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + success.set(true); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + assertBusy(() -> assertThat(success.get(), equalTo(true))); + assertBusy(() -> assertThat(expectedSettingsChangeInClusterState.get(), equalTo(true))); + assertThat(shardsUnassigned.get(), equalTo(true)); + + assertBusy(() -> assertThat(hasUnassignedShards(clusterService.state(), indexName), equalTo(false))); + + // Same request, except now we'll also set the dynamic "index.max_result_window" setting: + request.settings( + Settings.builder() + .put("index.codec", "FastDecompressionCompressingStoredFieldsData") + .put("index.max_result_window", "1500") + .build() + ); + success.set(false); + expectedSettingsChangeInClusterState.set(false); + shardsUnassigned.set(false); + expectedSetting.set("index.max_result_window"); + expectedSettingValue.set("1500"); + // Making this request ought to add this new setting but not unassign the shards: + metadataUpdateSettingsService.updateSettings(request, new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + success.set(true); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + }); + + assertBusy(() -> assertThat(success.get(), equalTo(true))); + assertBusy(() -> assertThat(expectedSettingsChangeInClusterState.get(), equalTo(true))); + assertThat(shardsUnassigned.get(), equalTo(false)); + + } + + private boolean hasUnassignedShards(ClusterState state, String indexName) { + return state.routingTable() + .indicesRouting() + .get(indexName) + .allShards() + .anyMatch(shardRoutingTable -> shardRoutingTable.shardsWithState(ShardRoutingState.UNASSIGNED).size() > 0); + } +} diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index b62a24fdc0b45..394aad6ab30b8 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -161,6 +161,7 @@ static TransportVersion def(int id) { public static final TransportVersion UNDESIRED_SHARD_ALLOCATIONS_COUNT_ADDED = def(8_530_00_0); public static final TransportVersion ML_INFERENCE_TASK_SETTINGS_OPTIONAL_ADDED = def(8_531_00_0); public static final TransportVersion DEPRECATED_COMPONENT_TEMPLATES_ADDED = def(8_532_00_0); + public static final TransportVersion UPDATE_NON_DYNAMIC_SETTINGS_ADDED = def(8_533_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index b613eab0d731c..19fa9c3d359fb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -126,6 +126,7 @@ protected void masterOperation( ) .settings(requestSettings) .setPreserveExisting(request.isPreserveExisting()) + .reopenShards(request.reopen()) .ackTimeout(request.timeout()) .masterNodeTimeout(request.masterNodeTimeout()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java index f52c659ea55f4..99a43c6594c62 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsClusterStateUpdateRequest.java @@ -22,6 +22,8 @@ public class UpdateSettingsClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<UpdateSettingsClusterStateUpdateRequest> private boolean preserveExisting = false; + private boolean reopenShards = false; + /** * Returns true iff the settings update should only add but not update settings. If the setting already exists * it should not be overwritten by this update. The default is false */ public boolean isPreserveExisting() { return preserveExisting; } + /** + * Returns true if non-dynamic setting updates should go through, by automatically unassigning shards in the same cluster + * state change as the setting update. The shards will be automatically reassigned after the cluster state update is made. The + * default is false. + */ + public boolean reopenShards() { + return reopenShards; + } + + public UpdateSettingsClusterStateUpdateRequest reopenShards(boolean reopenShards) { + this.reopenShards = reopenShards; + return this; + } + /** * Iff set to true this settings update will only add settings not already set on an index. Existing settings remain * unchanged. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 4e31fbc2b5732..013e568eff7c9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -47,6 +47,7 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsRequest> + private boolean reopen = false; + /** + * Returns true if non-dynamic setting updates should go through, by automatically unassigning shards in the same cluster + * state change as the setting update. The shards will be automatically reassigned after the cluster state update is made. The + * default is false.
+ */ + public boolean reopen() { + return reopen; + } + + public void reopen(boolean reopen) { + this.reopen = reopen; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -186,6 +203,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) { out.writeString(origin); } + if (out.getTransportVersion().onOrAfter(TransportVersions.UPDATE_NON_DYNAMIC_SETTINGS_ADDED)) { + out.writeBoolean(reopen); + } } @Override @@ -243,12 +263,13 @@ public boolean equals(Object o) { && Objects.equals(settings, that.settings) && Objects.equals(indicesOptions, that.indicesOptions) && Objects.equals(preserveExisting, that.preserveExisting) + && Objects.equals(reopen, that.reopen) && Arrays.equals(indices, that.indices); } @Override public int hashCode() { - return Objects.hash(masterNodeTimeout, timeout, settings, indicesOptions, preserveExisting, Arrays.hashCode(indices)); + return Objects.hash(masterNodeTimeout, timeout, settings, indicesOptions, preserveExisting, reopen, Arrays.hashCode(indices)); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index 1310c0f7ec5c9..5891b953acfca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -20,7 +20,11 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionMultiListener; import org.elasticsearch.cluster.service.ClusterService; @@ -40,7 +44,9 @@ import java.io.IOException; import java.util.Arrays; import java.util.HashSet; +import java.util.List; import java.util.Locale; +import java.util.Objects; import java.util.Set; import java.util.function.BiFunction; @@ -192,9 +198,57 @@ ClusterState execute(ClusterState currentState) { } if (skippedSettings.isEmpty() == false && openIndices.isEmpty() == false) { - throw new IllegalArgumentException( - String.format(Locale.ROOT, "Can't update non dynamic settings [%s] for open indices %s", skippedSettings, openIndices) - ); + if (request.reopenShards()) { + // We have non-dynamic settings and open indices. We will unassign all of the shards in these indices so that the new + // changed settings are applied when the shards are re-assigned. 
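+ // The unassigned shards are then reassigned through the normal allocation process once the updated cluster state has been published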
+ routingTableBuilder = RoutingTable.builder( + allocationService.getShardRoutingRoleStrategy(), + currentState.routingTable() + ); + for (Index index : openIndices) { + // We only want to take on the expense of reopening all shards for an index if the setting is really changing + Settings existingSettings = currentState.getMetadata().index(index).getSettings(); + boolean needToReopenIndex = false; + for (String setting : skippedSettings) { + String newValue = request.settings().get(setting); + if (Objects.equals(newValue, existingSettings.get(setting)) == false) { + needToReopenIndex = true; + break; + } + } + if (needToReopenIndex) { + List<ShardRouting> shardRoutingList = currentState.routingTable().allShards(index.getName()); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index); + for (ShardRouting shardRouting : shardRoutingList) { + if (ShardRoutingState.UNASSIGNED.equals(shardRouting.state()) == false) { + indexRoutingTableBuilder.addShard( + shardRouting.moveToUnassigned( + new UnassignedInfo( + UnassignedInfo.Reason.INDEX_REOPENED, + "Unassigning shards to update static settings" + ) + ) + ); + } else { + indexRoutingTableBuilder.addShard(shardRouting); + } + } + routingTableBuilder.add(indexRoutingTableBuilder.build()); + openIndices.remove(index); + closedIndices.add(index); + } + } + } else { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Can't update non dynamic settings [%s] for open indices %s unless the `reopen` query parameter is set to " + + "true. Alternatively, close the indices, apply the settings changes, and reopen the indices", + skippedSettings, + openIndices + ) + ); + } } if (IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.exists(openSettings)) { @@ -209,10 +263,12 @@ ClusterState execute(ClusterState currentState) { * * TODO: should we update the in-sync allocation IDs once the data is deleted by the node?
*/ - routingTableBuilder = RoutingTable.builder( - allocationService.getShardRoutingRoleStrategy(), - currentState.routingTable() - ); + if (routingTableBuilder == null) { + routingTableBuilder = RoutingTable.builder( + allocationService.getShardRoutingRoleStrategy(), + currentState.routingTable() + ); + } routingTableBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); metadataBuilder.updateNumberOfReplicas(updatedNumberOfReplicas, actualIndices); logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 2f7468ee544bb..74eddca033398 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -46,6 +46,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); + updateSettingsRequest.reopen(request.paramAsBoolean("reopen", false)); updateSettingsRequest.fromXContent(request.contentParser()); return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java index e9fa64ce0b0a3..9ff323028e2c5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestSerializationTests.java @@ -45,6 +45,7 @@ protected UpdateSettingsRequest mutateInstance(UpdateSettingsRequest request) { ) ); mutators.add(() -> mutation.setPreserveExisting(request.isPreserveExisting() == false)); + mutators.add(() -> mutation.reopen(request.reopen() == false)); randomFrom(mutators).run(); return mutation; } @@ -67,6 +68,7 @@ public static UpdateSettingsRequest createTestItem() { request.timeout(randomTimeValue()); request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); request.setPreserveExisting(randomBoolean()); + request.reopen(randomBoolean()); return request; } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java index b997d2a421204..48ab2b0802616 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java @@ -64,6 +64,9 @@ protected UpdateSettingsRequest createTestInstance() { private static UpdateSettingsRequest createTestInstance(boolean 
enclosedSettings) { UpdateSettingsRequest testRequest = UpdateSettingsRequestSerializationTests.createTestItem(); + if (randomBoolean()) { + testRequest.reopen(true); + } if (enclosedSettings) { UpdateSettingsRequest requestWithEnclosingSettings = new UpdateSettingsRequest(testRequest.settings()) { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { @@ -75,6 +78,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } }; + requestWithEnclosingSettings.reopen(testRequest.reopen()); return requestWithEnclosingSettings; } return testRequest; From ea2035dd17399290679a2521f4e519725d75b932 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Fri, 10 Nov 2023 16:41:17 +0200 Subject: [PATCH 15/15] ES-6566: Move the calculation of data tier usage stats to individual nodes (#100230) (#101599) --- docs/changelog/101599.yaml | 6 + .../DataTierAllocationDeciderIT.java | 2 +- .../DataTiersUsageRestCancellationIT.java | 9 +- .../core/src/main/java/module-info.java | 1 + .../core/DataTiersUsageTransportAction.java | 259 ------ .../xpack/core/XPackClientPlugin.java | 1 + .../elasticsearch/xpack/core/XPackPlugin.java | 4 + .../DataTiersFeatureSetUsage.java | 4 +- .../DataTiersInfoTransportAction.java | 3 +- .../DataTiersUsageTransportAction.java | 211 +++++ .../core/datatiers/NodeDataTiersUsage.java | 113 +++ .../NodesDataTiersUsageTransportAction.java | 210 +++++ .../DataTiersUsageTransportActionTests.java | 786 ------------------ .../core/datatiers/DataTierUsageFixtures.java | 114 +++ .../DataTiersFeatureSetUsageTests.java | 2 +- .../DataTiersUsageTransportActionTests.java | 535 ++++++++++++ ...desDataTiersUsageTransportActionTests.java | 214 +++++ .../xpack/security/operator/Constants.java | 1 + 18 files changed, 1421 insertions(+), 1054 deletions(-) create mode 100644 docs/changelog/101599.yaml delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersUsageTransportAction.java rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/{ => datatiers}/DataTiersFeatureSetUsage.java (98%) rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/{ => datatiers}/DataTiersInfoTransportAction.java (91%) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersUsageTransportActionTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTierUsageFixtures.java rename x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/{ => datatiers}/DataTiersFeatureSetUsageTests.java (97%) create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportActionTests.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportActionTests.java diff --git a/docs/changelog/101599.yaml b/docs/changelog/101599.yaml new file mode 100644 index 0000000000000..4fb1c972eb083 --- /dev/null +++ b/docs/changelog/101599.yaml @@ -0,0 +1,6 @@ +pr: 101599 +summary: Move the calculation of data tier usage stats to individual nodes +area: 
ILM+SLM +type: bug +issues: + - 100230 diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java index 6421b70f9e453..20231af156ee1 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderIT.java @@ -26,9 +26,9 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xpack.core.DataTiersFeatureSetUsage; import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder; import org.elasticsearch.xpack.core.action.XPackUsageResponse; +import org.elasticsearch.xpack.core.datatiers.DataTiersFeatureSetUsage; import org.junit.Before; import java.util.ArrayList; diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/DataTiersUsageRestCancellationIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/DataTiersUsageRestCancellationIT.java index f669bb8589fd7..faeb760b3c181 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/DataTiersUsageRestCancellationIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/rest/action/DataTiersUsageRestCancellationIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.core.rest.action; import org.apache.http.client.methods.HttpGet; -import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.PlainActionFuture; @@ -35,6 +34,7 @@ import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.core.action.XPackUsageResponse; +import org.elasticsearch.xpack.core.datatiers.NodesDataTiersUsageTransportAction; import java.nio.file.Path; import java.util.Arrays; @@ -76,7 +76,7 @@ public void testCancellation() throws Exception { final SubscribableListener nodeStatsRequestsReleaseListener = new SubscribableListener<>(); for (TransportService transportService : internalCluster().getInstances(TransportService.class)) { ((MockTransportService) transportService).addRequestHandlingBehavior( - TransportNodesStatsAction.TYPE.name() + "[n]", + NodesDataTiersUsageTransportAction.TYPE.name() + "[n]", (handler, request, channel, task) -> { tasksBlockedLatch.countDown(); nodeStatsRequestsReleaseListener.addListener( @@ -94,14 +94,13 @@ public void testCancellation() throws Exception { safeAwait(tasksBlockedLatch); // must wait for the node-level tasks to start to avoid cancelling being handled earlier cancellable.cancel(); - // NB this test works by blocking node-level stats requests; when #100230 is addressed this will need to target a different action. 
- assertAllCancellableTasksAreCancelled(TransportNodesStatsAction.TYPE.name()); + assertAllCancellableTasksAreCancelled(NodesDataTiersUsageTransportAction.TYPE.name()); assertAllCancellableTasksAreCancelled(XPackUsageAction.NAME); nodeStatsRequestsReleaseListener.onResponse(null); expectThrows(CancellationException.class, future::actionGet); - assertAllTasksHaveFinished(TransportNodesStatsAction.TYPE.name()); + assertAllTasksHaveFinished(NodesDataTiersUsageTransportAction.TYPE.name()); assertAllTasksHaveFinished(XPackUsageAction.NAME); } diff --git a/x-pack/plugin/core/src/main/java/module-info.java b/x-pack/plugin/core/src/main/java/module-info.java index deb3c4384a04b..d7b5a86d87f90 100644 --- a/x-pack/plugin/core/src/main/java/module-info.java +++ b/x-pack/plugin/core/src/main/java/module-info.java @@ -57,6 +57,7 @@ exports org.elasticsearch.xpack.core.common.validation; exports org.elasticsearch.xpack.core.common; exports org.elasticsearch.xpack.core.datastreams; + exports org.elasticsearch.xpack.core.datatiers; exports org.elasticsearch.xpack.core.deprecation; exports org.elasticsearch.xpack.core.downsample; exports org.elasticsearch.xpack.core.enrich.action; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersUsageTransportAction.java deleted file mode 100644 index 295df1ea51b6b..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersUsageTransportAction.java +++ /dev/null @@ -1,259 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.core; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; -import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; -import org.elasticsearch.action.admin.indices.stats.IndexShardStats; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.client.internal.ParentTaskAssigningClient; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.allocation.DataTier; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.store.StoreStats; -import org.elasticsearch.protocol.xpack.XPackUsageRequest; -import org.elasticsearch.search.aggregations.metrics.TDigestState; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.StreamSupport; - -public class DataTiersUsageTransportAction extends XPackUsageFeatureTransportAction { - - private final Client client; - - @Inject - public DataTiersUsageTransportAction( - TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, - ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, - Client client - ) { - super( - XPackUsageFeatureAction.DATA_TIERS.name(), - transportService, - clusterService, - threadPool, - actionFilters, - indexNameExpressionResolver - ); - this.client = client; - } - - @Override - protected void masterOperation( - Task task, - XPackUsageRequest request, - ClusterState state, - ActionListener listener - ) { - new ParentTaskAssigningClient(client, clusterService.localNode(), task).admin() - .cluster() - .prepareNodesStats() - .all() - .setIndices(CommonStatsFlags.ALL) - .execute(listener.delegateFailureAndWrap((delegate, nodesStatsResponse) -> { - final RoutingNodes routingNodes = state.getRoutingNodes(); - final Map indices = state.getMetadata().getIndices(); - - // Determine which tiers each index would prefer to be within - Map indicesToTiers = tierIndices(indices); - - // Generate tier specific stats for the nodes and indices - Map tierSpecificStats = calculateStats( - nodesStatsResponse.getNodes(), - indicesToTiers, - routingNodes - ); - - delegate.onResponse(new XPackUsageFeatureResponse(new DataTiersFeatureSetUsage(tierSpecificStats))); - })); - } - - // Visible for testing - // Takes a registry of indices and returns a mapping of index name to which tier it most prefers. Always 1 to 1, some may filter out. 
- static Map tierIndices(Map indices) { - Map indexByTier = new HashMap<>(); - indices.entrySet().forEach(entry -> { - String tierPref = entry.getValue().getSettings().get(DataTier.TIER_PREFERENCE); - if (Strings.hasText(tierPref)) { - String[] tiers = tierPref.split(","); - if (tiers.length > 0) { - indexByTier.put(entry.getKey(), tiers[0]); - } - } - }); - return indexByTier; - } - - /** - * Accumulator to hold intermediate data tier stats before final calculation. - */ - private static class TierStatsAccumulator { - int nodeCount = 0; - Set indexNames = new HashSet<>(); - int totalShardCount = 0; - long totalByteCount = 0; - long docCount = 0; - int primaryShardCount = 0; - long primaryByteCount = 0L; - final TDigestState valueSketch = TDigestState.create(1000); - } - - // Visible for testing - static Map calculateStats( - List nodesStats, - Map indexByTier, - RoutingNodes routingNodes - ) { - Map statsAccumulators = new HashMap<>(); - for (NodeStats nodeStats : nodesStats) { - aggregateDataTierNodeCounts(nodeStats, statsAccumulators); - aggregateDataTierIndexStats(nodeStats, routingNodes, indexByTier, statsAccumulators); - } - Map results = new HashMap<>(); - for (Map.Entry entry : statsAccumulators.entrySet()) { - results.put(entry.getKey(), calculateFinalTierStats(entry.getValue())); - } - return results; - } - - /** - * Determine which data tiers this node belongs to (if any), and increment the node counts for those tiers. - */ - private static void aggregateDataTierNodeCounts(NodeStats nodeStats, Map tiersStats) { - nodeStats.getNode() - .getRoles() - .stream() - .map(DiscoveryNodeRole::roleName) - .filter(DataTier::validTierName) - .forEach(tier -> tiersStats.computeIfAbsent(tier, k -> new TierStatsAccumulator()).nodeCount++); - } - - /** - * Locate which indices are hosted on the node specified by the NodeStats, then group and aggregate the available index stats by tier. - */ - private static void aggregateDataTierIndexStats( - NodeStats nodeStats, - RoutingNodes routingNodes, - Map indexByTier, - Map accumulators - ) { - final RoutingNode node = routingNodes.node(nodeStats.getNode().getId()); - if (node != null) { - StreamSupport.stream(node.spliterator(), false) - .map(ShardRouting::index) - .distinct() - .forEach(index -> classifyIndexAndCollectStats(index, nodeStats, indexByTier, node, accumulators)); - } - } - - /** - * Determine which tier an index belongs in, then accumulate its stats into that tier's stats. - */ - private static void classifyIndexAndCollectStats( - Index index, - NodeStats nodeStats, - Map indexByTier, - RoutingNode node, - Map accumulators - ) { - // Look up which tier this index belongs to (its most preferred) - String indexTier = indexByTier.get(index.getName()); - if (indexTier != null) { - final TierStatsAccumulator accumulator = accumulators.computeIfAbsent(indexTier, k -> new TierStatsAccumulator()); - accumulator.indexNames.add(index.getName()); - aggregateDataTierShardStats(nodeStats, index, node, accumulator); - } - } - - /** - * Collect shard-level data tier stats from shard stats contained in the node stats response. 
- */ - private static void aggregateDataTierShardStats(NodeStats nodeStats, Index index, RoutingNode node, TierStatsAccumulator accumulator) { - // Shard based stats - final List allShardStats = nodeStats.getIndices().getShardStats(index); - if (allShardStats != null) { - for (IndexShardStats shardStat : allShardStats) { - accumulator.totalByteCount += shardStat.getTotal().getStore().totalDataSetSizeInBytes(); - accumulator.docCount += shardStat.getTotal().getDocs().getCount(); - - // Accumulate stats about started shards - ShardRouting shardRouting = node.getByShardId(shardStat.getShardId()); - if (shardRouting != null && shardRouting.state() == ShardRoutingState.STARTED) { - accumulator.totalShardCount += 1; - - // Accumulate stats about started primary shards - StoreStats primaryStoreStats = shardStat.getPrimary().getStore(); - if (primaryStoreStats != null) { - // if primaryStoreStats is null, it means there is no primary on the node in question - accumulator.primaryShardCount++; - long primarySize = primaryStoreStats.totalDataSetSizeInBytes(); - accumulator.primaryByteCount += primarySize; - accumulator.valueSketch.add(primarySize); - } - } - } - } - } - - private static DataTiersFeatureSetUsage.TierSpecificStats calculateFinalTierStats(TierStatsAccumulator accumulator) { - long primaryShardSizeMedian = (long) accumulator.valueSketch.quantile(0.5); - long primaryShardSizeMAD = computeMedianAbsoluteDeviation(accumulator.valueSketch); - return new DataTiersFeatureSetUsage.TierSpecificStats( - accumulator.nodeCount, - accumulator.indexNames.size(), - accumulator.totalShardCount, - accumulator.primaryShardCount, - accumulator.docCount, - accumulator.totalByteCount, - accumulator.primaryByteCount, - primaryShardSizeMedian, - primaryShardSizeMAD - ); - } - - // Visible for testing - static long computeMedianAbsoluteDeviation(TDigestState valuesSketch) { - if (valuesSketch.size() == 0) { - return 0; - } else { - final double approximateMedian = valuesSketch.quantile(0.5); - final TDigestState approximatedDeviationsSketch = TDigestState.createUsingParamsFrom(valuesSketch); - valuesSketch.centroids().forEach(centroid -> { - final double deviation = Math.abs(approximateMedian - centroid.mean()); - approximatedDeviationsSketch.add(deviation, centroid.count()); - }); - - return (long) approximatedDeviationsSketch.quantile(0.5); - } - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 6d019e50f9d5f..ac16631bacb73 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.datastreams.DataStreamFeatureSetUsage; import org.elasticsearch.xpack.core.datastreams.DataStreamLifecycleFeatureSetUsage; +import org.elasticsearch.xpack.core.datatiers.DataTiersFeatureSetUsage; import org.elasticsearch.xpack.core.downsample.DownsampleShardStatus; import org.elasticsearch.xpack.core.enrich.EnrichFeatureSetUsage; import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyStatus; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index d02e3f43d80cb..66534cccff064 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -98,6 +98,9 @@ import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; import org.elasticsearch.xpack.core.async.TransportDeleteAsyncResultAction; +import org.elasticsearch.xpack.core.datatiers.DataTiersInfoTransportAction; +import org.elasticsearch.xpack.core.datatiers.DataTiersUsageTransportAction; +import org.elasticsearch.xpack.core.datatiers.NodesDataTiersUsageTransportAction; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction; import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction; @@ -362,6 +365,7 @@ public Collection createComponents(PluginServices services) { actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_STREAM_LIFECYCLE, DataStreamLifecycleUsageTransportAction.class)); actions.add(new ActionHandler<>(XPackUsageFeatureAction.HEALTH, HealthApiUsageTransportAction.class)); actions.add(new ActionHandler<>(XPackUsageFeatureAction.REMOTE_CLUSTERS, RemoteClusterUsageTransportAction.class)); + actions.add(new ActionHandler<>(NodesDataTiersUsageTransportAction.TYPE, NodesDataTiersUsageTransportAction.class)); return actions; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java similarity index 98% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsage.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java index 0bf21f66b4888..f990118763bad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsage.java @@ -5,7 +5,7 @@ * 2.0. */ -package org.elasticsearch.xpack.core; +package org.elasticsearch.xpack.core.datatiers; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; @@ -16,6 +16,8 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; import java.util.Collections; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersInfoTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersInfoTransportAction.java similarity index 91% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersInfoTransportAction.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersInfoTransportAction.java index 6134813dc4651..3af1945c53d3f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersInfoTransportAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersInfoTransportAction.java @@ -5,11 +5,12 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.core; +package org.elasticsearch.xpack.core.datatiers; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; import org.elasticsearch.xpack.core.action.XPackInfoFeatureTransportAction; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java new file mode 100644 index 0000000000000..b5a5e2a4e3273 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportAction.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.datatiers; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.ParentTaskAssigningClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.search.aggregations.metrics.TDigestState; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; +import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +public class DataTiersUsageTransportAction extends XPackUsageFeatureTransportAction { + + private final Client client; + + @Inject + public DataTiersUsageTransportAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + Client client + ) { + super( + XPackUsageFeatureAction.DATA_TIERS.name(), + transportService, + clusterService, + threadPool, + actionFilters, + indexNameExpressionResolver + ); + this.client = client; + } + + @Override + protected void masterOperation( + Task task, + XPackUsageRequest request, + ClusterState state, + ActionListener listener + ) { + new ParentTaskAssigningClient(client, clusterService.localNode(), task).admin() + .cluster() + .execute( + NodesDataTiersUsageTransportAction.TYPE, + new NodesDataTiersUsageTransportAction.NodesRequest(), + listener.delegateFailureAndWrap((delegate, response) 
-> { + // Generate tier specific stats for the nodes and indices + delegate.onResponse( + new XPackUsageFeatureResponse( + new DataTiersFeatureSetUsage( + aggregateStats(response.getNodes(), getIndicesGroupedByTier(state, response.getNodes())) + ) + ) + ); + }) + ); + } + + // Visible for testing + static Map> getIndicesGroupedByTier(ClusterState state, List nodes) { + Set indices = nodes.stream() + .map(nodeResponse -> state.getRoutingNodes().node(nodeResponse.getNode().getId())) + .filter(Objects::nonNull) + .flatMap(node -> StreamSupport.stream(node.spliterator(), false)) + .map(ShardRouting::getIndexName) + .collect(Collectors.toSet()); + Map> indicesByTierPreference = new HashMap<>(); + for (String indexName : indices) { + IndexMetadata indexMetadata = state.metadata().index(indexName); + // If the index was deleted in the meantime, skip + if (indexMetadata == null) { + continue; + } + List tierPreference = indexMetadata.getTierPreference(); + if (tierPreference.isEmpty() == false) { + indicesByTierPreference.computeIfAbsent(tierPreference.get(0), ignored -> new HashSet<>()).add(indexName); + } + } + return indicesByTierPreference; + } + + /** + * Accumulator to hold intermediate data tier stats before final calculation. + */ + private static class TierStatsAccumulator { + int nodeCount = 0; + Set indexNames = new HashSet<>(); + int totalShardCount = 0; + long totalByteCount = 0; + long docCount = 0; + int primaryShardCount = 0; + long primaryByteCount = 0L; + final TDigestState valueSketch = TDigestState.create(1000); + } + + // Visible for testing + static Map aggregateStats( + List nodeDataTiersUsages, + Map> tierPreference + ) { + Map statsAccumulators = new HashMap<>(); + for (String tier : tierPreference.keySet()) { + statsAccumulators.put(tier, new TierStatsAccumulator()); + statsAccumulators.get(tier).indexNames.addAll(tierPreference.get(tier)); + } + for (NodeDataTiersUsage nodeDataTiersUsage : nodeDataTiersUsages) { + aggregateDataTierNodeCounts(nodeDataTiersUsage, statsAccumulators); + aggregateDataTierIndexStats(nodeDataTiersUsage, statsAccumulators); + } + Map results = new HashMap<>(); + for (Map.Entry entry : statsAccumulators.entrySet()) { + results.put(entry.getKey(), aggregateFinalTierStats(entry.getValue())); + } + return results; + } + + /** + * Determine which data tiers each node belongs to (if any), and increment the node counts for those tiers. + */ + private static void aggregateDataTierNodeCounts(NodeDataTiersUsage nodeStats, Map tiersStats) { + nodeStats.getNode() + .getRoles() + .stream() + .map(DiscoveryNodeRole::roleName) + .filter(DataTier::validTierName) + .forEach(tier -> tiersStats.computeIfAbsent(tier, k -> new TierStatsAccumulator()).nodeCount++); + } + + /** + * Iterate the preferred tiers of the indices for a node and aggregate their stats. 
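+ *
+ * A worked example (editor's sketch; the values are hypothetical): a node reporting
+ * {@code "data_hot" -> UsageStats(primaryShardSizes=[100, 200], totalShardCount=4, docCount=50, totalSize=600)}
+ * adds 4 to the hot tier's total shard count, 2 to its primary shard count, 300 bytes to its primary byte
+ * count, 600 bytes to its total byte count, and 50 docs to its doc count.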
+ */
+    private static void aggregateDataTierIndexStats(NodeDataTiersUsage nodeDataTiersUsage, Map<String, TierStatsAccumulator> accumulators) {
+        for (Map.Entry<String, NodeDataTiersUsage.UsageStats> entry : nodeDataTiersUsage.getUsageStatsByTier().entrySet()) {
+            String tier = entry.getKey();
+            NodeDataTiersUsage.UsageStats usage = entry.getValue();
+            if (DataTier.validTierName(tier)) {
+                TierStatsAccumulator accumulator = accumulators.computeIfAbsent(tier, k -> new TierStatsAccumulator());
+                accumulator.docCount += usage.getDocCount();
+                accumulator.totalByteCount += usage.getTotalSize();
+                accumulator.totalShardCount += usage.getTotalShardCount();
+                for (Long primaryShardSize : usage.getPrimaryShardSizes()) {
+                    accumulator.primaryShardCount += 1;
+                    accumulator.primaryByteCount += primaryShardSize;
+                    accumulator.valueSketch.add(primaryShardSize);
+                }
+            }
+        }
+    }
+
+    private static DataTiersFeatureSetUsage.TierSpecificStats aggregateFinalTierStats(TierStatsAccumulator accumulator) {
+        long primaryShardSizeMedian = (long) accumulator.valueSketch.quantile(0.5);
+        long primaryShardSizeMAD = computeMedianAbsoluteDeviation(accumulator.valueSketch);
+        return new DataTiersFeatureSetUsage.TierSpecificStats(
+            accumulator.nodeCount,
+            accumulator.indexNames.size(),
+            accumulator.totalShardCount,
+            accumulator.primaryShardCount,
+            accumulator.docCount,
+            accumulator.totalByteCount,
+            accumulator.primaryByteCount,
+            primaryShardSizeMedian,
+            primaryShardSizeMAD
+        );
+    }
+
+    // Visible for testing
+    static long computeMedianAbsoluteDeviation(TDigestState valuesSketch) {
+        if (valuesSketch.size() == 0) {
+            return 0;
+        } else {
+            final double approximateMedian = valuesSketch.quantile(0.5);
+            final TDigestState approximatedDeviationsSketch = TDigestState.createUsingParamsFrom(valuesSketch);
+            valuesSketch.centroids().forEach(centroid -> {
+                final double deviation = Math.abs(approximateMedian - centroid.mean());
+                approximatedDeviationsSketch.add(deviation, centroid.count());
+            });
+
+            return (long) approximatedDeviationsSketch.quantile(0.5);
+        }
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java
new file mode 100644
index 0000000000000..c1903a2910629
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodeDataTiersUsage.java
@@ -0,0 +1,113 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.core.datatiers;
+
+import org.elasticsearch.action.support.nodes.BaseNodeResponse;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Data tier usage statistics on a specific node. The statistics group the indices, shard sizes, and shard counts
+ * based on their tier preference.
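+ *
+ * Illustrative sketch (editor's addition; the values are hypothetical) of the shape a node's per-tier usage takes:
+ * <pre>{@code
+ * Map<String, NodeDataTiersUsage.UsageStats> byTier = Map.of(
+ *     "data_hot", new NodeDataTiersUsage.UsageStats(List.of(1024L, 2048L), 4, 1_000L, 8_192L),
+ *     "data_warm", new NodeDataTiersUsage.UsageStats(List.of(512L), 2, 300L, 1_024L)
+ * );
+ * byTier.get("data_hot").getPrimaryShardSizes(); // [1024, 2048]
+ * }</pre>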
+ */ +public class NodeDataTiersUsage extends BaseNodeResponse { + + private final Map usageStatsByTier; + + public static class UsageStats implements Writeable { + private final List primaryShardSizes; + private int totalShardCount; + private long docCount; + private long totalSize; + + public UsageStats() { + this.primaryShardSizes = new ArrayList<>(); + this.totalShardCount = 0; + this.docCount = 0; + this.totalSize = 0; + } + + public UsageStats(List primaryShardSizes, int totalShardCount, long docCount, long totalSize) { + this.primaryShardSizes = primaryShardSizes; + this.totalShardCount = totalShardCount; + this.docCount = docCount; + this.totalSize = totalSize; + } + + static UsageStats read(StreamInput in) throws IOException { + return new UsageStats(in.readCollectionAsList(StreamInput::readVLong), in.readVInt(), in.readVLong(), in.readVLong()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(primaryShardSizes, StreamOutput::writeVLong); + out.writeVInt(totalShardCount); + out.writeVLong(docCount); + out.writeVLong(totalSize); + } + + public void addPrimaryShardSize(long primaryShardSize) { + primaryShardSizes.add(primaryShardSize); + } + + public void incrementTotalSize(long totalSize) { + this.totalSize += totalSize; + } + + public void incrementDocCount(long docCount) { + this.docCount += docCount; + } + + public void incrementTotalShardCount(int totalShardCount) { + this.totalShardCount += totalShardCount; + } + + public List getPrimaryShardSizes() { + return primaryShardSizes; + } + + public int getTotalShardCount() { + return totalShardCount; + } + + public long getDocCount() { + return docCount; + } + + public long getTotalSize() { + return totalSize; + } + } + + public NodeDataTiersUsage(StreamInput in) throws IOException { + super(in); + usageStatsByTier = in.readMap(UsageStats::read); + } + + public NodeDataTiersUsage(DiscoveryNode node, Map usageStatsByTier) { + super(node); + this.usageStatsByTier = usageStatsByTier; + } + + public Map getUsageStatsByTier() { + return Map.copyOf(usageStatsByTier); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeMap(usageStatsByTier, (o, v) -> v.writeTo(o)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java new file mode 100644 index 0000000000000..85b1fa34c2dd4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportAction.java @@ -0,0 +1,210 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.core.datatiers;
+
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
+import org.elasticsearch.action.admin.indices.stats.IndexShardStats;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.nodes.BaseNodesRequest;
+import org.elasticsearch.action.support.nodes.BaseNodesResponse;
+import org.elasticsearch.action.support.nodes.TransportNodesAction;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.store.StoreStats;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.NodeIndicesStats;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+/**
+ * Locally sources data tier usage stats, mainly index and shard sizes, grouped by preferred data tier.
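+ *
+ * For reference, a caller retrieves these stats by executing the action (editor's illustrative sketch;
+ * {@code client} and {@code listener} are assumed to be in scope, mirroring the caller in
+ * DataTiersUsageTransportAction):
+ * <pre>{@code
+ * client.execute(
+ *     NodesDataTiersUsageTransportAction.TYPE,
+ *     new NodesDataTiersUsageTransportAction.NodesRequest(),
+ *     listener // ActionListener<NodesDataTiersUsageTransportAction.NodesResponse>
+ * );
+ * }</pre>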
+ */ +public class NodesDataTiersUsageTransportAction extends TransportNodesAction< + NodesDataTiersUsageTransportAction.NodesRequest, + NodesDataTiersUsageTransportAction.NodesResponse, + NodesDataTiersUsageTransportAction.NodeRequest, + NodeDataTiersUsage> { + + public static final ActionType TYPE = ActionType.localOnly("cluster:monitor/nodes/data_tier_usage"); + private static final CommonStatsFlags STATS_FLAGS = new CommonStatsFlags().clear() + .set(CommonStatsFlags.Flag.Docs, true) + .set(CommonStatsFlags.Flag.Store, true); + + private final IndicesService indicesService; + + @Inject + public NodesDataTiersUsageTransportAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + IndicesService indicesService, + ActionFilters actionFilters + ) { + super( + TYPE.name(), + clusterService, + transportService, + actionFilters, + NodeRequest::new, + threadPool.executor(ThreadPool.Names.MANAGEMENT) + ); + this.indicesService = indicesService; + } + + @Override + protected NodesResponse newResponse(NodesRequest request, List responses, List failures) { + return new NodesResponse(clusterService.getClusterName(), responses, failures); + } + + @Override + protected NodeRequest newNodeRequest(NodesRequest request) { + return NodeRequest.INSTANCE; + } + + @Override + protected NodeDataTiersUsage newNodeResponse(StreamInput in, DiscoveryNode node) throws IOException { + return new NodeDataTiersUsage(in); + } + + @Override + protected NodeDataTiersUsage nodeOperation(NodeRequest nodeRequest, Task task) { + assert task instanceof CancellableTask; + + DiscoveryNode localNode = clusterService.localNode(); + NodeIndicesStats nodeIndicesStats = indicesService.stats(STATS_FLAGS, true); + ClusterState state = clusterService.state(); + RoutingNode routingNode = state.getRoutingNodes().node(localNode.getId()); + Map usageStatsByTier = aggregateStats(routingNode, state.metadata(), nodeIndicesStats); + return new NodeDataTiersUsage(clusterService.localNode(), usageStatsByTier); + } + + // For testing + static Map aggregateStats( + RoutingNode routingNode, + Metadata metadata, + NodeIndicesStats nodeIndicesStats + ) { + if (routingNode == null) { + return Map.of(); + } + Map usageStatsByTier = new HashMap<>(); + Set localIndices = StreamSupport.stream(routingNode.spliterator(), false) + .map(routing -> routing.index().getName()) + .collect(Collectors.toSet()); + for (String indexName : localIndices) { + IndexMetadata indexMetadata = metadata.index(indexName); + String tier = indexMetadata.getTierPreference().isEmpty() ? 
null : indexMetadata.getTierPreference().get(0); + if (tier != null) { + NodeDataTiersUsage.UsageStats usageStats = usageStatsByTier.computeIfAbsent( + tier, + ignored -> new NodeDataTiersUsage.UsageStats() + ); + List allShardStats = nodeIndicesStats.getShardStats(indexMetadata.getIndex()); + if (allShardStats != null) { + for (IndexShardStats indexShardStats : allShardStats) { + usageStats.incrementTotalSize(indexShardStats.getTotal().getStore().totalDataSetSizeInBytes()); + usageStats.incrementDocCount(indexShardStats.getTotal().getDocs().getCount()); + + ShardRouting shardRouting = routingNode.getByShardId(indexShardStats.getShardId()); + if (shardRouting != null && shardRouting.state() == ShardRoutingState.STARTED) { + usageStats.incrementTotalShardCount(1); + + // Accumulate stats about started primary shards + StoreStats primaryStoreStats = indexShardStats.getPrimary().getStore(); + if (shardRouting.primary() && primaryStoreStats != null) { + usageStats.addPrimaryShardSize(primaryStoreStats.totalDataSetSizeInBytes()); + } + } + } + } + } + } + return usageStatsByTier; + } + + public static class NodesRequest extends BaseNodesRequest { + + public NodesRequest() { + super((String[]) null); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + } + + public static class NodeRequest extends TransportRequest { + + static final NodeRequest INSTANCE = new NodeRequest(); + + public NodeRequest(StreamInput in) throws IOException { + super(in); + } + + public NodeRequest() { + + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } + } + + public static class NodesResponse extends BaseNodesResponse { + + public NodesResponse(ClusterName clusterName, List nodes, List failures) { + super(clusterName, nodes, failures); + } + + @Override + protected List readNodesFrom(StreamInput in) throws IOException { + return in.readCollectionAsList(NodeDataTiersUsage::new); + } + + @Override + protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeCollection(nodes); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersUsageTransportActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersUsageTransportActionTests.java deleted file mode 100644 index 93e991b0fa5ae..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersUsageTransportActionTests.java +++ /dev/null @@ -1,786 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.core; - -import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; -import org.elasticsearch.action.admin.indices.stats.CommonStats; -import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; -import org.elasticsearch.action.admin.indices.stats.IndexShardStats; -import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodeRole; -import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.IndexRoutingTable; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; -import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; -import org.elasticsearch.cluster.routing.allocation.DataTier; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.PathUtils; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.index.shard.DocsStats; -import org.elasticsearch.index.shard.IndexLongFieldRange; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.StoreStats; -import org.elasticsearch.indices.NodeIndicesStats; -import org.elasticsearch.search.aggregations.metrics.TDigestState; -import org.elasticsearch.test.ESTestCase; - -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class DataTiersUsageTransportActionTests extends ESTestCase { - - public void testCalculateMAD() { - assertThat(DataTiersUsageTransportAction.computeMedianAbsoluteDeviation(TDigestState.create(10)), equalTo(0L)); - - TDigestState sketch = TDigestState.create(randomDoubleBetween(1, 1000, false)); - sketch.add(1); - sketch.add(1); - sketch.add(2); - sketch.add(2); - sketch.add(4); - sketch.add(6); - sketch.add(9); - assertThat(DataTiersUsageTransportAction.computeMedianAbsoluteDeviation(sketch), equalTo(1L)); - } - - public void testTierIndices() { - IndexMetadata hotIndex1 = indexMetadata("hot-1", 1, 0, DataTier.DATA_HOT); - IndexMetadata hotIndex2 = indexMetadata("hot-2", 1, 0, DataTier.DATA_HOT); - IndexMetadata warmIndex1 = indexMetadata("warm-1", 1, 0, DataTier.DATA_WARM); - IndexMetadata coldIndex1 = indexMetadata("cold-1", 1, 0, DataTier.DATA_COLD); - IndexMetadata coldIndex2 = indexMetadata("cold-2", 1, 0, DataTier.DATA_COLD, DataTier.DATA_WARM); // Prefers cold over warm - IndexMetadata nonTiered = indexMetadata("non-tier", 1, 
0); // No tier - - Map indices = new HashMap<>(); - indices.put("hot-1", hotIndex1); - indices.put("hot-2", hotIndex2); - indices.put("warm-1", warmIndex1); - indices.put("cold-1", coldIndex1); - indices.put("cold-2", coldIndex2); - indices.put("non-tier", nonTiered); - - Map tiers = DataTiersUsageTransportAction.tierIndices(indices); - assertThat(tiers.size(), equalTo(5)); - assertThat(tiers.get("hot-1"), equalTo(DataTier.DATA_HOT)); - assertThat(tiers.get("hot-2"), equalTo(DataTier.DATA_HOT)); - assertThat(tiers.get("warm-1"), equalTo(DataTier.DATA_WARM)); - assertThat(tiers.get("cold-1"), equalTo(DataTier.DATA_COLD)); - assertThat(tiers.get("cold-2"), equalTo(DataTier.DATA_COLD)); - assertThat(tiers.get("non-tier"), nullValue()); - } - - public void testCalculateStatsNoTiers() { - // Nodes: 0 Tiered Nodes, 1 Data Node - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(0, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_ROLE); - discoBuilder.add(dataNode1); - - discoBuilder.localNodeId(dataNode1.getId()); - - // Indices: 1 Regular index - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata index1 = indexMetadata("index_1", 3, 1); - metadataBuilder.put(index1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index1.getIndex()); - routeTestShardToNodes(index1, 0, indexRoutingTableBuilder, dataNode1); - routeTestShardToNodes(index1, 1, indexRoutingTableBuilder, dataNode1); - routeTestShardToNodes(index1, 2, indexRoutingTableBuilder, dataNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - No results when no tiers present - assertThat(tierSpecificStats.size(), is(0)); - } - - public void testCalculateStatsTieredNodesOnly() { - // Nodes: 1 Data, 1 Hot, 1 Warm, 1 Cold, 1 Frozen - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(0, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_ROLE); - discoBuilder.add(dataNode1); - DiscoveryNode hotNode1 = newNode(2, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode1); - DiscoveryNode warmNode1 = newNode(3, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode1); - DiscoveryNode coldNode1 = newNode(4, DiscoveryNodeRole.DATA_COLD_NODE_ROLE); - discoBuilder.add(coldNode1); - DiscoveryNode frozenNode1 = newNode(5, DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE); - 
discoBuilder.add(frozenNode1); - - discoBuilder.localNodeId(dataNode1.getId()); - - // Indices: 1 Regular index, not hosted on any tiers - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata index1 = indexMetadata("index_1", 3, 1); - metadataBuilder.put(index1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index1.getIndex()); - routeTestShardToNodes(index1, 0, indexRoutingTableBuilder, dataNode1); - routeTestShardToNodes(index1, 1, indexRoutingTableBuilder, dataNode1); - routeTestShardToNodes(index1, 2, indexRoutingTableBuilder, dataNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - Results are present but they lack index numbers because none are tiered - assertThat(tierSpecificStats.size(), is(4)); - - DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); - assertThat(hotStats, is(notNullValue())); - assertThat(hotStats.nodeCount, is(1)); - assertThat(hotStats.indexCount, is(0)); - assertThat(hotStats.totalShardCount, is(0)); - assertThat(hotStats.docCount, is(0L)); - assertThat(hotStats.totalByteCount, is(0L)); - assertThat(hotStats.primaryShardCount, is(0)); - assertThat(hotStats.primaryByteCount, is(0L)); - assertThat(hotStats.primaryByteCountMedian, is(0L)); // All same size - assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); - assertThat(warmStats, is(notNullValue())); - assertThat(warmStats.nodeCount, is(1)); - assertThat(warmStats.indexCount, is(0)); - assertThat(warmStats.totalShardCount, is(0)); - assertThat(warmStats.docCount, is(0L)); - assertThat(warmStats.totalByteCount, is(0L)); - assertThat(warmStats.primaryShardCount, is(0)); - assertThat(warmStats.primaryByteCount, is(0L)); - assertThat(warmStats.primaryByteCountMedian, is(0L)); // All same size - assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD); - assertThat(coldStats, is(notNullValue())); - assertThat(coldStats.nodeCount, is(1)); - assertThat(coldStats.indexCount, is(0)); - assertThat(coldStats.totalShardCount, is(0)); - assertThat(coldStats.docCount, is(0L)); - assertThat(coldStats.totalByteCount, is(0L)); - assertThat(coldStats.primaryShardCount, is(0)); - assertThat(coldStats.primaryByteCount, is(0L)); - assertThat(coldStats.primaryByteCountMedian, is(0L)); // All same size - assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats 
frozenStats = tierSpecificStats.get(DataTier.DATA_FROZEN); - assertThat(frozenStats, is(notNullValue())); - assertThat(frozenStats.nodeCount, is(1)); - assertThat(frozenStats.indexCount, is(0)); - assertThat(frozenStats.totalShardCount, is(0)); - assertThat(frozenStats.docCount, is(0L)); - assertThat(frozenStats.totalByteCount, is(0L)); - assertThat(frozenStats.primaryShardCount, is(0)); - assertThat(frozenStats.primaryByteCount, is(0L)); - assertThat(frozenStats.primaryByteCountMedian, is(0L)); // All same size - assertThat(frozenStats.primaryShardBytesMAD, is(0L)); // All same size - } - - public void testCalculateStatsTieredIndicesOnly() { - // Nodes: 3 Data, 0 Tiered - Only hosting indices on generic data nodes - int nodeId = 0; - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode dataNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_ROLE); - discoBuilder.add(dataNode1); - DiscoveryNode dataNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_ROLE); - discoBuilder.add(dataNode2); - DiscoveryNode dataNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_ROLE); - discoBuilder.add(dataNode3); - - discoBuilder.localNodeId(dataNode1.getId()); - - // Indices: 1 Hot index, 2 Warm indices, 3 Cold indices - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata hotIndex1 = indexMetadata("hot_index_1", 3, 1, DataTier.DATA_HOT); - metadataBuilder.put(hotIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(hotIndex1.getIndex()); - routeTestShardToNodes(hotIndex1, 0, indexRoutingTableBuilder, dataNode1, dataNode2); - routeTestShardToNodes(hotIndex1, 1, indexRoutingTableBuilder, dataNode2, dataNode3); - routeTestShardToNodes(hotIndex1, 2, indexRoutingTableBuilder, dataNode3, dataNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata warmIndex1 = indexMetadata("warm_index_1", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex1.getIndex()); - routeTestShardToNodes(warmIndex1, 0, indexRoutingTableBuilder, dataNode1, dataNode2); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata warmIndex2 = indexMetadata("warm_index_2", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex2, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex2.getIndex()); - routeTestShardToNodes(warmIndex2, 0, indexRoutingTableBuilder, dataNode3, dataNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata coldIndex1 = indexMetadata("cold_index_1", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex1.getIndex()); - routeTestShardToNodes(coldIndex1, 0, indexRoutingTableBuilder, dataNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata coldIndex2 = indexMetadata("cold_index_2", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex2, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder 
indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex2.getIndex()); - routeTestShardToNodes(coldIndex2, 0, indexRoutingTableBuilder, dataNode2); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata coldIndex3 = indexMetadata("cold_index_3", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex3, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex3.getIndex()); - routeTestShardToNodes(coldIndex3, 0, indexRoutingTableBuilder, dataNode3); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - Index stats exist for the tiers, but no tiered nodes are found - assertThat(tierSpecificStats.size(), is(3)); - - DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); - assertThat(hotStats, is(notNullValue())); - assertThat(hotStats.nodeCount, is(0)); - assertThat(hotStats.indexCount, is(1)); - assertThat(hotStats.totalShardCount, is(6)); - assertThat(hotStats.docCount, is(6 * docCount)); - assertThat(hotStats.totalByteCount, is(6 * byteSize)); - assertThat(hotStats.primaryShardCount, is(3)); - assertThat(hotStats.primaryByteCount, is(3 * byteSize)); - assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); - assertThat(warmStats, is(notNullValue())); - assertThat(warmStats.nodeCount, is(0)); - assertThat(warmStats.indexCount, is(2)); - assertThat(warmStats.totalShardCount, is(4)); - assertThat(warmStats.docCount, is(4 * docCount)); - assertThat(warmStats.totalByteCount, is(4 * byteSize)); - assertThat(warmStats.primaryShardCount, is(2)); - assertThat(warmStats.primaryByteCount, is(2 * byteSize)); - assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD); - assertThat(coldStats, is(notNullValue())); - assertThat(coldStats.nodeCount, is(0)); - assertThat(coldStats.indexCount, is(3)); - assertThat(coldStats.totalShardCount, is(3)); - assertThat(coldStats.docCount, is(3 * docCount)); - assertThat(coldStats.totalByteCount, is(3 * byteSize)); - assertThat(coldStats.primaryShardCount, is(3)); - assertThat(coldStats.primaryByteCount, is(3 * byteSize)); - assertThat(coldStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size - } - - public void testCalculateStatsReasonableCase() { - // Nodes: 3 Hot, 5 Warm, 1 Cold - int nodeId = 0; 
- DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode hotNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode1); - DiscoveryNode hotNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode2); - DiscoveryNode hotNode3 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode3); - DiscoveryNode warmNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode1); - DiscoveryNode warmNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode2); - DiscoveryNode warmNode3 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode3); - DiscoveryNode warmNode4 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode4); - DiscoveryNode warmNode5 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(warmNode5); - DiscoveryNode coldNode1 = newNode(nodeId, DiscoveryNodeRole.DATA_COLD_NODE_ROLE); - discoBuilder.add(coldNode1); - - discoBuilder.localNodeId(hotNode1.getId()); - - // Indices: 1 Hot index, 2 Warm indices, 3 Cold indices - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata hotIndex1 = indexMetadata("hot_index_1", 3, 1, DataTier.DATA_HOT); - metadataBuilder.put(hotIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(hotIndex1.getIndex()); - routeTestShardToNodes(hotIndex1, 0, indexRoutingTableBuilder, hotNode1, hotNode2); - routeTestShardToNodes(hotIndex1, 1, indexRoutingTableBuilder, hotNode2, hotNode3); - routeTestShardToNodes(hotIndex1, 2, indexRoutingTableBuilder, hotNode3, hotNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata warmIndex1 = indexMetadata("warm_index_1", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex1.getIndex()); - routeTestShardToNodes(warmIndex1, 0, indexRoutingTableBuilder, warmNode1, warmNode2); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata warmIndex2 = indexMetadata("warm_index_2", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex2, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex2.getIndex()); - routeTestShardToNodes(warmIndex2, 0, indexRoutingTableBuilder, warmNode3, warmNode4); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata coldIndex1 = indexMetadata("cold_index_1", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex1.getIndex()); - routeTestShardToNodes(coldIndex1, 0, indexRoutingTableBuilder, coldNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata coldIndex2 = indexMetadata("cold_index_2", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex2, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = 
IndexRoutingTable.builder(coldIndex2.getIndex()); - routeTestShardToNodes(coldIndex2, 0, indexRoutingTableBuilder, coldNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata coldIndex3 = indexMetadata("cold_index_3", 1, 0, DataTier.DATA_COLD); - metadataBuilder.put(coldIndex3, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex3.getIndex()); - routeTestShardToNodes(coldIndex3, 0, indexRoutingTableBuilder, coldNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - Node and Index stats are both collected - assertThat(tierSpecificStats.size(), is(3)); - - DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); - assertThat(hotStats, is(notNullValue())); - assertThat(hotStats.nodeCount, is(3)); - assertThat(hotStats.indexCount, is(1)); - assertThat(hotStats.totalShardCount, is(6)); - assertThat(hotStats.docCount, is(6 * docCount)); - assertThat(hotStats.totalByteCount, is(6 * byteSize)); - assertThat(hotStats.primaryShardCount, is(3)); - assertThat(hotStats.primaryByteCount, is(3 * byteSize)); - assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); - assertThat(warmStats, is(notNullValue())); - assertThat(warmStats.nodeCount, is(5)); - assertThat(warmStats.indexCount, is(2)); - assertThat(warmStats.totalShardCount, is(4)); - assertThat(warmStats.docCount, is(4 * docCount)); - assertThat(warmStats.totalByteCount, is(4 * byteSize)); - assertThat(warmStats.primaryShardCount, is(2)); - assertThat(warmStats.primaryByteCount, is(2 * byteSize)); - assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD); - assertThat(coldStats, is(notNullValue())); - assertThat(coldStats.nodeCount, is(1)); - assertThat(coldStats.indexCount, is(3)); - assertThat(coldStats.totalShardCount, is(3)); - assertThat(coldStats.docCount, is(3 * docCount)); - assertThat(coldStats.totalByteCount, is(3 * byteSize)); - assertThat(coldStats.primaryShardCount, is(3)); - assertThat(coldStats.primaryByteCount, is(3 * byteSize)); - assertThat(coldStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size - } - - public void testCalculateStatsMixedTiers() { - // Nodes: 3 Hot+Warm - Nodes that are marked as part of multiple tiers - int nodeId = 0; - 
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode mixedNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(mixedNode1); - DiscoveryNode mixedNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(mixedNode2); - DiscoveryNode mixedNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE); - discoBuilder.add(mixedNode3); - - discoBuilder.localNodeId(mixedNode1.getId()); - - // Indices: 1 Hot index, 2 Warm indices - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata hotIndex1 = indexMetadata("hot_index_1", 3, 1, DataTier.DATA_HOT); - metadataBuilder.put(hotIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(hotIndex1.getIndex()); - routeTestShardToNodes(hotIndex1, 0, indexRoutingTableBuilder, mixedNode1, mixedNode2); - routeTestShardToNodes(hotIndex1, 1, indexRoutingTableBuilder, mixedNode3, mixedNode1); - routeTestShardToNodes(hotIndex1, 2, indexRoutingTableBuilder, mixedNode2, mixedNode3); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata warmIndex1 = indexMetadata("warm_index_1", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex1.getIndex()); - routeTestShardToNodes(warmIndex1, 0, indexRoutingTableBuilder, mixedNode1, mixedNode2); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - IndexMetadata warmIndex2 = indexMetadata("warm_index_2", 1, 1, DataTier.DATA_WARM); - metadataBuilder.put(warmIndex2, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex2.getIndex()); - routeTestShardToNodes(warmIndex2, 0, indexRoutingTableBuilder, mixedNode3, mixedNode1); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - .routingTable(routingTableBuilder.build()) - .build(); - - long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB - long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million - List nodeStatsList = buildNodeStats(clusterState, byteSize, docCount); - - // Calculate usage - Map indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices()); - Map tierSpecificStats = DataTiersUsageTransportAction.calculateStats( - nodeStatsList, - indexByTier, - clusterState.getRoutingNodes() - ); - - // Verify - Index stats are separated by their preferred tier, instead of counted - // toward multiple tiers based on their current routing. Nodes are counted for each tier they are in. 
- assertThat(tierSpecificStats.size(), is(2)); - - DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT); - assertThat(hotStats, is(notNullValue())); - assertThat(hotStats.nodeCount, is(3)); - assertThat(hotStats.indexCount, is(1)); - assertThat(hotStats.totalShardCount, is(6)); - assertThat(hotStats.docCount, is(6 * docCount)); - assertThat(hotStats.totalByteCount, is(6 * byteSize)); - assertThat(hotStats.primaryShardCount, is(3)); - assertThat(hotStats.primaryByteCount, is(3 * byteSize)); - assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size - - DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM); - assertThat(warmStats, is(notNullValue())); - assertThat(warmStats.nodeCount, is(3)); - assertThat(warmStats.indexCount, is(2)); - assertThat(warmStats.totalShardCount, is(4)); - assertThat(warmStats.docCount, is(4 * docCount)); - assertThat(warmStats.totalByteCount, is(4 * byteSize)); - assertThat(warmStats.primaryShardCount, is(2)); - assertThat(warmStats.primaryByteCount, is(2 * byteSize)); - assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size - assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size - } - - public void testCalculateStatsStuckInWrongTier() { - // Nodes: 3 Hot, 0 Warm - Emulating indices stuck on non-preferred tiers - int nodeId = 0; - DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); - DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE); - discoBuilder.add(leader); - discoBuilder.masterNodeId(leader.getId()); - - DiscoveryNode hotNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode1); - DiscoveryNode hotNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode2); - DiscoveryNode hotNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_HOT_NODE_ROLE); - discoBuilder.add(hotNode3); - - discoBuilder.localNodeId(hotNode1.getId()); - - // Indices: 1 Hot index, 1 Warm index (Warm index is allocated to less preferred hot node because warm nodes are missing) - Metadata.Builder metadataBuilder = Metadata.builder(); - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); - - IndexMetadata hotIndex1 = indexMetadata("hot_index_1", 3, 1, DataTier.DATA_HOT); - metadataBuilder.put(hotIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(hotIndex1.getIndex()); - routeTestShardToNodes(hotIndex1, 0, indexRoutingTableBuilder, hotNode1, hotNode2); - routeTestShardToNodes(hotIndex1, 1, indexRoutingTableBuilder, hotNode3, hotNode1); - routeTestShardToNodes(hotIndex1, 2, indexRoutingTableBuilder, hotNode2, hotNode3); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - IndexMetadata warmIndex1 = indexMetadata("warm_index_1", 1, 1, DataTier.DATA_WARM, DataTier.DATA_HOT); - metadataBuilder.put(warmIndex1, false).generateClusterUuidIfNeeded(); - { - IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex1.getIndex()); - routeTestShardToNodes(warmIndex1, 0, indexRoutingTableBuilder, hotNode1, hotNode2); - routingTableBuilder.add(indexRoutingTableBuilder.build()); - } - - // Cluster State and create stats responses - ClusterState clusterState = ClusterState.builder(new ClusterName("test")) - .nodes(discoBuilder) - .metadata(metadataBuilder) - 
.routingTable(routingTableBuilder.build())
-            .build();
-
-        long byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB
-        long docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million
-        List<NodeStats> nodeStatsList = buildNodeStats(clusterState, byteSize, docCount);
-
-        // Calculate usage
-        Map<String, String> indexByTier = DataTiersUsageTransportAction.tierIndices(clusterState.metadata().indices());
-        Map<String, DataTiersFeatureSetUsage.TierSpecificStats> tierSpecificStats = DataTiersUsageTransportAction.calculateStats(
-            nodeStatsList,
-            indexByTier,
-            clusterState.getRoutingNodes()
-        );
-
-        // Verify - Warm indices are still calculated separately from Hot ones, despite Warm nodes missing
-        assertThat(tierSpecificStats.size(), is(2));
-
-        DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT);
-        assertThat(hotStats, is(notNullValue()));
-        assertThat(hotStats.nodeCount, is(3));
-        assertThat(hotStats.indexCount, is(1));
-        assertThat(hotStats.totalShardCount, is(6));
-        assertThat(hotStats.docCount, is(6 * docCount));
-        assertThat(hotStats.totalByteCount, is(6 * byteSize));
-        assertThat(hotStats.primaryShardCount, is(3));
-        assertThat(hotStats.primaryByteCount, is(3 * byteSize));
-        assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size
-        assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size
-
-        DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM);
-        assertThat(warmStats, is(notNullValue()));
-        assertThat(warmStats.nodeCount, is(0));
-        assertThat(warmStats.indexCount, is(1));
-        assertThat(warmStats.totalShardCount, is(2));
-        assertThat(warmStats.docCount, is(2 * docCount));
-        assertThat(warmStats.totalByteCount, is(2 * byteSize));
-        assertThat(warmStats.primaryShardCount, is(1));
-        assertThat(warmStats.primaryByteCount, is(byteSize));
-        assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size
-        assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size
-    }
-
-    private static DiscoveryNode newNode(int nodeId, DiscoveryNodeRole... roles) {
-        return DiscoveryNodeUtils.builder("node_" + nodeId).roles(Set.of(roles)).build();
-    }
-
-    private static IndexMetadata indexMetadata(String indexName, int numberOfShards, int numberOfReplicas, String... dataTierPrefs) {
-        Settings.Builder settingsBuilder = indexSettings(IndexVersion.current(), numberOfShards, numberOfReplicas).put(
-            SETTING_CREATION_DATE,
-            System.currentTimeMillis()
-        );
-
-        if (dataTierPrefs.length > 1) {
-            StringBuilder tierBuilder = new StringBuilder(dataTierPrefs[0]);
-            for (int idx = 1; idx < dataTierPrefs.length; idx++) {
-                tierBuilder.append(',').append(dataTierPrefs[idx]);
-            }
-            settingsBuilder.put(DataTier.TIER_PREFERENCE, tierBuilder.toString());
-        } else if (dataTierPrefs.length == 1) {
-            settingsBuilder.put(DataTier.TIER_PREFERENCE, dataTierPrefs[0]);
-        }
-
-        return IndexMetadata.builder(indexName).settings(settingsBuilder.build()).timestampRange(IndexLongFieldRange.UNKNOWN).build();
-    }
-
-    private static void routeTestShardToNodes(
-        IndexMetadata index,
-        int shard,
-        IndexRoutingTable.Builder indexRoutingTableBuilder,
-        DiscoveryNode...
nodes
-    ) {
-        ShardId shardId = new ShardId(index.getIndex(), shard);
-        IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
-        boolean primary = true;
-        for (DiscoveryNode node : nodes) {
-            indexShardRoutingBuilder.addShard(
-                TestShardRouting.newShardRouting(shardId, node.getId(), null, primary, ShardRoutingState.STARTED)
-            );
-            primary = false;
-        }
-        indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder);
-    }
-
-    private List<NodeStats> buildNodeStats(ClusterState clusterState, long bytesPerShard, long docsPerShard) {
-        DiscoveryNodes nodes = clusterState.getNodes();
-        RoutingNodes routingNodes = clusterState.getRoutingNodes();
-        List<NodeStats> nodeStatsList = new ArrayList<>();
-        for (DiscoveryNode node : nodes) {
-            RoutingNode routingNode = routingNodes.node(node.getId());
-            if (routingNode == null) {
-                continue;
-            }
-            Map<Index, List<IndexShardStats>> indexStats = new HashMap<>();
-            for (ShardRouting shardRouting : routingNode) {
-                ShardId shardId = shardRouting.shardId();
-                ShardStats shardStat = shardStat(bytesPerShard, docsPerShard, shardRouting);
-                IndexShardStats shardStats = new IndexShardStats(shardId, new ShardStats[] { shardStat });
-                indexStats.computeIfAbsent(shardId.getIndex(), k -> new ArrayList<>()).add(shardStats);
-            }
-            NodeIndicesStats nodeIndexStats = new NodeIndicesStats(new CommonStats(), Collections.emptyMap(), indexStats, true);
-            nodeStatsList.add(mockNodeStats(node, nodeIndexStats));
-        }
-        return nodeStatsList;
-    }
-
-    private static ShardStats shardStat(long byteCount, long docCount, ShardRouting routing) {
-        StoreStats storeStats = new StoreStats(randomNonNegativeLong(), byteCount, 0L);
-        DocsStats docsStats = new DocsStats(docCount, 0L, byteCount);
-
-        CommonStats commonStats = new CommonStats(CommonStatsFlags.ALL);
-        commonStats.getStore().add(storeStats);
-        commonStats.getDocs().add(docsStats);
-
-        Path fakePath = PathUtils.get("test/dir/" + routing.shardId().getIndex().getUUID() + "/" + routing.shardId().id());
-        ShardPath fakeShardPath = new ShardPath(false, fakePath, fakePath, routing.shardId());
-
-        return new ShardStats(routing, fakeShardPath, commonStats, null, null, null, false, 0);
-    }
-
-    private static NodeStats mockNodeStats(DiscoveryNode node, NodeIndicesStats indexStats) {
-        NodeStats stats = mock(NodeStats.class);
-        when(stats.getNode()).thenReturn(node);
-        when(stats.getIndices()).thenReturn(indexStats);
-        return stats;
-    }
-}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTierUsageFixtures.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTierUsageFixtures.java
new file mode 100644
index 0000000000000..63cc6e4d7914e
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTierUsageFixtures.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.core.datatiers; + +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; +import org.elasticsearch.action.admin.indices.stats.IndexShardStats; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.shard.DocsStats; +import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.index.store.StoreStats; +import org.elasticsearch.indices.NodeIndicesStats; +import org.elasticsearch.test.ESTestCase; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; + +class DataTierUsageFixtures extends ESTestCase { + + private static final CommonStats COMMON_STATS = new CommonStats( + CommonStatsFlags.NONE.set(CommonStatsFlags.Flag.Docs, true).set(CommonStatsFlags.Flag.Store, true) + ); + + static DiscoveryNode newNode(int nodeId, DiscoveryNodeRole... roles) { + return DiscoveryNodeUtils.builder("node_" + nodeId).roles(Set.of(roles)).build(); + } + + static void routeTestShardToNodes( + IndexMetadata index, + int shard, + IndexRoutingTable.Builder indexRoutingTableBuilder, + DiscoveryNode... 
nodes
+    ) {
+        ShardId shardId = new ShardId(index.getIndex(), shard);
+        IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
+        boolean primary = true;
+        for (DiscoveryNode node : nodes) {
+            indexShardRoutingBuilder.addShard(
+                TestShardRouting.newShardRouting(shardId, node.getId(), null, primary, ShardRoutingState.STARTED)
+            );
+            primary = false;
+        }
+        indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder);
+    }
+
+    static NodeIndicesStats buildNodeIndicesStats(RoutingNode routingNode, long bytesPerShard, long docsPerShard) {
+        Map<Index, List<IndexShardStats>> indexStats = new HashMap<>();
+        for (ShardRouting shardRouting : routingNode) {
+            ShardId shardId = shardRouting.shardId();
+            ShardStats shardStat = shardStat(bytesPerShard, docsPerShard, shardRouting);
+            IndexShardStats shardStats = new IndexShardStats(shardId, new ShardStats[] { shardStat });
+            indexStats.computeIfAbsent(shardId.getIndex(), k -> new ArrayList<>()).add(shardStats);
+        }
+        return new NodeIndicesStats(COMMON_STATS, Map.of(), indexStats, true);
+    }
+
+    private static ShardStats shardStat(long byteCount, long docCount, ShardRouting routing) {
+        StoreStats storeStats = new StoreStats(randomNonNegativeLong(), byteCount, 0L);
+        DocsStats docsStats = new DocsStats(docCount, 0L, byteCount);
+        Path fakePath = PathUtils.get("test/dir/" + routing.shardId().getIndex().getUUID() + "/" + routing.shardId().id());
+        ShardPath fakeShardPath = new ShardPath(false, fakePath, fakePath, routing.shardId());
+        CommonStats commonStats = new CommonStats(CommonStatsFlags.ALL);
+        commonStats.getStore().add(storeStats);
+        commonStats.getDocs().add(docsStats);
+        return new ShardStats(routing, fakeShardPath, commonStats, null, null, null, false, 0);
+    }
+
+    static IndexMetadata indexMetadata(String indexName, int numberOfShards, int numberOfReplicas, String... dataTierPrefs) {
+        Settings.Builder settingsBuilder = indexSettings(IndexVersion.current(), numberOfShards, numberOfReplicas).put(
+            SETTING_CREATION_DATE,
+            System.currentTimeMillis()
+        );
+
+        if (dataTierPrefs.length > 1) {
+            StringBuilder tierBuilder = new StringBuilder(dataTierPrefs[0]);
+            for (int idx = 1; idx < dataTierPrefs.length; idx++) {
+                tierBuilder.append(',').append(dataTierPrefs[idx]);
+            }
+            settingsBuilder.put(DataTier.TIER_PREFERENCE, tierBuilder.toString());
+        } else if (dataTierPrefs.length == 1) {
+            settingsBuilder.put(DataTier.TIER_PREFERENCE, dataTierPrefs[0]);
+        }
+
+        return IndexMetadata.builder(indexName).settings(settingsBuilder.build()).timestampRange(IndexLongFieldRange.UNKNOWN).build();
+    }
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsageTests.java
similarity index 97%
rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsageTests.java
rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsageTests.java
index e5f37dfb5764c..0951408441b3f 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsageTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersFeatureSetUsageTests.java
@@ -5,7 +5,7 @@
  * 2.0.
*/ -package org.elasticsearch.xpack.core; +package org.elasticsearch.xpack.core.datatiers; import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.io.stream.Writeable; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportActionTests.java new file mode 100644 index 0000000000000..bb8dce7db0e23 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/DataTiersUsageTransportActionTests.java @@ -0,0 +1,535 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.datatiers; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.search.aggregations.metrics.TDigestState; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.IntStream; + +import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.indexMetadata; +import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.newNode; +import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.routeTestShardToNodes; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class DataTiersUsageTransportActionTests extends ESTestCase { + + private long byteSize; + private long docCount; + + @Before + public void setup() { + byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB + docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million + } + + public void testTierIndices() { + DiscoveryNode dataNode = newNode(0, DiscoveryNodeRole.DATA_ROLE); + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + discoBuilder.add(dataNode); + + IndexMetadata hotIndex1 = indexMetadata("hot-1", 1, 0, DataTier.DATA_HOT); + IndexMetadata hotIndex2 = indexMetadata("hot-2", 1, 0, DataTier.DATA_HOT); + IndexMetadata warmIndex1 = indexMetadata("warm-1", 1, 0, DataTier.DATA_WARM); + IndexMetadata coldIndex1 = indexMetadata("cold-1", 1, 0, DataTier.DATA_COLD); + IndexMetadata coldIndex2 = indexMetadata("cold-2", 1, 0, DataTier.DATA_COLD, DataTier.DATA_WARM); // Prefers cold over warm + IndexMetadata nonTiered = indexMetadata("non-tier", 1, 0); // No tier + IndexMetadata hotIndex3 = indexMetadata("hot-3", 1, 0, DataTier.DATA_HOT); + + Metadata.Builder metadataBuilder = Metadata.builder() + .put(hotIndex1, false) + .put(hotIndex2, false) + .put(warmIndex1, false) + .put(coldIndex1, false) + .put(coldIndex2, false) + .put(nonTiered, false) + .put(hotIndex3, false) + 
.generateClusterUuidIfNeeded();
+        RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+        routingTableBuilder.add(getIndexRoutingTable(hotIndex1, dataNode));
+        routingTableBuilder.add(getIndexRoutingTable(hotIndex2, dataNode));
+        routingTableBuilder.add(getIndexRoutingTable(warmIndex1, dataNode));
+        routingTableBuilder.add(getIndexRoutingTable(coldIndex1, dataNode));
+        routingTableBuilder.add(getIndexRoutingTable(coldIndex2, dataNode));
+        routingTableBuilder.add(getIndexRoutingTable(nonTiered, dataNode));
+        // hot-3 is in the metadata but deliberately has no shards in the routing table, so it must not appear in the result
+        ClusterState clusterState = ClusterState.builder(new ClusterName("test"))
+            .nodes(discoBuilder)
+            .metadata(metadataBuilder)
+            .routingTable(routingTableBuilder.build())
+            .build();
+        Map<String, Set<String>> result = DataTiersUsageTransportAction.getIndicesGroupedByTier(
+            clusterState,
+            List.of(new NodeDataTiersUsage(dataNode, Map.of(DataTier.DATA_WARM, createStats(5, 5, 0, 10))))
+        );
+        assertThat(result.keySet(), equalTo(Set.of(DataTier.DATA_HOT, DataTier.DATA_WARM, DataTier.DATA_COLD)));
+        assertThat(result.get(DataTier.DATA_HOT), equalTo(Set.of(hotIndex1.getIndex().getName(), hotIndex2.getIndex().getName())));
+        assertThat(result.get(DataTier.DATA_WARM), equalTo(Set.of(warmIndex1.getIndex().getName())));
+        assertThat(result.get(DataTier.DATA_COLD), equalTo(Set.of(coldIndex1.getIndex().getName(), coldIndex2.getIndex().getName())));
+    }
+
+    public void testCalculateMAD() {
+        assertThat(DataTiersUsageTransportAction.computeMedianAbsoluteDeviation(TDigestState.create(10)), equalTo(0L));
+
+        TDigestState sketch = TDigestState.create(randomDoubleBetween(1, 1000, false));
+        sketch.add(1);
+        sketch.add(1);
+        sketch.add(2);
+        sketch.add(2);
+        sketch.add(4);
+        sketch.add(6);
+        sketch.add(9);
+        // median = 2, absolute deviations from it = {1, 1, 0, 0, 2, 4, 7}, whose median is 1
+        assertThat(DataTiersUsageTransportAction.computeMedianAbsoluteDeviation(sketch), equalTo(1L));
+    }
+
+    public void testCalculateStatsNoTiers() {
+        // Nodes: 0 Tiered Nodes, 1 Data Node, no indices on tiered nodes
+        DiscoveryNode leader = newNode(0, DiscoveryNodeRole.MASTER_ROLE);
+        DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_ROLE);
+
+        List<NodeDataTiersUsage> nodeDataTiersUsages = List.of(
+            new NodeDataTiersUsage(leader, Map.of()),
+            new NodeDataTiersUsage(dataNode1, Map.of())
+        );
+        Map<String, DataTiersFeatureSetUsage.TierSpecificStats> tierSpecificStats = DataTiersUsageTransportAction.aggregateStats(
+            nodeDataTiersUsages,
+            Map.of()
+        );
+
+        // Verify - No results when no tiers present
+        assertThat(tierSpecificStats.size(), is(0));
+    }
+
+    public void testCalculateStatsTieredNodesOnly() {
+        // Nodes: 1 Data, 1 Hot, 1 Warm, 1 Cold, 1 Frozen
+        DiscoveryNode leader = newNode(0, DiscoveryNodeRole.MASTER_ROLE);
+        DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_ROLE);
+        DiscoveryNode hotNode1 = newNode(2, DiscoveryNodeRole.DATA_HOT_NODE_ROLE);
+        DiscoveryNode warmNode1 = newNode(3, DiscoveryNodeRole.DATA_WARM_NODE_ROLE);
+        DiscoveryNode coldNode1 = newNode(4, DiscoveryNodeRole.DATA_COLD_NODE_ROLE);
+        DiscoveryNode frozenNode1 = newNode(5, DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE);
+
+        List<NodeDataTiersUsage> nodeDataTiersUsages = List.of(
+            new NodeDataTiersUsage(leader, Map.of()),
+            new NodeDataTiersUsage(dataNode1, Map.of()),
+            new NodeDataTiersUsage(hotNode1, Map.of()),
+            new NodeDataTiersUsage(warmNode1, Map.of()),
+            new NodeDataTiersUsage(coldNode1, Map.of()),
+            new NodeDataTiersUsage(frozenNode1, Map.of())
+        );
+
+        Map<String, DataTiersFeatureSetUsage.TierSpecificStats> tierSpecificStats = DataTiersUsageTransportAction.aggregateStats(
+            nodeDataTiersUsages,
+            Map.of()
+        );
+
+        // Verify - Results are present for each tier that has nodes, but the index stats are all zero because no indices are tiered
+        assertThat(tierSpecificStats.size(), is(4));
+
+        DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT);
+        assertThat(hotStats, is(notNullValue()));
+        assertThat(hotStats.nodeCount, is(1));
+        assertThat(hotStats.indexCount, is(0));
+        assertThat(hotStats.totalShardCount, is(0));
+        assertThat(hotStats.docCount, is(0L));
+        assertThat(hotStats.totalByteCount, is(0L));
+        assertThat(hotStats.primaryShardCount, is(0));
+        assertThat(hotStats.primaryByteCount, is(0L));
+        assertThat(hotStats.primaryByteCountMedian, is(0L)); // No primary shards
+        assertThat(hotStats.primaryShardBytesMAD, is(0L)); // No primary shards
+
+        DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM);
+        assertThat(warmStats, is(notNullValue()));
+        assertThat(warmStats.nodeCount, is(1));
+        assertThat(warmStats.indexCount, is(0));
+        assertThat(warmStats.totalShardCount, is(0));
+        assertThat(warmStats.docCount, is(0L));
+        assertThat(warmStats.totalByteCount, is(0L));
+        assertThat(warmStats.primaryShardCount, is(0));
+        assertThat(warmStats.primaryByteCount, is(0L));
+        assertThat(warmStats.primaryByteCountMedian, is(0L)); // No primary shards
+        assertThat(warmStats.primaryShardBytesMAD, is(0L)); // No primary shards
+
+        DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD);
+        assertThat(coldStats, is(notNullValue()));
+        assertThat(coldStats.nodeCount, is(1));
+        assertThat(coldStats.indexCount, is(0));
+        assertThat(coldStats.totalShardCount, is(0));
+        assertThat(coldStats.docCount, is(0L));
+        assertThat(coldStats.totalByteCount, is(0L));
+        assertThat(coldStats.primaryShardCount, is(0));
+        assertThat(coldStats.primaryByteCount, is(0L));
+        assertThat(coldStats.primaryByteCountMedian, is(0L)); // No primary shards
+        assertThat(coldStats.primaryShardBytesMAD, is(0L)); // No primary shards
+
+        DataTiersFeatureSetUsage.TierSpecificStats frozenStats = tierSpecificStats.get(DataTier.DATA_FROZEN);
+        assertThat(frozenStats, is(notNullValue()));
+        assertThat(frozenStats.nodeCount, is(1));
+        assertThat(frozenStats.indexCount, is(0));
+        assertThat(frozenStats.totalShardCount, is(0));
+        assertThat(frozenStats.docCount, is(0L));
+        assertThat(frozenStats.totalByteCount, is(0L));
+        assertThat(frozenStats.primaryShardCount, is(0));
+        assertThat(frozenStats.primaryByteCount, is(0L));
+        assertThat(frozenStats.primaryByteCountMedian, is(0L)); // No primary shards
+        assertThat(frozenStats.primaryShardBytesMAD, is(0L)); // No primary shards
+    }
+
+    public void testCalculateStatsTieredIndicesOnly() {
+        // Nodes: 3 Data, 0 Tiered - Only hosting indices on generic data nodes
+        int nodeId = 0;
+        DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE);
+        DiscoveryNode dataNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_ROLE);
+        DiscoveryNode dataNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_ROLE);
+        DiscoveryNode dataNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_ROLE);
+
+        // Indices:
+        // 1 Hot index: 3 primaries and 3 replicas, two shards on each node
+        // 2 Warm indices, each with 1 primary and 1 replica
+        // 3 Cold indices, each with 1 primary on a different node
+        String hotIndex = "hot_index_1";
+        String warmIndex1 = "warm_index_1";
+        String warmIndex2 = "warm_index_2";
+        String coldIndex1 = "cold_index_1";
+        String coldIndex2 = "cold_index_2";
+        String coldIndex3 = "cold_index_3";
+
+        List<NodeDataTiersUsage> nodeDataTiersUsages = List.of(
+            new NodeDataTiersUsage(leader, Map.of()),
+            new NodeDataTiersUsage(
+                dataNode1,
+                Map.of(
+                    DataTier.DATA_HOT,
+                    createStats(1, 2, docCount, byteSize),
+                    DataTier.DATA_WARM,
+                    createStats(0, 2, docCount, byteSize),
+                    DataTier.DATA_COLD,
+                    createStats(1, 1, docCount, byteSize)
+                )
+            ),
+            new NodeDataTiersUsage(
+                dataNode2,
+                Map.of(
+                    DataTier.DATA_HOT,
+                    createStats(1, 2, docCount, byteSize),
+                    DataTier.DATA_WARM,
+                    createStats(1, 1, docCount, byteSize),
+                    DataTier.DATA_COLD,
+                    createStats(1, 1, docCount, byteSize)
+                )
+            ),
+            new NodeDataTiersUsage(
+                dataNode3,
+                Map.of(
+                    DataTier.DATA_HOT,
+                    createStats(1, 2, docCount, byteSize),
+                    DataTier.DATA_WARM,
+                    createStats(1, 1, docCount, byteSize),
+                    DataTier.DATA_COLD,
+                    createStats(1, 1, docCount, byteSize)
+                )
+            )
+        );
+        // Calculate usage
+        Map<String, DataTiersFeatureSetUsage.TierSpecificStats> tierSpecificStats = DataTiersUsageTransportAction.aggregateStats(
+            nodeDataTiersUsages,
+            Map.of(
+                DataTier.DATA_HOT,
+                Set.of(hotIndex),
+                DataTier.DATA_WARM,
+                Set.of(warmIndex1, warmIndex2),
+                DataTier.DATA_COLD,
+                Set.of(coldIndex1, coldIndex2, coldIndex3)
+            )
+        );
+
+        // Verify - Index stats exist for the tiers, but no tiered nodes are found
+        assertThat(tierSpecificStats.size(), is(3));
+
+        DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT);
+        assertThat(hotStats, is(notNullValue()));
+        assertThat(hotStats.nodeCount, is(0));
+        assertThat(hotStats.indexCount, is(1));
+        assertThat(hotStats.totalShardCount, is(6));
+        assertThat(hotStats.docCount, is(6 * docCount));
+        assertThat(hotStats.totalByteCount, is(6 * byteSize));
+        assertThat(hotStats.primaryShardCount, is(3));
+        assertThat(hotStats.primaryByteCount, is(3 * byteSize));
+        assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size
+        assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size
+
+        DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM);
+        assertThat(warmStats, is(notNullValue()));
+        assertThat(warmStats.nodeCount, is(0));
+        assertThat(warmStats.indexCount, is(2));
+        assertThat(warmStats.totalShardCount, is(4));
+        assertThat(warmStats.docCount, is(4 * docCount));
+        assertThat(warmStats.totalByteCount, is(4 * byteSize));
+        assertThat(warmStats.primaryShardCount, is(2));
+        assertThat(warmStats.primaryByteCount, is(2 * byteSize));
+        assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size
+        assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size
+
+        DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD);
+        assertThat(coldStats, is(notNullValue()));
+        assertThat(coldStats.nodeCount, is(0));
+        assertThat(coldStats.indexCount, is(3));
+        assertThat(coldStats.totalShardCount, is(3));
+        assertThat(coldStats.docCount, is(3 * docCount));
+        assertThat(coldStats.totalByteCount, is(3 * byteSize));
+        assertThat(coldStats.primaryShardCount, is(3));
+        assertThat(coldStats.primaryByteCount, is(3 * byteSize));
+        assertThat(coldStats.primaryByteCountMedian, is(byteSize)); // All same size
+        assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size
+    }
+
+    public void testCalculateStatsReasonableCase() {
+        // Nodes: 3 Hot, 5 Warm, 1 Cold
+        int nodeId = 0;
+        DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE);
+        DiscoveryNode hotNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE);
+        DiscoveryNode hotNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE);
+        DiscoveryNode hotNode3 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE);
+        DiscoveryNode warmNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE);
+        DiscoveryNode warmNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE);
+        DiscoveryNode warmNode3 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE);
+        DiscoveryNode warmNode4 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE);
+        DiscoveryNode warmNode5 = newNode(nodeId++, DiscoveryNodeRole.DATA_WARM_NODE_ROLE);
+        DiscoveryNode coldNode1 = newNode(nodeId, DiscoveryNodeRole.DATA_COLD_NODE_ROLE);
+
+        // Indices:
+        // 1 Hot index: 3 primaries and 3 replicas, two shards on each hot node
+        // 2 Warm indices: each has 1 primary and 1 replica, spread across 4 of the 5 warm nodes
+        // 3 Cold indices: 1 primary each on the cold node
+        String hotIndex1 = "hot_index_1";
+        String warmIndex1 = "warm_index_1";
+        String warmIndex2 = "warm_index_2";
+        String coldIndex1 = "cold_index_1";
+        String coldIndex2 = "cold_index_2";
+        String coldIndex3 = "cold_index_3";
+
+        List<NodeDataTiersUsage> nodeDataTiersUsages = List.of(
+            new NodeDataTiersUsage(leader, Map.of()),
+            new NodeDataTiersUsage(hotNode1, Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize))),
+            new NodeDataTiersUsage(hotNode2, Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize))),
+            new NodeDataTiersUsage(hotNode3, Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize))),
+            new NodeDataTiersUsage(warmNode1, Map.of(DataTier.DATA_WARM, createStats(1, 1, docCount, byteSize))),
+            new NodeDataTiersUsage(warmNode2, Map.of(DataTier.DATA_WARM, createStats(0, 1, docCount, byteSize))),
+            new NodeDataTiersUsage(warmNode3, Map.of(DataTier.DATA_WARM, createStats(1, 1, docCount, byteSize))),
+            new NodeDataTiersUsage(warmNode4, Map.of(DataTier.DATA_WARM, createStats(0, 1, docCount, byteSize))),
+            new NodeDataTiersUsage(warmNode5, Map.of()),
+            new NodeDataTiersUsage(coldNode1, Map.of(DataTier.DATA_COLD, createStats(3, 3, docCount, byteSize)))
+        );
+        // Calculate usage
+        Map<String, DataTiersFeatureSetUsage.TierSpecificStats> tierSpecificStats = DataTiersUsageTransportAction.aggregateStats(
+            nodeDataTiersUsages,
+            Map.of(
+                DataTier.DATA_HOT,
+                Set.of(hotIndex1),
+                DataTier.DATA_WARM,
+                Set.of(warmIndex1, warmIndex2),
+                DataTier.DATA_COLD,
+                Set.of(coldIndex1, coldIndex2, coldIndex3)
+            )
+        );
+
+        // Verify - Node and Index stats are both collected
+        assertThat(tierSpecificStats.size(), is(3));
+
+        DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT);
+        assertThat(hotStats, is(notNullValue()));
+        assertThat(hotStats.nodeCount, is(3));
+        assertThat(hotStats.indexCount, is(1));
+        assertThat(hotStats.totalShardCount, is(6));
+        assertThat(hotStats.docCount, is(6 * docCount));
+        assertThat(hotStats.totalByteCount, is(6 * byteSize));
+        assertThat(hotStats.primaryShardCount, is(3));
+        assertThat(hotStats.primaryByteCount, is(3 * byteSize));
+        assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size
+        assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size
+
+        DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM);
+        assertThat(warmStats, is(notNullValue()));
+        assertThat(warmStats.nodeCount, is(5));
+        assertThat(warmStats.indexCount, is(2));
+        assertThat(warmStats.totalShardCount, is(4));
+        assertThat(warmStats.docCount, is(4 * docCount));
+        assertThat(warmStats.totalByteCount, is(4 * byteSize));
+        assertThat(warmStats.primaryShardCount, is(2));
+        assertThat(warmStats.primaryByteCount, is(2 * byteSize));
+        assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size
+        assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size
+
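+        // Cold tier: the single cold node holds all three one-primary indices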
+        DataTiersFeatureSetUsage.TierSpecificStats coldStats = tierSpecificStats.get(DataTier.DATA_COLD);
+        assertThat(coldStats, is(notNullValue()));
+        assertThat(coldStats.nodeCount, is(1));
+        assertThat(coldStats.indexCount, is(3));
+        assertThat(coldStats.totalShardCount, is(3));
+        assertThat(coldStats.docCount, is(3 * docCount));
+        assertThat(coldStats.totalByteCount, is(3 * byteSize));
+        assertThat(coldStats.primaryShardCount, is(3));
+        assertThat(coldStats.primaryByteCount, is(3 * byteSize));
+        assertThat(coldStats.primaryByteCountMedian, is(byteSize)); // All same size
+        assertThat(coldStats.primaryShardBytesMAD, is(0L)); // All same size
+    }
+
+    public void testCalculateStatsMixedTiers() {
+        // Nodes: 3 Hot+Warm - Nodes that are marked as part of multiple tiers
+        int nodeId = 0;
+        DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE);
+
+        DiscoveryNode mixedNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE);
+        DiscoveryNode mixedNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE);
+        DiscoveryNode mixedNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_HOT_NODE_ROLE, DiscoveryNodeRole.DATA_WARM_NODE_ROLE);
+
+        String hotIndex1 = "hot_index_1";
+        String warmIndex1 = "warm_index_1";
+        String warmIndex2 = "warm_index_2";
+
+        // Indices: 1 Hot index, 2 Warm indices
+        List<NodeDataTiersUsage> nodeDataTiersUsages = List.of(
+            new NodeDataTiersUsage(leader, Map.of()),
+            new NodeDataTiersUsage(
+                mixedNode1,
+                Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize), DataTier.DATA_WARM, createStats(1, 2, docCount, byteSize))
+            ),
+            new NodeDataTiersUsage(
+                mixedNode2,
+                Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize), DataTier.DATA_WARM, createStats(0, 1, docCount, byteSize))
+            ),
+            new NodeDataTiersUsage(
+                mixedNode3,
+                Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize), DataTier.DATA_WARM, createStats(1, 1, docCount, byteSize))
+            )
+        );
+
+        // Calculate usage
+        Map<String, DataTiersFeatureSetUsage.TierSpecificStats> tierSpecificStats = DataTiersUsageTransportAction.aggregateStats(
+            nodeDataTiersUsages,
+            Map.of(DataTier.DATA_HOT, Set.of(hotIndex1), DataTier.DATA_WARM, Set.of(warmIndex1, warmIndex2))
+        );
+
+        // Verify - Index stats are separated by their preferred tier, instead of counted
+        // toward multiple tiers based on their current routing. Nodes are counted for each tier they are in.
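+        // (so the three mixed nodes yield nodeCount = 3 for both the hot and warm tiers below)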
+        assertThat(tierSpecificStats.size(), is(2));
+
+        DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT);
+        assertThat(hotStats, is(notNullValue()));
+        assertThat(hotStats.nodeCount, is(3));
+        assertThat(hotStats.indexCount, is(1));
+        assertThat(hotStats.totalShardCount, is(6));
+        assertThat(hotStats.docCount, is(6 * docCount));
+        assertThat(hotStats.totalByteCount, is(6 * byteSize));
+        assertThat(hotStats.primaryShardCount, is(3));
+        assertThat(hotStats.primaryByteCount, is(3 * byteSize));
+        assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size
+        assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size
+
+        DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM);
+        assertThat(warmStats, is(notNullValue()));
+        assertThat(warmStats.nodeCount, is(3));
+        assertThat(warmStats.indexCount, is(2));
+        assertThat(warmStats.totalShardCount, is(4));
+        assertThat(warmStats.docCount, is(4 * docCount));
+        assertThat(warmStats.totalByteCount, is(4 * byteSize));
+        assertThat(warmStats.primaryShardCount, is(2));
+        assertThat(warmStats.primaryByteCount, is(2 * byteSize));
+        assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size
+        assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size
+    }
+
+    public void testCalculateStatsStuckInWrongTier() {
+        // Nodes: 3 Hot, 0 Warm - Emulating indices stuck on non-preferred tiers
+        int nodeId = 0;
+        DiscoveryNode leader = newNode(nodeId++, DiscoveryNodeRole.MASTER_ROLE);
+        DiscoveryNode hotNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE);
+        DiscoveryNode hotNode2 = newNode(nodeId++, DiscoveryNodeRole.DATA_HOT_NODE_ROLE);
+        DiscoveryNode hotNode3 = newNode(nodeId, DiscoveryNodeRole.DATA_HOT_NODE_ROLE);
+
+        String hotIndex1 = "hot_index_1";
+        String warmIndex1 = "warm_index_1";
+
+        List<NodeDataTiersUsage> nodeDataTiersUsages = List.of(
+            new NodeDataTiersUsage(leader, Map.of()),
+            new NodeDataTiersUsage(
+                hotNode1,
+                Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize), DataTier.DATA_WARM, createStats(1, 1, docCount, byteSize))
+            ),
+            new NodeDataTiersUsage(
+                hotNode2,
+                Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize), DataTier.DATA_WARM, createStats(0, 1, docCount, byteSize))
+            ),
+            new NodeDataTiersUsage(hotNode3, Map.of(DataTier.DATA_HOT, createStats(1, 2, docCount, byteSize)))
+        );
+
+        // Calculate usage
+        Map<String, DataTiersFeatureSetUsage.TierSpecificStats> tierSpecificStats = DataTiersUsageTransportAction.aggregateStats(
+            nodeDataTiersUsages,
+            Map.of(DataTier.DATA_HOT, Set.of(hotIndex1), DataTier.DATA_WARM, Set.of(warmIndex1))
+        );
+
+        // Verify - Warm indices are still calculated separately from Hot ones, despite Warm nodes missing
+        assertThat(tierSpecificStats.size(), is(2));
+
+        DataTiersFeatureSetUsage.TierSpecificStats hotStats = tierSpecificStats.get(DataTier.DATA_HOT);
+        assertThat(hotStats, is(notNullValue()));
+        assertThat(hotStats.nodeCount, is(3));
+        assertThat(hotStats.indexCount, is(1));
+        assertThat(hotStats.totalShardCount, is(6));
+        assertThat(hotStats.docCount, is(6 * docCount));
+        assertThat(hotStats.totalByteCount, is(6 * byteSize));
+        assertThat(hotStats.primaryShardCount, is(3));
+        assertThat(hotStats.primaryByteCount, is(3 * byteSize));
+        assertThat(hotStats.primaryByteCountMedian, is(byteSize)); // All same size
+        assertThat(hotStats.primaryShardBytesMAD, is(0L)); // All same size
+
+        DataTiersFeatureSetUsage.TierSpecificStats warmStats = tierSpecificStats.get(DataTier.DATA_WARM);
+        assertThat(warmStats,
is(notNullValue())); + assertThat(warmStats.nodeCount, is(0)); + assertThat(warmStats.indexCount, is(1)); + assertThat(warmStats.totalShardCount, is(2)); + assertThat(warmStats.docCount, is(2 * docCount)); + assertThat(warmStats.totalByteCount, is(2 * byteSize)); + assertThat(warmStats.primaryShardCount, is(1)); + assertThat(warmStats.primaryByteCount, is(byteSize)); + assertThat(warmStats.primaryByteCountMedian, is(byteSize)); // All same size + assertThat(warmStats.primaryShardBytesMAD, is(0L)); // All same size + } + + private NodeDataTiersUsage.UsageStats createStats(int primaryShardCount, int totalNumberOfShards, long docCount, long byteSize) { + return new NodeDataTiersUsage.UsageStats( + primaryShardCount > 0 ? IntStream.range(0, primaryShardCount).mapToObj(i -> byteSize).toList() : List.of(), + totalNumberOfShards, + totalNumberOfShards * docCount, + totalNumberOfShards * byteSize + ); + } + + private IndexRoutingTable.Builder getIndexRoutingTable(IndexMetadata indexMetadata, DiscoveryNode node) { + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetadata.getIndex()); + routeTestShardToNodes(indexMetadata, 0, indexRoutingTableBuilder, node); + return indexRoutingTableBuilder; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportActionTests.java new file mode 100644 index 0000000000000..fb4291530d037 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/datatiers/NodesDataTiersUsageTransportActionTests.java @@ -0,0 +1,214 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.core.datatiers;
+
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodeRole;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.allocation.DataTier;
+import org.elasticsearch.indices.NodeIndicesStats;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Before;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.buildNodeIndicesStats;
+import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.indexMetadata;
+import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.newNode;
+import static org.elasticsearch.xpack.core.datatiers.DataTierUsageFixtures.routeTestShardToNodes;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class NodesDataTiersUsageTransportActionTests extends ESTestCase {
+
+    private long byteSize;
+    private long docCount;
+
+    @Before
+    public void setup() {
+        byteSize = randomLongBetween(1024L, 1024L * 1024L * 1024L * 30L); // 1 KB to 30 GB
+        docCount = randomLongBetween(100L, 100000000L); // one hundred to one hundred million
+    }
+
+    public void testCalculateStatsNoTiers() {
+        // Nodes: 0 Tiered Nodes, 1 Data Node
+        DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
+        DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_ROLE);
+        discoBuilder.add(dataNode1);
+        discoBuilder.localNodeId(dataNode1.getId());
+
+        // Indices: 1 Regular index
+        Metadata.Builder metadataBuilder = Metadata.builder();
+        RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+
+        IndexMetadata index1 = indexMetadata("index_1", 3, 1);
+        metadataBuilder.put(index1, false).generateClusterUuidIfNeeded();
+
+        IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index1.getIndex());
+        routeTestShardToNodes(index1, 0, indexRoutingTableBuilder, dataNode1);
+        routeTestShardToNodes(index1, 1, indexRoutingTableBuilder, dataNode1);
+        routeTestShardToNodes(index1, 2, indexRoutingTableBuilder, dataNode1);
+        routingTableBuilder.add(indexRoutingTableBuilder.build());
+
+        // Cluster State and create stats responses
+        ClusterState clusterState = ClusterState.builder(new ClusterName("test"))
+            .metadata(metadataBuilder)
+            .nodes(discoBuilder)
+            .routingTable(routingTableBuilder.build())
+            .build();
+        NodeIndicesStats nodeIndicesStats = buildNodeIndicesStats(
+            clusterState.getRoutingNodes().node(dataNode1.getId()),
+            byteSize,
+            docCount
+        );
+
+        // Calculate usage
+        Map<String, NodeDataTiersUsage.UsageStats> usageStats = NodesDataTiersUsageTransportAction.aggregateStats(
+            clusterState.getRoutingNodes().node(dataNode1.getId()),
+            clusterState.metadata(),
+            nodeIndicesStats
+        );
+
+        // Verify - No results when no tiers present
+        assertThat(usageStats.size(), is(0));
+    }
+
+    public void testCalculateStatsNoIndices() {
+        // Nodes: 1 Hot
+        DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
+        DiscoveryNode dataNode1 = newNode(1, DiscoveryNodeRole.DATA_HOT_NODE_ROLE);
+        discoBuilder.add(dataNode1);
+        discoBuilder.localNodeId(dataNode1.getId());
+
+        // Indices: none
+        Metadata.Builder metadataBuilder = Metadata.builder();
+        RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+
+        // Cluster State and create stats responses
+        ClusterState clusterState = ClusterState.builder(new ClusterName("test"))
+            .metadata(metadataBuilder)
+            .nodes(discoBuilder)
+            .routingTable(routingTableBuilder.build())
+            .build();
+        NodeIndicesStats nodeIndicesStats = buildNodeIndicesStats(
+            clusterState.getRoutingNodes().node(dataNode1.getId()),
+            byteSize,
+            docCount
+        );
+
+        // Calculate usage
+        Map<String, NodeDataTiersUsage.UsageStats> usageStats = NodesDataTiersUsageTransportAction.aggregateStats(
+            clusterState.getRoutingNodes().node(dataNode1.getId()),
+            clusterState.metadata(),
+            nodeIndicesStats
+        );
+
+        // Verify - No results when there are no indices
+        assertThat(usageStats.size(), is(0));
+    }
+
+    public void testCalculateStatsTieredIndicesOnly() {
+        // Nodes: 2 Data, 0 Tiered - Only hosting indices on generic data nodes
+        int nodeId = 0;
+        DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
+
+        DiscoveryNode dataNode1 = newNode(nodeId++, DiscoveryNodeRole.DATA_ROLE);
+        discoBuilder.add(dataNode1);
+        DiscoveryNode dataNode2 = newNode(nodeId, DiscoveryNodeRole.DATA_ROLE);
+        discoBuilder.add(dataNode2);
+
+        discoBuilder.localNodeId(dataNode1.getId());
+
+        // Indices: 1 Hot index, 2 Warm indices, 1 Cold index
+        Metadata.Builder metadataBuilder = Metadata.builder();
+        RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
+
+        IndexMetadata hotIndex1 = indexMetadata("hot_index_1", 3, 1, DataTier.DATA_HOT);
+        metadataBuilder.put(hotIndex1, false).generateClusterUuidIfNeeded();
+        {
+            IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(hotIndex1.getIndex());
+            routeTestShardToNodes(hotIndex1, 0, indexRoutingTableBuilder, dataNode1, dataNode2);
+            routeTestShardToNodes(hotIndex1, 1, indexRoutingTableBuilder, dataNode2, dataNode1);
+            routingTableBuilder.add(indexRoutingTableBuilder.build());
+        }
+
+        IndexMetadata warmIndex1 = indexMetadata("warm_index_1", 1, 1, DataTier.DATA_WARM);
+        metadataBuilder.put(warmIndex1, false).generateClusterUuidIfNeeded();
+        {
+            IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex1.getIndex());
+            routeTestShardToNodes(warmIndex1, 0, indexRoutingTableBuilder, dataNode1, dataNode2);
+            routingTableBuilder.add(indexRoutingTableBuilder.build());
+        }
+        IndexMetadata warmIndex2 = indexMetadata("warm_index_2", 1, 1, DataTier.DATA_WARM);
+        metadataBuilder.put(warmIndex2, false).generateClusterUuidIfNeeded();
+        {
+            IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(warmIndex2.getIndex());
+            routeTestShardToNodes(warmIndex2, 0, indexRoutingTableBuilder, dataNode2, dataNode1);
+            routingTableBuilder.add(indexRoutingTableBuilder.build());
+        }
+
+        IndexMetadata coldIndex1 = indexMetadata("cold_index_1", 1, 0, DataTier.DATA_COLD);
+        metadataBuilder.put(coldIndex1, false).generateClusterUuidIfNeeded();
+        {
+            IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(coldIndex1.getIndex());
+            routeTestShardToNodes(coldIndex1, 0, indexRoutingTableBuilder, dataNode1);
+            routingTableBuilder.add(indexRoutingTableBuilder.build());
+        }
+
+        // Cluster State and create stats responses
+        ClusterState clusterState = ClusterState.builder(new ClusterName("test"))
+            .nodes(discoBuilder)
+            .metadata(metadataBuilder)
+            .routingTable(routingTableBuilder.build())
+            .build();
+        NodeIndicesStats nodeIndicesStats = buildNodeIndicesStats(
+            clusterState.getRoutingNodes().node(dataNode1.getId()),
+            byteSize,
+            docCount
+        );
+
+        // Calculate usage
+        Map<String, NodeDataTiersUsage.UsageStats> usageStats = NodesDataTiersUsageTransportAction.aggregateStats(
+            clusterState.getRoutingNodes().node(dataNode1.getId()),
+            clusterState.metadata(),
+            nodeIndicesStats
+        );
+
+        // Verify - Usage is grouped by the indices' preferred tiers, even though the local node has no tier role
+        assertThat(usageStats.size(), is(3));
+
+        NodeDataTiersUsage.UsageStats hotStats = usageStats.get(DataTier.DATA_HOT);
+        assertThat(hotStats, is(notNullValue()));
+        assertThat(hotStats.getPrimaryShardSizes(), equalTo(List.of(byteSize)));
+        assertThat(hotStats.getTotalShardCount(), is(2));
+        assertThat(hotStats.getDocCount(), is(hotStats.getTotalShardCount() * docCount));
+        assertThat(hotStats.getTotalSize(), is(hotStats.getTotalShardCount() * byteSize));
+
+        NodeDataTiersUsage.UsageStats warmStats = usageStats.get(DataTier.DATA_WARM);
+        assertThat(warmStats, is(notNullValue()));
+        assertThat(warmStats.getPrimaryShardSizes(), equalTo(List.of(byteSize)));
+        assertThat(warmStats.getTotalShardCount(), is(2));
+        assertThat(warmStats.getDocCount(), is(warmStats.getTotalShardCount() * docCount));
+        assertThat(warmStats.getTotalSize(), is(warmStats.getTotalShardCount() * byteSize));
+
+        NodeDataTiersUsage.UsageStats coldStats = usageStats.get(DataTier.DATA_COLD);
+        assertThat(coldStats, is(notNullValue()));
+        assertThat(coldStats.getPrimaryShardSizes(), equalTo(List.of(byteSize)));
+        assertThat(coldStats.getTotalShardCount(), is(1));
+        assertThat(coldStats.getDocCount(), is(coldStats.getTotalShardCount() * docCount));
+        assertThat(coldStats.getTotalSize(), is(coldStats.getTotalShardCount() * byteSize));
+    }
+}
diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
index 9f490792d800f..fc5f5ba616ab8 100644
--- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
+++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
@@ -302,6 +302,7 @@ public class Constants {
         "cluster:monitor/update/health/info",
         "cluster:monitor/ingest/geoip/stats",
         "cluster:monitor/main",
+        "cluster:monitor/nodes/data_tier_usage",
         "cluster:monitor/nodes/hot_threads",
         "cluster:monitor/nodes/info",
         "cluster:monitor/nodes/stats",